diff --git a/.github/workflows/custom_build.yml b/.github/workflows/custom_build.yml index ff384b59c..aca1fac06 100644 --- a/.github/workflows/custom_build.yml +++ b/.github/workflows/custom_build.yml @@ -56,24 +56,6 @@ jobs: which gcov-tool gcov-tool --version - - name: Get newest lcov - run: | - echo "Removing previous lcov version..." - sudo apt-get remove lcov || true - echo "Installing newest lcov version..." - rm -rf newer_lcov || true - mkdir newer_lcov - cd newer_lcov - git clone https://github.com/linux-test-project/lcov --recursive --recurse-submodules - cd lcov - sudo make install - cd .. - cd .. - echo "Checking installed lcov version..." - which lcov - lcov --version - - - name: Extract repo name run: echo ::set-env name=REPOSITORY_NAME::$(echo "$GITHUB_REPOSITORY" | awk -F / '{print $2}') shell: bash @@ -113,18 +95,18 @@ jobs: export CC=gcc-9 export CXX=g++-9 export TARGET=all - export CMAKE_BUILD_TYPE=Release + export CMAKE_BUILD_TYPE=Debug cd deps ./clean.sh rm -f ./libwebsockets-from-git.tar.gz - ./build.sh PARALLEL_COUNT=$(nproc) + ./build.sh PARALLEL_COUNT=$(nproc) DEBUG=1 cd .. - name: Configure all run: | export CC=gcc-9 export CXX=g++-9 export TARGET=all - export CMAKE_BUILD_TYPE=Release + export CMAKE_BUILD_TYPE=Debug mkdir -p build cd build cmake -DCMAKE_BUILD_TYPE=$CMAKE_BUILD_TYPE ${{ github.event.inputs.cmake_options }} .. @@ -134,11 +116,11 @@ jobs: export CC=gcc-9 export CXX=g++-9 export TARGET=all - export CMAKE_BUILD_TYPE=Release + export CMAKE_BUILD_TYPE=Debug cd build make skaled -j$(nproc) - echo "Ensure release mode skaled does not have any debug markers" - strip skaled/skaled + #echo "Ensure release mode skaled does not have any debug markers" + #strip skaled/skaled cd .. 
- name: Build and publish container @@ -149,8 +131,8 @@ jobs: echo "Version $VERSION" export RELEASE=true bash ./scripts/build_and_publish.sh - - - name: Upload skaled binary as artifact + + - name: Upload skaled binary as artifact uses: actions/upload-artifact@v2 if: ${{ always() }} with: diff --git a/.github/workflows/functional-tests.yml b/.github/workflows/functional-tests.yml new file mode 100644 index 000000000..489c82421 --- /dev/null +++ b/.github/workflows/functional-tests.yml @@ -0,0 +1,96 @@ + name: Run functional tests + on: + workflow_call: + inputs: + version: + required: true + type: string + workflow_dispatch: + inputs: + version: + required: true + type: string + + jobs: + functional-tests: + name: Functional tests + runs-on: ubuntu-20.04 + env: + ACTIONS_ALLOW_UNSECURE_COMMANDS: true + SKALED_RELEASE: ${{ inputs.version }} + NO_ULIMIT_CHECK: 1 + steps: + - uses: actions/checkout@v3 + with: + token: ${{ secrets.PERSONAL_ACCESS_TOKEN }} + repository: skalenetwork/skale-ci-integration_tests + submodules: recursive + - name: Set up Node + uses: actions/setup-node@v3.4.0 + with: + node-version: 16 + - name: Install packages + run: | + sudo apt-get update + sudo apt-get install python3-pip python3-venv jq btrfs-progs nodejs npm + sudo npm install -g truffle + sudo npm install -g yarn + sudo chown -R runner:runner ~/.config # HACK + + - name: Prepare SGX keys/certs + run: | + echo "${{ secrets.sgx_key }}" > sgx_certs/sgx.key + chmod 600 sgx_certs/sgx.key + sudo mkdir /skale_node_data + sudo cp -r sgx_certs /skale_node_data + sudo chown -R runner:runner /skale_node_data/sgx_certs/* + wc /skale_node_data/sgx_certs/sgx.key + - name: Update Environment + run: | + ./update_environment.sh skaled+internals + ./update_environment.sh skaled+load_python + ./update_environment.sh skaled+load_js + ./update_environment.sh skaled+contractsRunningTest + ./update_environment.sh skaled+filestorage + ./update_environment.sh skaled+api + - name: skaled+internals+pytest + 
run: SKALED_PROVIDER=skaled_providers/binary_from_container ./run_tests.sh skaled+internals+pytest + + - name: skaled+filestorage+all + run: SKALED_PROVIDER=skaled_providers/endpoint_by_container ./run_tests.sh skaled+filestorage+all + + - name: skaled+contractsRunningTest+all + run: SKALED_PROVIDER=skaled_providers/endpoint_by_container ./run_tests.sh skaled+contractsRunningTest+all + + - name: skaled+load_python+all + run: SKALED_PROVIDER=skaled_providers/binary_from_container ./run_tests.sh skaled+load_python+all + + # - name: skaled+load_js+run_angry_cats + # run: SKALED_PROVIDER=skaled_providers/endpoint_by_container ./run_tests.sh skaled+load_js+run_angry_cats + + - name: skaled+internals+test_snapshot_api + run: SKALED_PROVIDER=skaled_providers/binary_from_container ./run_tests.sh skaled+internals+test_snapshot_api + + - name: skaled+internals+test_node_rotation + run: SKALED_PROVIDER=skaled_providers/binary_from_container ./run_tests.sh skaled+internals+test_node_rotation + + - name: Fix access rights + run: | + sudo chown -R runner:runner /tmp/tmp* || true + sudo find ./integration_tests/skaled/internals/third_party/skale-node-tests/btrfs -type d -exec btrfs property set {} ro false \; || true + sudo chown -R runner:runner . 
|| true + for C in $(docker ps -aq); do docker logs $C>$C.log; done || true + if: ${{ always() }} + + - uses: actions/upload-artifact@v2 + if: ${{ always() }} + continue-on-error: true + with: + name: debug + path: | + skaled_providers + !skaled_providers/**/skaled + /tmp/tmp* + *.log + ./integration_tests/skaled/internals/third_party/skale-node-tests/btrfs + !**/.env diff --git a/.github/workflows/publish.yml b/.github/workflows/publish.yml index 6f4062e96..63ae10fc6 100644 --- a/.github/workflows/publish.yml +++ b/.github/workflows/publish.yml @@ -90,7 +90,7 @@ jobs: export CC=gcc-9 export CXX=g++-9 export TARGET=all - export CMAKE_BUILD_TYPE=Release + export CMAKE_BUILD_TYPE=RelWithDebInfo cd deps ./clean.sh rm -f ./libwebsockets-from-git.tar.gz @@ -101,8 +101,7 @@ jobs: export CC=gcc-9 export CXX=g++-9 export TARGET=all - export CMAKE_BUILD_TYPE=Release - export CODE_COVERAGE=ON + export CMAKE_BUILD_TYPE=RelWithDebInfo mkdir -p build cd build # -DCMAKE_C_FLAGS=-O3 -DCMAKE_CXX_FLAGS=-O3 @@ -113,11 +112,11 @@ jobs: export CC=gcc-9 export CXX=g++-9 export TARGET=all - export CMAKE_BUILD_TYPE=Release - export CODE_COVERAGE=ON + export CMAKE_BUILD_TYPE=RelWithDebInfo cd build make skaled -j$(nproc) - echo "Ensure release mode skaled does not have any debug markers" + #echo "Ensure release mode skaled does not have any debug markers" + cp skaled/skaled skaled/skaled-debug strip skaled/skaled cd .. 
- name: Configure historic state build @@ -125,8 +124,7 @@ jobs: export CC=gcc-9 export CXX=g++-9 export TARGET=all - export CMAKE_BUILD_TYPE=Release - export CODE_COVERAGE=ON + export CMAKE_BUILD_TYPE=RelWithDebInfo mkdir -p build-historic cd build-historic # -DCMAKE_C_FLAGS=-O3 -DCMAKE_CXX_FLAGS=-O3 @@ -137,11 +135,11 @@ jobs: export CC=gcc-9 export CXX=g++-9 export TARGET=all - export CMAKE_BUILD_TYPE=Release - export CODE_COVERAGE=ON + export CMAKE_BUILD_TYPE=RelWithDebInfo cd build-historic make skaled -j$(nproc) - echo "Ensure release mode skaled does not have any debug markers" + #echo "Ensure release mode skaled does not have any debug markers" + cp skaled/skaled skaled/skaled-debug strip skaled/skaled cd .. - name: Build and publish container @@ -177,6 +175,15 @@ jobs: asset_path: ./build/skaled/skaled asset_name: skaled asset_content_type: application/octet-stream + - name: Upload debug binary to Release + uses: actions/upload-release-asset@latest + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + with: + upload_url: ${{ steps.create_release.outputs.upload_url }} + asset_path: ./build/skaled/skaled-debug + asset_name: skaled-debug + asset_content_type: application/octet-stream - name: Build and publish historic-state container run: | cp build-historic/skaled/skaled scripts/skale_build/executable/ @@ -197,3 +204,22 @@ jobs: asset_path: ./build-historic/skaled/skaled asset_name: skaled-historic asset_content_type: application/octet-stream + - name: Upload historic-state debug binary to Release + uses: actions/upload-release-asset@latest + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + with: + upload_url: ${{ steps.create_release.outputs.upload_url }} + asset_path: ./build-historic/skaled/skaled-debug + asset_name: skaled-debug-historic + asset_content_type: application/octet-stream + outputs: + version: ${{ env.VERSION }} + + functional-tests: + uses: ./.github/workflows/functional-tests.yml + name: Functional testing for build + needs: [build] + 
with: + version: ${{ needs.build.outputs.version }} + secrets: inherit diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index f3cda41d7..49803f389 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -9,6 +9,14 @@ defaults: run: shell: bash jobs: + cancel-runs: + name: Cancel Previous Runs + runs-on: ubuntu-latest + steps: + - name: Cancel Previous Runs + uses: styfle/cancel-workflow-action@0.9.1 + with: + access_token: ${{ github.token }} build: runs-on: self-hosted env: @@ -162,7 +170,7 @@ jobs: cd build/test export NO_NTP_CHECK=1 export NO_ULIMIT_CHECK=1 - # we specifically run each test for easier log review + # we specifically run each test for easier log review ./testeth -t BlockchainTests -- --express && touch /tmp/BlockchainTestsPassed ./testeth -t TransitionTests -- --express && touch /tmp/TransitionTestsPassed ./testeth -t TransactionTests -- --express && touch /tmp/TransactionTestsPassed @@ -259,6 +267,22 @@ jobs: ls /tmp/HashSnapshotTestSuitePassed || sudo NO_ULIMIT_CHECK=1 NO_NTP_CHECK=1 ./testeth -t HashSnapshotTestSuite -- --all --verbosity 4 ls /tmp/ClientSnapshotsSuitePassed || sudo NO_ULIMIT_CHECK=1 NO_NTP_CHECK=1 ./testeth -t ClientSnapshotsSuite -- --all --verbosity 4 cd .. + + - name: Create lcov report + run: | + lcov --capture --directory . 
--output-file coverage.info + lcov --remove coverage.info '/usr/*' --output-file coverage.info # filter system-files + lcov --remove coverage.info 'deps/*' --output-file coverage.info # filter dependency files + lcov --remove coverage.info 'libconsensus/deps/*' --output-file coverage.info # filter dependency files + lcov --remove coverage.info 'libconsensus/libBLS/deps/*' --output-file coverage.info # filter dependency files + lcov --remove coverage.info '.hunter/*' --output-file coverage.info # filter dependency files + + - name: Upload to Codecov + uses: codecov/codecov-action@v3 + with: + token: ${{ secrets.CODECOV_TOKEN }} + files: ./coverage.info + - name: Configure all as historic run: | export PATH="/usr/lib/ccache:/usr/local/opt/ccache/libexec:$PATH" diff --git a/CMakeLists.txt b/CMakeLists.txt index 1853963fb..b034843c3 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -96,8 +96,8 @@ endif() option( HUNTER_RUN_UPLOAD "Upload binaries to the cache server" ${run_upload} ) include( HunterGate ) -HunterGate( URL "https://github.com/ruslo/hunter/archive/v0.23.76.tar.gz" SHA1 "c7b60993e841850e2c449afd454f5d5aa4ec04e4" LOCAL ) -#HunterGate( URL "https://github.com/ruslo/hunter/archive/v0.23.214.tar.gz" SHA1 "e14bc153a7f16d6a5eeec845fb0283c8fad8c358" LOCAL ) new leveldb +#HunterGate( URL "https://github.com/ruslo/hunter/archive/v0.23.76.tar.gz" SHA1 "c7b60993e841850e2c449afd454f5d5aa4ec04e4" LOCAL ) +HunterGate( URL "https://github.com/ruslo/hunter/archive/v0.23.214.tar.gz" SHA1 "e14bc153a7f16d6a5eeec845fb0283c8fad8c358" LOCAL ) #leveldb 1.22 set( CMAKE_CXX_STANDARD 17 ) diff --git a/CODEOWNERS b/CODEOWNERS index 9cb8048f1..bf2f53a13 100644 --- a/CODEOWNERS +++ b/CODEOWNERS @@ -1,2 +1,2 @@ -* @skalenetwork/codeowners +* @DmytroNazarenko @kladkogex *.md @skalenetwork/docowners diff --git a/README.md b/README.md index e20d1df9f..7c22817cb 100644 --- a/README.md +++ b/README.md @@ -58,11 +58,15 @@ If you have already cloned the repo and forgot to pass 
`--recurse-submodules`, e ``` sudo apt update -sudo apt install autoconf build-essential cmake libprocps-dev libtool texinfo wget yasm flex bison btrfs-progs -sudo apt install make build-essential cmake pkg-config libgnutls28-dev libssl-dev unzip zlib1g-dev libgcrypt20-dev docker.io gcc-9 g++-9 gperf clang-format-11 +sudo apt install autoconf build-essential cmake libprocps-dev libtool texinfo wget yasm flex bison btrfs-progs python3 python3-pip gawk git vim doxygen +sudo apt install make build-essential cmake pkg-config libgnutls28-dev libssl-dev unzip zlib1g-dev libgcrypt20-dev docker.io gcc-9 g++-9 gperf clang-format-11 gnutls-dev +sudo apt install nettle-dev libhiredis-dev redis-server google-perftools libgoogle-perftools-dev lcov ``` -NB cmake needs to be of version >=3.31, git of version >=2.18 + + + +NB cmake needs to be of version >=3.21, git of version >=2.18 ### (for Ubuntu 20.10 or later) Set gcc-9 as default compiler ``` @@ -74,14 +78,15 @@ sudo update-alternatives --install /usr/bin/gcov-tool gcov-tool /usr/bin/gcov-to gcc --version ``` -### Build dependencies +### Install latest cmake ``` -cd deps -./build.sh +sudo apt-get purge cmake +sudo snap install cmake --classic ``` -or, if you want to build debug version of skaled + +### Build dependencies ``` cd deps @@ -107,8 +112,6 @@ cmake -H. -Bbuild -DCMAKE_BUILD_TYPE=Debug cmake --build build -- -j$(nproc) ``` -Note: Currently only Debug build is supported. 
- ## Testing diff --git a/VERSION b/VERSION index ffaa55f59..3f67e25ce 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -3.16.1 +3.17.0 diff --git a/codecov.yml b/codecov.yml index f68b22e5a..4fef3815e 100644 --- a/codecov.yml +++ b/codecov.yml @@ -2,11 +2,6 @@ codecov: branch: develop coverage: range: 50...100 - #notify: - #gitter: - #default: - #threshold: 0 - #url: https://gitter.im/ethereum/cpp-ethereum-development status: patch: default: @@ -15,13 +10,6 @@ coverage: default: target: auto threshold: 0.05 - app: - target: auto - threshold: 0.05 - paths: "!test/" - tests: - target: auto - paths: "test/" changes: default: enabled: no diff --git a/deps/build.sh b/deps/build.sh index bf8879b69..16f35d527 100755 --- a/deps/build.sh +++ b/deps/build.sh @@ -703,7 +703,7 @@ if [ "$WITH_UNWIND" = "yes" ]; #eval autoheader #eval automake --add-missing eval autoreconf -i - eval ./configure --disable-shared --prefix="$INSTALL_ROOT" + eval ./configure --disable-shared --disable-tests --prefix="$INSTALL_ROOT" cd .. fi echo -e "${COLOR_INFO}building it${COLOR_DOTS}...${COLOR_RESET}" @@ -876,6 +876,9 @@ then then echo -e "${COLOR_INFO}getting it from git${COLOR_DOTS}...${COLOR_RESET}" eval git clone https://github.com/curl/curl.git + cd curl + git checkout curl-8_2_1 + cd .. echo -e "${COLOR_INFO}archiving it${COLOR_DOTS}...${COLOR_RESET}" eval tar -czf curl-from-git.tar.gz ./curl else @@ -929,13 +932,8 @@ then cd "$SOURCES_ROOT" if [ ! -d "libiconv-1.15" ]; then - if [ ! 
-f "libiconv-1.15.tar.gz" ]; - then - echo -e "${COLOR_INFO}downloading it${COLOR_DOTS}...${COLOR_RESET}" - eval "$WGET" --no-check-certificate https://ftp.gnu.org/pub/gnu/libiconv/libiconv-1.15.tar.gz - fi echo -e "${COLOR_INFO}unpacking it${COLOR_DOTS}...${COLOR_RESET}" - eval tar -xzf libiconv-1.15.tar.gz + eval tar -xzf "$PREDOWNLOADED_ROOT/libiconv-1.15.tar.gz" echo -e "${COLOR_INFO}configuring it${COLOR_DOTS}...${COLOR_RESET}" cd libiconv-1.15 eval ./configure "${CONF_CROSSCOMPILING_OPTS_GENERIC}" --enable-static --disable-shared --prefix="$INSTALL_ROOT" "${CONF_DEBUG_OPTIONS}" @@ -1110,7 +1108,7 @@ then # .. #cd ../.. eval ./autogen.sh - eval ./configure "${CONF_CROSSCOMPILING_OPTS_GENERIC}" --enable-static --disable-shared --prefix="$INSTALL_ROOT" "${CONF_DEBUG_OPTIONS}" + eval ./configure "${CONF_CROSSCOMPILING_OPTS_GENERIC}" --enable-static --disable-shared --disable-samples --prefix="$INSTALL_ROOT" "${CONF_DEBUG_OPTIONS}" cd .. fi #cd libevent/build @@ -1139,7 +1137,7 @@ then if [ ! -f "libuv-from-git.tar.gz" ]; then echo -e "${COLOR_INFO}getting it from git${COLOR_DOTS}...${COLOR_RESET}" - eval git clone https://github.com/libuv/libuv.git + eval git clone https://github.com/libuv/libuv.git cd libuv eval git checkout v1.x eval git pull @@ -1188,11 +1186,11 @@ then if [ ! -f "libwebsockets-from-git.tar.gz" ]; then echo -e "${COLOR_INFO}downloading it${COLOR_DOTS}...${COLOR_RESET}" - eval git clone https://github.com/warmcat/libwebsockets.git + eval git clone https://github.com/warmcat/libwebsockets.git eval cd libwebsockets - # eval git checkout v4.1-stable - eval git checkout v4.3-stable - eval git pull + # eval git checkout v4.1-stable + eval git checkout v4.3-stable + eval git pull cd .. 
echo -e "${COLOR_INFO}archiving it${COLOR_DOTS}...${COLOR_RESET}" eval tar -czf libwebsockets-from-git.tar.gz ./libwebsockets @@ -1388,7 +1386,7 @@ then eval "$WGET" https://boostorg.jfrog.io/artifactory/main/release/1.68.0/source/boost_1_68_0.tar.bz2 fi echo -e "${COLOR_INFO}unpacking it${COLOR_DOTS}...${COLOR_RESET}" - eval tar -xf boost_1_68_0.tar.bz2 + eval tar -xf boost_1_68_0.tar.bz2 fi cd boost_1_68_0 echo -e "${COLOR_INFO}configuring and building it${COLOR_DOTS}...${COLOR_RESET}" @@ -1878,6 +1876,7 @@ then fi echo -e "${COLOR_INFO}configuring it${COLOR_DOTS}...${COLOR_RESET}" cd fmt + git checkout 9158bea1e148c190aa3f9f084b82887ecb29d2f8 eval mkdir -p build cd build eval "$CMAKE" "${CMAKE_CROSSCOMPILING_OPTS}" -DCMAKE_INSTALL_PREFIX="$INSTALL_ROOT" -DCMAKE_BUILD_TYPE="$TOP_CMAKE_BUILD_TYPE" .. @@ -1992,10 +1991,10 @@ then then echo -e "${COLOR_INFO}getting it from git${COLOR_DOTS}...${COLOR_RESET}" eval git clone https://github.com/google/glog.git --recursive - cd glog - eval git checkout ee6faf13b20de9536f456bd84584f4ab4db1ceb4 - cd .. - echo -e "${COLOR_INFO}archiving it${COLOR_DOTS}...${COLOR_RESET}" + cd glog + eval git checkout ee6faf13b20de9536f456bd84584f4ab4db1ceb4 + cd .. + echo -e "${COLOR_INFO}archiving it${COLOR_DOTS}...${COLOR_RESET}" eval tar -czf glog-from-git.tar.gz ./glog else echo -e "${COLOR_INFO}unpacking it${COLOR_DOTS}...${COLOR_RESET}" @@ -2006,7 +2005,7 @@ then eval mkdir -p build cd build eval "$CMAKE" "${CMAKE_CROSSCOMPILING_OPTS}" -DCMAKE_INSTALL_PREFIX="$INSTALL_ROOT" -DCMAKE_BUILD_TYPE="$TOP_CMAKE_BUILD_TYPE" \ - -DBUILD_SHARED_LIBS=OFF -DWITH_UNWIND=OFF \ + -DBUILD_SHARED_LIBS=OFF -DWITH_UNWIND=OFF -DWITH_GTEST=OFF \ .. cd .. else @@ -2073,11 +2072,11 @@ then then echo -e "${COLOR_INFO}getting it from git${COLOR_DOTS}...${COLOR_RESET}" eval git clone https://github.com/facebook/folly.git --recursive - cd folly - eval git checkout 5c8fc1b622422a1c73f46d6fb51ac1164d8efb0f - cd .. 
- echo -e "${COLOR_INFO}archiving it${COLOR_DOTS}...${COLOR_RESET}" - eval tar -czf folly-from-git.tar.gz ./folly + cd folly + eval git checkout 5c8fc1b622422a1c73f46d6fb51ac1164d8efb0f + cd .. + echo -e "${COLOR_INFO}archiving it${COLOR_DOTS}...${COLOR_RESET}" + eval tar -czf folly-from-git.tar.gz ./folly else echo -e "${COLOR_INFO}unpacking it${COLOR_DOTS}...${COLOR_RESET}" eval tar -xzf folly-from-git.tar.gz @@ -2087,9 +2086,9 @@ then echo -e "${COLOR_INFO}configuring it${COLOR_DOTS}...${COLOR_RESET}" cd folly eval mkdir -p build2 - cd build2 + cd build2 eval "$CMAKE" "${CMAKE_CROSSCOMPILING_OPTS}" -DCMAKE_INSTALL_PREFIX="$INSTALL_ROOT" -DCMAKE_BUILD_TYPE="$TOP_CMAKE_BUILD_TYPE" \ - -DBOOST_ROOT="$INSTALL_ROOT" -DBOOST_LIBRARYDIR="$INSTALL_ROOT/lib" -DBoost_NO_WARN_NEW_VERSIONS=1 -DBoost_DEBUG=ON \ + -DBOOST_ROOT="$INSTALL_ROOT" -DBOOST_LIBRARYDIR="$INSTALL_ROOT/lib" -DBoost_NO_WARN_NEW_VERSIONS=1 -DBoost_DEBUG=ON \ -DBUILD_SHARED_LIBS=OFF \ -DBUILD_TESTS=OFF -DBUILD_BROKEN_TESTS=OFF -DBUILD_HANGING_TESTS=OFF -DBUILD_SLOW_TESTS=OFF \ .. @@ -2161,10 +2160,10 @@ then then echo -e "${COLOR_INFO}getting it from git${COLOR_DOTS}...${COLOR_RESET}" eval git clone https://github.com/google/googletest.git --recursive - cd googletest - eval git checkout 4c5650f68866e3c2e60361d5c4c95c6f335fb64b - cd .. - echo -e "${COLOR_INFO}archiving it${COLOR_DOTS}...${COLOR_RESET}" + cd googletest + eval git checkout 4c5650f68866e3c2e60361d5c4c95c6f335fb64b + cd .. + echo -e "${COLOR_INFO}archiving it${COLOR_DOTS}...${COLOR_RESET}" eval tar -czf gtest-from-git.tar.gz ./googletest else echo -e "${COLOR_INFO}unpacking it${COLOR_DOTS}...${COLOR_RESET}" @@ -2204,10 +2203,10 @@ then then echo -e "${COLOR_INFO}getting it from git${COLOR_DOTS}...${COLOR_RESET}" eval git clone https://github.com/facebookincubator/fizz.git --recursive - cd fizz - eval git checkout 93003f4161f7cebe1c121b3232215db8314c2ce7 - cd .. 
- echo -e "${COLOR_INFO}archiving it${COLOR_DOTS}...${COLOR_RESET}" + cd fizz + eval git checkout 93003f4161f7cebe1c121b3232215db8314c2ce7 + cd .. + echo -e "${COLOR_INFO}archiving it${COLOR_DOTS}...${COLOR_RESET}" eval tar -czf fizz-from-git.tar.gz ./fizz else echo -e "${COLOR_INFO}unpacking it${COLOR_DOTS}...${COLOR_RESET}" @@ -2249,10 +2248,10 @@ then then echo -e "${COLOR_INFO}getting it from git${COLOR_DOTS}...${COLOR_RESET}" eval git clone https://github.com/facebook/wangle.git --recursive - cd wangle - eval git checkout 7249d3f8d18bcd4bc13649d13654ccb2a771f7b3 - cd .. - echo -e "${COLOR_INFO}archiving it${COLOR_DOTS}...${COLOR_RESET}" + cd wangle + eval git checkout 7249d3f8d18bcd4bc13649d13654ccb2a771f7b3 + cd .. + echo -e "${COLOR_INFO}archiving it${COLOR_DOTS}...${COLOR_RESET}" eval tar -czf wangle-from-git.tar.gz ./wangle else echo -e "${COLOR_INFO}unpacking it${COLOR_DOTS}...${COLOR_RESET}" diff --git a/deps/pre_downloaded/libiconv-1.15.tar.gz b/deps/pre_downloaded/libiconv-1.15.tar.gz new file mode 100644 index 000000000..abdb6a4f1 Binary files /dev/null and b/deps/pre_downloaded/libiconv-1.15.tar.gz differ diff --git a/libbatched-io/batched_db.cpp b/libbatched-io/batched_db.cpp index 92e0019a9..4168ca41f 100644 --- a/libbatched-io/batched_db.cpp +++ b/libbatched-io/batched_db.cpp @@ -59,4 +59,15 @@ void db_splitter::prefixed_db::forEach( } ); } +void db_splitter::prefixed_db::forEachWithPrefix( + std::string& _prefix, std::function< bool( dev::db::Slice, dev::db::Slice ) > f ) const { + backend->forEachWithPrefix( _prefix, [&]( dev::db::Slice _key, dev::db::Slice _val ) -> bool { + if ( _key[0] != this->prefix ) + return true; + dev::db::Slice key_short = dev::db::Slice( _key.data() + 1, _key.size() - 1 ); + return f( key_short, _val ); + } ); +} + + } // namespace batched_io diff --git a/libbatched-io/batched_db.h b/libbatched-io/batched_db.h index aad77550d..e2465d087 100644 --- a/libbatched-io/batched_db.h +++ b/libbatched-io/batched_db.h @@ -18,7 
+18,8 @@ class db_operations_face { virtual std::string lookup( dev::db::Slice _key ) const = 0; virtual bool exists( dev::db::Slice _key ) const = 0; virtual void forEach( std::function< bool( dev::db::Slice, dev::db::Slice ) > f ) const = 0; - + virtual void forEachWithPrefix( + std::string& _prefix, std::function< bool( dev::db::Slice, dev::db::Slice ) > f ) const = 0; virtual ~db_operations_face() = default; }; @@ -69,6 +70,12 @@ class batched_db : public db_face { m_db->forEach( f ); } + virtual void forEachWithPrefix( + std::string& _prefix, std::function< bool( dev::db::Slice, dev::db::Slice ) > f ) const { + std::lock_guard< std::mutex > foreach_lock( m_batch_mutex ); + m_db->forEachWithPrefix( _prefix, f ); + } + virtual ~batched_db(); protected: @@ -105,6 +112,8 @@ class db_splitter { virtual std::string lookup( dev::db::Slice _key ) const; virtual bool exists( dev::db::Slice _key ) const; virtual void forEach( std::function< bool( dev::db::Slice, dev::db::Slice ) > f ) const; + virtual void forEachWithPrefix( + std::string& _prefix, std::function< bool( dev::db::Slice, dev::db::Slice ) > f ) const; protected: virtual void recover() { /* nothing */ diff --git a/libconsensus b/libconsensus index 93f47e966..5a9b85d8f 160000 --- a/libconsensus +++ b/libconsensus @@ -1 +1 @@ -Subproject commit 93f47e9661addf2312f691fad49b0a0a73b28805 +Subproject commit 5a9b85d8f171e1ba5f7dfe244cff70e0e39aa5f4 diff --git a/libdevcore/Common.cpp b/libdevcore/Common.cpp index 26ad77d2b..ec4c0bc53 100644 --- a/libdevcore/Common.cpp +++ b/libdevcore/Common.cpp @@ -20,6 +20,7 @@ #include "Common.h" #include "Exceptions.h" #include "Log.h" +#include "taskmon.h" #include @@ -34,41 +35,141 @@ char const* Version = skale_get_buildinfo()->project_version; bytes const NullBytes; std::string const EmptyString; -std::shared_ptr< StatusAndControl > ExitHandler::statusAndControl; - bool ExitHandler::shouldExit() { - return skutils::signal::g_bStop; + return s_bStop; } int 
ExitHandler::getSignal() { - return skutils::signal::g_nStopSignal; + return s_nStopSignal; } void ExitHandler::exitHandler( int s ) { exitHandler( s, ec_success ); } -void ExitHandler::exitHandler( int s, ExitHandler::exit_code_t ec ) { - skutils::signal::g_nStopSignal = s; +void ExitHandler::exitHandler( int nSignalNo, ExitHandler::exit_code_t ec ) { + std::string strMessagePrefix = ExitHandler::shouldExit() ? + cc::error( "\nStop flag was already raised on. " ) + + cc::fatal( "WILL FORCE TERMINATE." ) + + cc::error( " Caught (second) signal. " ) : + cc::error( "\nCaught (first) signal. " ); + std::cerr << strMessagePrefix << cc::error( skutils::signal::signal2str( nSignalNo ) ) + << "\n\n"; + std::cerr.flush(); + + switch ( nSignalNo ) { + case SIGINT: + case SIGTERM: + case SIGHUP: + // exit normally + // just fall through + break; + + case SIGSTOP: + case SIGTSTP: + case SIGPIPE: + // ignore + return; + break; + + case SIGQUIT: + // exit immediately + _exit( ExitHandler::ec_termninated_by_signal ); + break; + + case SIGILL: + case SIGABRT: + case SIGFPE: + case SIGSEGV: + // abort signals + std::cout << "\n" << skutils::signal::generate_stack_trace() << "\n"; + std::cout.flush(); + std::cout << skutils::signal::read_maps() << "\n"; + std::cout.flush(); + + _exit( nSignalNo + 128 ); + + default: + // exit normally + break; + } // switch + + // try to exit nicely - then abort + if ( !ExitHandler::shouldExit() ) { + static volatile bool g_bSelfKillStarted = false; + if ( !g_bSelfKillStarted ) { + g_bSelfKillStarted = true; + + auto start_time = std::chrono::steady_clock::now(); + + std::thread( [nSignalNo, start_time]() { + std::cerr << ( "\n" + cc::fatal( "SELF-KILL:" ) + " " + cc::error( "Will sleep " ) + + cc::size10( ExitHandler::KILL_TIMEOUT ) + + cc::error( " seconds before force exit..." 
) + "\n\n" ); + std::cerr.flush(); + + clog( VerbosityInfo, "exit" ) << "THREADS timer started"; + + // while waiting, every 0.1s check which threads exited + vector< string > threads; + for ( int i = 0; i < ExitHandler::KILL_TIMEOUT * 10; ++i ) { + auto end_time = std::chrono::steady_clock::now(); + float seconds = std::chrono::duration< float >( end_time - start_time ).count(); + + try { + vector< string > new_threads = taskmon::list_names(); + vector< string > threads_diff = taskmon::lists_diff( threads, new_threads ); + threads = new_threads; + + if ( threads_diff.size() ) { + cerr << seconds << " THREADS " << threads.size() << ":"; + for ( const string& t : threads_diff ) + cerr << " " << t; + cerr << endl; + } + } catch ( ... ) { + // swallow it + } + + std::this_thread::sleep_for( 100ms ); + } + + std::cerr << ( "\n" + cc::fatal( "SELF-KILL:" ) + " " + + cc::error( "Will force exit after sleeping " ) + + cc::size10( ExitHandler::KILL_TIMEOUT ) + cc::error( " second(s)" ) + + "\n\n" ); + std::cerr.flush(); + + // TODO deduplicate this with main() before return + ExitHandler::exit_code_t ec = ExitHandler::requestedExitCode(); + if ( ec == ExitHandler::ec_success ) { + if ( nSignalNo != SIGINT && nSignalNo != SIGTERM ) + ec = ExitHandler::ec_failure; + } + + _exit( ec ); + } ).detach(); + } // if( ! g_bSelfKillStarted ) + } // if ( !skutils::signal::g_bStop ) + + // nice exit here: + + if ( ExitHandler::shouldExit() ) { + std::cerr << ( "\n" + cc::fatal( "SIGNAL-HANDLER:" ) + " " + + cc::error( "Will force exit now..." ) + "\n\n" ); + _exit( 13 ); + } + + s_nStopSignal = nSignalNo; if ( ec != ec_success ) { - g_ec = ec; + s_ec = ec; } - // indicate failure if signal is not INT or TERM! 
- if ( g_ec == ec_success && s != SIGINT && s != SIGTERM ) - g_ec = ExitHandler::ec_failure; - - if ( statusAndControl ) { - statusAndControl->setExitState( StatusAndControl::StartAgain, ( g_ec != ec_success ) ); - statusAndControl->setExitState( - StatusAndControl::StartFromSnapshot, ( g_ec == ec_state_root_mismatch ) ); - statusAndControl->setExitState( - StatusAndControl::ClearDataDir, ( g_ec == ec_state_root_mismatch ) ); - } // if - - skutils::signal::g_bStop = true; - // HACK wait for loop in main to send exit call to consensus et al. - std::this_thread::sleep_for( chrono::milliseconds( 2000 ) ); + // indicate failure if signal is not INT or TERM or internal (-1) + if ( s_ec == ec_success && nSignalNo > 0 && nSignalNo != SIGINT && nSignalNo != SIGTERM ) + s_ec = ExitHandler::ec_failure; + + s_bStop = true; } void InvariantChecker::checkInvariants( @@ -126,6 +227,8 @@ string inUnits( bigint const& _b, strings const& _units ) { return ret.str(); } -volatile ExitHandler::exit_code_t ExitHandler::g_ec = ExitHandler::ec_success; +std::atomic< ExitHandler::exit_code_t > ExitHandler::s_ec = ExitHandler::ec_success; +std::atomic_int ExitHandler::s_nStopSignal{ 0 }; +std::atomic_bool ExitHandler::s_bStop{ false }; } // namespace dev diff --git a/libdevcore/Common.h b/libdevcore/Common.h index 40e8867fb..cc8e4bcec 100644 --- a/libdevcore/Common.h +++ b/libdevcore/Common.h @@ -54,7 +54,9 @@ #pragma warning( push ) #pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wdeprecated-copy" #pragma GCC diagnostic ignored "-Wunused-parameter" +#pragma GCC diagnostic ignored "-Wpessimizing-move" #include #pragma warning( pop ) #pragma GCC diagnostic pop @@ -351,12 +353,12 @@ class ExitHandler { static void exitHandler( int s, ExitHandler::exit_code_t ec ); static bool shouldExit(); static int getSignal(); - static exit_code_t requestedExitCode() { return g_ec; } - - static std::shared_ptr< StatusAndControl > statusAndControl; + static exit_code_t requestedExitCode() { 
return s_ec; } private: - static volatile exit_code_t g_ec; + static std::atomic< exit_code_t > s_ec; + static std::atomic_int s_nStopSignal; + static std::atomic_bool s_bStop; ExitHandler() = delete; }; diff --git a/libdevcore/FixedHash.h b/libdevcore/FixedHash.h index 049f6eb1d..037565706 100644 --- a/libdevcore/FixedHash.h +++ b/libdevcore/FixedHash.h @@ -72,6 +72,8 @@ class FixedHash { /// Construct an empty hash. FixedHash() { m_data.fill( 0 ); } + FixedHash( const FixedHash< N >& other ) = default; + /// Construct from another hash, filling with zeroes or cropping as necessary. template < unsigned M > explicit FixedHash( FixedHash< M > const& _h, ConstructFromHashType _t = AlignLeft ) { @@ -312,6 +314,8 @@ class SecureFixedHash : private FixedHash< T > { using ConstructFromStringType = typename FixedHash< T >::ConstructFromStringType; using ConstructFromPointerType = typename FixedHash< T >::ConstructFromPointerType; SecureFixedHash() = default; + SecureFixedHash( const SecureFixedHash< T >& other ) = default; + explicit SecureFixedHash( bytes const& _b, ConstructFromHashType _t = FixedHash< T >::FailIfDifferent ) : FixedHash< T >( _b, _t ) {} @@ -392,6 +396,7 @@ class SecureFixedHash : private FixedHash< T > { using FixedHash< T >::operator<=; using FixedHash< T >::operator>; + // The obvious binary operators. 
SecureFixedHash& operator^=( FixedHash< T > const& _c ) { static_cast< FixedHash< T >& >( *this ).operator^=( _c ); diff --git a/libdevcore/Guards.h b/libdevcore/Guards.h index 585517ecc..a3242ce33 100644 --- a/libdevcore/Guards.h +++ b/libdevcore/Guards.h @@ -29,6 +29,7 @@ #pragma warning( push ) #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wunused-parameter" +#pragma GCC diagnostic ignored "-Wsign-compare" #include #pragma warning( pop ) #pragma GCC diagnostic pop diff --git a/libdevcore/LevelDB.cpp b/libdevcore/LevelDB.cpp index 9a0aa98d4..2896cf4d0 100644 --- a/libdevcore/LevelDB.cpp +++ b/libdevcore/LevelDB.cpp @@ -70,6 +70,7 @@ class LevelDBWriteBatch : public WriteBatchFace { private: leveldb::WriteBatch m_writeBatch; + std::atomic< uint64_t > keysToBeDeletedCount; }; void LevelDBWriteBatch::insert( Slice _key, Slice _value ) { @@ -78,6 +79,7 @@ void LevelDBWriteBatch::insert( Slice _key, Slice _value ) { } void LevelDBWriteBatch::kill( Slice _key ) { + LevelDB::g_keysToBeDeletedStats++; m_writeBatch.Delete( toLDBSlice( _key ) ); } @@ -88,10 +90,26 @@ leveldb::ReadOptions LevelDB::defaultReadOptions() { } leveldb::WriteOptions LevelDB::defaultWriteOptions() { - return leveldb::WriteOptions(); + leveldb::WriteOptions writeOptions = leveldb::WriteOptions(); + // writeOptions.sync = true; + return writeOptions; } leveldb::Options LevelDB::defaultDBOptions() { + leveldb::Options options; + options.create_if_missing = true; + options.max_open_files = c_maxOpenLeveldbFiles; + options.filter_policy = leveldb::NewBloomFilterPolicy( 10 ); + return options; +} + +leveldb::ReadOptions LevelDB::defaultSnapshotReadOptions() { + leveldb::ReadOptions options; + options.fill_cache = false; + return options; +} + +leveldb::Options LevelDB::defaultSnapshotDBOptions() { leveldb::Options options; options.create_if_missing = true; options.max_open_files = c_maxOpenLeveldbFiles; @@ -103,15 +121,23 @@ LevelDB::LevelDB( boost::filesystem::path const& _path, 
leveldb::ReadOptions _re : m_db( nullptr ), m_readOptions( std::move( _readOptions ) ), m_writeOptions( std::move( _writeOptions ) ), + m_options( std::move( _dbOptions ) ), m_path( _path ) { auto db = static_cast< leveldb::DB* >( nullptr ); - auto const status = leveldb::DB::Open( _dbOptions, _path.string(), &db ); + auto const status = leveldb::DB::Open( m_options, _path.string(), &db ); checkStatus( status, _path ); assert( db ); m_db.reset( db ); } +LevelDB::~LevelDB() { + if ( m_db ) + m_db.reset(); + if ( m_options.filter_policy ) + delete m_options.filter_policy; +} + std::string LevelDB::lookup( Slice _key ) const { leveldb::Slice const key( _key.data(), _key.size() ); std::string value; @@ -144,6 +170,9 @@ void LevelDB::insert( Slice _key, Slice _value ) { void LevelDB::kill( Slice _key ) { leveldb::Slice const key( _key.data(), _key.size() ); auto const status = m_db->Delete( m_writeOptions, key ); + // At this point the key is not actually deleted. It will be deleted when the batch + // is committed + g_keysToBeDeletedStats++; checkStatus( status ); } @@ -161,6 +190,10 @@ void LevelDB::commit( std::unique_ptr< WriteBatchFace > _batch ) { DatabaseError() << errinfo_comment( "Invalid batch type passed to LevelDB::commit" ) ); } auto const status = m_db->Write( m_writeOptions, &batchPtr->writeBatch() ); + // Commit happened. This means the keys actually got deleted in LevelDB. 
Increment key deletes + // stats and set g_keysToBeDeletedStats to zero + g_keyDeletesStats += g_keysToBeDeletedStats; + g_keysToBeDeletedStats = 0; checkStatus( status ); } @@ -180,6 +213,26 @@ void LevelDB::forEach( std::function< bool( Slice, Slice ) > f ) const { } } + +void LevelDB::forEachWithPrefix( + std::string& _prefix, std::function< bool( Slice, Slice ) > f ) const { + cnote << "Iterating over the LevelDB prefix: " << _prefix; + std::unique_ptr< leveldb::Iterator > itr( m_db->NewIterator( m_readOptions ) ); + if ( itr == nullptr ) { + BOOST_THROW_EXCEPTION( DatabaseError() << errinfo_comment( "null iterator" ) ); + } + auto keepIterating = true; + auto prefixSlice = leveldb::Slice( _prefix ); + for ( itr->Seek( prefixSlice ); + keepIterating && itr->Valid() && itr->key().starts_with( prefixSlice ); itr->Next() ) { + auto const dbKey = itr->key(); + auto const dbValue = itr->value(); + Slice const key( dbKey.data(), dbKey.size() ); + Slice const value( dbValue.data(), dbValue.size() ); + keepIterating = f( key, value ); + } +} + h256 LevelDB::hashBase() const { std::unique_ptr< leveldb::Iterator > it( m_db->NewIterator( m_readOptions ) ); if ( it == nullptr ) { @@ -227,9 +280,16 @@ h256 LevelDB::hashBaseWithPrefix( char _prefix ) const { return hash; } -// void LevelDB::doCompaction() const { -// m_db->CompactRange( NULL, NULL ); -//} +void LevelDB::doCompaction() const { + m_db->CompactRange( nullptr, nullptr ); +} + +std::atomic< uint64_t > LevelDB::g_keysToBeDeletedStats = 0; +std::atomic< uint64_t > LevelDB::g_keyDeletesStats = 0; + +uint64_t LevelDB::getKeyDeletesStats() { + return g_keyDeletesStats; +} } // namespace db } // namespace dev diff --git a/libdevcore/LevelDB.h b/libdevcore/LevelDB.h index 25edade9d..0a597c51e 100644 --- a/libdevcore/LevelDB.h +++ b/libdevcore/LevelDB.h @@ -22,6 +22,7 @@ #include "db.h" #include +#include #include #include @@ -32,12 +33,16 @@ class LevelDB : public DatabaseFace { static leveldb::ReadOptions 
defaultReadOptions(); static leveldb::WriteOptions defaultWriteOptions(); static leveldb::Options defaultDBOptions(); + static leveldb::ReadOptions defaultSnapshotReadOptions(); + static leveldb::Options defaultSnapshotDBOptions(); explicit LevelDB( boost::filesystem::path const& _path, leveldb::ReadOptions _readOptions = defaultReadOptions(), leveldb::WriteOptions _writeOptions = defaultWriteOptions(), leveldb::Options _dbOptions = defaultDBOptions() ); + ~LevelDB(); + std::string lookup( Slice _key ) const override; bool exists( Slice _key ) const override; void insert( Slice _key, Slice _value ) override; @@ -48,15 +53,26 @@ class LevelDB : public DatabaseFace { void forEach( std::function< bool( Slice, Slice ) > f ) const override; + void forEachWithPrefix( + std::string& _prefix, std::function< bool( Slice, Slice ) > f ) const override; + h256 hashBase() const override; h256 hashBaseWithPrefix( char _prefix ) const; - // void doCompaction() const; + void doCompaction() const; + + // Return the total count of key deletes since the start + static uint64_t getKeyDeletesStats(); + // count of the keys that were deleted since the start of skaled + static std::atomic< uint64_t > g_keyDeletesStats; + // count of the keys that are scheduled to be deleted but are not yet deleted + static std::atomic< uint64_t > g_keysToBeDeletedStats; private: std::unique_ptr< leveldb::DB > m_db; leveldb::ReadOptions const m_readOptions; leveldb::WriteOptions const m_writeOptions; + leveldb::Options m_options; boost::filesystem::path const m_path; }; diff --git a/libdevcore/Log.cpp b/libdevcore/Log.cpp index 08a7ca608..4e0ca878e 100644 --- a/libdevcore/Log.cpp +++ b/libdevcore/Log.cpp @@ -29,6 +29,9 @@ #include +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wdeprecated-copy" + #include #include #include @@ -39,6 +42,8 @@ #include #include +#pragma GCC diagnostic pop + #include #include diff --git a/libdevcore/Log.h b/libdevcore/Log.h index 2801c03fb..da18916f9 100644 
--- a/libdevcore/Log.h +++ b/libdevcore/Log.h @@ -25,11 +25,16 @@ #include #include +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wdeprecated-copy" + #include #include #include #include +#pragma GCC diagnostic pop + #include namespace dev { diff --git a/libdevcore/ManuallyRotatingLevelDB.cpp b/libdevcore/ManuallyRotatingLevelDB.cpp index 16103d474..4f36242c0 100644 --- a/libdevcore/ManuallyRotatingLevelDB.cpp +++ b/libdevcore/ManuallyRotatingLevelDB.cpp @@ -67,6 +67,15 @@ void ManuallyRotatingLevelDB::forEach( std::function< bool( Slice, Slice ) > f ) } } +void ManuallyRotatingLevelDB::forEachWithPrefix( + std::string& _prefix, std::function< bool( Slice, Slice ) > f ) const { + std::shared_lock< std::shared_mutex > lock( m_mutex ); + for ( const auto& p : *io_backend ) { + p->forEachWithPrefix( _prefix, f ); + } +} + + h256 ManuallyRotatingLevelDB::hashBase() const { std::shared_lock< std::shared_mutex > lock( m_mutex ); secp256k1_sha256_t ctx; diff --git a/libdevcore/ManuallyRotatingLevelDB.h b/libdevcore/ManuallyRotatingLevelDB.h index bdedf5d54..12b98ce3f 100644 --- a/libdevcore/ManuallyRotatingLevelDB.h +++ b/libdevcore/ManuallyRotatingLevelDB.h @@ -41,6 +41,9 @@ class ManuallyRotatingLevelDB : public DatabaseFace { } virtual void forEach( std::function< bool( Slice, Slice ) > f ) const; + virtual void forEachWithPrefix( + std::string& _prefix, std::function< bool( Slice, Slice ) > f ) const; + virtual h256 hashBase() const; }; diff --git a/libdevcore/SplitDB.cpp b/libdevcore/SplitDB.cpp index 2ba662738..8965628d5 100644 --- a/libdevcore/SplitDB.cpp +++ b/libdevcore/SplitDB.cpp @@ -100,6 +100,20 @@ void SplitDB::PrefixedDB::forEach( std::function< bool( Slice, Slice ) > f ) con } ); } + +void SplitDB::PrefixedDB::forEachWithPrefix( + std::string& _prefix, std::function< bool( Slice, Slice ) > f ) const { + std::unique_lock< std::shared_mutex > lock( this->backend_mutex ); + auto prefixedString = std::to_string( this->prefix ) + _prefix; + 
backend->forEachWithPrefix( prefixedString, [&]( Slice _key, Slice _val ) -> bool { + if ( _key[0] != this->prefix ) + return true; + Slice key_short = Slice( _key.data() + 1, _key.size() - 1 ); + return f( key_short, _val ); + } ); +} + + h256 SplitDB::PrefixedDB::hashBase() const { // HACK TODO implement that it would work with any DatabaseFace* const LevelDB* ldb = dynamic_cast< const LevelDB* >( backend.get() ); diff --git a/libdevcore/SplitDB.h b/libdevcore/SplitDB.h index 3a1774b55..1f939ff6d 100644 --- a/libdevcore/SplitDB.h +++ b/libdevcore/SplitDB.h @@ -36,6 +36,9 @@ class SplitDB { virtual void commit( std::unique_ptr< WriteBatchFace > _batch ); virtual void forEach( std::function< bool( Slice, Slice ) > f ) const; + virtual void forEachWithPrefix( + std::string& _prefix, std::function< bool( Slice, Slice ) > f ) const; + virtual h256 hashBase() const; private: diff --git a/libdevcore/StatusAndControl.h b/libdevcore/StatusAndControl.h index b84c84c26..798895ee9 100644 --- a/libdevcore/StatusAndControl.h +++ b/libdevcore/StatusAndControl.h @@ -1,8 +1,13 @@ #ifndef STATUSANDCONTROL_H #define STATUSANDCONTROL_H +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wdeprecated-copy" + #include +#pragma GCC diagnostic pop + #include #include #include diff --git a/libdevcore/db.h b/libdevcore/db.h index a978cb302..4c0f39e22 100644 --- a/libdevcore/db.h +++ b/libdevcore/db.h @@ -66,6 +66,11 @@ class DatabaseFace { // of each record in the database. If `f` returns false, the `forEach` // method must return immediately. 
virtual void forEach( std::function< bool( Slice, Slice ) > f ) const = 0; + + virtual void forEachWithPrefix( + std::string& _prefix, std::function< bool( Slice, Slice ) > f ) const = 0; + + virtual h256 hashBase() const = 0; virtual bool discardCreatedBatches() { return false; } diff --git a/libdevcore/system_usage.cpp b/libdevcore/system_usage.cpp index c3fdc772c..fb5c4318b 100644 --- a/libdevcore/system_usage.cpp +++ b/libdevcore/system_usage.cpp @@ -55,7 +55,6 @@ void initCPUUSage() { } double getCPUUsage() { - initCPUUSage(); struct tms timeSample; clock_t now; double percent; diff --git a/libdevcore/taskmon.cpp b/libdevcore/taskmon.cpp new file mode 100644 index 000000000..58c0224d8 --- /dev/null +++ b/libdevcore/taskmon.cpp @@ -0,0 +1,142 @@ +#include "taskmon.h" + +#include +#include +#include + +#include + +using namespace std; +namespace fs = filesystem; + +vector< string > taskmon::list_names() { + vector< string > task_names; + int self_pid = getpid(); + fs::path task_dir_path = "/proc/" + to_string( self_pid ) + "/task"; + + if ( !fs::exists( task_dir_path ) ) + return task_names; + + for ( const auto& task_entry : fs::directory_iterator( task_dir_path ) ) { + if ( task_entry.path().filename().string()[0] == '.' ) + continue; + + string task_name = extract_task_name( task_entry ); + if ( !task_name.empty() ) + task_names.push_back( task_name ); + } + + return task_names; +} + +vector< int > taskmon::list_ids() { + vector< int > task_ids; + int self_pid = getpid(); + fs::path task_dir_path = "/proc/" + to_string( self_pid ) + "/task"; + + if ( !fs::exists( task_dir_path ) ) { + return task_ids; + } + + for ( const auto& task_entry : fs::directory_iterator( task_dir_path ) ) { + if ( task_entry.path().filename().string()[0] == '.' 
) + continue; + + int task_id = stoi( task_entry.path().filename().string() ); + task_ids.push_back( task_id ); + } + + return task_ids; +} + +string taskmon::id2name( int tid ) { + string task_name; + int self_pid = getpid(); + fs::path task_dir_path = "/proc/" + to_string( self_pid ) + "/task/" + to_string( tid ); + + if ( !fs::exists( task_dir_path ) ) + return task_name; + + task_name = extract_task_name( task_dir_path ); + + return task_name; +} + +int taskmon::name2id( const string& name ) { + vector< int > ids = list_ids(); + + int res = -1; + for_each( ids.begin(), ids.end(), [&name, &res]( int tid ) { + if ( id2name( tid ) == name ) + res = tid; + } ); + + return res; +} + +string taskmon::status( int tid ) { + string task_status; + int self_pid = getpid(); + fs::path task_dir_path = "/proc/" + to_string( self_pid ) + "/task/" + to_string( tid ); + + if ( !fs::exists( task_dir_path ) ) + return task_status; + + ifstream status_file( task_dir_path / "status" ); + string line; + + while ( getline( status_file, line ) ) { + if ( line.find( "State:" ) != string::npos ) { + istringstream iss( line ); + string state_str; + iss >> state_str >> task_status; + break; + } + } + + return task_status; +} + +string taskmon::status( const string& name ) { + int task_id = name2id( name ); + if ( task_id == -1 ) { + return ""; + } + return status( task_id ); +} + +vector< string > taskmon::lists_diff( const vector< string >& from, const vector< string >& to ) { + vector< string > diff; + + vector< string > from_copy = from; + vector< string > to_copy = to; + vector< string >::iterator new_end; + + // keep removed + new_end = remove_if( from_copy.begin(), from_copy.end(), + [&to]( const string& s ) -> bool { return find( to.begin(), to.end(), s ) != to.end(); } ); + from_copy.erase( new_end, from_copy.end() ); + + // keep added + new_end = remove_if( to_copy.begin(), to_copy.end(), [&from]( const string& s ) -> bool { + return find( from.begin(), from.end(), s ) != 
from.end(); + } ); + to_copy.erase( new_end, to_copy.end() ); + + // combine + for_each( from_copy.begin(), from_copy.end(), + [&diff]( const string& s ) { diff.push_back( "-" + s ); } ); + for_each( + to_copy.begin(), to_copy.end(), [&diff]( const string& s ) { diff.push_back( "+" + s ); } ); + + return diff; +} + +string taskmon::extract_task_name( const fs::path& task_dir_path ) { + string task_name; + + ifstream comm_file( task_dir_path / "comm" ); + getline( comm_file, task_name ); + + return task_name; +} diff --git a/libdevcore/taskmon.h b/libdevcore/taskmon.h new file mode 100644 index 000000000..95e7f15fb --- /dev/null +++ b/libdevcore/taskmon.h @@ -0,0 +1,37 @@ +#ifndef TASKMON_H +#define TASKMON_H + +#include +#include +#include + +// tool to monitor running and exiting tasks +// assumes that task names are more or less unique +// methods can throw exceptions in case if /proc is inaccessible +// so, ber sure to catch! +class taskmon { +public: + // get all thread names of current process (can have duplicates) + static std::vector< std::string > list_names(); + // get all thread ids of current process + static std::vector< int > list_ids(); + // convert thread id to name + static std::string id2name( int tid ); + // convert thread name to id + static int name2id( const std::string& name ); + // get thread status from /proc/PID/task/TID/staus + // e.g. 
"S" (for "sleeping") + static std::string status( int tid ); + // get thread status by name (see above) + static std::string status( const std::string& name ); + + // return how list1 and list2 differ + // adds '+' or '-' signs to elements + static std::vector< std::string > lists_diff( + const std::vector< std::string >& from, const std::vector< std::string >& to ); + +private: + static std::string extract_task_name( const std::filesystem::path& task_dir_path ); +}; + +#endif // TASKMON_H diff --git a/libethashseal/EthashClient.h b/libethashseal/EthashClient.h index 9ff3de050..60febd916 100644 --- a/libethashseal/EthashClient.h +++ b/libethashseal/EthashClient.h @@ -43,7 +43,8 @@ class EthashClient : public Client { std::shared_ptr< InstanceMonitor > _instanceMonitor, boost::filesystem::path const& _dbPath = {}, WithExisting _forceAction = WithExisting::Trust, - TransactionQueue::Limits const& _l = TransactionQueue::Limits{ 1024, 1024 } ); + TransactionQueue::Limits const& _l = TransactionQueue::Limits{ + 1024, 1024, 12322916, 24645833 } ); ~EthashClient(); Ethash* ethash() const; diff --git a/libethcore/ChainOperationParams.h b/libethcore/ChainOperationParams.h index b4bde16a7..ec57fe4ed 100644 --- a/libethcore/ChainOperationParams.h +++ b/libethcore/ChainOperationParams.h @@ -173,6 +173,9 @@ struct SChain { time_t contractStoragePatchTimestamp = 0; time_t contractStorageZeroValuePatchTimestamp = 0; time_t verifyDaSigsPatchTimestamp = 0; + time_t storageDestructionPatchTimestamp = 0; + time_t powCheckPatchTimestamp = 0; + time_t skipInvalidTransactionsPatchTimestamp = 0; SChain() { name = "TestChain"; diff --git a/libethcore/Common.h b/libethcore/Common.h index d27428baa..980aa7a30 100644 --- a/libethcore/Common.h +++ b/libethcore/Common.h @@ -211,6 +211,11 @@ struct TransactionSkeleton { u256 gas = Invalid256; u256 gasPrice = Invalid256; + TransactionSkeleton() = default; + TransactionSkeleton( const TransactionSkeleton& other ) = default; + + 
TransactionSkeleton& operator=( const TransactionSkeleton& other ) = default; + std::string userReadable( bool _toProxy, std::function< std::pair< bool, std::string >( TransactionSkeleton const& ) > const& _getNatSpec, @@ -223,7 +228,6 @@ struct TransactionSkeleton { static uint64_t howMany() { return Counter< TransactionSkeleton >::howMany(); } }; - void badBlock( bytesConstRef _header, std::string const& _err ); inline void badBlock( bytes const& _header, std::string const& _err ) { badBlock( &_header, _err ); diff --git a/libethcore/Counter.h b/libethcore/Counter.h index 447df265c..422baa763 100644 --- a/libethcore/Counter.h +++ b/libethcore/Counter.h @@ -9,11 +9,11 @@ template < typename T > class Counter { public: Counter() { ++count; } - Counter( const Counter& ) { ++count; } - ~Counter() { --count; } + Counter& operator=( const Counter& other ) = default; + static uint64_t howMany() { return count; } private: diff --git a/libethcore/LogEntry.h b/libethcore/LogEntry.h index ea52eb90a..639ef6e4a 100644 --- a/libethcore/LogEntry.h +++ b/libethcore/LogEntry.h @@ -34,10 +34,13 @@ namespace eth { struct LogEntry { LogEntry() = default; + LogEntry( const LogEntry& other ) = default; explicit LogEntry( RLP const& _r ); LogEntry( Address const& _address, h256s _topics, bytes _data ) : address( _address ), topics( std::move( _topics ) ), data( std::move( _data ) ) {} + LogEntry& operator=( const LogEntry& other ) = default; + void streamRLP( RLPStream& _s ) const; LogBloom bloom() const; diff --git a/libethcore/TransactionBase.h b/libethcore/TransactionBase.h index e09a52df6..369cb8475 100644 --- a/libethcore/TransactionBase.h +++ b/libethcore/TransactionBase.h @@ -173,7 +173,7 @@ class TransactionBase { /// @returns the total gas to convert, paid for from sender's account. Any unused gas gets /// refunded once the contract is ended. 
- u256 gas() const; + virtual u256 gas() const; /// @returns the receiving address of the message-call transaction (undefined for /// contract-creation transactions). diff --git a/libethereum/Account.h b/libethereum/Account.h index 66405625c..0c4ac73f9 100644 --- a/libethereum/Account.h +++ b/libethereum/Account.h @@ -40,19 +40,20 @@ namespace eth { class StorageRoot : public h256 { public: + StorageRoot() = default; StorageRoot( const u256& _value ) : h256( _value ) {} - StorageRoot( const StorageRoot& _value ) : h256( _value ){}; + StorageRoot& operator=( const StorageRoot& other ) = default; }; class GlobalRoot : public h256 { public: + GlobalRoot() = default; GlobalRoot( const u256& _value ) : h256( _value ) {} - GlobalRoot( const GlobalRoot& _value ) : h256( _value ){}; + GlobalRoot& operator=( const GlobalRoot& other ) = default; }; - /** * Models the state of a single Ethereum account. * Used to cache a portion of the full Ethereum state. State keeps a mapping of Address's to @@ -99,6 +100,8 @@ class Account { /// Construct a dead Account. Account() {} + Account( const Account& other ) = default; + Account& operator=( const Account& other ) = default; /// Construct an alive Account, with given endowment, for either a normal (non-contract) account /// or for a contract account in the conception phase, where the code is not yet known. @@ -122,7 +125,6 @@ class Account { assert( _contractRoot ); } - /// Kill this account. Useful for the suicide opcode. Following this call, isAlive() returns /// false. void kill() { @@ -197,7 +199,6 @@ class Account { changed(); } - /// Set a key/value pair in the account's storage to a value that is already present inside the /// database. 
void setStorageCache( u256 _p, u256 _v ) const { diff --git a/libethereum/Block.cpp b/libethereum/Block.cpp index 559cceee3..28e17ad0f 100644 --- a/libethereum/Block.cpp +++ b/libethereum/Block.cpp @@ -35,6 +35,7 @@ #include #include #include +#include #include #include #include @@ -483,31 +484,38 @@ tuple< TransactionReceipts, unsigned > Block::syncEveryone( LOG( m_logger ) << "Transaction " << tr.sha3() << " WouldNotBeInBlock: gasPrice " << tr.gasPrice() << " < " << _gasPrice; - // Add to the user-originated transactions that we've executed. - m_transactions.push_back( tr ); - m_transactionSet.insert( tr.sha3() ); + if ( SkipInvalidTransactionsPatch::isEnabled() ) { + // Add to the user-originated transactions that we've executed. + m_transactions.push_back( tr ); + m_transactionSet.insert( tr.sha3() ); - // TODO deduplicate - // "bad" transaction receipt for failed transactions - TransactionReceipt const null_receipt = - info().number() >= sealEngine()->chainParams().byzantiumForkBlock ? - TransactionReceipt( 0, info().gasUsed(), LogEntries() ) : - TransactionReceipt( EmptyTrie, info().gasUsed(), LogEntries() ); + // TODO deduplicate + // "bad" transaction receipt for failed transactions + TransactionReceipt const null_receipt = + info().number() >= sealEngine()->chainParams().byzantiumForkBlock ? 
+ TransactionReceipt( 0, info().gasUsed(), LogEntries() ) : + TransactionReceipt( EmptyTrie, info().gasUsed(), LogEntries() ); - m_receipts.push_back( null_receipt ); - receipts.push_back( null_receipt ); + m_receipts.push_back( null_receipt ); + receipts.push_back( null_receipt ); - ++count_bad; + ++count_bad; + } continue; } ExecutionResult res = execute( _bc.lastBlockHashes(), tr, Permanence::Committed, OnOpFunc() ); - receipts.push_back( m_receipts.back() ); - if ( res.excepted == TransactionException::WouldNotBeInBlock ) - ++count_bad; + if ( !SkipInvalidTransactionsPatch::isEnabled() || + res.excepted != TransactionException::WouldNotBeInBlock ) { + receipts.push_back( m_receipts.back() ); + + // if added but bad + if ( res.excepted == TransactionException::WouldNotBeInBlock ) + ++count_bad; + } // // Debug only, related SKALE-2814 partial catchup testing @@ -862,9 +870,12 @@ ExecutionResult Block::execute( if ( _p == Permanence::Committed || _p == Permanence::CommittedWithoutState || _p == Permanence::Uncommitted ) { // Add to the user-originated transactions that we've executed. 
- m_transactions.push_back( _t ); - m_receipts.push_back( resultReceipt.second ); - m_transactionSet.insert( _t.sha3() ); + if ( !SkipInvalidTransactionsPatch::isEnabled() || + resultReceipt.first.excepted != TransactionException::WouldNotBeInBlock ) { + m_transactions.push_back( _t ); + m_receipts.push_back( resultReceipt.second ); + m_transactionSet.insert( _t.sha3() ); + } } if ( _p == Permanence::Committed || _p == Permanence::Uncommitted ) { m_state = stateSnapshot.createStateModifyCopyAndPassLock(); diff --git a/libethereum/BlockChain.cpp b/libethereum/BlockChain.cpp index 4e15e72eb..fcca55c02 100644 --- a/libethereum/BlockChain.cpp +++ b/libethereum/BlockChain.cpp @@ -229,9 +229,9 @@ void BlockChain::open( fs::path const& _path, bool _applyPatches, WithExisting _ try { fs::create_directories( chainPath / fs::path( "blocks_and_extras" ) ); - auto rotator = std::make_shared< batched_io::rotating_db_io >( + m_rotator = std::make_shared< batched_io::rotating_db_io >( chainPath / fs::path( "blocks_and_extras" ), 5, chainParams().nodeInfo.archiveMode ); - m_rotating_db = std::make_shared< db::ManuallyRotatingLevelDB >( rotator ); + m_rotating_db = std::make_shared< db::ManuallyRotatingLevelDB >( m_rotator ); auto db = std::make_shared< batched_io::batched_db >(); db->open( m_rotating_db ); m_db = db; @@ -1263,7 +1263,8 @@ void BlockChain::garbageCollect( bool _force ) { m_lastCollection = chrono::system_clock::now(); - { + // We subtract memory that blockhashes occupy because it is treated sepaparately + while ( m_lastStats.memTotal() - m_lastStats.memBlockHashes >= c_maxCacheSize ) { Guard l( x_cacheUsage ); for ( CacheID const& id : m_cacheUsage.back() ) { m_inUse.erase( id ); @@ -1310,11 +1311,13 @@ void BlockChain::garbageCollect( bool _force ) { } m_cacheUsage.pop_back(); m_cacheUsage.push_front( std::unordered_set< CacheID >{} ); + updateStats(); } { WriteGuard l( x_blockHashes ); + // This is where block hash memory cleanup is treated // allow only 4096 
blockhashes in the cache if ( m_blockHashes.size() > 4096 ) { auto last = m_blockHashes.begin(); @@ -1364,13 +1367,13 @@ void BlockChain::clearCaches() { } } -// void BlockChain::doLevelDbCompaction() const { -// for ( auto it = m_rotator->begin(); it != m_rotator->end(); ++it ) { -// dev::db::LevelDB* ldb = dynamic_cast< dev::db::LevelDB* >( it->get() ); -// assert( ldb ); -// ldb->doCompaction(); -// } -//} +void BlockChain::doLevelDbCompaction() const { + for ( auto it = m_rotator->begin(); it != m_rotator->end(); ++it ) { + dev::db::LevelDB* ldb = dynamic_cast< dev::db::LevelDB* >( it->get() ); + assert( ldb ); + ldb->doCompaction(); + } +} void BlockChain::checkConsistency() { DEV_WRITE_GUARDED( x_details ) { m_details.clear(); } diff --git a/libethereum/BlockChain.h b/libethereum/BlockChain.h index 55f12efd5..cd12f7e5d 100644 --- a/libethereum/BlockChain.h +++ b/libethereum/BlockChain.h @@ -470,8 +470,8 @@ class BlockChain { void open( boost::filesystem::path const& _path, bool _applyPatches, WithExisting _we ); /// Finalise everything and close the database. void close(); - // /// compact db before snapshot - // void doLevelDbCompaction() const; + /// compact db before snapshot + void doLevelDbCompaction() const; private: bool rotateDBIfNeeded( uint64_t pieceUsageBytes ); @@ -589,7 +589,7 @@ class BlockChain { uint64_t m_maxStorageUsage; /// The disk DBs. Thread-safe, so no need for locks. 
- // std::shared_ptr< batched_io::rotating_db_io > m_rotator; // for compaction + std::shared_ptr< batched_io::rotating_db_io > m_rotator; // for compaction std::shared_ptr< db::ManuallyRotatingLevelDB > m_rotating_db; // rotate() std::shared_ptr< batched_io::db_face > m_db; // insert()/commit() std::unique_ptr< batched_io::db_splitter > m_db_splitter; // new_interface() diff --git a/libethereum/BlockDetails.h b/libethereum/BlockDetails.h index 49cfdb3cf..7bd4d07d9 100644 --- a/libethereum/BlockDetails.h +++ b/libethereum/BlockDetails.h @@ -48,6 +48,8 @@ struct BlockDetails { children( _c ), blockSizeBytes( _blockBytes ) {} BlockDetails( RLP const& _r ); + BlockDetails( const BlockDetails& other ) = default; + BlockDetails& operator=( const BlockDetails& other ) = default; bytes rlp() const; bool isNull() const { return number == c_invalidNumber; } @@ -73,6 +75,8 @@ struct BlockLogBlooms { blooms = _r.toVector< LogBloom >(); size = _r.data().size(); } + BlockLogBlooms( const BlockLogBlooms& other ) = default; + BlockLogBlooms& operator=( const BlockLogBlooms& other ) = default; bytes rlp() const { bytes r = dev::rlp( blooms ); size = r.size(); @@ -89,6 +93,8 @@ struct BlocksBlooms { blooms = _r.toArray< LogBloom, c_bloomIndexSize >(); size = _r.data().size(); } + BlocksBlooms( const BlocksBlooms& other ) = default; + BlocksBlooms& operator=( const BlocksBlooms& other ) = default; bytes rlp() const { bytes r = dev::rlp( blooms ); size = r.size(); @@ -106,6 +112,8 @@ struct BlockReceipts { receipts.emplace_back( i.data() ); size = _r.data().size(); } + BlockReceipts( const BlockReceipts& other ) = default; + BlockReceipts& operator=( const BlockReceipts& other ) = default; bytes rlp() const { RLPStream s( receipts.size() ); for ( TransactionReceipt const& i : receipts ) @@ -122,6 +130,8 @@ struct BlockHash { BlockHash() {} BlockHash( h256 const& _h ) : value( _h ) {} BlockHash( RLP const& _r ) { value = _r.toHash< h256 >(); } + BlockHash( const BlockHash& other ) = 
default; + BlockHash& operator=( const BlockHash& other ) = default; bytes rlp() const { return dev::rlp( value ); } h256 value; @@ -134,6 +144,8 @@ struct TransactionAddress { blockHash = _rlp[0].toHash< h256 >(); index = _rlp[1].toInt< unsigned >(); } + TransactionAddress( const TransactionAddress& other ) = default; + TransactionAddress& operator=( const TransactionAddress& other ) = default; bytes rlp() const { RLPStream s( 2 ); s << blockHash << index; diff --git a/libethereum/ChainParams.cpp b/libethereum/ChainParams.cpp index ea78be838..fb48f748d 100644 --- a/libethereum/ChainParams.cpp +++ b/libethereum/ChainParams.cpp @@ -257,6 +257,20 @@ ChainParams ChainParams::loadConfig( sChainObj.at( "verifyDaSigsPatchTimestamp" ).get_int64() : 0; + s.storageDestructionPatchTimestamp = + sChainObj.count( "storageDestructionPatchTimestamp" ) ? + sChainObj.at( "storageDestructionPatchTimestamp" ).get_int64() : + 0; + + s.powCheckPatchTimestamp = sChainObj.count( "powCheckPatchTimestamp" ) ? + sChainObj.at( "powCheckPatchTimestamp" ).get_int64() : + 0; + + s.skipInvalidTransactionsPatchTimestamp = + sChainObj.count( "skipInvalidTransactionsPatchTimestamp" ) ? 
+ sChainObj.at( "skipInvalidTransactionsPatchTimestamp" ).get_int64() : + 0; + if ( sChainObj.count( "nodeGroups" ) ) { std::vector< NodeGroup > nodeGroups; for ( const auto& nodeGroupConf : sChainObj["nodeGroups"].get_obj() ) { diff --git a/libethereum/Client.cpp b/libethereum/Client.cpp index bbbc0c653..06cec9516 100644 --- a/libethereum/Client.cpp +++ b/libethereum/Client.cpp @@ -26,12 +26,14 @@ #include "Defaults.h" #include "Executive.h" #include "SkaleHost.h" +#include "SnapshotAgent.h" #include "SnapshotStorage.h" #include "TransactionQueue.h" #include #include #include +#include #include #include @@ -41,6 +43,7 @@ #include #include +#include #include #ifdef HISTORIC_STATE @@ -50,8 +53,11 @@ #include #include +#include #include +#include #include +#include #include #include #include @@ -129,7 +135,8 @@ Client::Client( ChainParams const& _params, int _networkID, m_preSeal( chainParams().accountStartNonce ), m_postSeal( chainParams().accountStartNonce ), m_working( chainParams().accountStartNonce ), - m_snapshotManager( _snapshotManager ), + m_snapshotAgent( make_shared< SnapshotAgent >( + _params.sChain.snapshotIntervalSec, _snapshotManager, m_debugTracer ) ), m_instanceMonitor( _instanceMonitor ), m_dbPath( _dbPath ) { #if ( defined __HAVE_SKALED_LOCK_FILE_INDICATING_CRITICAL_STOP__ ) @@ -141,22 +148,26 @@ Client::Client( ChainParams const& _params, int _networkID, << "TRACEPOINT " << name << " " << m_debugTracer.get_tracepoint_count( name ); } ); - m_debugHandler = [this]( const std::string& arg ) -> std::string { return DebugTracer_handler( arg, this->m_debugTracer ); }; init( _forceAction, _networkID ); + // Set timestamps for patches TotalStorageUsedPatch::g_client = this; - ContractStorageLimitPatch::contractStoragePatchTimestamp = - chainParams().sChain.contractStoragePatchTimestamp; - ContractStorageZeroValuePatch::contractStorageZeroValuePatchTimestamp = - chainParams().sChain.contractStorageZeroValuePatchTimestamp; - 
VerifyDaSigsPatch::verifyDaSigsPatchTimestamp = chainParams().sChain.verifyDaSigsPatchTimestamp; - RevertableFSPatch::revertableFSPatchTimestamp = chainParams().sChain.revertableFSPatchTimestamp; + ContractStorageLimitPatch::setTimestamp( chainParams().sChain.contractStoragePatchTimestamp ); + ContractStorageZeroValuePatch::setTimestamp( + chainParams().sChain.contractStorageZeroValuePatchTimestamp ); + VerifyDaSigsPatch::setTimestamp( chainParams().sChain.verifyDaSigsPatchTimestamp ); + RevertableFSPatch::setTimestamp( chainParams().sChain.revertableFSPatchTimestamp ); + StorageDestructionPatch::setTimestamp( chainParams().sChain.storageDestructionPatchTimestamp ); + POWCheckPatch::setTimestamp( chainParams().sChain.powCheckPatchTimestamp ); + SkipInvalidTransactionsPatch::setTimestamp( + this->chainParams().sChain.skipInvalidTransactionsPatchTimestamp ); } + Client::~Client() { stopWorking(); } @@ -174,13 +185,7 @@ void Client::stopWorking() { else cerror << "Instance of SkaleHost was not properly created."; - if ( m_snapshotHashComputing != nullptr ) { - try { - if ( m_snapshotHashComputing->joinable() ) - m_snapshotHashComputing->join(); - } catch ( ... 
) { - } - } + m_snapshotAgent->terminate(); m_new_block_watch.uninstallAll(); m_new_pending_transaction_watch.uninstallAll(); @@ -306,18 +311,20 @@ void Client::init( WithExisting _forceAction, u256 _networkId ) { if ( m_dbPath.size() ) Defaults::setDBPath( m_dbPath ); - if ( chainParams().sChain.snapshotIntervalSec > 0 ) { - LOG( m_logger ) << "Snapshots enabled, snapshotIntervalSec is: " - << chainParams().sChain.snapshotIntervalSec; - this->initHashes(); - } - if ( ChainParams().sChain.nodeGroups.size() > 0 ) initIMABLSPublicKey(); + // init snapshots for not newly created chains + if ( number() ) { + m_snapshotAgent->init( number(), blockInfo( hashFromNumber( 1 ) ).timestamp() ); + m_snapshotAgentInited = true; + } + // HACK Needed to set env var for consensus AmsterdamFixPatch::isEnabled( *this ); + initCPUUSage(); + doWork( false ); } @@ -526,44 +533,16 @@ size_t Client::importTransactionsAsBlock( const Transactions& _transactions, u256 _gasPrice, uint64_t _timestamp ) { // HACK here was m_blockImportMutex - but now it is acquired in SkaleHost!!! // TODO decouple Client and SkaleHost - int64_t snapshotIntervalSec = chainParams().sChain.snapshotIntervalSec; - // init last block creation time with only robust time source - timestamp of 1st block! 
- if ( number() == 0 ) { - last_snapshot_creation_time = _timestamp; - LOG( m_logger ) << "Init last snapshot creation time: " - << this->last_snapshot_creation_time; - } else if ( snapshotIntervalSec > 0 && this->isTimeToDoSnapshot( _timestamp ) ) { - LOG( m_logger ) << "Last snapshot creation time: " << this->last_snapshot_creation_time; - - if ( m_snapshotHashComputing != nullptr && m_snapshotHashComputing->joinable() ) - m_snapshotHashComputing->join(); - - // TODO Make this number configurable - // thread can be absent - if hash was already there - // snapshot can be absent too - // but hash cannot be absent - auto latest_snapshots = this->m_snapshotManager->getLatestSnasphots(); - if ( latest_snapshots.second ) { - assert( m_snapshotManager->isSnapshotHashPresent( latest_snapshots.second ) ); - this->last_snapshoted_block_with_hash = latest_snapshots.second; - m_snapshotManager->leaveNLastSnapshots( 2 ); - } - - // also there might be snapshot on disk - // and it's time to make it "last with hash" - if ( last_snapshoted_block_with_hash == 0 ) { - auto latest_snapshots = this->m_snapshotManager->getLatestSnasphots(); - if ( latest_snapshots.second ) { - uint64_t time_of_second = - blockInfo( this->hashFromNumber( latest_snapshots.second ) ).timestamp(); - if ( time_of_second == ( ( uint64_t ) last_snapshot_creation_time ) ) - this->last_snapshoted_block_with_hash = latest_snapshots.second; - } // if second - } // if == 0 + // on schain creation, SnapshotAgent needs timestamp of block 1 + // so we use this HACK + // pass block number 0 as for bigger BN it is initialized in init() + if ( !m_snapshotAgentInited ) { + m_snapshotAgent->init( 0, _timestamp ); + m_snapshotAgentInited = true; } + m_snapshotAgent->finishHashComputingAndUpdateHashesIfNeeded( _timestamp ); - // // begin, detect partially executed block bool bIsPartial = false; dev::h256 shaLastTx = m_state.safeLastExecutedTransactionHash(); @@ -642,74 +621,7 @@ size_t Client::importTransactionsAsBlock( 
if ( chainParams().sChain.nodeGroups.size() > 0 ) updateIMABLSPublicKey(); - if ( snapshotIntervalSec > 0 ) { - unsigned block_number = this->number(); - - LOG( m_loggerDetail ) << "Block timestamp: " << _timestamp; - - if ( this->isTimeToDoSnapshot( _timestamp ) ) { - try { - boost::chrono::high_resolution_clock::time_point t1; - boost::chrono::high_resolution_clock::time_point t2; - LOG( m_logger ) << "DOING SNAPSHOT: " << block_number; - m_debugTracer.tracepoint( "doing_snapshot" ); - - t1 = boost::chrono::high_resolution_clock::now(); - m_snapshotManager->doSnapshot( block_number ); - t2 = boost::chrono::high_resolution_clock::now(); - this->snapshot_calculation_time_ms = - boost::chrono::duration_cast< boost::chrono::milliseconds >( t2 - t1 ).count(); - } catch ( SnapshotManager::SnapshotPresent& ex ) { - cerror << "WARNING " << dev::nested_exception_what( ex ); - } - - this->last_snapshot_creation_time = _timestamp; - - LOG( m_logger ) << "New snapshot creation time: " << this->last_snapshot_creation_time; - } - - // snapshots without hash can appear either from start, from downloading or from just - // creation - auto latest_snapshots = this->m_snapshotManager->getLatestSnasphots(); - - // start if thread is free and there is work - if ( ( m_snapshotHashComputing == nullptr || !m_snapshotHashComputing->joinable() ) && - latest_snapshots.second && - !m_snapshotManager->isSnapshotHashPresent( latest_snapshots.second ) ) { - m_snapshotHashComputing.reset( new std::thread( [this, latest_snapshots]() { - m_debugTracer.tracepoint( "computeSnapshotHash_start" ); - try { - boost::chrono::high_resolution_clock::time_point t1; - boost::chrono::high_resolution_clock::time_point t2; - - t1 = boost::chrono::high_resolution_clock::now(); - this->m_snapshotManager->computeSnapshotHash( latest_snapshots.second ); - t2 = boost::chrono::high_resolution_clock::now(); - this->snapshot_hash_calculation_time_ms = - boost::chrono::duration_cast< boost::chrono::milliseconds >( t2 - 
t1 ) - .count(); - LOG( m_logger ) - << "Computed hash for snapshot " << latest_snapshots.second << ": " - << m_snapshotManager->getSnapshotHash( latest_snapshots.second ); - m_debugTracer.tracepoint( "computeSnapshotHash_end" ); - - } catch ( const std::exception& ex ) { - cerror << cc::fatal( "CRITICAL" ) << " " - << cc::warn( dev::nested_exception_what( ex ) ) - << cc::error( " in computeSnapshotHash(). Exiting..." ); - cerror << "\n" << skutils::signal::generate_stack_trace() << "\n" << std::endl; - ExitHandler::exitHandler( SIGABRT, ExitHandler::ec_compute_snapshot_error ); - } catch ( ... ) { - cerror << cc::fatal( "CRITICAL" ) - << cc::error( - " unknown exception in computeSnapshotHash(). " - "Exiting..." ); - cerror << "\n" << skutils::signal::generate_stack_trace() << "\n" << std::endl; - ExitHandler::exitHandler( SIGABRT, ExitHandler::ec_compute_snapshot_error ); - } - } ) ); - } // if thread - } // if snapshots enabled + m_snapshotAgent->doSnapshotIfNeeded( number(), _timestamp ); // TEMPRORARY FIX! 
// TODO: REVIEW @@ -743,7 +655,9 @@ size_t Client::syncTransactions( ContractStorageLimitPatch::lastBlockTimestamp = blockChain().info().timestamp(); ContractStorageZeroValuePatch::lastBlockTimestamp = blockChain().info().timestamp(); RevertableFSPatch::lastBlockTimestamp = blockChain().info().timestamp(); - + StorageDestructionPatch::lastBlockTimestamp = blockChain().info().timestamp(); + POWCheckPatch::lastBlockTimestamp = blockChain().info().timestamp(); + SkipInvalidTransactionsPatch::lastBlockTimestamp = blockChain().info().timestamp(); DEV_WRITE_GUARDED( x_working ) { assert( !m_working.isSealed() ); @@ -857,12 +771,6 @@ void Client::resetState() { onTransactionQueueReady(); } -bool Client::isTimeToDoSnapshot( uint64_t _timestamp ) const { - int snapshotIntervalSec = chainParams().sChain.snapshotIntervalSec; - return _timestamp / uint64_t( snapshotIntervalSec ) > - this->last_snapshot_creation_time / uint64_t( snapshotIntervalSec ); -} - void Client::setSchainExitTime( uint64_t _timestamp ) const { m_instanceMonitor->initRotationParams( _timestamp ); } @@ -922,9 +830,9 @@ void Client::rejigSealing() { // TODO Deduplicate code! dev::h256 stateRootToSet; - if ( this->last_snapshoted_block_with_hash > 0 ) { - dev::h256 state_root_hash = - this->m_snapshotManager->getSnapshotHash( last_snapshoted_block_with_hash ); + if ( m_snapshotAgent->getLatestSnapshotBlockNumer() > 0 ) { + dev::h256 state_root_hash = this->m_snapshotAgent->getSnapshotHash( + m_snapshotAgent->getLatestSnapshotBlockNumer() ); stateRootToSet = state_root_hash; } // propagate current! @@ -978,9 +886,9 @@ void Client::sealUnconditionally( bool submitToBlockChain ) { // latest hash is really updated after NEXT snapshot already started hash computation! // TODO Deduplicate code! 
dev::h256 stateRootToSet; - if ( this->last_snapshoted_block_with_hash > 0 ) { - dev::h256 state_root_hash = - this->m_snapshotManager->getSnapshotHash( last_snapshoted_block_with_hash ); + if ( m_snapshotAgent->getLatestSnapshotBlockNumer() > 0 ) { + dev::h256 state_root_hash = this->m_snapshotAgent->getSnapshotHash( + m_snapshotAgent->getLatestSnapshotBlockNumer() ); stateRootToSet = state_root_hash; } // propagate current! @@ -1020,7 +928,8 @@ void Client::sealUnconditionally( bool submitToBlockChain ) { << ":BDS:" << BlockDetails::howMany() << ":TSS:" << TransactionSkeleton::howMany() << ":UTX:" << TransactionQueue::UnverifiedTransaction::howMany() << ":VTX:" << TransactionQueue::VerifiedTransaction::howMany() - << ":CMM:" << bc().getTotalCacheMemory(); + << ":CMM:" << bc().getTotalCacheMemory() + << ":KDS:" << db::LevelDB::getKeyDeletesStats(); if ( number() % 1000 == 0 ) { ssBlockStats << ":RAM:" << getRAMUsage(); ssBlockStats << ":CPU:" << getCPUUsage(); @@ -1376,59 +1285,6 @@ ExecutionResult Client::call( Address const& _from, u256 _value, Address _dest, return ret; } -void Client::initHashes() { - int snapshotIntervalSec = chainParams().sChain.snapshotIntervalSec; - assert( snapshotIntervalSec > 0 ); - ( void ) snapshotIntervalSec; - - auto latest_snapshots = this->m_snapshotManager->getLatestSnasphots(); - - // if two - if ( latest_snapshots.first ) { - assert( latest_snapshots.first != 1 ); // 1 can never be snapshotted - - this->last_snapshoted_block_with_hash = latest_snapshots.first; - - // ignore second as it was "in hash computation" - // check that both are imported!! 
- // h256 h2 = this->hashFromNumber( latest_snapshots.second ); - // assert( h2 != h256() ); - // last_snapshot_creation_time = blockInfo( h2 ).timestamp(); - - last_snapshot_creation_time = - this->m_snapshotManager->getBlockTimestamp( latest_snapshots.second, chainParams() ); - - // one snapshot - } else if ( latest_snapshots.second ) { - assert( latest_snapshots.second != 1 ); // 1 can never be snapshotted - assert( this->number() > 0 ); // we created snapshot somehow - - // whether it is local or downloaded - we shall ignore it's hash but use it's time - // see also how last_snapshoted_block_with_hash is updated in importTransactionsAsBlock - // h256 h2 = this->hashFromNumber( latest_snapshots.second ); - // uint64_t time_of_second = blockInfo( h2 ).timestamp(); - - this->last_snapshoted_block_with_hash = -1; - // last_snapshot_creation_time = time_of_second; - - last_snapshot_creation_time = - this->m_snapshotManager->getBlockTimestamp( latest_snapshots.second, chainParams() ); - - // no snapshots yet - } else { - this->last_snapshoted_block_with_hash = -1; - - if ( this->number() >= 1 ) - last_snapshot_creation_time = blockInfo( this->hashFromNumber( 1 ) ).timestamp(); - else - this->last_snapshot_creation_time = 0; - } - - LOG( m_logger ) << "Latest snapshots init: " << latest_snapshots.first << " " - << latest_snapshots.second << " -> " << this->last_snapshoted_block_with_hash; - LOG( m_logger ) << "Fake Last snapshot creation time: " << last_snapshot_creation_time; -} - void Client::initIMABLSPublicKey() { if ( number() == 0 ) { imaBLSPublicKeyGroupIndex = 0; diff --git a/libethereum/Client.h b/libethereum/Client.h index b4a8ed68b..2e3155612 100644 --- a/libethereum/Client.h +++ b/libethereum/Client.h @@ -43,7 +43,6 @@ #include #include #include -#include #include #include "Block.h" @@ -52,6 +51,7 @@ #include "CommonNet.h" #include "InstanceMonitor.h" #include "SkaleHost.h" +#include "SnapshotAgent.h" #include "StateImporter.h" #include "ThreadSafeQueue.h" 
@@ -59,6 +59,7 @@ #include class ConsensusHost; +class SnapshotManager; namespace dev { namespace eth { @@ -86,7 +87,8 @@ class Client : public ClientBase, protected Worker { std::shared_ptr< InstanceMonitor > _instanceMonitor, boost::filesystem::path const& _dbPath = boost::filesystem::path(), WithExisting _forceAction = WithExisting::Trust, - TransactionQueue::Limits const& _l = TransactionQueue::Limits{ 1024, 1024 } ); + TransactionQueue::Limits const& _l = TransactionQueue::Limits{ + 1024, 1024, 12322916, 24645833 } ); /// Destructor. virtual ~Client(); @@ -268,50 +270,39 @@ class Client : public ClientBase, protected Worker { uint64_t _timestamp = ( uint64_t ) utcTime() ); boost::filesystem::path createSnapshotFile( unsigned _blockNumber ) { - if ( _blockNumber > this->getLatestSnapshotBlockNumer() ) - throw std::invalid_argument( "Too new snapshot requested" ); - boost::filesystem::path path = m_snapshotManager->makeOrGetDiff( _blockNumber ); - // TODO Make constant 2 configurable - m_snapshotManager->leaveNLastDiffs( 2 ); - return path; + return m_snapshotAgent->createSnapshotFile( _blockNumber ); } // set exiting time for node rotation void setSchainExitTime( uint64_t _timestamp ) const; dev::h256 getSnapshotHash( unsigned _blockNumber ) const { - if ( _blockNumber > this->last_snapshoted_block_with_hash ) - return dev::h256(); - - try { - dev::h256 res = this->m_snapshotManager->getSnapshotHash( _blockNumber ); - return res; - } catch ( const SnapshotManager::SnapshotAbsent& ) { - return dev::h256(); - } - - // fall through other exceptions + return m_snapshotAgent->getSnapshotHash( _blockNumber ); } uint64_t getBlockTimestampFromSnapshot( unsigned _blockNumber ) const { - return this->m_snapshotManager->getBlockTimestamp( _blockNumber, chainParams() ); + return m_snapshotAgent->getBlockTimestampFromSnapshot( _blockNumber ); } - int64_t getLatestSnapshotBlockNumer() const { return this->last_snapshoted_block_with_hash; } + int64_t 
getLatestSnapshotBlockNumer() const { + return m_snapshotAgent->getLatestSnapshotBlockNumer(); + } - uint64_t getSnapshotCalculationTime() const { return this->snapshot_calculation_time_ms; } + uint64_t getSnapshotCalculationTime() const { + return m_snapshotAgent->getSnapshotCalculationTime(); + } uint64_t getSnapshotHashCalculationTime() const { - return this->snapshot_hash_calculation_time_ms; + return m_snapshotAgent->getSnapshotHashCalculationTime(); } std::array< std::string, 4 > getIMABLSPublicKey() const { return chainParams().sChain.nodeGroups[imaBLSPublicKeyGroupIndex].blsPublicKey; } - // void doStateDbCompaction() const { m_state.getOriginalDb()->doCompaction(); } + void doStateDbCompaction() const { m_state.getOriginalDb()->doCompaction(); } - // void doBlocksDbCompaction() const { m_bc.doLevelDbCompaction(); } + void doBlocksDbCompaction() const { m_bc.doLevelDbCompaction(); } std::pair< uint64_t, uint64_t > getBlocksDbUsage() const; @@ -529,30 +520,18 @@ class Client : public ClientBase, protected Worker { Logger m_logger{ createLogger( VerbosityInfo, "client" ) }; Logger m_loggerDetail{ createLogger( VerbosityTrace, "client" ) }; + SkaleDebugTracer m_debugTracer; + SkaleDebugInterface::handler m_debugHandler; /// skale std::shared_ptr< SkaleHost > m_skaleHost; - std::shared_ptr< SnapshotManager > m_snapshotManager; + std::shared_ptr< SnapshotAgent > m_snapshotAgent; + bool m_snapshotAgentInited = false; + const static dev::h256 empty_str_hash; std::shared_ptr< InstanceMonitor > m_instanceMonitor; fs::path m_dbPath; - SkaleDebugTracer m_debugTracer; - SkaleDebugInterface::handler m_debugHandler; - private: - inline bool isTimeToDoSnapshot( uint64_t _timestamp ) const; - void initHashes(); - - std::unique_ptr< std::thread > m_snapshotHashComputing; - // time of last physical snapshot - int64_t last_snapshot_creation_time = 0; - // usually this is snapshot before last! 
- int64_t last_snapshoted_block_with_hash = -1; - const static dev::h256 empty_str_hash; - - uint64_t snapshot_calculation_time_ms; - uint64_t snapshot_hash_calculation_time_ms; - void initIMABLSPublicKey(); void updateIMABLSPublicKey(); diff --git a/libethereum/ClientBase.cpp b/libethereum/ClientBase.cpp index a20c2f9b3..6d646f45a 100644 --- a/libethereum/ClientBase.cpp +++ b/libethereum/ClientBase.cpp @@ -132,7 +132,7 @@ std::pair< u256, ExecutionResult > ClientBase::estimateGas( Address const& _from estimateGasStep( upperBound, bk, _from, _dest, _value, gasPrice, _data ); if ( estimatedStep.first ) { auto executionResult = estimatedStep.second; - auto gasUsed = executionResult.gasUsed.convert_to< int64_t >(); + auto gasUsed = std::max( executionResult.gasUsed.convert_to< int64_t >(), lowerBound ); estimatedStep = estimateGasStep( gasUsed, bk, _from, _dest, _value, gasPrice, _data ); if ( estimatedStep.first ) { diff --git a/libethereum/ClientTest.h b/libethereum/ClientTest.h index d881bd91c..f80023031 100644 --- a/libethereum/ClientTest.h +++ b/libethereum/ClientTest.h @@ -42,7 +42,8 @@ class ClientTest : public Client { std::shared_ptr< InstanceMonitor > _instanceMonitor, boost::filesystem::path const& _dbPath = boost::filesystem::path(), WithExisting _forceAction = WithExisting::Trust, - TransactionQueue::Limits const& _l = TransactionQueue::Limits{ 1024, 1024 } ); + TransactionQueue::Limits const& _l = TransactionQueue::Limits{ + 1024, 1024, 12322916, 24645833 } ); ~ClientTest(); bool mineBlocks( unsigned _count ) noexcept; diff --git a/libethereum/CommonNet.cpp b/libethereum/CommonNet.cpp index 510b0b071..da27744dd 100644 --- a/libethereum/CommonNet.cpp +++ b/libethereum/CommonNet.cpp @@ -26,7 +26,6 @@ using namespace std; using namespace dev; using namespace dev::eth; -#pragma GCC diagnostic ignored "-Wunused-variable" namespace { -char dummy; +[[maybe_unused]] char dummy; } diff --git a/libethereum/Executive.cpp b/libethereum/Executive.cpp index 
2ab8015e5..d3e93b6b8 100644 --- a/libethereum/Executive.cpp +++ b/libethereum/Executive.cpp @@ -55,7 +55,8 @@ std::string dumpStackAndMemory( LegacyVM const& _vm ) { std::string dumpStorage( ExtVM const& _ext ) { ostringstream o; o << " STORAGE\n"; - for ( auto const& i : _ext.state().storage( _ext.myAddress ) ) + // this function is called when a lock over the state already is acquired + for ( auto const& i : _ext.state().storage_WITHOUT_LOCK( _ext.myAddress ) ) o << showbase << hex << i.second.first << ": " << i.second.second << "\n"; return o.str(); } diff --git a/libethereum/Precompiled.cpp b/libethereum/Precompiled.cpp index 61d9a517a..6a0a1c63f 100644 --- a/libethereum/Precompiled.cpp +++ b/libethereum/Precompiled.cpp @@ -962,7 +962,7 @@ ETH_REGISTER_PRECOMPILED( getBlockRandom )( bytesConstRef ) { return { false, response }; // 1st false - means bad error occur } -ETH_REGISTER_PRECOMPILED( addBalance )( bytesConstRef _in ) { +ETH_REGISTER_PRECOMPILED( addBalance )( [[maybe_unused]] bytesConstRef _in ) { /* try { auto rawAddress = _in.cropped( 0, 20 ).toBytes(); diff --git a/libethereum/SchainPatch.h b/libethereum/SchainPatch.h index 9994336dd..5bafcdcd0 100644 --- a/libethereum/SchainPatch.h +++ b/libethereum/SchainPatch.h @@ -1,6 +1,17 @@ #ifndef SCHAINPATCH_H #define SCHAINPATCH_H -class SchainPatch {}; +#include + +class SchainPatch { +public: + static void printInfo( const std::string& _patchName, time_t _timeStamp ) { + if ( _timeStamp == 0 ) { + cnote << "Patch " << _patchName << " is disabled"; + } else { + cnote << "Patch " << _patchName << " is set at timestamp " << _timeStamp; + } + } +}; #endif // SCHAINPATCH_H diff --git a/libethereum/SkaleHost.cpp b/libethereum/SkaleHost.cpp index 32c06ebc6..1f15035fe 100644 --- a/libethereum/SkaleHost.cpp +++ b/libethereum/SkaleHost.cpp @@ -62,8 +62,6 @@ using namespace std; using namespace dev; using namespace dev::eth; -const int SkaleHost::EXIT_FORCEFULLTY_SECONDS = 60 * 4; - #ifndef CONSENSUS #define 
CONSENSUS 1 #endif @@ -247,17 +245,18 @@ void ConsensusExtImpl::createBlock( } void ConsensusExtImpl::terminateApplication() { - dev::ExitHandler::exitHandler( SIGINT, dev::ExitHandler::ec_consensus_terminate_request ); + dev::ExitHandler::exitHandler( -1, dev::ExitHandler::ec_consensus_terminate_request ); } SkaleHost::SkaleHost( dev::eth::Client& _client, const ConsensusFactory* _consFactory, std::shared_ptr< InstanceMonitor > _instanceMonitor, const std::string& _gethURL, - bool _broadcastEnabled ) + [[maybe_unused]] bool _broadcastEnabled ) : m_client( _client ), m_tq( _client.m_tq ), m_instanceMonitor( _instanceMonitor ), total_sent( 0 ), - total_arrived( 0 ) { + total_arrived( 0 ), + latestBlockTime( boost::chrono::high_resolution_clock::time_point() ) { try { m_debugHandler = [this]( const std::string& arg ) -> std::string { return DebugTracer_handler( arg, this->m_debugTracer ); @@ -385,12 +384,6 @@ ConsensusExtFace::transactions_vector SkaleHost::pendingTransactions( if ( m_exitNeeded ) return out_vector; - // HACK this should be field (or better do it another way) - static bool first_run = true; - if ( first_run ) { - m_consensusWorkingMutex.lock(); - first_run = false; - } if ( m_exitNeeded ) return out_vector; @@ -399,12 +392,8 @@ ConsensusExtFace::transactions_vector SkaleHost::pendingTransactions( if ( m_exitNeeded ) return out_vector; - unlock_guard< std::timed_mutex > unlocker( m_consensusWorkingMutex ); - - if ( m_exitNeeded ) { - unlocker.will_exit(); + if ( m_exitNeeded ) return out_vector; - } if ( need_restore_emptyBlockInterval ) { this->m_consensus->setEmptyBlockIntervalMs( this->emptyBlockIntervalMsForRestore.value() ); @@ -474,23 +463,6 @@ ConsensusExtFace::transactions_vector SkaleHost::pendingTransactions( std::lock_guard< std::recursive_mutex > lock( m_pending_createMutex, std::adopt_lock ); - // HACK For IS-348 - auto saved_txns = txns; - std::stable_sort( txns.begin(), txns.end(), TransactionQueue::PriorityCompare{ m_tq } ); - bool 
found_difference = false; - for ( size_t i = 0; i < txns.size(); ++i ) { - if ( txns[i].sha3() != saved_txns[i].sha3() ) - found_difference = true; - } - if ( found_difference ) { - clog( VerbosityError, "skale-host" ) << "Transaction order disorder detected!!"; - clog( VerbosityTrace, "skale-host" ) << " "; - for ( size_t i = 0; i < txns.size(); ++i ) { - clog( VerbosityTrace, "skale-host" ) - << i << " " << saved_txns[i].sha3() << " " << txns[i].sha3(); - } - } - // drop by block gas limit u256 blockGasLimit = this->m_client.chainParams().gasLimit; u256 gasAcc = 0; @@ -512,13 +484,7 @@ ConsensusExtFace::transactions_vector SkaleHost::pendingTransactions( std::string strPerformanceQueueName_drop_bad_transactions = "bc/fetch_transactions"; std::string strPerformanceActionName_drop_bad_transactions = skutils::tools::format( "fetch task %zu", nDropBadTransactionsTaskNumber ); - skutils::task::performance::json jsn = skutils::task::performance::json::object(); - skutils::task::performance::json jarrDroppedTransactions = - skutils::task::performance::json::array(); - for ( auto sha : to_delete ) { - jarrDroppedTransactions.push_back( toJS( sha ) ); - } - jsn["droppedTransactions"] = jarrDroppedTransactions; + skutils::task::performance::action a_drop_bad_transactions( strPerformanceQueueName_drop_bad_transactions, strPerformanceActionName_drop_bad_transactions, jsn ); @@ -532,10 +498,6 @@ ConsensusExtFace::transactions_vector SkaleHost::pendingTransactions( } } - if ( this->m_exitNeeded ) - unlocker.will_exit(); - - if ( this->emptyBlockIntervalMsForRestore.has_value() ) need_restore_emptyBlockInterval = true; @@ -584,9 +546,6 @@ ConsensusExtFace::transactions_vector SkaleHost::pendingTransactions( m_debugTracer.tracepoint( "send_to_consensus" ); - if ( this->m_exitNeeded ) - unlocker.will_exit(); - return out_vector; } @@ -594,6 +553,8 @@ void SkaleHost::createBlock( const ConsensusExtFace::transactions_vector& _appro uint64_t _timeStamp, uint64_t _blockID, u256 
_gasPrice, u256 _stateRoot, uint64_t _winningNodeIndex ) try { // + boost::chrono::high_resolution_clock::time_point skaledTimeStart; + skaledTimeStart = boost::chrono::high_resolution_clock::now(); static std::atomic_size_t g_nCreateBlockTaskNumber = 0; size_t nCreateBlockTaskNumber = g_nCreateBlockTaskNumber++; std::string strPerformanceQueueName_create_block = "bc/create_block"; @@ -604,19 +565,17 @@ void SkaleHost::createBlock( const ConsensusExtFace::transactions_vector& _appro jsn_create_block["timeStamp"] = toJS( _timeStamp ); jsn_create_block["gasPrice"] = toJS( _gasPrice ); jsn_create_block["stateRoot"] = toJS( _stateRoot ); - skutils::task::performance::json jarrApprovedTransactions = - skutils::task::performance::json::array(); - for ( auto it = _approvedTransactions.begin(); it != _approvedTransactions.end(); ++it ) { - const bytes& data = *it; - h256 sha = sha3( data ); - jarrApprovedTransactions.push_back( toJS( sha ) ); - } - jsn_create_block["approvedTransactions"] = jarrApprovedTransactions; skutils::task::performance::action a_create_block( strPerformanceQueueName_create_block, strPerformanceActionName_create_block, jsn_create_block ); std::lock_guard< std::recursive_mutex > lock( m_pending_createMutex ); + if ( m_ignoreNewBlocks ) { + clog( VerbosityWarning, "skale-host" ) << "WARNING: skaled got new block #" << _blockID + << " after timestamp-related exit initiated!"; + return; + } + LOG( m_debugLogger ) << cc::debug( "createBlock " ) << cc::notice( "ID" ) << cc::debug( " = " ) << cc::warn( "#" ) << cc::num10( _blockID ) << std::endl; m_debugTracer.tracepoint( "create_block" ); @@ -646,8 +605,9 @@ void SkaleHost::createBlock( const ConsensusExtFace::transactions_vector& _appro << cc::error( " cleanup is recommended, exiting with code " ) << cc::num10( int( ExitHandler::ec_state_root_mismatch ) ) << "..."; if ( AmsterdamFixPatch::stateRootCheckingEnabled( m_client ) ) { - ExitHandler::exitHandler( SIGABRT, ExitHandler::ec_state_root_mismatch ); - 
_exit( int( ExitHandler::ec_state_root_mismatch ) ); + m_ignoreNewBlocks = true; + m_consensus->exitGracefully(); + ExitHandler::exitHandler( -1, ExitHandler::ec_state_root_mismatch ); } } @@ -733,11 +693,8 @@ void SkaleHost::createBlock( const ConsensusExtFace::transactions_vector& _appro std::string strPerformanceQueueName_import_block = "bc/import_block"; std::string strPerformanceActionName_import_block = skutils::tools::format( "b-import %zu", nImportBlockTaskNumber ); - skutils::task::performance::json jsn_import_block = - skutils::task::performance::json::object(); - jsn_import_block["txns"] = jarrProcessedTxns; - skutils::task::performance::action a_import_block( strPerformanceQueueName_import_block, - strPerformanceActionName_import_block, jsn_import_block ); + skutils::task::performance::action a_import_block( + strPerformanceQueueName_import_block, strPerformanceActionName_import_block ); // m_debugTracer.tracepoint( "import_block" ); @@ -747,6 +704,26 @@ void SkaleHost::createBlock( const ConsensusExtFace::transactions_vector& _appro if ( n_succeeded != out_txns.size() ) penalizePeer(); + boost::chrono::high_resolution_clock::time_point skaledTimeFinish = + boost::chrono::high_resolution_clock::now(); + if ( latestBlockTime != boost::chrono::high_resolution_clock::time_point() ) { + clog( VerbosityInfo, "skale-host" ) + << "SWT:" + << boost::chrono::duration_cast< boost::chrono::milliseconds >( + skaledTimeFinish - skaledTimeStart ) + .count() + << ':' << "BFT:" + << boost::chrono::duration_cast< boost::chrono::milliseconds >( + skaledTimeFinish - latestBlockTime ) + .count(); + } else { + clog( VerbosityInfo, "skale-host" ) + << "SWT:" + << boost::chrono::duration_cast< boost::chrono::milliseconds >( + skaledTimeFinish - skaledTimeStart ) + .count(); + } + latestBlockTime = skaledTimeFinish; LOG( m_debugLogger ) << cc::success( "Successfully imported " ) << n_succeeded << cc::success( " of " ) << out_txns.size() << cc::success( " transactions" ) << 
std::endl; @@ -756,11 +733,16 @@ void SkaleHost::createBlock( const ConsensusExtFace::transactions_vector& _appro logState(); + clog( VerbosityInfo, "skale-host" ) + << "TQBYTES:CTQ:" << m_tq.status().currentBytes << ":FTQ:" << m_tq.status().futureBytes + << ":TQSIZE:CTQ:" << m_tq.status().current << ":FTQ:" << m_tq.status().future; + if ( m_instanceMonitor != nullptr ) { if ( m_instanceMonitor->isTimeToRotate( _timeStamp ) ) { m_instanceMonitor->prepareRotation(); + m_ignoreNewBlocks = true; m_consensus->exitGracefully(); - ExitHandler::exitHandler( SIGTERM, ExitHandler::ec_rotation_complete ); + ExitHandler::exitHandler( -1, ExitHandler::ec_rotation_complete ); clog( VerbosityInfo, "skale-host" ) << "Rotation is completed. Instance is exiting"; } } @@ -779,7 +761,6 @@ void SkaleHost::startWorking() { // TODO Should we do it at end of this func? (problem: broadcaster receives transaction and // recursively calls this func - so working is still false!) working = true; - m_exitedForcefully = false; if ( !this->m_client.chainParams().nodeInfo.syncNode ) { try { @@ -793,20 +774,23 @@ void SkaleHost::startWorking() { m_broadcastThread = std::thread( bcast_func ); } - try { - m_consensus->startAll(); - } catch ( const std::exception& ) { - // cleanup - m_exitNeeded = true; - if ( !this->m_client.chainParams().nodeInfo.syncNode ) { - m_broadcastThread.join(); + auto csus_func = [&]() { + try { + m_consensus->startAll(); + } catch ( const std::exception& ) { + // cleanup + m_exitNeeded = true; + if ( !this->m_client.chainParams().nodeInfo.syncNode ) { + m_broadcastThread.join(); + } + ExitHandler::exitHandler( -1, ExitHandler::ec_termninated_by_signal ); + return; } - throw; - } - - std::promise< void > bootstrap_promise; - auto csus_func = [&]() { + // comment out as this hack is in consensus now + // HACK Prevent consensus from hanging up for emptyBlockIntervalMs at bootstrapAll()! 
+ // uint64_t tmp_interval = m_consensus->getEmptyBlockIntervalMs(); + // m_consensus->setEmptyBlockIntervalMs( 50 ); try { static const char g_strThreadName[] = "bootStrapAll"; dev::setThreadName( g_strThreadName ); @@ -825,15 +809,11 @@ void SkaleHost::startWorking() { << skutils::signal::generate_stack_trace() << "\n"; } - bootstrap_promise.set_value(); + // comment out as this hack is in consensus now + // m_consensus->setEmptyBlockIntervalMs( tmp_interval ); }; // func - // HACK Prevent consensus from hanging up for emptyBlockIntervalMs at bootstrapAll()! - uint64_t tmp_interval = m_consensus->getEmptyBlockIntervalMs(); - m_consensus->setEmptyBlockIntervalMs( 50 ); m_consensusThread = std::thread( csus_func ); - bootstrap_promise.get_future().wait(); - m_consensus->setEmptyBlockIntervalMs( tmp_interval ); } // TODO finish all gracefully to allow all undone jobs be finished @@ -841,22 +821,6 @@ void SkaleHost::stopWorking() { if ( !working ) return; - bool locked = - m_consensusWorkingMutex.try_lock_for( std::chrono::seconds( EXIT_FORCEFULLTY_SECONDS ) ); - auto lock = locked ? std::make_unique< std::lock_guard< std::timed_mutex > >( - m_consensusWorkingMutex, std::adopt_lock ) : - std::unique_ptr< std::lock_guard< std::timed_mutex > >(); - ( void ) lock; // for Codacy - - // if we could not lock from 1st attempt - then exit forcefully! - if ( !locked ) { - m_exitedForcefully = true; - clog( VerbosityWarning, "skale-host" ) - << cc::fatal( "ATTENTION:" ) << " " - << cc::error( "Forcefully shutting down consensus!" 
); - } - - m_exitNeeded = true; pauseConsensus( false ); @@ -866,8 +830,12 @@ void SkaleHost::stopWorking() { // requested exit int signal = ExitHandler::getSignal(); int exitCode = ExitHandler::requestedExitCode(); - clog( VerbosityInfo, "skale-host" ) - << cc::info( "Exit requested with signal " ) << signal << " and exit code " << exitCode; + if ( signal > 0 ) + clog( VerbosityInfo, "skale-host" ) << cc::info( "Exit requested with signal " ) + << signal << " and exit code " << exitCode; + else + clog( VerbosityInfo, "skale-host" ) + << cc::info( "Exit requested internally with exit code " ) << exitCode; } else { clog( VerbosityInfo, "skale-host" ) << cc::info( "Exiting without request" ); } @@ -933,7 +901,6 @@ void SkaleHost::broadcastFunc() { skutils::tools::format( "broadcast %zu", nBroadcastTaskNumber++ ); skutils::task::performance::json jsn = skutils::task::performance::json::object(); - jsn["rlp"] = rlp; jsn["hash"] = h; skutils::task::performance::action a( strPerformanceQueueName, strPerformanceActionName, jsn ); diff --git a/libethereum/SkaleHost.h b/libethereum/SkaleHost.h index 05194ffe2..bb79d5d60 100644 --- a/libethereum/SkaleHost.h +++ b/libethereum/SkaleHost.h @@ -40,6 +40,7 @@ #include #include +#include #include #include @@ -87,7 +88,6 @@ class DefaultConsensusFactory : public ConsensusFactory { class SkaleHost { friend class ConsensusExtImpl; - static const int EXIT_FORCEFULLTY_SECONDS; struct my_hash { size_t operator()( const dev::eth::Transaction& tx ) const { return hash( tx.sha3() ); } @@ -114,7 +114,6 @@ class SkaleHost { void startWorking(); void stopWorking(); bool isWorking() const { return this->working; } - bool exitedForcefully() const { return m_exitedForcefully; } void noteNewTransactions(); void noteNewBlocks(); @@ -150,7 +149,6 @@ class SkaleHost { private: std::atomic_bool working = false; - std::atomic_bool m_exitedForcefully = false; std::unique_ptr< Broadcaster > m_broadcaster; @@ -170,8 +168,6 @@ class SkaleHost { 
std::recursive_mutex m_pending_createMutex; // for race conditions between // pendingTransactions() and createBock() - std::timed_mutex m_consensusWorkingMutex; // unlocks when it's OK to exit - std::atomic_int m_bcast_counter = 0; void penalizePeer(){}; // fake function for now @@ -191,7 +187,10 @@ class SkaleHost { // creating block dev::eth::Client& m_client; dev::eth::TransactionQueue& m_tq; // transactions ready to go to consensus + std::shared_ptr< InstanceMonitor > m_instanceMonitor; + std::atomic_bool m_ignoreNewBlocks = false; // used when we need to exit at specific block + bool m_broadcastEnabled; dev::Logger m_debugLogger{ dev::createLogger( dev::VerbosityDebug, "skale-host" ) }; @@ -213,4 +212,6 @@ class SkaleHost { std::set< dev::h256 > arrived; #endif std::atomic_int total_sent, total_arrived; + + boost::chrono::high_resolution_clock::time_point latestBlockTime; }; diff --git a/libethereum/SnapshotAgent.cpp b/libethereum/SnapshotAgent.cpp new file mode 100644 index 000000000..68e055147 --- /dev/null +++ b/libethereum/SnapshotAgent.cpp @@ -0,0 +1,231 @@ +#include "SnapshotAgent.h" + +#include + +#include + +#include + +using namespace dev; + +SnapshotAgent::SnapshotAgent( int64_t _snapshotIntervalSec, + std::shared_ptr< SnapshotManager > _snapshotManager, SkaleDebugTracer& _debugTracer ) + : m_snapshotIntervalSec( _snapshotIntervalSec ), + m_snapshotManager( _snapshotManager ), + m_debugTracer( _debugTracer ) { + if ( m_snapshotIntervalSec > 0 ) { + LOG( m_logger ) << "Snapshots enabled, snapshotIntervalSec is: " << m_snapshotIntervalSec; + } +} + +void SnapshotAgent::init( unsigned _currentBlockNumber, int64_t _timestampOfBlock1 ) { + if ( m_snapshotIntervalSec <= 0 ) + return; + + if ( _currentBlockNumber == 0 ) + doSnapshotAndComputeHash( 0 ); + + auto latest_snapshots = this->m_snapshotManager->getLatestSnapshots(); + + // if two + if ( latest_snapshots.first ) { + assert( latest_snapshots.first != 1 ); // 1 can never be snapshotted + + 
this->last_snapshoted_block_with_hash = latest_snapshots.first; + + // ignore second as it was "in hash computation" + // check that both are imported!! + // h256 h2 = this->hashFromNumber( latest_snapshots.second ); + // assert( h2 != h256() ); + // last_snapshot_creation_time = blockInfo( h2 ).timestamp(); + + last_snapshot_creation_time = + this->m_snapshotManager->getBlockTimestamp( latest_snapshots.second ); + + if ( !m_snapshotManager->isSnapshotHashPresent( latest_snapshots.second ) ) + startHashComputingThread(); + + // one snapshot + } else if ( latest_snapshots.second ) { + assert( latest_snapshots.second != 1 ); // 1 can never be snapshotted + assert( _timestampOfBlock1 > 0 ); // we created snapshot somehow + + // whether it is local or downloaded - we shall ignore it's hash but use it's time + // see also how last_snapshoted_block_with_hash is updated in importTransactionsAsBlock + // h256 h2 = this->hashFromNumber( latest_snapshots.second ); + // uint64_t time_of_second = blockInfo( h2 ).timestamp(); + + this->last_snapshoted_block_with_hash = -1; + // last_snapshot_creation_time = time_of_second; + + last_snapshot_creation_time = + this->m_snapshotManager->getBlockTimestamp( latest_snapshots.second ); + + if ( !m_snapshotManager->isSnapshotHashPresent( latest_snapshots.second ) ) + startHashComputingThread(); + + // no snapshots yet + } else { + this->last_snapshoted_block_with_hash = -1; + // init last block creation time with only robust time source - timestamp of 1st block! 
+ last_snapshot_creation_time = _timestampOfBlock1; + } + + LOG( m_logger ) << "Latest snapshots init: " << latest_snapshots.first << " " + << latest_snapshots.second << " -> " << this->last_snapshoted_block_with_hash; + + LOG( m_logger ) << "Init last snapshot creation time: " << this->last_snapshot_creation_time; +} + +void SnapshotAgent::finishHashComputingAndUpdateHashesIfNeeded( int64_t _timestamp ) { + if ( m_snapshotIntervalSec > 0 && this->isTimeToDoSnapshot( _timestamp ) ) { + LOG( m_logger ) << "Last snapshot creation time: " << this->last_snapshot_creation_time; + + if ( m_snapshotHashComputing != nullptr && m_snapshotHashComputing->joinable() ) + m_snapshotHashComputing->join(); + + // TODO Make this number configurable + // thread can be absent - if hash was already there + // snapshot can be absent too + // but hash cannot be absent + auto latest_snapshots = this->m_snapshotManager->getLatestSnapshots(); + if ( latest_snapshots.second ) { + assert( m_snapshotManager->isSnapshotHashPresent( latest_snapshots.second ) ); + this->last_snapshoted_block_with_hash = latest_snapshots.second; + m_snapshotManager->leaveNLastSnapshots( 2 ); + } + } +} + +void SnapshotAgent::doSnapshotIfNeeded( unsigned _currentBlockNumber, int64_t _timestamp ) { + if ( m_snapshotIntervalSec <= 0 ) + return; + + LOG( m_loggerDetail ) << "Block timestamp: " << _timestamp; + + if ( this->isTimeToDoSnapshot( _timestamp ) ) { + try { + boost::chrono::high_resolution_clock::time_point t1; + boost::chrono::high_resolution_clock::time_point t2; + LOG( m_logger ) << "DOING SNAPSHOT: " << _currentBlockNumber; + m_debugTracer.tracepoint( "doing_snapshot" ); + + t1 = boost::chrono::high_resolution_clock::now(); + m_snapshotManager->doSnapshot( _currentBlockNumber ); + t2 = boost::chrono::high_resolution_clock::now(); + this->snapshot_calculation_time_ms = + boost::chrono::duration_cast< boost::chrono::milliseconds >( t2 - t1 ).count(); + } catch ( SnapshotManager::SnapshotPresent& ex ) { + 
cerror << "WARNING " << dev::nested_exception_what( ex ); + } + + this->last_snapshot_creation_time = _timestamp; + + LOG( m_logger ) << "New snapshot creation time: " << this->last_snapshot_creation_time; + } + + // snapshots without hash can appear either from start, from downloading or from just + // creation + auto latest_snapshots = this->m_snapshotManager->getLatestSnapshots(); + + // start if thread is free and there is work + if ( ( m_snapshotHashComputing == nullptr || !m_snapshotHashComputing->joinable() ) && + latest_snapshots.second && + !m_snapshotManager->isSnapshotHashPresent( latest_snapshots.second ) ) { + startHashComputingThread(); + + } // if thread +} + +boost::filesystem::path SnapshotAgent::createSnapshotFile( unsigned _blockNumber ) { + if ( _blockNumber > this->getLatestSnapshotBlockNumer() && _blockNumber != 0 ) + throw std::invalid_argument( "Too new snapshot requested" ); + boost::filesystem::path path = m_snapshotManager->makeOrGetDiff( _blockNumber ); + // TODO Make constant 2 configurable + m_snapshotManager->leaveNLastDiffs( 2 ); + return path; +} + +void SnapshotAgent::terminate() { + if ( m_snapshotHashComputing != nullptr ) { + try { + if ( m_snapshotHashComputing->joinable() ) + m_snapshotHashComputing->detach(); + } catch ( ... 
) { + } + } +} + +dev::h256 SnapshotAgent::getSnapshotHash( unsigned _blockNumber ) const { + if ( _blockNumber > this->last_snapshoted_block_with_hash && _blockNumber != 0 ) + return dev::h256(); + + try { + dev::h256 res = this->m_snapshotManager->getSnapshotHash( _blockNumber ); + return res; + } catch ( const SnapshotManager::SnapshotAbsent& ) { + return dev::h256(); + } + + // fall through other exceptions +} + +uint64_t SnapshotAgent::getBlockTimestampFromSnapshot( unsigned _blockNumber ) const { + return this->m_snapshotManager->getBlockTimestamp( _blockNumber ); +} + +bool SnapshotAgent::isTimeToDoSnapshot( uint64_t _timestamp ) const { + return _timestamp / uint64_t( m_snapshotIntervalSec ) > + this->last_snapshot_creation_time / uint64_t( m_snapshotIntervalSec ); +} + +void SnapshotAgent::startHashComputingThread() { + auto latest_snapshots = this->m_snapshotManager->getLatestSnapshots(); + + m_snapshotHashComputing.reset( new std::thread( [this, latest_snapshots]() { + m_debugTracer.tracepoint( "computeSnapshotHash_start" ); + try { + boost::chrono::high_resolution_clock::time_point t1; + boost::chrono::high_resolution_clock::time_point t2; + + t1 = boost::chrono::high_resolution_clock::now(); + this->m_snapshotManager->computeSnapshotHash( latest_snapshots.second ); + t2 = boost::chrono::high_resolution_clock::now(); + this->snapshot_hash_calculation_time_ms = + boost::chrono::duration_cast< boost::chrono::milliseconds >( t2 - t1 ).count(); + LOG( m_logger ) << "Computed hash for snapshot " << latest_snapshots.second << ": " + << m_snapshotManager->getSnapshotHash( latest_snapshots.second ); + m_debugTracer.tracepoint( "computeSnapshotHash_end" ); + + } catch ( const std::exception& ex ) { + cerror << cc::fatal( "CRITICAL" ) << " " << cc::warn( dev::nested_exception_what( ex ) ) + << cc::error( " in computeSnapshotHash(). Exiting..." 
); + cerror << "\n" << skutils::signal::generate_stack_trace() << "\n" << std::endl; + ExitHandler::exitHandler( -1, ExitHandler::ec_compute_snapshot_error ); + } catch ( ... ) { + cerror << cc::fatal( "CRITICAL" ) + << cc::error( + " unknown exception in computeSnapshotHash(). " + "Exiting..." ); + cerror << "\n" << skutils::signal::generate_stack_trace() << "\n" << std::endl; + ExitHandler::exitHandler( -1, ExitHandler::ec_compute_snapshot_error ); + } + } ) ); +} + +void SnapshotAgent::doSnapshotAndComputeHash( unsigned _blockNumber ) { + LOG( m_logger ) << "DOING SNAPSHOT: " << _blockNumber; + m_debugTracer.tracepoint( "doing_snapshot" ); + + try { + m_snapshotManager->doSnapshot( _blockNumber ); + } catch ( SnapshotManager::SnapshotPresent& ex ) { + LOG( m_logger ) << "0 block snapshot is already present. Skipping."; + return; + } + + m_snapshotManager->computeSnapshotHash( _blockNumber ); + LOG( m_logger ) << "Computed hash for snapshot " << _blockNumber << ": " + << m_snapshotManager->getSnapshotHash( _blockNumber ); + m_debugTracer.tracepoint( "computeSnapshotHash_end" ); +} diff --git a/libethereum/SnapshotAgent.h b/libethereum/SnapshotAgent.h new file mode 100644 index 000000000..140e0c4cd --- /dev/null +++ b/libethereum/SnapshotAgent.h @@ -0,0 +1,65 @@ +#ifndef SNAPSHOTAGENT_H +#define SNAPSHOTAGENT_H + +#include +#include + +#include + +#include +#include + +class SnapshotManager; + +// Knows all about snapshots and maintains all dynamic behavior related to them: +// - keeping time of snapshot creation +// - hash computation +// - serialization +class SnapshotAgent { +public: + SnapshotAgent( int64_t _snapshotIntervalSec, + std::shared_ptr< SnapshotManager > _snapshotManager, SkaleDebugTracer& _debugTracer ); + + // timestamp of 1st block is the only robust time source + void init( unsigned _currentBlockNumber, int64_t _timestampOfBlock1 ); + + void finishHashComputingAndUpdateHashesIfNeeded( int64_t _timestamp ); + void doSnapshotIfNeeded( unsigned 
_currentBlockNumber, int64_t _timestamp ); + + boost::filesystem::path createSnapshotFile( unsigned _blockNumber ); + + void terminate(); + + dev::h256 getSnapshotHash( unsigned _blockNumber ) const; + uint64_t getBlockTimestampFromSnapshot( unsigned _blockNumber ) const; + int64_t getLatestSnapshotBlockNumer() const { return this->last_snapshoted_block_with_hash; } + uint64_t getSnapshotCalculationTime() const { return this->snapshot_calculation_time_ms; } + uint64_t getSnapshotHashCalculationTime() const { + return this->snapshot_hash_calculation_time_ms; + } + +private: + // time of last physical snapshot + int64_t last_snapshot_creation_time = 0; + // usually this is snapshot before last! + int64_t last_snapshoted_block_with_hash = -1; + + int64_t m_snapshotIntervalSec; + std::shared_ptr< SnapshotManager > m_snapshotManager; + + inline bool isTimeToDoSnapshot( uint64_t _timestamp ) const; + void doSnapshotAndComputeHash( unsigned _blockNumber ); + void startHashComputingThread(); + + std::unique_ptr< std::thread > m_snapshotHashComputing; + + uint64_t snapshot_calculation_time_ms; + uint64_t snapshot_hash_calculation_time_ms; + + dev::Logger m_logger{ createLogger( dev::VerbosityInfo, "SnapshotAgent" ) }; + dev::Logger m_loggerDetail{ createLogger( dev::VerbosityTrace, "SnapshotAgent" ) }; + + SkaleDebugTracer& m_debugTracer; +}; + +#endif // SNAPSHOTAGENT_H diff --git a/libethereum/Transaction.cpp b/libethereum/Transaction.cpp index 65e75171d..5abc81622 100644 --- a/libethereum/Transaction.cpp +++ b/libethereum/Transaction.cpp @@ -29,6 +29,8 @@ #include #include #include +#include + using namespace std; using namespace dev; using namespace dev::eth; @@ -172,7 +174,7 @@ u256 Transaction::getExternalGas() const { } u256 Transaction::gas() const { - if ( m_externalGasIsChecked && hasExternalGas() ) { + if ( m_externalGasIsChecked && hasExternalGas() && POWCheckPatch::isEnabled() ) { return *m_externalGas; } else { return TransactionBase::gas(); diff --git 
a/libethereum/TransactionQueue.cpp b/libethereum/TransactionQueue.cpp index 52da7fec4..bfeee388f 100644 --- a/libethereum/TransactionQueue.cpp +++ b/libethereum/TransactionQueue.cpp @@ -40,11 +40,14 @@ constexpr size_t c_maxVerificationQueueSize = 8192; constexpr size_t c_maxDroppedTransactionCount = 1024; } // namespace -TransactionQueue::TransactionQueue( unsigned _limit, unsigned _futureLimit ) +TransactionQueue::TransactionQueue( unsigned _limit, unsigned _futureLimit, + unsigned _currentLimitBytes, unsigned _futureLimitBytes ) : m_dropped{ c_maxDroppedTransactionCount }, m_current( PriorityCompare{ *this } ), m_limit( _limit ), m_futureLimit( _futureLimit ), + m_currentSizeBytesLimit( _currentLimitBytes ), + m_futureSizeBytesLimit( _futureLimitBytes ), m_aborting( false ) { m_readyCondNotifier = this->onReady( [this]() { this->m_cond.notify_all(); @@ -131,6 +134,7 @@ ImportResult TransactionQueue::import( // if( t == fs->second.begin() ){ UpgradeGuard ul( l ); --m_futureSize; + m_futureSizeBytes -= t->second.transaction.rlp().size(); auto erasedHash = t->second.transaction.sha3(); LOG( m_loggerDetail ) << "Re-inserting future transaction " << erasedHash; m_known.erase( erasedHash ); @@ -179,7 +183,7 @@ Transactions TransactionQueue::topTransactions_WITH_LOCK( unsigned _limit, int _maxCategory, int _setCategory ) { MICROPROFILE_SCOPEI( "TransactionQueue", "topTransactions_WITH_LOCK_cat", MP_PAPAYAWHIP ); - Transactions topTransactions; + Transactions top_transactions; std::vector< PriorityQueue::node_type > found; VerifiedTransaction dummy = VerifiedTransaction( Transaction() ); @@ -188,9 +192,9 @@ Transactions TransactionQueue::topTransactions_WITH_LOCK( PriorityQueue::iterator my_begin = m_current.lower_bound( dummy ); for ( PriorityQueue::iterator transaction_ptr = my_begin; - topTransactions.size() < _limit && transaction_ptr != m_current.end(); + top_transactions.size() < _limit && transaction_ptr != m_current.end(); ++transaction_ptr ) { - 
topTransactions.push_back( transaction_ptr->transaction ); + top_transactions.push_back( transaction_ptr->transaction ); if ( _setCategory >= 0 ) { found.push_back( m_current.extract( transaction_ptr ) ); } @@ -198,13 +202,32 @@ Transactions TransactionQueue::topTransactions_WITH_LOCK( // set all at once if ( _setCategory >= 0 ) { - for ( PriorityQueue::node_type& queueNode : found ) { - queueNode.value().category = _setCategory; - m_current.insert( std::move( queueNode ) ); + for ( PriorityQueue::node_type& queue_node : found ) { + queue_node.value().category = _setCategory; + m_current.insert( std::move( queue_node ) ); } } - return topTransactions; + // HACK For IS-348 + auto saved_txns = top_transactions; + std::stable_sort( top_transactions.begin(), top_transactions.end(), + TransactionQueue::PriorityCompare{ *this } ); + bool found_difference = false; + for ( size_t i = 0; i < top_transactions.size(); ++i ) { + if ( top_transactions[i].sha3() != saved_txns[i].sha3() ) + found_difference = true; + } + if ( found_difference ) { + clog( VerbosityError, "skale-host" ) << "IS-348 bug detected. 
Wrong transaction order in " + "block proposal was fixed by workaround :("; + clog( VerbosityTrace, "skale-host" ) << " "; + for ( size_t i = 0; i < top_transactions.size(); ++i ) { + clog( VerbosityTrace, "skale-host" ) + << i << " " << saved_txns[i].sha3() << " " << top_transactions[i].sha3(); + } + } + + return top_transactions; } const h256Hash TransactionQueue::knownTransactions() const { @@ -242,7 +265,7 @@ ImportResult TransactionQueue::manageImport_WITH_LOCK( insertCurrent_WITH_LOCK( make_pair( _h, _transaction ) ); LOG( m_loggerDetail ) << "Queued vaguely legit-looking transaction " << _h; - while ( m_current.size() > m_limit ) { + while ( m_current.size() > m_limit || m_currentSizeBytes > m_currentSizeBytesLimit ) { LOG( m_loggerDetail ) << "Dropping out of bounds transaction " << _h; remove_WITH_LOCK( m_current.rbegin()->transaction.sha3() ); } @@ -299,8 +322,12 @@ void TransactionQueue::insertCurrent_WITH_LOCK( std::pair< h256, Transaction > c auto inserted = m_currentByAddressAndNonce[t.from()].insert( std::make_pair( t.nonce(), PriorityQueue::iterator() ) ); PriorityQueue::iterator handle = m_current.emplace( VerifiedTransaction( t ) ); +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wdeprecated-copy" inserted.first->second = handle; m_currentByHash[_p.first] = handle; +#pragma GCC diagnostic pop + m_currentSizeBytes += t.rlp().size(); // Move following transactions from future to current makeCurrent_WITH_LOCK( t ); @@ -318,6 +345,7 @@ bool TransactionQueue::remove_WITH_LOCK( h256 const& _txHash ) { auto it = m_currentByAddressAndNonce.find( from ); assert( it != m_currentByAddressAndNonce.end() ); it->second.erase( ( *t->second ).transaction.nonce() ); + m_currentSizeBytes -= ( *t->second ).transaction.rlp().size(); m_current.erase( t->second ); m_currentByHash.erase( t ); if ( it->second.empty() ) @@ -354,6 +382,8 @@ void TransactionQueue::setFuture_WITH_LOCK( h256 const& _txHash ) { *( m->second ) ); // set has only const iterators. 
Since we are moving out of container // that's fine m_currentByHash.erase( t.transaction.sha3() ); + m_currentSizeBytes -= t.transaction.rlp().size(); + m_futureSizeBytes += t.transaction.rlp().size(); target.emplace( t.transaction.nonce(), move( t ) ); m_current.erase( m->second ); ++m_futureSize; @@ -362,10 +392,11 @@ void TransactionQueue::setFuture_WITH_LOCK( h256 const& _txHash ) { if ( queue.empty() ) m_currentByAddressAndNonce.erase( from ); - while ( m_futureSize > m_futureLimit ) { + while ( m_futureSize > m_futureLimit || m_futureSizeBytes > m_futureSizeBytesLimit ) { // TODO: priority queue for future transactions // For now just drop random chain end --m_futureSize; + m_futureSizeBytes -= m_future.begin()->second.rbegin()->second.transaction.rlp().size(); auto erasedHash = m_future.begin()->second.rbegin()->second.transaction.sha3(); LOG( m_loggerDetail ) << "Dropping out of bounds future transaction " << erasedHash; m_known.erase( erasedHash ); @@ -394,8 +425,13 @@ void TransactionQueue::makeCurrent_WITH_LOCK( Transaction const& _t ) { auto inserted = m_currentByAddressAndNonce[_t.from()].insert( std::make_pair( ft->second.transaction.nonce(), PriorityQueue::iterator() ) ); PriorityQueue::iterator handle = m_current.emplace( move( ft->second ) ); +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wdeprecated-copy" inserted.first->second = handle; m_currentByHash[( *handle ).transaction.sha3()] = handle; +#pragma GCC diagnostic pop + m_futureSizeBytes -= ( *handle ).transaction.rlp().size(); + m_currentSizeBytes += ( *handle ).transaction.rlp().size(); --m_futureSize; ++ft; ++nonce; @@ -441,11 +477,13 @@ void TransactionQueue::clear() { WriteGuard l( m_lock ); m_known.clear(); m_current.clear(); + m_currentSizeBytes = 0; m_dropped.clear(); m_currentByAddressAndNonce.clear(); m_currentByHash.clear(); m_future.clear(); m_futureSize = 0; + m_futureSizeBytes = 0; } void TransactionQueue::enqueue( RLP const& _data, h512 const& _nodeId ) { diff 
--git a/libethereum/TransactionQueue.h b/libethereum/TransactionQueue.h index 31bbd2340..63ad3ee96 100644 --- a/libethereum/TransactionQueue.h +++ b/libethereum/TransactionQueue.h @@ -52,15 +52,22 @@ namespace eth { class TransactionQueue { public: struct Limits { - size_t current; - size_t future; + size_t currentLimit; + size_t futureLimit; + size_t currentLimitBytes = 12322916; + size_t futureLimitBytes = 24645833; }; /// @brief TransactionQueue /// @param _limit Maximum number of pending transactions in the queue. /// @param _futureLimit Maximum number of future nonce transactions. - TransactionQueue( unsigned _limit = 1024, unsigned _futureLimit = 1024 ); - TransactionQueue( Limits const& _l ) : TransactionQueue( _l.current, _l.future ) {} + /// @param _currentLimitBytes Maximum size of pending transactions in the queue in bytes. + /// @param _futureLimitBytes Maximum size of future nonce transactions in bytes. + TransactionQueue( unsigned _limit = 1024, unsigned _futureLimit = 1024, + unsigned _currentLimitBytes = 12322916, unsigned _futureLimitBytes = 24645833 ); + TransactionQueue( Limits const& _l ) + : TransactionQueue( + _l.currentLimit, _l.futureLimit, _l.currentLimitBytes, _l.futureLimitBytes ) {} ~TransactionQueue(); void HandleDestruction(); /// Add transaction to the queue to be verified and imported. @@ -143,6 +150,8 @@ class TransactionQueue { size_t future; size_t unverified; size_t dropped; + size_t currentBytes; + size_t futureBytes; }; /// @returns the status of the transaction queue. Status status() const { @@ -151,12 +160,16 @@ class TransactionQueue { ReadGuard l( m_lock ); ret.dropped = m_dropped.size(); ret.current = m_currentByHash.size(); - ret.future = m_future.size(); + ret.future = m_futureSize; + ret.currentBytes = m_currentSizeBytes; + ret.futureBytes = m_futureSizeBytes; return ret; } /// @returns the transaction limits on current/future. 
- Limits limits() const { return Limits{ m_limit, m_futureLimit }; } + Limits limits() const { + return Limits{ m_limit, m_futureLimit, m_currentSizeBytes, m_futureSizeBytes }; + } /// @returns the number of tx in future queue. size_t futureSize() const { return m_futureSize; } @@ -323,6 +336,11 @@ class TransactionQueue { unsigned m_futureLimit; ///< Max number of future transactions unsigned m_futureSize = 0; ///< Current number of future transactions + unsigned m_currentSizeBytesLimit = 0; // max pending queue size in bytes + unsigned m_currentSizeBytes = 0; // current pending queue size in bytes + unsigned m_futureSizeBytesLimit = 0; // max future queue size in bytes + unsigned m_futureSizeBytes = 0; // current future queue size in bytes + std::condition_variable m_queueReady; ///< Signaled when m_unverified has a new entry. std::vector< std::thread > m_verifiers; std::deque< UnverifiedTransaction > m_unverified; ///< Pending verification queue diff --git a/libethereum/TransactionReceipt.h b/libethereum/TransactionReceipt.h index cc32f89c2..dcbeff84c 100644 --- a/libethereum/TransactionReceipt.h +++ b/libethereum/TransactionReceipt.h @@ -44,6 +44,8 @@ class TransactionReceipt { TransactionReceipt( bytesConstRef _rlp ); TransactionReceipt( h256 const& _root, u256 const& _gasUsed, LogEntries const& _log ); TransactionReceipt( uint8_t _status, u256 const& _gasUsed, LogEntries const& _log ); + TransactionReceipt( const TransactionReceipt& other ) = default; + TransactionReceipt& operator=( const TransactionReceipt& other ) = default; /// @returns true if the receipt has a status code. Otherwise the receipt has a state root /// (pre-EIP658). 
@@ -110,6 +112,8 @@ class LocalisedTransactionReceipt : public TransactionReceipt { m_localisedLogs.push_back( LocalisedLogEntry( entries[i], m_blockHash, m_blockNumber, m_hash, m_transactionIndex, i ) ); } + LocalisedTransactionReceipt( const LocalisedTransactionReceipt& other ) = default; + LocalisedTransactionReceipt& operator=( const LocalisedTransactionReceipt& other ) = default; h256 const& hash() const { return m_hash; } h256 const& blockHash() const { return m_blockHash; } diff --git a/libethereum/ValidationSchemes.cpp b/libethereum/ValidationSchemes.cpp index 6db14c81e..38231c130 100644 --- a/libethereum/ValidationSchemes.cpp +++ b/libethereum/ValidationSchemes.cpp @@ -152,6 +152,8 @@ void validateConfigJson( js::mObject const& _obj ) { { "infoWssRpcPort6", { { js::int_type }, JsonFieldPresence::Optional } }, { "imaMonitoringPort", { { js::int_type }, JsonFieldPresence::Optional } }, { "emptyBlockIntervalMs", { { js::int_type }, JsonFieldPresence::Optional } }, + { "emptyBlockIntervalAfterCatchupMs", + { { js::int_type }, JsonFieldPresence::Optional } }, { "snapshotIntervalSec", { { js::int_type }, JsonFieldPresence::Optional } }, { "rotateAfterBlock", { { js::int_type }, JsonFieldPresence::Optional } }, { "wallets", { { js::obj_type }, JsonFieldPresence::Optional } }, @@ -169,6 +171,10 @@ void validateConfigJson( js::mObject const& _obj ) { { "collectionQueueSize", { { js::int_type }, JsonFieldPresence::Optional } }, { "collectionDuration", { { js::int_type }, JsonFieldPresence::Optional } }, { "transactionQueueSize", { { js::int_type }, JsonFieldPresence::Optional } }, + { "futureTransactionQueueSize", { { js::int_type }, JsonFieldPresence::Optional } }, + { "transactionQueueLimitBytes", { { js::int_type }, JsonFieldPresence::Optional } }, + { "futureTransactionQueueLimitBytes", + { { js::int_type }, JsonFieldPresence::Optional } }, { "maxOpenLeveldbFiles", { { js::int_type }, JsonFieldPresence::Optional } }, { "logLevel", { { js::str_type }, 
JsonFieldPresence::Optional } }, { "logLevelConfig", { { js::str_type }, JsonFieldPresence::Optional } }, @@ -201,8 +207,6 @@ void validateConfigJson( js::mObject const& _obj ) { { "log-tx-params-limit", { { js::int_type }, JsonFieldPresence::Optional } }, { "no-ima-signing", { { js::bool_type }, JsonFieldPresence::Optional } }, { "skale-manager", { { js::obj_type }, JsonFieldPresence::Optional } }, - { "skale-network-browser-refresh", { { js::int_type }, JsonFieldPresence::Optional } }, - { "skale-network-browser-verbose", { { js::bool_type }, JsonFieldPresence::Optional } }, { "imaMainNet", { { js::str_type }, JsonFieldPresence::Optional } }, { "imaMessageProxySChain", { { js::str_type }, JsonFieldPresence::Optional } }, { "imaMessageProxyMainNet", { { js::str_type }, JsonFieldPresence::Optional } }, @@ -240,6 +244,8 @@ void validateConfigJson( js::mObject const& _obj ) { { "schainOwner", { { js::str_type }, JsonFieldPresence::Optional } }, { "blockAuthor", { { js::str_type }, JsonFieldPresence::Optional } }, { "emptyBlockIntervalMs", { { js::int_type }, JsonFieldPresence::Optional } }, + { "emptyBlockIntervalAfterCatchupMs", + { { js::int_type }, JsonFieldPresence::Optional } }, { "snapshotIntervalSec", { { js::int_type }, JsonFieldPresence::Optional } }, { "snapshotDownloadTimeout", { { js::int_type }, JsonFieldPresence::Optional } }, { "snapshotDownloadInactiveTimeout", @@ -259,7 +265,12 @@ void validateConfigJson( js::mObject const& _obj ) { { "contractStorageZeroValuePatchTimestamp", { { js::int_type }, JsonFieldPresence::Optional } }, { "verifyDaSigsPatchTimestamp", { { js::int_type }, JsonFieldPresence::Optional } }, - { "nodeGroups", { { js::obj_type }, JsonFieldPresence::Optional } } } ); + { "storageDestructionPatchTimestamp", + { { js::int_type }, JsonFieldPresence::Optional } }, + { "powCheckPatchTimestamp", { { js::int_type }, JsonFieldPresence::Optional } }, + { "nodeGroups", { { js::obj_type }, JsonFieldPresence::Optional } }, + { 
"skipInvalidTransactionsPatchTimestamp", + { { js::int_type }, JsonFieldPresence::Optional } } } ); js::mArray const& nodes = sChain.at( "nodes" ).get_array(); for ( auto const& obj : nodes ) { diff --git a/libhistoric/HistoricAccount.h b/libhistoric/HistoricAccount.h index ca354ecac..467a15468 100644 --- a/libhistoric/HistoricAccount.h +++ b/libhistoric/HistoricAccount.h @@ -46,6 +46,7 @@ class HistoricAccount : public Account { HistoricAccount( u256 _nonce, u256 _balance, Changedness _c = Changed ) : Account( _nonce, _balance, _c ){}; + HistoricAccount& operator=( const HistoricAccount& other ) = default; /// Explicit constructor for wierd cases of construction or a contract account. HistoricAccount( u256 _nonce, u256 _balance, StorageRoot _storageRoot, h256 _codeHash, @@ -54,4 +55,4 @@ class HistoricAccount : public Account { }; } // namespace eth } // namespace dev -#endif \ No newline at end of file +#endif diff --git a/libskale/CMakeLists.txt b/libskale/CMakeLists.txt index 9a8e361cc..73acfbbf5 100644 --- a/libskale/CMakeLists.txt +++ b/libskale/CMakeLists.txt @@ -13,11 +13,14 @@ set(sources UnsafeRegion.cpp TotalStorageUsedPatch.cpp ContractStorageLimitPatch.cpp - ContractStorageZeroValuePatch.cpp - VerifyDaSigsPatch.cpp + ContractStorageZeroValuePatch.cpp + VerifyDaSigsPatch.cpp AmsterdamFixPatch.cpp RevertableFSPatch.cpp OverlayFS.cpp + StorageDestructionPatch.cpp + POWCheckPatch.cpp + SkipInvalidTransactionsPatch.cpp ) set(headers @@ -35,7 +38,9 @@ set(headers ContractStorageLimitPatch.h AmsterdamFixPatch.h RevertableFSPatch.h + POWCheckPatch.h OverlayFS.h + SkipInvalidTransactionsPatch.h ) add_library(skale ${sources} ${headers}) diff --git a/libskale/ContractStorageLimitPatch.h b/libskale/ContractStorageLimitPatch.h index 0884b81a1..f7442b572 100644 --- a/libskale/ContractStorageLimitPatch.h +++ b/libskale/ContractStorageLimitPatch.h @@ -21,6 +21,11 @@ class ContractStorageLimitPatch : public SchainPatch { public: static bool isEnabled(); + static void 
setTimestamp( time_t _timeStamp ) { + printInfo( __FILE__, _timeStamp ); + contractStoragePatchTimestamp = _timeStamp; + } + private: friend class dev::eth::Client; static time_t contractStoragePatchTimestamp; diff --git a/libskale/ContractStorageZeroValuePatch.h b/libskale/ContractStorageZeroValuePatch.h index d38b737ce..f64d3059a 100644 --- a/libskale/ContractStorageZeroValuePatch.h +++ b/libskale/ContractStorageZeroValuePatch.h @@ -21,6 +21,12 @@ class ContractStorageZeroValuePatch : public SchainPatch { public: static bool isEnabled(); + static void setTimestamp( time_t _timeStamp ) { + printInfo( __FILE__, _timeStamp ); + contractStorageZeroValuePatchTimestamp = _timeStamp; + } + + private: friend class dev::eth::Client; static time_t contractStorageZeroValuePatchTimestamp; diff --git a/libskale/OverlayDB.cpp b/libskale/OverlayDB.cpp index bb1c5fa1d..2c6b5eebb 100644 --- a/libskale/OverlayDB.cpp +++ b/libskale/OverlayDB.cpp @@ -330,7 +330,9 @@ std::unordered_map< h160, string > OverlayDB::accounts() const { std::unordered_map< u256, u256 > OverlayDB::storage( const dev::h160& _address ) const { unordered_map< u256, u256 > storage; if ( m_db_face ) { - m_db_face->forEach( [&storage, &_address]( Slice key, Slice value ) { + // iterate of a keys that start with the given substring + string prefix( ( const char* ) _address.data(), _address.size ); + m_db_face->forEachWithPrefix( prefix, [&storage, &_address]( Slice key, Slice value ) { if ( key.size() == h160::size + h256::size ) { // key is storage address string keyString( key.begin(), key.end() ); @@ -342,6 +344,8 @@ std::unordered_map< u256, u256 > OverlayDB::storage( const dev::h160& _address ) u256 memoryValue = h256( string( value.begin(), value.end() ), h256::ConstructFromStringType::FromBinary ); storage[memoryAddress] = memoryValue; + } else { + cerror << "Address mismatch in:" << __FUNCTION__; } } return true; diff --git a/libskale/POWCheckPatch.cpp b/libskale/POWCheckPatch.cpp new file mode 100644 
index 000000000..79a43dddd --- /dev/null +++ b/libskale/POWCheckPatch.cpp @@ -0,0 +1,11 @@ +#include "POWCheckPatch.h" + +time_t POWCheckPatch::powCheckPatchTimestamp; +time_t POWCheckPatch::lastBlockTimestamp; + +bool POWCheckPatch::isEnabled() { + if ( powCheckPatchTimestamp == 0 ) { + return false; + } + return powCheckPatchTimestamp <= lastBlockTimestamp; +} diff --git a/libskale/POWCheckPatch.h b/libskale/POWCheckPatch.h new file mode 100644 index 000000000..65a4f5905 --- /dev/null +++ b/libskale/POWCheckPatch.h @@ -0,0 +1,31 @@ +#include +#include + +#ifndef POWCHECKPATCH_H +#define POWCHECKPATCH_H + +namespace dev { +namespace eth { +class Client; +} +} // namespace dev + +/* + * Context: enable fix for POW txns gas limit check + */ +class POWCheckPatch : public SchainPatch { +public: + static bool isEnabled(); + + static void setTimestamp( time_t _timeStamp ) { + printInfo( __FILE__, _timeStamp ); + powCheckPatchTimestamp = _timeStamp; + } + +private: + friend class dev::eth::Client; + static time_t powCheckPatchTimestamp; + static time_t lastBlockTimestamp; +}; + +#endif // POWCHECKPATCH_H diff --git a/libskale/RevertableFSPatch.h b/libskale/RevertableFSPatch.h index 195d409f1..83321bfec 100644 --- a/libskale/RevertableFSPatch.h +++ b/libskale/RevertableFSPatch.h @@ -14,6 +14,11 @@ class RevertableFSPatch : public SchainPatch { public: static bool isEnabled(); + static void setTimestamp( time_t _timeStamp ) { + printInfo( __FILE__, _timeStamp ); + revertableFSPatchTimestamp = _timeStamp; + } + private: friend class dev::eth::Client; static time_t revertableFSPatchTimestamp; diff --git a/libskale/SkipInvalidTransactionsPatch.cpp b/libskale/SkipInvalidTransactionsPatch.cpp new file mode 100644 index 000000000..5ab660337 --- /dev/null +++ b/libskale/SkipInvalidTransactionsPatch.cpp @@ -0,0 +1,12 @@ +#include "SkipInvalidTransactionsPatch.h" + +using namespace dev::eth; + +time_t SkipInvalidTransactionsPatch::activationTimestamp; +time_t 
SkipInvalidTransactionsPatch::lastBlockTimestamp; + +bool SkipInvalidTransactionsPatch::isEnabled() { + if ( activationTimestamp == 0 ) + return false; + return lastBlockTimestamp >= activationTimestamp; +} diff --git a/libskale/SkipInvalidTransactionsPatch.h b/libskale/SkipInvalidTransactionsPatch.h new file mode 100644 index 000000000..7c171b09e --- /dev/null +++ b/libskale/SkipInvalidTransactionsPatch.h @@ -0,0 +1,52 @@ +#ifndef SKIPINVALIDTRANSACTIONSPATCH_H +#define SKIPINVALIDTRANSACTIONSPATCH_H + +#include +#include +#include +#include +#include + +namespace dev { +namespace eth { +class Client; +} +} // namespace dev + +// What this patch does: +// 1. "Invalid" transactions that came with winning block proposal from consensus +// are skipped, and not included in block. +// Their "validity is determined in Block::syncEveryone: +// a) Transactions should have gasPrice >= current block min gas price +// b) State::execute should not throw (it causes WouldNotBeInBlock exception). +// Usually this exception is caused by Executive::verifyTransaction() failure. +// +// 2. Specifically for historic node - we ignore "invalid" transactions that +// are already in block as though they never came. +// This affects following JSON-RPC calls: +// 1) eth_getBlockByHash/Number +// 2) eth_getTransactionReceipt (affects "transactionIndex" field) +// 3) eth_getBlockTransactionCountByHash/Number +// 4) eth_getTransactionByHash (invalid transactions are treated as never present) +// 5) eth_getTransactionByBlockHash/NumberAndIndex +// Transactions are removed from Transaction Queue as usually. 
+ +// TODO better start to apply patches from 1st block after timestamp, not second +class SkipInvalidTransactionsPatch : public SchainPatch { +public: + static bool isEnabled(); + + static void setTimestamp( time_t _activationTimestamp ) { + activationTimestamp = _activationTimestamp; + printInfo( __FILE__, _activationTimestamp ); + } + + static time_t getActivationTimestamp() { return activationTimestamp; } + +private: + friend class dev::eth::Client; + static time_t activationTimestamp; + static time_t lastBlockTimestamp; +}; + +#endif // SKIPINVALIDTRANSACTIONSPATCH_H diff --git a/libskale/SnapshotHashAgent.cpp b/libskale/SnapshotHashAgent.cpp index 1ea31e6f7..83797bce8 100644 --- a/libskale/SnapshotHashAgent.cpp +++ b/libskale/SnapshotHashAgent.cpp @@ -266,20 +266,6 @@ std::vector< std::string > SnapshotHashAgent::getNodesToDownloadSnapshotFrom( ( this->chain_params_.sChain.nodes[i].port + 3 ).convert_to< std::string >() ); SkaleClient skaleClient( *jsonRpcClient ); - // just ask block number in this special case - if ( block_number == 0 ) { - unsigned n = skaleClient.skale_getLatestSnapshotBlockNumber(); - if ( n == 0 ) { - const std::lock_guard< std::mutex > lock( this->hashes_mutex ); - if ( ipToDownloadSnapshotFrom_.empty() ) - nodes_to_download_snapshot_from_.push_back( i ); - else if ( ipToDownloadSnapshotFrom_ == chain_params_.sChain.nodes[i].ip ) - nodes_to_download_snapshot_from_.push_back( i ); - delete jsonRpcClient; - return; - } - } - Json::Value joSignatureResponse; try { joSignatureResponse = skaleClient.skale_getSnapshotSignature( block_number ); @@ -376,9 +362,7 @@ std::vector< std::string > SnapshotHashAgent::getNodesToDownloadSnapshotFrom( } // for i result = this->nodes_to_download_snapshot_from_.size() > 0; - } else if ( block_number == 0 ) - result = this->nodes_to_download_snapshot_from_.size() * 3 >= 2 * this->n_ + 1; - else + } else try { result = this->voteForHash(); } catch ( SnapshotHashAgentException& ex ) { diff --git 
a/libskale/SnapshotManager.cpp b/libskale/SnapshotManager.cpp index 970a15d31..d3c3bc04e 100644 --- a/libskale/SnapshotManager.cpp +++ b/libskale/SnapshotManager.cpp @@ -54,8 +54,10 @@ const std::string SnapshotManager::snapshot_hash_file_name = "snapshot_hash.txt" // - bad data dir // - not btrfs // - volumes don't exist -SnapshotManager::SnapshotManager( const fs::path& _dataDir, - const std::vector< std::string >& _volumes, const std::string& _diffsDir ) { +SnapshotManager::SnapshotManager( const dev::eth::ChainParams& _chain_params, + const fs::path& _dataDir, const std::vector< std::string >& _volumes, + const std::string& _diffsDir ) + : chain_params( _chain_params ) { assert( _volumes.size() > 0 ); data_dir = _dataDir; @@ -272,6 +274,12 @@ void SnapshotManager::removeSnapshot( unsigned _blockNumber ) { void SnapshotManager::cleanupButKeepSnapshot( unsigned _keepSnapshot ) { this->cleanupDirectory( snapshots_dir, snapshots_dir / std::to_string( _keepSnapshot ) ); this->cleanupDirectory( data_dir, snapshots_dir ); + if ( !fs::exists( diffs_dir ) ) + try { + boost::filesystem::create_directory( diffs_dir ); + } catch ( const fs::filesystem_error& ex ) { + std::throw_with_nested( CannotWrite( ex.path1() ) ); + } } void SnapshotManager::cleanup() { @@ -280,7 +288,8 @@ void SnapshotManager::cleanup() { try { boost::filesystem::create_directory( snapshots_dir ); - boost::filesystem::create_directory( diffs_dir ); + if ( !fs::exists( diffs_dir ) ) + boost::filesystem::create_directory( diffs_dir ); } catch ( const fs::filesystem_error& ex ) { std::throw_with_nested( CannotWrite( ex.path1() ) ); } // catch @@ -335,7 +344,7 @@ void SnapshotManager::leaveNLastSnapshots( unsigned n ) { } // for } -std::pair< int, int > SnapshotManager::getLatestSnasphots() const { +std::pair< int, int > SnapshotManager::getLatestSnapshots() const { map< int, fs::path, std::greater< int > > numbers; for ( auto& f : fs::directory_iterator( snapshots_dir ) ) { // HACK We exclude 0 snapshot 
forcefully @@ -436,7 +445,9 @@ void SnapshotManager::computeDatabaseHash( BOOST_THROW_EXCEPTION( InvalidPath( _dbDir ) ); } - std::unique_ptr< dev::db::LevelDB > m_db( new dev::db::LevelDB( _dbDir.string() ) ); + std::unique_ptr< dev::db::LevelDB > m_db( new dev::db::LevelDB( _dbDir.string(), + dev::db::LevelDB::defaultSnapshotReadOptions(), dev::db::LevelDB::defaultWriteOptions(), + dev::db::LevelDB::defaultSnapshotDBOptions() ) ); dev::h256 hash_volume = m_db->hashBase(); cnote << _dbDir << " hash is: " << hash_volume << std::endl; @@ -455,7 +466,9 @@ void SnapshotManager::addLastPriceToHash( unsigned _blockNumber, secp256k1_sha25 std::string last_price_str; std::string last_price_key = "1.0:" + std::to_string( _blockNumber ); while ( it != end ) { - std::unique_ptr< dev::db::LevelDB > m_db( new dev::db::LevelDB( it->path().string() ) ); + std::unique_ptr< dev::db::LevelDB > m_db( new dev::db::LevelDB( it->path().string(), + dev::db::LevelDB::defaultReadOptions(), dev::db::LevelDB::defaultWriteOptions(), + dev::db::LevelDB::defaultSnapshotDBOptions() ) ); if ( m_db->exists( last_price_key ) ) { last_price_str = m_db->lookup( last_price_key ); break; @@ -629,7 +642,7 @@ void SnapshotManager::computeAllVolumesHash( this->snapshots_dir / std::to_string( _blockNumber ) / "filestorage", ctx, is_checking ); // if have prices and blocks - if ( this->volumes.size() > 3 ) { + if ( _blockNumber && this->volumes.size() > 3 ) { this->addLastPriceToHash( _blockNumber, ctx ); } } @@ -688,8 +701,7 @@ void SnapshotManager::computeSnapshotHash( unsigned _blockNumber, bool is_checki } } -uint64_t SnapshotManager::getBlockTimestamp( - unsigned _blockNumber, const dev::eth::ChainParams& chain_params ) const { +uint64_t SnapshotManager::getBlockTimestamp( unsigned _blockNumber ) const { fs::path snapshot_dir = snapshots_dir / to_string( _blockNumber ); try { diff --git a/libskale/SnapshotManager.h b/libskale/SnapshotManager.h index 2dd15280b..26aa1d411 100644 --- 
a/libskale/SnapshotManager.h +++ b/libskale/SnapshotManager.h @@ -22,8 +22,8 @@ * @date 2019 */ -#ifndef SNAPSHOTAGENT_H -#define SNAPSHOTAGENT_H +#ifndef SNAPSHOTMANAGER_H +#define SNAPSHOTMANAGER_H #include #include @@ -152,8 +152,9 @@ class SnapshotManager { /////////////// MORE INTERESTING STUFF //////////////// public: - SnapshotManager( const boost::filesystem::path& _dataDir, - const std::vector< std::string >& _volumes, const std::string& diffs_dir = std::string() ); + SnapshotManager( const dev::eth::ChainParams& _chain_params, + const boost::filesystem::path& _dataDir, const std::vector< std::string >& _volumes, + const std::string& diffs_dir = std::string() ); void doSnapshot( unsigned _blockNumber ); void restoreSnapshot( unsigned _blockNumber ); boost::filesystem::path makeOrGetDiff( unsigned _toBlock ); @@ -167,12 +168,11 @@ class SnapshotManager { void leaveNLastDiffs( unsigned n ); dev::h256 getSnapshotHash( unsigned _blockNumber ) const; - std::pair< int, int > getLatestSnasphots() const; + std::pair< int, int > getLatestSnapshots() const; bool isSnapshotHashPresent( unsigned _blockNumber ) const; void computeSnapshotHash( unsigned _blockNumber, bool is_checking = false ); - uint64_t getBlockTimestamp( - unsigned _blockNumber, const dev::eth::ChainParams& chain_params ) const; + uint64_t getBlockTimestamp( unsigned _blockNumber ) const; static boost::filesystem::path findMostRecentBlocksDBPath( const boost::filesystem::path& _dirPath ); @@ -186,6 +186,8 @@ class SnapshotManager { static const std::string snapshot_hash_file_name; mutable std::mutex hash_file_mutex; + dev::eth::ChainParams chain_params; + void cleanupDirectory( const boost::filesystem::path& p, const boost::filesystem::path& _keepDirectory = "" ); @@ -203,4 +205,4 @@ class SnapshotManager { void addLastPriceToHash( unsigned _blockNumber, secp256k1_sha256_t* ctx ) const; }; -#endif // SNAPSHOTAGENT_H +#endif // SNAPSHOTMANAGER_H diff --git a/libskale/State.cpp b/libskale/State.cpp 
index 2411e193e..5bf4433d5 100644 --- a/libskale/State.cpp +++ b/libskale/State.cpp @@ -46,6 +46,7 @@ #include #include +#include namespace fs = boost::filesystem; @@ -66,6 +67,52 @@ using dev::eth::TransactionReceipt; #define ETH_VMTRACE 0 #endif +State::State( dev::u256 const& _accountStartNonce, boost::filesystem::path const& _dbPath, + dev::h256 const& _genesis, BaseState _bs, dev::u256 _initialFunds, + dev::s256 _contractStorageLimit ) + : x_db_ptr( make_shared< boost::shared_mutex >() ), + m_storedVersion( make_shared< size_t >( 0 ) ), + m_currentVersion( *m_storedVersion ), + m_accountStartNonce( _accountStartNonce ), + m_initial_funds( _initialFunds ), + contractStorageLimit_( _contractStorageLimit ) +#ifdef HISTORIC_STATE + , + m_historicState( _accountStartNonce, + dev::eth::HistoricState::openDB( + boost::filesystem::path( std::string( _dbPath.string() ) + .append( "/" ) + .append( dev::eth::HISTORIC_STATE_DIR ) ), + _genesis, + _bs == BaseState::PreExisting ? dev::WithExisting::Trust : dev::WithExisting::Kill ), + dev::eth::HistoricState::openDB( + boost::filesystem::path( std::string( _dbPath.string() ) + .append( "/" ) + .append( dev::eth::HISTORIC_ROOTS_DIR ) ), + _genesis, + _bs == BaseState::PreExisting ? dev::WithExisting::Trust : dev::WithExisting::Kill ) ) +#endif +{ + m_db_ptr = make_shared< OverlayDB >( openDB( _dbPath, _genesis, + _bs == BaseState::PreExisting ? dev::WithExisting::Trust : dev::WithExisting::Kill ) ); + + auto state = createStateReadOnlyCopy(); + totalStorageUsed_ = state.storageUsedTotal(); +#ifdef HISTORIC_STATE + m_historicState.setRootFromDB(); +#endif + m_fs_ptr = state.fs(); + if ( _bs == BaseState::PreExisting ) { + clog( VerbosityDebug, "statedb" ) << cc::debug( "Using existing database" ); + } else if ( _bs == BaseState::Empty ) { + // Initialise to the state entailed by the genesis block; this guarantees the trie is built + // correctly. 
+ m_db_ptr->clearDB(); + } else { + throw std::logic_error( "Not implemented" ); + } +} + State::State( u256 const& _accountStartNonce, OverlayDB const& _db, #ifdef HISTORIC_STATE dev::OverlayDB const& _historicDb, dev::OverlayDB const& _historicBlockToStateRootDb, @@ -178,9 +225,9 @@ skale::OverlayDB State::openDB( fs::path state_path = path / fs::path( "state" ); try { - std::shared_ptr< db::DatabaseFace > db( new db::DBImpl( state_path ) ); + m_orig_db.reset( new db::DBImpl( state_path ) ); std::unique_ptr< batched_io::batched_db > bdb = make_unique< batched_io::batched_db >(); - bdb->open( db ); + bdb->open( m_orig_db ); assert( bdb->is_open() ); clog( VerbosityDebug, "statedb" ) << cc::success( "Opened state DB." ); return OverlayDB( std::move( bdb ) ); @@ -213,6 +260,7 @@ State::State( const State& _s ) std::logic_error( "Can't copy locked for writing state object" ); } m_db_ptr = _s.m_db_ptr; + m_orig_db = _s.m_orig_db; m_storedVersion = _s.m_storedVersion; m_currentVersion = _s.m_currentVersion; m_cache = _s.m_cache; @@ -234,6 +282,7 @@ State& State::operator=( const State& _s ) { std::logic_error( "Can't copy locked for writing state object" ); } m_db_ptr = _s.m_db_ptr; + m_orig_db = _s.m_orig_db; m_storedVersion = _s.m_storedVersion; m_currentVersion = _s.m_currentVersion; m_cache = _s.m_cache; @@ -346,7 +395,7 @@ std::pair< State::AddressMap, h256 > State::addresses( } addresses.erase( addresses.begin(), addresses.lower_bound( _begin ) ); if ( addresses.size() > _maxResults ) { - assert( numeric_limits< long >::max() >= _maxResults ); + assert( numeric_limits< long >::max() >= long( _maxResults ) ); auto next_ptr = std::next( addresses.begin(), static_cast< long >( _maxResults ) ); next = next_ptr->first; addresses.erase( next_ptr, addresses.end() ); @@ -460,7 +509,11 @@ void State::commit( dev::eth::CommitBehaviour _commitBehaviour ) { if ( !account.isAlive() ) { m_db_ptr->kill( address ); m_db_ptr->killAuxiliary( address, Auxiliary::CODE ); - // TODO: 
remove account storage + + if ( StorageDestructionPatch::isEnabled() ) { + clearStorage( address ); + } + } else { RLPStream rlpStream( 4 ); @@ -605,8 +658,15 @@ void State::kill( Address _addr ) { // If the account is not in the db, nothing to kill. } + std::map< h256, std::pair< u256, u256 > > State::storage( const Address& _contract ) const { boost::shared_lock< boost::shared_mutex > lock( *x_db_ptr ); + return storage_WITHOUT_LOCK( _contract ); +} + + +std::map< h256, std::pair< u256, u256 > > State::storage_WITHOUT_LOCK( + const Address& _contract ) const { if ( !checkVersion() ) { cerror << "Current state version is " << m_currentVersion << " but stored version is " << *m_storedVersion << endl; @@ -628,6 +688,9 @@ std::map< h256, std::pair< u256, u256 > > State::storage( const Address& _contra } } } + + cdebug << "Self-destruct cleared values:" << storage.size() << endl; + return storage; } @@ -680,9 +743,26 @@ void State::setStorage( Address const& _contract, u256 const& _key, u256 const& if ( totalStorageUsed_ + currentStorageUsed_ > contractStorageLimit_ ) { BOOST_THROW_EXCEPTION( dev::StorageOverflow() << errinfo_comment( _contract.hex() ) ); } - // TODO::review it |^ } +void State::clearStorageValue( + Address const& _contract, u256 const& _key, u256 const& _currentValue ) { + m_changeLog.emplace_back( _contract, _key, _currentValue ); + m_cache[_contract].setStorage( _key, 0 ); + + int count; + + if ( _currentValue == 0 ) { + count = 0; + } else { + count = -1; + } + + storageUsage[_contract] += count * 32; + currentStorageUsed_ += count * 32; +} + + u256 State::originalStorageValue( Address const& _contract, u256 const& _key ) const { if ( Account const* acc = account( _contract ) ) { auto memoryPtr = acc->originalStorageCache().find( _key ); @@ -703,16 +783,11 @@ u256 State::originalStorageValue( Address const& _contract, u256 const& _key ) c } -// Clear storage needs to be called when a new contract is -// created for an address that included a 
different contract -// that was destroyed using selfdestruct op code -// The only way this can happen if one calls -// CREATE2, self-destruct, and then CREATE2 again, which is -// extremely rare and a bad security practice -// Note that in Shanhai fork the selfdestruct op code will be removed void State::clearStorage( Address const& _contract ) { // only clear storage if the storage used is not 0 + cdebug << "Self-destructing" << _contract; + Account* acc = account( _contract ); dev::s256 accStorageUsed = acc->storageUsed(); @@ -720,16 +795,29 @@ void State::clearStorage( Address const& _contract ) { return; } - // TODO: This is extremely inefficient - for ( auto const& hashPairPair : storage( _contract ) ) { + // clearStorage is called from functions that already hold a read + // or write lock over the state Therefore, we can use + // storage_WITHOUT_LOCK() here + for ( auto const& hashPairPair : storage_WITHOUT_LOCK( _contract ) ) { auto const& key = hashPairPair.second.first; - setStorage( _contract, key, 0 ); + auto const& value = hashPairPair.second.first; + // Set storage to zero in state cache + clearStorageValue( _contract, key, value ); + // Set storage to zero in the account storage cache + // we have lots of caches, some of them may be unneeded + // will analyze this more in future releases acc->setStorageCache( key, 0 ); + /* The corresponding key/value pair needs to be cleared in database + Inserting ZERO deletes the key during commit + at the end of transaction + see OverlayDB::commitStorageValues() + */ + h256 ZERO( 0 ); + m_db_ptr->insert( _contract, key, ZERO ); } totalStorageUsed_ -= ( accStorageUsed + storageUsage[_contract] ); acc->updateStorageUsage( -accStorageUsed ); - // TODO Do we need to clear storageUsage[_contract] here? 
} bytes const& State::code( Address const& _addr ) const { diff --git a/libskale/State.h b/libskale/State.h index ee7a827e6..0fe41329e 100644 --- a/libskale/State.h +++ b/libskale/State.h @@ -179,29 +179,8 @@ class State { // This is called once in the client during the client creation explicit State( dev::u256 const& _accountStartNonce, boost::filesystem::path const& _dbPath, dev::h256 const& _genesis, BaseState _bs = BaseState::PreExisting, - dev::u256 _initialFunds = 0, dev::s256 _contractStorageLimit = 32 ) - : State( _accountStartNonce, - openDB( _dbPath, _genesis, - _bs == BaseState::PreExisting ? dev::WithExisting::Trust : - dev::WithExisting::Kill ), -#ifdef HISTORIC_STATE - dev::eth::HistoricState::openDB( - boost::filesystem::path( std::string( _dbPath.string() ) - .append( "/" ) - .append( dev::eth::HISTORIC_STATE_DIR ) ), - _genesis, - _bs == BaseState::PreExisting ? dev::WithExisting::Trust : - dev::WithExisting::Kill ), - dev::eth::HistoricState::openDB( - boost::filesystem::path( std::string( _dbPath.string() ) - .append( "/" ) - .append( dev::eth::HISTORIC_ROOTS_DIR ) ), - _genesis, - _bs == BaseState::PreExisting ? dev::WithExisting::Trust : - dev::WithExisting::Kill ), -#endif /// which uses it. If you have no preexisting database then set BaseState to something other - _bs, _initialFunds, _contractStorageLimit ) { - } + dev::u256 _initialFunds = 0, dev::s256 _contractStorageLimit = 32 ); + /// which uses it. If you have no preexisting database then set BaseState to something other State() : State( dev::Invalid256, skale::OverlayDB(), @@ -285,6 +264,10 @@ class State { void setStorage( dev::Address const& _contract, dev::u256 const& _location, dev::u256 const& _value ); + /// Clean storage at position of an account. 
Used in selfdestruct + void clearStorageValue( + dev::Address const& _contract, dev::u256 const& _location, dev::u256 const& _currentValue ); + /// Get the original value of a storage position of an account (before modifications saved in /// account cache). /// @returns 0 if no account exists at that address. @@ -313,6 +296,11 @@ class State { std::map< dev::h256, std::pair< dev::u256, dev::u256 > > storage( dev::Address const& _contract ) const; + /// Not thread safe version of storage() that does not hold any locks + std::map< dev::h256, std::pair< dev::u256, dev::u256 > > storage_WITHOUT_LOCK( + dev::Address const& _contract ) const; + + /// Get the code of an account. /// @returns bytes() if no account exists at that address. /// @warning The reference to the code is only valid until the access to @@ -394,7 +382,7 @@ class State { /// Check if state is empty bool empty() const; - // const dev::db::DBImpl* getOriginalDb() const { return m_orig_db.get(); } + const dev::db::DBImpl* getOriginalDb() const { return m_orig_db.get(); } void resetStorageChanges() { storageUsage.clear(); @@ -425,7 +413,7 @@ class State { /// Open a DB - useful for passing into the constructor & keeping for other states that are /// necessary. - static OverlayDB openDB( boost::filesystem::path const& _path, dev::h256 const& _genesisHash, + OverlayDB openDB( boost::filesystem::path const& _path, dev::h256 const& _genesisHash, dev::WithExisting _we = dev::WithExisting::Trust ); /// Turns all "touched" empty accounts into non-alive accounts. @@ -482,9 +470,8 @@ class State { std::shared_ptr< boost::shared_mutex > x_db_ptr; std::shared_ptr< OverlayDB > m_db_ptr; ///< Our overlay for the state. std::shared_ptr< OverlayFS > m_fs_ptr; ///< Our overlay for the file system operations. - // // HACK - // // TODO Implement DB-registry, remove it! - // std::shared_ptr< dev::db::DBImpl > m_orig_db; + // TODO Implement DB-registry, remove it! 
+ std::shared_ptr< dev::db::DBImpl > m_orig_db; std::shared_ptr< size_t > m_storedVersion; size_t m_currentVersion; mutable std::unordered_map< dev::Address, dev::eth::Account > m_cache; ///< Our address cache. diff --git a/libskale/StorageDestructionPatch.cpp b/libskale/StorageDestructionPatch.cpp new file mode 100644 index 000000000..5ae6b03c7 --- /dev/null +++ b/libskale/StorageDestructionPatch.cpp @@ -0,0 +1,11 @@ +#include "StorageDestructionPatch.h" + +time_t StorageDestructionPatch::storageDestructionPatchTimestamp; +time_t StorageDestructionPatch::lastBlockTimestamp; + +bool StorageDestructionPatch::isEnabled() { + if ( storageDestructionPatchTimestamp == 0 ) { + return false; + } + return storageDestructionPatchTimestamp <= lastBlockTimestamp; +} diff --git a/libskale/StorageDestructionPatch.h b/libskale/StorageDestructionPatch.h new file mode 100644 index 000000000..fb16bbb4d --- /dev/null +++ b/libskale/StorageDestructionPatch.h @@ -0,0 +1,27 @@ +#include +#include + +namespace dev { +namespace eth { +class Client; +} +} // namespace dev + +/* + * Context: enable effective storage destruction + */ +class StorageDestructionPatch : public SchainPatch { +public: + static bool isEnabled(); + + static void setTimestamp( time_t _timeStamp ) { + printInfo( __FILE__, _timeStamp ); + storageDestructionPatchTimestamp = _timeStamp; + } + + +private: + friend class dev::eth::Client; + static time_t storageDestructionPatchTimestamp; + static time_t lastBlockTimestamp; +}; \ No newline at end of file diff --git a/libskale/VerifyDaSigsPatch.h b/libskale/VerifyDaSigsPatch.h index 816f2b743..426412507 100644 --- a/libskale/VerifyDaSigsPatch.h +++ b/libskale/VerifyDaSigsPatch.h @@ -26,6 +26,11 @@ class VerifyDaSigsPatch : public SchainPatch { static time_t verifyDaSigsPatchTimestamp; static time_t lastBlockTimestamp; + static void setTimestamp( time_t _timeStamp ) { + printInfo( __FILE__, _timeStamp ); + verifyDaSigsPatchTimestamp = _timeStamp; + } + public: static time_t 
getVerifyDaSigsPatchTimestamp(); }; diff --git a/libskale/httpserveroverride.cpp b/libskale/httpserveroverride.cpp index 9eb77db31..70175a404 100644 --- a/libskale/httpserveroverride.cpp +++ b/libskale/httpserveroverride.cpp @@ -923,7 +923,7 @@ void SkaleWsPeer::onMessage( const std::string& msg, skutils::ws::opcv eOpCode ) size_t nRequestSize = strRequest.size(); // skutils::task::performance::action a( - strPerformanceQueueName, strPerformanceActionName, joRequest ); + strPerformanceQueueName, strPerformanceActionName ); if ( pSO->methodTraceVerbosity( strMethod ) != dev::VerbositySilent ) clog( pSO->methodTraceVerbosity( strMethod ), cc::info( pThis->getRelay().nfoGetSchemeUC() ) + cc::debug( "/" ) + @@ -2452,8 +2452,7 @@ skutils::result_of_http_request SkaleServerOverride::implHandleHttpRequest( skutils::tools::format( "rpc/%s/%zu", strProtocol.c_str(), nServerIndex ); std::string strPerformanceActionName = skutils::tools::format( "%s task %zu, %s", strProtocol.c_str(), nTaskNumberCall_++, strMethod.c_str() ); - skutils::task::performance::action a( - strPerformanceQueueName, strPerformanceActionName, joRequest ); + skutils::task::performance::action a( strPerformanceQueueName, strPerformanceActionName ); // skutils::stats::time_tracker::element_ptr_t rttElement; rttElement.emplace( "RPC", strProtocol.c_str(), strMethod.c_str(), nServerIndex, ipVer ); diff --git a/libskutils/include/skutils/http_pg.h b/libskutils/include/skutils/http_pg.h index 796be8bc6..5a7b06c9b 100644 --- a/libskutils/include/skutils/http_pg.h +++ b/libskutils/include/skutils/http_pg.h @@ -3,6 +3,13 @@ #include +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wdeprecated-copy" +#pragma GCC diagnostic ignored "-Waddress" +#pragma GCC diagnostic ignored "-Wnonnull-compare" +#pragma GCC diagnostic ignored "-Wsign-compare" +#pragma GCC diagnostic ignored "-Wattributes" + #include #include @@ -12,6 +19,8 @@ #include #include +#pragma GCC diagnostic pop + //#include #include diff 
--git a/libskutils/include/skutils/rest_call.h b/libskutils/include/skutils/rest_call.h index ebdb0943f..94fa2c96c 100644 --- a/libskutils/include/skutils/rest_call.h +++ b/libskutils/include/skutils/rest_call.h @@ -183,11 +183,11 @@ class client { data_t call( const nlohmann::json& joIn, bool isAutoGenJsonID = true, e_data_fetch_strategy edfs = e_data_fetch_strategy::edfs_default, std::chrono::milliseconds wait_step = std::chrono::milliseconds( 20 ), - size_t cntSteps = 1000, bool isReturnErrorResponse = false ); + size_t cntSteps = 1000, [[maybe_unused]] bool isReturnErrorResponse = false ); data_t call( const std::string& strJsonIn, bool isAutoGenJsonID = true, e_data_fetch_strategy edfs = e_data_fetch_strategy::edfs_default, std::chrono::milliseconds wait_step = std::chrono::milliseconds( 20 ), - size_t cntSteps = 1000, bool isReturnErrorResponse = false ); + size_t cntSteps = 1000, [[maybe_unused]] bool isReturnErrorResponse = false ); private: typedef std::map< std::string, await_t > map_await_t; diff --git a/libskutils/include/skutils/stats.h b/libskutils/include/skutils/stats.h index 79650c733..3600d637b 100644 --- a/libskutils/include/skutils/stats.h +++ b/libskutils/include/skutils/stats.h @@ -220,7 +220,7 @@ class element : public skutils::ref_retain_release { element( const char* strSubSystem, const char* strProtocol, const char* strMethod, int /*nServerIndex*/, int /*ipVer*/ ); virtual ~element(); - void stop() const; + void stop(); void setMethod( const char* strMethod ) const; void setError() const; double getDurationInSeconds() const; diff --git a/libskutils/include/skutils/task_performance.h b/libskutils/include/skutils/task_performance.h index ad24744a8..67f251a03 100644 --- a/libskutils/include/skutils/task_performance.h +++ b/libskutils/include/skutils/task_performance.h @@ -196,7 +196,8 @@ class tracker : public skutils::ref_retain_release, typedef std::map< string, queue_ptr > map_type; mutable map_type map_; - atomic_bool isEnabled_ = true; + 
// make sure that performance tracking is disabled by default + atomic_bool isEnabled_ = false; atomic_index_type safeMaxItemCount_ = 10 * 1000 * 1000; atomic_index_type sessionMaxItemCount_ = 0; // zero means use safeMaxItemCount_ string strFirstEncounteredStopReason_; diff --git a/libskutils/include/skutils/utils.h b/libskutils/include/skutils/utils.h index 722761ffe..7a792f230 100644 --- a/libskutils/include/skutils/utils.h +++ b/libskutils/include/skutils/utils.h @@ -580,9 +580,6 @@ extern char getch_no_wait(); namespace signal { -extern std::atomic_int g_nStopSignal; -extern std::atomic_bool g_bStop; - extern bool get_signal_description( int nSignalNo, std::string& strSignalName, std::string& strSignalDescription ); // returns true if signal name is known extern std::string signal2str( int nSignalNo, const char* strPrefix = nullptr, @@ -591,6 +588,7 @@ typedef void ( *fn_signal_handler_t )( int nSignalNo ); extern bool init_common_signal_handling( fn_signal_handler_t fnSignalHander ); extern std::string generate_stack_trace( int nSkip = 1, bool isExtended = true ); +extern std::string read_maps(); }; // namespace signal diff --git a/libskutils/src/http_pg.cpp b/libskutils/src/http_pg.cpp index 1ed77230e..fc2fb766d 100644 --- a/libskutils/src/http_pg.cpp +++ b/libskutils/src/http_pg.cpp @@ -1,14 +1,22 @@ #include +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wdeprecated-copy" +#pragma GCC diagnostic ignored "-Waddress" +#pragma GCC diagnostic ignored "-Wnonnull-compare" +#pragma GCC diagnostic ignored "-Wsign-compare" + #include #include +#include + +#pragma GCC diagnostic pop + #include #include #include -#include - namespace skutils { namespace http_pg { diff --git a/libskutils/src/mail.cpp b/libskutils/src/mail.cpp index 228e6fa4c..c96868f83 100644 --- a/libskutils/src/mail.cpp +++ b/libskutils/src/mail.cpp @@ -210,7 +210,7 @@ void client::addRecipient( const char* email, const char* name ) { if ( name != nullptr ) recipient.name_ = name; 
else - recipient.name_.empty(); + recipient.name_.clear(); vecRecipients_.insert( vecRecipients_.end(), recipient ); } void client::addCCRecipient( const char* email, const char* name ) { @@ -221,7 +221,7 @@ void client::addCCRecipient( const char* email, const char* name ) { if ( name != nullptr ) recipient.name_ = name; else - recipient.name_.empty(); + recipient.name_.clear(); vecCCRecipients_.insert( vecCCRecipients_.end(), recipient ); } void client::addBCCRecipient( const char* email, const char* name ) { @@ -232,7 +232,7 @@ void client::addBCCRecipient( const char* email, const char* name ) { if ( name != nullptr ) recipient.name_ = name; else - recipient.name_.empty(); + recipient.name_.clear(); vecBvecCCRecipients__.insert( vecBvecCCRecipients__.end(), recipient ); } diff --git a/libskutils/src/rest_call.cpp b/libskutils/src/rest_call.cpp index 467226ef5..8b671b30a 100644 --- a/libskutils/src/rest_call.cpp +++ b/libskutils/src/rest_call.cpp @@ -401,7 +401,6 @@ bool client::open( const skutils::url& u, std::chrono::milliseconds wait_step, s else strPort = "80"; } - int nPort = std::atoi( strPort.c_str() ); // long nEffectiveClientConnectionTimeoutMS = ( nClientConnectionTimeoutMS_ > 0 ) ? 
@@ -414,6 +413,7 @@ bool client::open( const skutils::url& u, std::chrono::milliseconds wait_step, s u, nEffectiveClientConnectionTimeoutMS, &optsSSL_ ) ); ch_->isVerboseInsideCURL_ = isVerboseInsideNetworkLayer_; #else // (defined __SKUTIS_REST_USE_CURL_FOR_HTTP) + int nPort = std::atoi( strPort.c_str() ); ch_.reset( new skutils::http::client( -1, strHost.c_str(), nPort, nEffectiveClientConnectionTimeoutMS, nullptr ) ); #endif // else from (defined __SKUTIS_REST_USE_CURL_FOR_HTTP) @@ -424,6 +424,7 @@ bool client::open( const skutils::url& u, std::chrono::milliseconds wait_step, s u, nEffectiveClientConnectionTimeoutMS, &optsSSL_ ) ); ch_->isVerboseInsideCURL_ = isVerboseInsideNetworkLayer_; #else // (defined __SKUTIS_REST_USE_CURL_FOR_HTTP) + int nPort = std::atoi( strPort.c_str() ); ch_.reset( new skutils::http::SSL_client( -1, strHost.c_str(), nPort, nEffectiveClientConnectionTimeoutMS, &optsSSL_ ) ); #endif // else from (defined __SKUTIS_REST_USE_CURL_FOR_HTTP) @@ -648,7 +649,8 @@ bool client::stat_auto_gen_json_id( nlohmann::json& jo ) { } data_t client::call( const nlohmann::json& joIn, bool isAutoGenJsonID, e_data_fetch_strategy edfs, - std::chrono::milliseconds wait_step, size_t cntSteps, bool isReturnErrorResponse ) { + std::chrono::milliseconds wait_step, size_t cntSteps, + [[maybe_unused]] bool isReturnErrorResponse ) { nlohmann::json jo = joIn; if ( isAutoGenJsonID ) stat_auto_gen_json_id( jo ); @@ -728,7 +730,8 @@ data_t client::call( const nlohmann::json& joIn, bool isAutoGenJsonID, e_data_fe return d; } data_t client::call( const std::string& strJsonIn, bool isAutoGenJsonID, e_data_fetch_strategy edfs, - std::chrono::milliseconds wait_step, size_t cntSteps, bool isReturnErrorResponse ) { + std::chrono::milliseconds wait_step, size_t cntSteps, + [[maybe_unused]] bool isReturnErrorResponse ) { try { nlohmann::json jo = nlohmann::json::parse( strJsonIn ); return call( jo, isAutoGenJsonID, edfs, wait_step, cntSteps, isReturnErrorResponse ); diff --git 
a/libskutils/src/stats.cpp b/libskutils/src/stats.cpp index 2d0814992..d77dcbd20 100644 --- a/libskutils/src/stats.cpp +++ b/libskutils/src/stats.cpp @@ -405,7 +405,6 @@ element::element( const char* strSubSystem, const char* strProtocol, const char* strProtocol_ = "N/A"; if ( strMethod_.empty() ) strMethod_ = g_strMethodNameUnknown; - do_register(); } element::~element() { stop(); @@ -420,12 +419,13 @@ void element::do_unregister() { queue::getQueueForSubsystem( strSubSystem_.c_str() ).do_unregister( rttElement ); } -void element::stop() const { +void element::stop() { lock_type lock( mtx() ); if ( isStopped_ ) return; isStopped_ = true; tpEnd_ = skutils::stats::clock::now(); + do_register(); } void element::setMethod( const char* strMethod ) const { diff --git a/libskutils/src/utils.cpp b/libskutils/src/utils.cpp index 1c398316c..abb760c96 100644 --- a/libskutils/src/utils.cpp +++ b/libskutils/src/utils.cpp @@ -1300,9 +1300,6 @@ char getch_no_wait() { namespace signal { -std::atomic_int g_nStopSignal{ 0 }; -std::atomic_bool g_bStop{ false }; - bool get_signal_description( int nSignalNo, std::string& strSignalName, std::string& strSignalDescription ) { // returns true if signal name is known struct sig_desc_t { @@ -1577,62 +1574,86 @@ bool init_common_signal_handling( fn_signal_handler_t fnSignalHander ) { std::string generate_stack_trace( int nSkip, bool isExtended ) { if ( nSkip < 0 ) nSkip = 0; - void* callstack_data[256]; // 128 - const int nCountOfStackFramesRequested = sizeof( callstack_data ) / sizeof( callstack_data[0] ); - int nStackFramesFrameCount = backtrace( callstack_data, nCountOfStackFramesRequested ); + void* callstackData[256]; // 128 + const int nCountOfStackFramesRequested = sizeof( callstackData ) / sizeof( callstackData[0] ); + int nStackFramesFrameCount = backtrace( callstackData, nCountOfStackFramesRequested ); if ( nStackFramesFrameCount <= 0 ) return std::string( "[empty(or corrupt) stack frame]\n" ); - char** traced_symbols = 
backtrace_symbols( callstack_data, nStackFramesFrameCount ); + + char** tracedSymbols = backtrace_symbols( callstackData, nStackFramesFrameCount ); + if ( tracedSymbols == nullptr ) + return ""; + std::ostringstream ss; for ( int i = nSkip; i < nStackFramesFrameCount; ++i ) { - char* walk_sym = traced_symbols[i]; + char* walkSym = tracedSymbols[i]; bool bLinePassed = false; if ( isExtended ) { - char *begin_name = nullptr, *begin_offset = nullptr, *end_offset = nullptr; + char *beginName = nullptr, *beginOffset = nullptr, *endOffset = nullptr; + char *beginAddr = nullptr, *endAddr = nullptr; // find parentheses and +address offset surrounding the mangled name: // ./module(function+0x15c) [0x8048a6d] - for ( char* p = walk_sym; *p; ++p ) { + for ( char* p = walkSym; *p; ++p ) { if ( *p == '(' ) - begin_name = p; + beginName = p; else if ( *p == '+' ) - begin_offset = p; - else if ( *p == ')' && begin_offset ) { - end_offset = p; - break; - } + beginOffset = p; + else if ( *p == ')' && beginOffset ) { + endOffset = p; + } else if ( *p == '[' ) + beginAddr = p + 1; + else if ( *p == ']' ) + endAddr = p; } - if ( begin_name && begin_offset && end_offset && begin_name < begin_offset ) { - *begin_name++ = '\0'; - *begin_offset++ = '\0'; - *end_offset = '\0'; + if ( beginName && beginOffset && endOffset && beginName < beginOffset ) { + *beginName++ = '\0'; + *beginOffset++ = '\0'; + *endOffset = '\0'; + + std::string addr; + if ( beginAddr && endAddr && endAddr > beginAddr ) { + *endAddr = '\0'; + addr = beginAddr; + } + // mangled name is now in [begin_name, begin_offset) and caller offset in // [begin_offset, end_offset). 
now apply __cxa_demangle(): int status = -1; - size_t funcnamesize = 512; // 256 - char* funcname = ( char* ) calloc( 1, funcnamesize ); - char* ret = abi::__cxa_demangle( begin_name, funcname, &funcnamesize, &status ); + char* funcname = abi::__cxa_demangle( beginName, nullptr, nullptr, &status ); if ( status == 0 ) { - funcname = ret; // use possibly realloc()-ed string ss << skutils::tools::format( - " %s : %s+%s\n", walk_sym, funcname, begin_offset ); + " %s : %s+%s [%s]\n", walkSym, funcname, beginOffset, addr.c_str() ); } else { // demangling failed, output function name as a C function with no arguments ss << skutils::tools::format( - " %s : %s()+%s\n", walk_sym, begin_name, begin_offset ); + " %s : %s()+%s [%s]\n", walkSym, beginName, beginOffset, addr.c_str() ); } free( funcname ); bLinePassed = true; } } if ( !bLinePassed ) - ss << walk_sym << "\n"; + ss << walkSym << "\n"; } - free( traced_symbols ); + free( tracedSymbols ); if ( nStackFramesFrameCount == nCountOfStackFramesRequested ) ss << "[truncated]\n"; return ss.str(); } +std::string read_maps() { + FILE* fp = fopen( "/proc/self/maps", "rb" ); + if ( fp == nullptr ) + return ""; + + std::ostringstream ss; + int c; + while ( ( c = fgetc( fp ) ) > 0 ) { + ss << ( char ) c; + } + fclose( fp ); + return ss.str(); +} }; // namespace signal diff --git a/libweb3jsonrpc/CMakeLists.txt b/libweb3jsonrpc/CMakeLists.txt index db5b99152..b1b50a770 100644 --- a/libweb3jsonrpc/CMakeLists.txt +++ b/libweb3jsonrpc/CMakeLists.txt @@ -49,9 +49,6 @@ set(sources SkaleStatsFace.h SkaleStatsSite.h - SkaleNetworkBrowser.h - SkaleNetworkBrowser.cpp - rapidjson_handlers.cpp ) diff --git a/libweb3jsonrpc/Debug.cpp b/libweb3jsonrpc/Debug.cpp index cd227d2e6..0ef884262 100644 --- a/libweb3jsonrpc/Debug.cpp +++ b/libweb3jsonrpc/Debug.cpp @@ -299,18 +299,18 @@ uint64_t Debug::debug_getSnapshotHashCalculationTime() { return m_eth.getSnapshotHashCalculationTime(); } -// uint64_t Debug::debug_doStateDbCompaction() { -// auto t1 = 
boost::chrono::high_resolution_clock::now(); -// m_eth.doStateDbCompaction(); -// auto t2 = boost::chrono::high_resolution_clock::now(); +uint64_t Debug::debug_doStateDbCompaction() { + auto t1 = boost::chrono::high_resolution_clock::now(); + m_eth.doStateDbCompaction(); + auto t2 = boost::chrono::high_resolution_clock::now(); -// return boost::chrono::duration_cast< boost::chrono::milliseconds >( t2 - t1 ).count(); -//} + return boost::chrono::duration_cast< boost::chrono::milliseconds >( t2 - t1 ).count(); +} -// uint64_t Debug::debug_doBlocksDbCompaction() { -// auto t1 = boost::chrono::high_resolution_clock::now(); -// m_eth.doBlocksDbCompaction(); -// auto t2 = boost::chrono::high_resolution_clock::now(); +uint64_t Debug::debug_doBlocksDbCompaction() { + auto t1 = boost::chrono::high_resolution_clock::now(); + m_eth.doBlocksDbCompaction(); + auto t2 = boost::chrono::high_resolution_clock::now(); -// return boost::chrono::duration_cast< boost::chrono::milliseconds >( t2 - t1 ).count(); -//} + return boost::chrono::duration_cast< boost::chrono::milliseconds >( t2 - t1 ).count(); +} diff --git a/libweb3jsonrpc/Debug.h b/libweb3jsonrpc/Debug.h index 99bf31303..f5337001d 100644 --- a/libweb3jsonrpc/Debug.h +++ b/libweb3jsonrpc/Debug.h @@ -57,8 +57,8 @@ class Debug : public DebugFace { virtual uint64_t debug_getSnapshotCalculationTime() override; virtual uint64_t debug_getSnapshotHashCalculationTime() override; - // virtual uint64_t debug_doStateDbCompaction() override; - // virtual uint64_t debug_doBlocksDbCompaction() override; + virtual uint64_t debug_doStateDbCompaction() override; + virtual uint64_t debug_doBlocksDbCompaction() override; private: eth::Client const& m_eth; diff --git a/libweb3jsonrpc/DebugFace.h b/libweb3jsonrpc/DebugFace.h index 7991af5b2..ebcc07fe2 100644 --- a/libweb3jsonrpc/DebugFace.h +++ b/libweb3jsonrpc/DebugFace.h @@ -91,15 +91,13 @@ class DebugFace : public ServerInterface< DebugFace > { jsonrpc::PARAMS_BY_POSITION, 
jsonrpc::JSON_STRING, NULL ), &dev::rpc::DebugFace::debug_getSnapshotHashCalculationTimeI ); - // this->bindAndAddMethod( jsonrpc::Procedure( "debug_doStateDbCompaction", - // jsonrpc::PARAMS_BY_POSITION, jsonrpc::JSON_STRING, - // NULL ), - // &dev::rpc::DebugFace::debug_doStateDbCompactionI ); + this->bindAndAddMethod( jsonrpc::Procedure( "debug_doStateDbCompaction", + jsonrpc::PARAMS_BY_POSITION, jsonrpc::JSON_STRING, NULL ), + &dev::rpc::DebugFace::debug_doStateDbCompactionI ); - // this->bindAndAddMethod( jsonrpc::Procedure( "debug_doBlocksDbCompaction", - // jsonrpc::PARAMS_BY_POSITION, jsonrpc::JSON_STRING, - // NULL ), - // &dev::rpc::DebugFace::debug_doBlocksDbCompactionI ); + this->bindAndAddMethod( jsonrpc::Procedure( "debug_doBlocksDbCompaction", + jsonrpc::PARAMS_BY_POSITION, jsonrpc::JSON_STRING, NULL ), + &dev::rpc::DebugFace::debug_doBlocksDbCompactionI ); } inline virtual void debug_accountRangeAtI( const Json::Value& request, Json::Value& response ) { response = this->debug_accountRangeAt( request[0u].asString(), request[1u].asInt(), @@ -173,13 +171,13 @@ class DebugFace : public ServerInterface< DebugFace > { response = this->debug_getSnapshotHashCalculationTime(); } - // virtual void debug_doStateDbCompactionI( const Json::Value&, Json::Value& response ) { - // response = this->debug_doStateDbCompaction(); - // } + virtual void debug_doStateDbCompactionI( const Json::Value&, Json::Value& response ) { + response = this->debug_doStateDbCompaction(); + } - // virtual void debug_doBlocksDbCompactionI( const Json::Value&, Json::Value& response ) { - // response = this->debug_doBlocksDbCompaction(); - // } + virtual void debug_doBlocksDbCompactionI( const Json::Value&, Json::Value& response ) { + response = this->debug_doBlocksDbCompaction(); + } virtual Json::Value debug_accountRangeAt( const std::string& param1, int param2, const std::string& param3, int param4 ) = 0; @@ -206,8 +204,8 @@ class DebugFace : public ServerInterface< DebugFace > { 
virtual uint64_t debug_getSnapshotCalculationTime() = 0; virtual uint64_t debug_getSnapshotHashCalculationTime() = 0; - // virtual uint64_t debug_doStateDbCompaction() = 0; - // virtual uint64_t debug_doBlocksDbCompaction() = 0; + virtual uint64_t debug_doStateDbCompaction() = 0; + virtual uint64_t debug_doBlocksDbCompaction() = 0; }; } // namespace rpc diff --git a/libweb3jsonrpc/Eth.cpp b/libweb3jsonrpc/Eth.cpp index 21927d4a9..4b68b667b 100644 --- a/libweb3jsonrpc/Eth.cpp +++ b/libweb3jsonrpc/Eth.cpp @@ -31,6 +31,7 @@ #include #include #include +#include #include #include @@ -48,13 +49,89 @@ using namespace dev::rpc; const uint64_t MAX_CALL_CACHE_ENTRIES = 1024; const uint64_t MAX_RECEIPT_CACHE_ENTRIES = 1024; +#ifdef HISTORIC_STATE + +using namespace dev::rpc::_detail; + +// TODO Check LatestBlock number - update! +// Needs external locks to exchange read one to write one +void GappedTransactionIndexCache::ensureCached( BlockNumber _bn, + std::shared_lock< std::shared_mutex >& _readLock, + std::unique_lock< std::shared_mutex >& _writeLock ) const { + if ( _bn != PendingBlock && _bn != LatestBlock && real2gappedCache.count( _bn ) ) + return; + + // change read lock for write lock + // they both will be destroyed externally + _readLock.unlock(); + _writeLock.lock(); + + assert( real2gappedCache.size() <= cacheSize ); + if ( real2gappedCache.size() >= cacheSize ) { + real2gappedCache.erase( real2gappedCache.begin() ); + gapped2realCache.erase( gapped2realCache.begin() ); + } + + // can be empty for absent blocks + h256s transactions = client.transactionHashes( _bn ); + + real2gappedCache[_bn] = vector< size_t >( transactions.size(), UNDEFINED ); + gapped2realCache[_bn] = vector< size_t >(); + + u256 gasBefore = 0; + for ( size_t realIndex = 0; realIndex < transactions.size(); ++realIndex ) { + // find transaction gas usage + const h256& th = transactions[realIndex]; + u256 gasAfter = client.transactionReceipt( th ).cumulativeGasUsed(); + u256 diff = gasAfter - 
gasBefore; + gasBefore = gasAfter; + + // ignore transactions with 0 gas usage + if ( diff == 0 ) + continue; + + // cache it + size_t gappedIndex = gapped2realCache[_bn].size(); + gapped2realCache[_bn].push_back( realIndex ); + real2gappedCache[_bn][realIndex] = gappedIndex; + + } // for +} + +// returns true if block N can contain invalid transactions +// returns false if this block was created with SkipInvalidTransactionsPatch and they were skipped +bool hasPotentialInvalidTransactionsInBlock( BlockNumber _bn, const Interface& _client ) { + if ( _bn == 0 ) + return false; + + if ( SkipInvalidTransactionsPatch::getActivationTimestamp() == 0 ) + return true; + + if ( _bn == PendingBlock ) + return !SkipInvalidTransactionsPatch::isEnabled(); + + if ( _bn == LatestBlock ) + _bn = _client.number(); + + time_t prev_ts = _client.blockInfo( _bn - 1 ).timestamp(); + + return prev_ts < SkipInvalidTransactionsPatch::getActivationTimestamp(); +} + +#endif Eth::Eth( const std::string& configPath, eth::Interface& _eth, eth::AccountHolder& _ethAccounts ) : skutils::json_config_file_accessor( configPath ), m_eth( _eth ), m_ethAccounts( _ethAccounts ), m_callCache( MAX_CALL_CACHE_ENTRIES ), - m_receiptsCache( MAX_RECEIPT_CACHE_ENTRIES ) {} + m_receiptsCache( MAX_RECEIPT_CACHE_ENTRIES ) +#ifdef HISTORIC_STATE + , + m_gapCache( std::make_unique< GappedTransactionIndexCache >( 16, *client() ) ) +#endif +{ +} bool Eth::isEnabledTransactionSending() const { bool isEnabled = true; @@ -221,7 +298,14 @@ Json::Value Eth::eth_getBlockTransactionCountByHash( string const& _blockHash ) if ( !client()->isKnown( blockHash ) ) return Json::Value( Json::nullValue ); - return toJS( client()->transactionCount( blockHash ) ); +#ifdef HISTORIC_STATE + BlockNumber bn = client()->numberFromHash( blockHash ); + if ( !hasPotentialInvalidTransactionsInBlock( bn, *client() ) ) +#endif + return toJS( client()->transactionCount( blockHash ) ); +#ifdef HISTORIC_STATE + return toJS( 
m_gapCache->gappedBlockTransactionCount( bn ) ); +#endif } catch ( ... ) { BOOST_THROW_EXCEPTION( JsonRpcException( Errors::ERROR_RPC_INVALID_PARAMS ) ); } @@ -233,7 +317,14 @@ Json::Value Eth::eth_getBlockTransactionCountByNumber( string const& _blockNumbe if ( !client()->isKnown( blockNumber ) ) return Json::Value( Json::nullValue ); - return toJS( client()->transactionCount( jsToBlockNumber( _blockNumber ) ) ); +#ifdef HISTORIC_STATE + BlockNumber bn = jsToBlockNumber( _blockNumber ); + if ( !hasPotentialInvalidTransactionsInBlock( bn, *client() ) ) +#endif + return toJS( client()->transactionCount( jsToBlockNumber( _blockNumber ) ) ); +#ifdef HISTORIC_STATE + return toJS( m_gapCache->gappedBlockTransactionCount( blockNumber ) ); +#endif } catch ( ... ) { BOOST_THROW_EXCEPTION( JsonRpcException( Errors::ERROR_RPC_INVALID_PARAMS ) ); } @@ -475,13 +566,41 @@ Json::Value Eth::eth_getBlockByHash( string const& _blockHash, bool _includeTran if ( !client()->isKnown( h ) ) return Json::Value( Json::nullValue ); - if ( _includeTransactions ) + if ( _includeTransactions ) { + Transactions transactions = client()->transactions( h ); + +#ifdef HISTORIC_STATE + BlockNumber bn = client()->numberFromHash( h ); + if ( hasPotentialInvalidTransactionsInBlock( bn, *client() ) ) { + // remove invalid transactions + size_t index = 0; + Transactions::iterator newEnd = std::remove_if( transactions.begin(), + transactions.end(), [this, &index, bn]( const Transaction& ) -> bool { + return !m_gapCache->transactionPresent( bn, index++ ); + } ); + transactions.erase( newEnd, transactions.end() ); + } +#endif return toJson( client()->blockInfo( h ), client()->blockDetails( h ), - client()->uncleHashes( h ), client()->transactions( h ), client()->sealEngine() ); - else + client()->uncleHashes( h ), transactions, client()->sealEngine() ); + } else { + h256s transactions = client()->transactionHashes( h ); + +#ifdef HISTORIC_STATE + BlockNumber bn = client()->numberFromHash( h ); + if ( 
hasPotentialInvalidTransactionsInBlock( bn, *client() ) ) { + // remove invalid transactions + size_t index = 0; + h256s::iterator newEnd = std::remove_if( transactions.begin(), transactions.end(), + [this, &index, bn]( const h256& ) -> bool { + return !m_gapCache->transactionPresent( bn, index++ ); + } ); + transactions.erase( newEnd, transactions.end() ); + } +#endif return toJson( client()->blockInfo( h ), client()->blockDetails( h ), - client()->uncleHashes( h ), client()->transactionHashes( h ), - client()->sealEngine() ); + client()->uncleHashes( h ), transactions, client()->sealEngine() ); + } } catch ( ... ) { BOOST_THROW_EXCEPTION( JsonRpcException( Errors::ERROR_RPC_INVALID_PARAMS ) ); } @@ -493,6 +612,13 @@ Json::Value Eth::eth_getBlockByNumber( string const& _blockNumber, bool _include if ( !client()->isKnown( h ) ) return Json::Value( Json::nullValue ); +#ifdef HISTORIC_STATE + h256 bh = client()->hashFromNumber( h ); + return eth_getBlockByHash( "0x" + bh.hex(), _includeTransactions ); + } catch ( const JsonRpcException& ) { + throw; +#else + if ( _includeTransactions ) return toJson( client()->blockInfo( h ), client()->blockDetails( h ), client()->uncleHashes( h ), client()->transactions( h ), client()->sealEngine() ); @@ -500,6 +626,7 @@ Json::Value Eth::eth_getBlockByNumber( string const& _blockNumber, bool _include return toJson( client()->blockInfo( h ), client()->blockDetails( h ), client()->uncleHashes( h ), client()->transactionHashes( h ), client()->sealEngine() ); +#endif } catch ( ... 
) { BOOST_THROW_EXCEPTION( JsonRpcException( Errors::ERROR_RPC_INVALID_PARAMS ) ); } @@ -511,6 +638,13 @@ Json::Value Eth::eth_getTransactionByHash( string const& _transactionHash ) { if ( !client()->isKnownTransaction( h ) ) return Json::Value( Json::nullValue ); +#ifdef HISTORIC_STATE + // skip invalid + auto rcp = client()->localisedTransactionReceipt( h ); + if ( rcp.gasUsed() == 0 ) + return Json::Value( Json::nullValue ); +#endif + return toJson( client()->localisedTransaction( h ) ); } catch ( ... ) { BOOST_THROW_EXCEPTION( JsonRpcException( Errors::ERROR_RPC_INVALID_PARAMS ) ); @@ -522,6 +656,16 @@ Json::Value Eth::eth_getTransactionByBlockHashAndIndex( try { h256 bh = jsToFixed< 32 >( _blockHash ); unsigned int ti = static_cast< unsigned int >( jsToInt( _transactionIndex ) ); + +#ifdef HISTORIC_STATE + BlockNumber bn = client()->numberFromHash( bh ); + if ( hasPotentialInvalidTransactionsInBlock( bn, *client() ) ) + try { + ti = m_gapCache->realIndexFromGapped( bn, ti ); + } catch ( const out_of_range& ) { + return Json::Value( Json::nullValue ); + } +#endif if ( !client()->isKnownTransaction( bh, ti ) ) return Json::Value( Json::nullValue ); @@ -537,6 +681,16 @@ Json::Value Eth::eth_getTransactionByBlockNumberAndIndex( BlockNumber bn = jsToBlockNumber( _blockNumber ); h256 bh = client()->hashFromNumber( bn ); unsigned int ti = static_cast< unsigned int >( jsToInt( _transactionIndex ) ); + +#ifdef HISTORIC_STATE + if ( hasPotentialInvalidTransactionsInBlock( bn, *client() ) ) + try { + ti = m_gapCache->realIndexFromGapped( bn, ti ); + } catch ( const out_of_range& ) { + return Json::Value( Json::nullValue ); + } +#endif + if ( !client()->isKnownTransaction( bh, ti ) ) return Json::Value( Json::nullValue ); @@ -572,7 +726,7 @@ LocalisedTransactionReceipt Eth::eth_getTransactionReceipt( string const& _trans } - // Step 2. We got cache miss. Do the work and put the result into the cach + // Step 2. We got cache miss. 
Do the work and put the result into the cache if ( !client()->isKnownTransaction( h ) ) { // transaction is not yet in the blockchain. Put null as receipt // into the cache @@ -583,6 +737,22 @@ LocalisedTransactionReceipt Eth::eth_getTransactionReceipt( string const& _trans auto cli = client(); auto rcp = cli->localisedTransactionReceipt( h ); +#ifdef HISTORIC_STATE + if ( hasPotentialInvalidTransactionsInBlock( rcp.blockNumber(), *client() ) ) { + // skip invalid + if ( rcp.gasUsed() == 0 ) { + m_receiptsCache.put( cacheKey, nullptr ); + throw std::invalid_argument( "Not known transaction" ); + } + + // substitute position, skipping invalid transactions + size_t newIndex = + m_gapCache->gappedIndexFromReal( rcp.blockNumber(), rcp.transactionIndex() ); + rcp = LocalisedTransactionReceipt( rcp, rcp.hash(), rcp.blockHash(), rcp.blockNumber(), + newIndex, rcp.from(), rcp.to(), rcp.gasUsed(), rcp.contractAddress() ); + } +#endif + // got a receipt. Put it into the cache before returning // so that we have it if anyone asks again m_receiptsCache.put( cacheKey, make_shared< LocalisedTransactionReceipt >( rcp ) ); diff --git a/libweb3jsonrpc/Eth.h b/libweb3jsonrpc/Eth.h index ee661dbdd..a3ce404da 100644 --- a/libweb3jsonrpc/Eth.h +++ b/libweb3jsonrpc/Eth.h @@ -46,13 +46,87 @@ struct TransactionSkeleton; class Interface; class LocalisedTransactionReceipt; } // namespace eth - } // namespace dev namespace dev { namespace rpc { +#ifdef HISTORIC_STATE +namespace _detail { +// cache for transaction index mapping +class GappedTransactionIndexCache { +public: + GappedTransactionIndexCache( size_t _cacheSize, const dev::eth::Interface& _client ) + : client( _client ), cacheSize( _cacheSize ) { + assert( _cacheSize > 0 ); + } + + size_t realBlockTransactionCount( dev::eth::BlockNumber _bn ) const { + std::shared_lock< std::shared_mutex > readLock( mtx ); + std::unique_lock< std::shared_mutex > writeLock( mtx, std::defer_lock ); + ensureCached( _bn, readLock, writeLock ); + + 
return real2gappedCache[_bn].size(); + } + size_t gappedBlockTransactionCount( dev::eth::BlockNumber _bn ) const { + std::shared_lock< std::shared_mutex > readLock( mtx ); + std::unique_lock< std::shared_mutex > writeLock( mtx, std::defer_lock ); + ensureCached( _bn, readLock, writeLock ); + + return gapped2realCache[_bn].size(); + } + // can throw + size_t realIndexFromGapped( dev::eth::BlockNumber _bn, size_t _gappedIndex ) const { + std::shared_lock< std::shared_mutex > readLock( mtx ); + std::unique_lock< std::shared_mutex > writeLock( mtx, std::defer_lock ); + ensureCached( _bn, readLock, writeLock ); + + // throws out_of_range! + return gapped2realCache[_bn].at( _gappedIndex ); + } + // can throw + size_t gappedIndexFromReal( dev::eth::BlockNumber _bn, size_t _realIndex ) const { + std::shared_lock< std::shared_mutex > readLock( mtx ); + std::unique_lock< std::shared_mutex > writeLock( mtx, std::defer_lock ); + ensureCached( _bn, readLock, writeLock ); + + // throws out_of_range! + size_t res = real2gappedCache[_bn].at( _realIndex ); + if ( res == UNDEFINED ) + throw std::out_of_range( "Transaction at index " + std::to_string( _realIndex ) + + " in block " + to_string( _bn ) + + " is invalid and should have been ignored!" 
); + return res; + } + // can throw + // TODO rename to valid + bool transactionPresent( dev::eth::BlockNumber _bn, size_t _realIndex ) const { + std::shared_lock< std::shared_mutex > readLock( mtx ); + std::unique_lock< std::shared_mutex > writeLock( mtx, std::defer_lock ); + ensureCached( _bn, readLock, writeLock ); + + return real2gappedCache[_bn].at( _realIndex ) != UNDEFINED; + } + +private: + void ensureCached( dev::eth::BlockNumber _bn, std::shared_lock< std::shared_mutex >& _readLock, + std::unique_lock< std::shared_mutex >& _writeLock ) const; + +private: + mutable std::shared_mutex mtx; + + const dev::eth::Interface& client; + const size_t cacheSize; + + enum { UNDEFINED = ( size_t ) -1 }; + + mutable std::map< dev::eth::BlockNumber, std::vector< size_t > > real2gappedCache; + mutable std::map< dev::eth::BlockNumber, std::vector< size_t > > gapped2realCache; +}; +} // namespace _detail +#endif + // Should only be called within a catch block std::string exceptionToErrorMessage(); @@ -159,6 +233,10 @@ class Eth : public dev::rpc::EthFace, public skutils::json_config_file_accessor // the transaction was not yet ready // for which the request has been executed cache::lru_cache< string, ptr< dev::eth::LocalisedTransactionReceipt > > m_receiptsCache; + +#ifdef HISTORIC_STATE + std::unique_ptr< _detail::GappedTransactionIndexCache > m_gapCache; +#endif }; } // namespace rpc diff --git a/libweb3jsonrpc/JsonHelper.cpp b/libweb3jsonrpc/JsonHelper.cpp index a240343cb..883374885 100644 --- a/libweb3jsonrpc/JsonHelper.cpp +++ b/libweb3jsonrpc/JsonHelper.cpp @@ -344,17 +344,21 @@ Json::Value toJson( dev::eth::Transaction const& _t, bytes const& _rlp ) { Json::Value toJson( dev::eth::LocalisedTransaction const& _t ) { Json::Value res; if ( _t ) { - res["hash"] = toJS( _t.sha3() ); - res["input"] = toJS( _t.data() ); - res["to"] = _t.isCreation() ? 
Json::Value() : toJS( _t.receiveAddress() ); + res["blockHash"] = toJS( _t.blockHash() ); + res["blockNumber"] = toJS( _t.blockNumber() ); res["from"] = toJS( _t.safeSender() ); res["gas"] = toJS( _t.gas() ); res["gasPrice"] = toJS( _t.gasPrice() ); + res["hash"] = toJS( _t.sha3() ); + res["input"] = toJS( _t.data() ); res["nonce"] = toJS( _t.nonce() ); - res["value"] = toJS( _t.value() ); - res["blockHash"] = toJS( _t.blockHash() ); + res["to"] = _t.isCreation() ? Json::Value() : toJS( _t.receiveAddress() ); res["transactionIndex"] = toJS( _t.transactionIndex() ); - res["blockNumber"] = toJS( _t.blockNumber() ); + res["value"] = toJS( _t.value() ); + res["v"] = _t.isReplayProtected() ? toJS( 2 * _t.chainId() + 35 + _t.signature().v ) : + toJS( 27 + _t.signature().v ); + res["r"] = toJS( _t.signature().r.hex() ); + res["s"] = toJS( _t.signature().s.hex() ); } return res; } diff --git a/libweb3jsonrpc/Skale.cpp b/libweb3jsonrpc/Skale.cpp index a1d83e5c8..763c3af6c 100644 --- a/libweb3jsonrpc/Skale.cpp +++ b/libweb3jsonrpc/Skale.cpp @@ -64,13 +64,22 @@ namespace rpc { std::string exceptionToErrorMessage(); -Skale::Skale( Client& _client, std::shared_ptr< SharedSpace > _sharedSpace ) - : m_client( _client ), m_shared_space( _sharedSpace ) {} - volatile bool Skale::g_bShutdownViaWeb3Enabled = false; volatile bool Skale::g_bNodeInstanceShouldShutdown = false; Skale::list_fn_on_shutdown_t Skale::g_list_fn_on_shutdown; +Skale::Skale( Client& _client, std::shared_ptr< SharedSpace > _sharedSpace ) + : m_client( _client ), m_shared_space( _sharedSpace ) {} + +Skale::~Skale() { + threadExitRequested = true; + if ( snapshotDownloadFragmentMonitorThread != nullptr && + snapshotDownloadFragmentMonitorThread->joinable() ) { + clog( VerbosityInfo, "Skale" ) << "Joining downloadSnapshotFragmentMonitorThread"; + snapshotDownloadFragmentMonitorThread->join(); + } +} + bool Skale::isWeb3ShutdownEnabled() { return g_bShutdownViaWeb3Enabled; } @@ -149,8 +158,6 @@ nlohmann::json 
Skale::impl_skale_getSnapshot( const nlohmann::json& joRequest, C // TODO check unsigned blockNumber = joRequest["blockNumber"].get< unsigned >(); - if ( blockNumber == 0 ) - throw std::runtime_error( "Snapshot for block 0 is absent" ); // exit if too early if ( currentSnapshotBlockNumber >= 0 ) { @@ -201,11 +208,13 @@ nlohmann::json Skale::impl_skale_getSnapshot( const nlohmann::json& joRequest, C m_client.chainParams().sChain.snapshotDownloadInactiveTimeout ) && time( NULL ) - currentSnapshotTime < m_client.chainParams().sChain.snapshotDownloadTimeout ) { - sleep( 30 ); + if ( threadExitRequested ) + break; + sleep( 10 ); } clog( VerbosityInfo, "skale_downloadSnapshotFragmentMonitorThread" ) - << "Unlocking shared space as timeout was reached.\n"; + << "Unlocking shared space.\n"; std::lock_guard< std::mutex > lock( m_snapshot_mutex ); if ( currentSnapshotBlockNumber >= 0 ) { diff --git a/libweb3jsonrpc/Skale.h b/libweb3jsonrpc/Skale.h index e1769a460..3a39db3e7 100644 --- a/libweb3jsonrpc/Skale.h +++ b/libweb3jsonrpc/Skale.h @@ -57,6 +57,7 @@ class Skale : public dev::rpc::SkaleFace { public: explicit Skale( dev::eth::Client& _client, std::shared_ptr< SharedSpace > _sharedSpace = nullptr ); + virtual ~Skale(); virtual RPCModules implementedModules() const override { return RPCModules{ RPCModule{ "skale", "0.1" } }; @@ -105,6 +106,7 @@ class Skale : public dev::rpc::SkaleFace { std::atomic< time_t > currentSnapshotTime = 0; std::atomic< time_t > lastSnapshotDownloadFragmentTime = 0; std::unique_ptr< std::thread > snapshotDownloadFragmentMonitorThread; + std::atomic_bool threadExitRequested = false; mutable std::mutex m_snapshot_mutex; }; diff --git a/libweb3jsonrpc/SkaleNetworkBrowser.cpp b/libweb3jsonrpc/SkaleNetworkBrowser.cpp deleted file mode 100644 index 5d35ec390..000000000 --- a/libweb3jsonrpc/SkaleNetworkBrowser.cpp +++ /dev/null @@ -1,1593 +0,0 @@ -#include "SkaleNetworkBrowser.h" - -#include - -#include -#include -#include -#include - -#include 
-#include -#include - -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include - -namespace skale { -namespace network { -namespace browser { - -bool g_bVerboseLogging = false; -size_t g_nRefreshIntervalInSeconds = 15 * 60; - -volatile fn_stop_indication_t g_fnStopIndication = nullptr; - -static bool stat_fnNonStopIndication() { - return false; -} - -// see: https://docs.soliditylang.org/en/develop/abi-spec.html#abi -// see: https://docs.soliditylang.org/en/develop/internals/layout_in_memory.html - -struct item256_t { - std::string strRaw; // without 0x prefix - dev::u256 u256; - size_t n; -}; // struct item256_t - -typedef std::vector< item256_t > vec256_t; - -static std::string stat_list_ids( const vec256_t& schains_ids_on_node ) { - std::string s; - const size_t cnt = schains_ids_on_node.size(); - for ( size_t i = 0; i < cnt; ++i ) { - const item256_t& schain_id_on_node = schains_ids_on_node[i]; - if ( i > 0 ) - s += ", "; - s += dev::toJS( schain_id_on_node.u256 ); - } - return s; -} - -static std::string stat_list_raw_vec( const vec256_t& vec ) { - std::string s; - const size_t cnt = vec.size(); - for ( size_t i = 0; i < cnt; ++i ) { - const item256_t& val = vec[i]; - if ( i > 0 ) - s += ", "; - s += val.strRaw; - } - return s; -} - -static vec256_t stat_split_raw_answer( const std::string& strIn ) { - std::string s = skutils::tools::to_lower( skutils::tools::trim_copy( strIn ) ); - size_t n = s.length(); - if ( n > 2 && ( s[0] == '0' && s[1] == 'x' ) ) - s = s.substr( 2, n - 2 ); - n = s.length(); - size_t cnt = n / 64; - vec256_t vec; - for ( size_t i = 0; i < cnt; ++i ) { - item256_t item; - item.strRaw = s.substr( i * 64, 64 ); - item.u256 = dev::u256( "0x" + item.strRaw ); - std::stringstream ss; - ss << std::hex << item.strRaw; - ss >> item.n; - vec.push_back( item ); - } - if ( g_bVerboseLogging ) { - clog( dev::VerbosityTrace, "snb" ) - << ( cc::info( "SKALE NETWORK BROWSER" ) + cc::debug( " split 
raw answer result: " ) + - cc::normal( stat_list_raw_vec( vec ) ) ); - } - return vec; -} - -static std::string stat_extract_string( const vec256_t& vec, size_t i ) { - size_t offset = vec[i].n; - i = offset / 32; - size_t len = vec[i].n; - ++i; - size_t accumulated = 0; - std::string s; - for ( ; accumulated < len; ) { - size_t cntPart = len - accumulated; - if ( cntPart > 32 ) - cntPart = 32; - for ( size_t j = 0; j < cntPart; ++j ) { - std::string str1 = vec[i].strRaw.substr( j * 2, 2 ); - char* pEnd = nullptr; - char c = ( char ) ::strtol( str1.c_str(), &pEnd, 16 ); - s += c; - } - accumulated += cntPart; - ++i; - } - return s; -} - -static std::string stat_extract_ipv4( const vec256_t& vec, size_t i ) { - std::string ip; - for ( size_t j = 0; j < 4; ++j ) { - std::string s = vec[i].strRaw.substr( j * 2, 2 ); - char* pEnd = nullptr; - unsigned int n = ( unsigned int ) ::strtol( s.c_str(), &pEnd, 16 ); - if ( ip.length() > 0 ) - ip += "."; - ip += skutils::tools::format( "%d", int( uint8_t( n ) ) ); - } - return ip; -} - -vec256_t stat_extract_vector( const vec256_t& vec, size_t i ) { - size_t offset = vec[i].n; - i = offset / 32; - size_t len = vec[i].n; - ++i; - vec256_t vecOut; - for ( size_t j = 0; j < len; ++j, ++i ) { - vecOut.push_back( vec[i] ); - } - return vecOut; -} - -static dev::u256 stat_compute_chain_id_from_schain_name( const std::string& name ) { - dev::h256 schain_id = dev::sha3( name ); - std::string s = skutils::tools::to_lower( skutils::tools::trim_copy( schain_id.hex() ) ); - size_t n = s.length(); - if ( n > 2 && ( s[0] == '0' && s[1] == 'x' ) ) - s = s.substr( 2, n - 2 ); - while ( s.length() < 64 ) - s = "0" + s; - s = s.substr( 0, 14 ); - dev::h256 chainId( "0x" + s ); - return chainId; -} - -static nlohmann::json stat_create_basic_call() { - nlohmann::json joCall = nlohmann::json::object(); - joCall["jsonrpc"] = "2.0"; - joCall["method"] = "eth_call"; - joCall["params"] = nlohmann::json::array(); - return joCall; -} - -static 
std::string stat_to_appendable_string( std::string s ) { - s = skutils::tools::to_lower( skutils::tools::trim_copy( s ) ); - size_t n = s.length(); - if ( n > 2 && ( s[0] == '0' && s[1] == 'x' ) ) - s = s.substr( 2, n - 2 ); - while ( s.length() < 64 ) - s = "0" + s; - return s; -} - -static std::string stat_to_appendable_string( const dev::u256& val ) { - std::string s = skutils::tools::to_lower( skutils::tools::trim_copy( dev::toJS( val ) ) ); - return stat_to_appendable_string( s ); -} - -static std::string stat_to_appendable_string( const dev::h256& val ) { - std::string s = skutils::tools::to_lower( skutils::tools::trim_copy( val.hex() ) ); - return stat_to_appendable_string( s ); -} - -// static std::string stat_to_appendable_string( size_t val ) { -// return stat_to_appendable_string( dev::u256( val ) ); -//} - -static std::string stat_to_0x_string( const dev::u256& val ) { - return "0x" + stat_to_appendable_string( val ); -} -static std::string stat_to_0x_string( const dev::h256& val ) { - return "0x" + stat_to_appendable_string( val ); -} -// static std::string stat_to_0x_string( size_t val ) { -// return "0x" + stat_to_appendable_string( val ); -//} - -const size_t PORTS_PER_SCHAIN = 64; - -static int stat_calc_schain_base_port( int node_base_port, int schain_index ) { - return node_base_port + schain_index * PORTS_PER_SCHAIN; -} - -static int stat_get_schain_index_in_node( - dev::h256 schain_id, const vec256_t& schains_ids_on_node ) { - const std::string strFindWhat = stat_to_appendable_string( schain_id ); - if ( g_bVerboseLogging ) { - clog( dev::VerbosityTrace, "snb" ) - << ( cc::info( "SKALE NETWORK BROWSER" ) + - cc::debug( " finding S-Chain index on node for S-Chain ID: " ) + - cc::normal( strFindWhat ) ); - } - const size_t cnt = schains_ids_on_node.size(); - if ( g_bVerboseLogging ) { - clog( dev::VerbosityTrace, "snb" ) - << ( cc::info( "SKALE NETWORK BROWSER" ) + cc::debug( " will review " ) + - cc::size10( cnt ) + cc::debug( " chain ID(s) " ) 
); - } - for ( size_t i = 0; i < cnt; ++i ) { - const item256_t& schain_id_on_node = schains_ids_on_node[i]; - const std::string strCompareWith = stat_to_appendable_string( schain_id_on_node.u256 ); - if ( g_bVerboseLogging ) { - clog( dev::VerbosityTrace, "snb" ) - << ( cc::info( "SKALE NETWORK BROWSER" ) + cc::debug( " reviewing S-Chain ID #" ) + - cc::size10( i ) + cc::debug( " : " ) + cc::normal( strCompareWith ) ); - } - if ( strFindWhat == strCompareWith ) { - if ( g_bVerboseLogging ) { - clog( dev::VerbosityTrace, "snb" ) - << ( cc::info( "SKALE NETWORK BROWSER" ) + - cc::success( " found S-Chain index on node: " ) + cc::size10( i ) ); - } - return i; - } - } - if ( g_bVerboseLogging ) { - clog( dev::VerbosityTrace, "snb" ) - << ( cc::info( "SKALE NETWORK BROWSER" ) + - cc::error( " failed to find S-Chain index on node for S-Chain ID: " ) + - cc::normal( strFindWhat ) ); - } - throw std::runtime_error( "S-Chain " + dev::toJS( schain_id ) + " is not found in the list: " + - stat_list_ids( schains_ids_on_node ) ); -} - -static int stat_get_schain_base_port_on_node( - dev::h256 schain_id, const vec256_t& schains_ids_on_node, int node_base_port ) { - int schain_index = stat_get_schain_index_in_node( schain_id, schains_ids_on_node ); - return stat_calc_schain_base_port( node_base_port, schain_index ); -} - -void stat_compute_endpoints( node_t& node ) { - std::string ip = node.ip, domain = node.domainName; - node.http_endpoint_ip = - skutils::url( "http://" + ip + ":" + skutils::tools::format( "%d", node.httpRpcPort ) ); - node.http_endpoint_domain = - skutils::url( "http://" + domain + ":" + skutils::tools::format( "%d", node.httpRpcPort ) ); - node.https_endpoint_ip = - skutils::url( "https://" + ip + ":" + skutils::tools::format( "%d", node.httpsRpcPort ) ); - node.https_endpoint_domain = skutils::url( - "https://" + domain + ":" + skutils::tools::format( "%d", node.httpsRpcPort ) ); - node.ws_endpoint_ip = "ws://" + ip + ":" + skutils::tools::format( "%d", 
node.wsRpcPort ); - node.ws_endpoint_domain = - skutils::url( "ws://" + domain + ":" + skutils::tools::format( "%d", node.wsRpcPort ) ); - node.wss_endpoint_ip = - skutils::url( "wss://" + ip + ":" + skutils::tools::format( "%d", node.wssRpcPort ) ); - node.wss_endpoint_domain = - skutils::url( "wss://" + domain + ":" + skutils::tools::format( "%d", node.wssRpcPort ) ); - node.info_http_endpoint_ip = - skutils::url( "http://" + ip + ":" + skutils::tools::format( "%d", node.infoHttpRpcPort ) ); - node.info_http_endpoint_domain = skutils::url( - "https://" + domain + ":" + skutils::tools::format( "%d", node.infoHttpRpcPort ) ); -} - -enum SkaledPorts : int { - PROPOSAL = 0, - CATCHUP = 1, - WS_JSON = 2, - HTTP_JSON = 3, - BINARY_CONSENSUS = 4, - ZMQ_BROADCAST = 5, - IMA_MONITORING = 6, - WSS_JSON = 7, - HTTPS_JSON = 8, - INFO_HTTP_JSON = 9 -}; -static void stat_calc_ports( node_t& node ) { - node.httpRpcPort = node.schain_base_port + SkaledPorts::HTTP_JSON; - node.httpsRpcPort = node.schain_base_port + SkaledPorts::HTTPS_JSON; - node.wsRpcPort = node.schain_base_port + SkaledPorts::WS_JSON; - node.wssRpcPort = node.schain_base_port + SkaledPorts::WSS_JSON; - node.infoHttpRpcPort = node.schain_base_port + SkaledPorts::INFO_HTTP_JSON; -} - -dev::u256 get_schains_count( - const skutils::url& u, const dev::u256& addressFrom, const dev::u256& addressSchainsInternal ) { - if ( g_fnStopIndication() ) - return dev::u256( 0 ); - static const char g_strContractMethodName[] = "numberOfSchains()"; - // 0x77ad87c1a3f5c981edbb22216a0b27bcf0b6c20e34df970e44c43bc8d7952fc6 - // 0x77ad87c1 - nlohmann::json joCall = stat_create_basic_call(); - nlohmann::json joParamsItem = nlohmann::json::object(); - // joParamsItem["from"] = dev::address_to_js( addressFrom ); - joParamsItem["to"] = dev::address_to_js( addressSchainsInternal ); - joParamsItem["data"] = "0x77ad87c1"; - joCall["params"].push_back( joParamsItem ); - joCall["params"].push_back( std::string( "latest" ) ); - 
skutils::rest::client cli; - cli.isVerboseInsideNetworkLayer_ = g_bVerboseLogging; - // cli.optsSSL_ = optsSSL; - if ( g_bVerboseLogging ) - clog( dev::VerbosityTrace, "snb" ) - << ( cc::info( "SKALE NETWORK BROWSER" ) + cc::debug( " will call " ) + - cc::info( g_strContractMethodName ) + cc::debug( " at " ) + cc::u( u ) + - cc::debug( " with " ) + cc::j( joCall ) ); - cli.open( u ); - skutils::rest::data_t d = cli.call( joCall ); - if ( !d.err_s_.empty() ) { - if ( g_bVerboseLogging ) - clog( dev::VerbosityTrace, "snb" ) - << ( cc::info( "SKALE NETWORK BROWSER" ) + cc::error( " failed to call " ) + - cc::info( g_strContractMethodName ) + cc::error( ", got error: " ) + - cc::warn( d.err_s_ ) ); - throw std::runtime_error( - std::string( "Failed call to \"" ) + g_strContractMethodName + "\": " + d.err_s_ ); - } - if ( d.empty() ) { - if ( g_bVerboseLogging ) - clog( dev::VerbosityTrace, "snb" ) - << ( cc::info( "SKALE NETWORK BROWSER" ) + cc::error( " failed to call " ) + - cc::info( g_strContractMethodName ) + cc::error( ", EMPTY data received" ) ); - throw std::runtime_error( std::string( "Failed call to \"" ) + g_strContractMethodName + - "\", EMPTY data received" ); - } - if ( g_bVerboseLogging ) - clog( dev::VerbosityTrace, "snb" ) - << ( cc::info( "SKALE NETWORK BROWSER" ) + cc::debug( " did called " ) + - cc::info( g_strContractMethodName ) + cc::debug( " with answer " ) + - cc::j( d.s_ ) ); - nlohmann::json joAnswer = nlohmann::json::parse( d.s_ ); - if ( joAnswer.count( "result" ) == 0 ) { - if ( g_bVerboseLogging ) - clog( dev::VerbosityTrace, "snb" ) - << ( cc::info( "SKALE NETWORK BROWSER" ) + cc::error( " failed to call " ) + - cc::info( g_strContractMethodName ) + cc::error( ", no " ) + - cc::warn( "result" ) + cc::error( " field provided" ) ); - throw std::runtime_error( std::string( "Failed call to \"" ) + g_strContractMethodName + - "\", no \"result\" field provided" ); - } - const nlohmann::json& joResult_numberOfSchains = joAnswer["result"]; - if 
( joResult_numberOfSchains.is_null() ) { - if ( g_bVerboseLogging ) - clog( dev::VerbosityTrace, "snb" ) - << ( cc::info( "SKALE NETWORK BROWSER" ) + cc::error( " failed to call " ) + - cc::info( g_strContractMethodName ) + cc::error( ", provided " ) + - cc::warn( "result" ) + - cc::error( " field is null, will return zero value" ) ); - dev::u256 cntSChains( 0 ); - return cntSChains; - } - if ( joResult_numberOfSchains.is_number() ) { - dev::u256 cntSChains( joResult_numberOfSchains.get< int >() ); - return cntSChains; - } - if ( joResult_numberOfSchains.is_string() ) { - dev::u256 cntSChains( joResult_numberOfSchains.get< std::string >() ); - return cntSChains; - } - // error, final - if ( g_bVerboseLogging ) - clog( dev::VerbosityTrace, "snb" ) - << ( cc::info( "SKALE NETWORK BROWSER" ) + cc::error( " failed to call " ) + - cc::info( g_strContractMethodName ) + cc::error( ", provided " ) + - cc::warn( "result" ) + cc::error( " field is not string or number type" ) ); - throw std::runtime_error( std::string( "Failed call to \"" ) + g_strContractMethodName + - "\", provided \"result\" field is not string or number type" ); -} - -s_chain_t load_schain( const skutils::url& u, const dev::u256& addressFrom, - const dev::u256& idxSChain, const dev::u256& /*cntSChains*/, - const dev::u256& addressSchainsInternal, const dev::u256& addressNodes ) { - if ( g_bVerboseLogging ) { - clog( dev::VerbosityTrace, "snb" ) - << ( cc::info( "SKALE NETWORK BROWSER" ) + cc::debug( " will load S-Chain #" ) + - cc::info( dev::toJS( idxSChain ) ) + cc::debug( " from URL " ) + cc::u( u ) ); - } - // - // load s-chain - // - s_chain_t s_chain; - if ( g_fnStopIndication() ) - return s_chain; - dev::u256 hash; - { // block - static const char g_strContractMethodName[] = "schainsAtSystem(uint256)"; - // bytes32[] public schainsAtSystem; - // 0xec79b50186e75f6719f28a07047b7e7cd13f13eb7b11a87b480887fe5f2df5aa - // 0xec79b501 - nlohmann::json joCall = stat_create_basic_call(); - nlohmann::json 
joParamsItem = nlohmann::json::object(); - joParamsItem["from"] = dev::address_to_js( addressFrom ); - joParamsItem["to"] = dev::address_to_js( addressSchainsInternal ); - joParamsItem["data"] = "0xec79b501" + stat_to_appendable_string( idxSChain ); - joCall["params"].push_back( joParamsItem ); - joCall["params"].push_back( std::string( "latest" ) ); - skutils::rest::client cli; - cli.isVerboseInsideNetworkLayer_ = g_bVerboseLogging; - // cli.optsSSL_ = optsSSL; - if ( g_bVerboseLogging ) - clog( dev::VerbosityTrace, "snb" ) - << ( cc::info( "SKALE NETWORK BROWSER" ) + cc::debug( " will call " ) + - cc::info( g_strContractMethodName ) + cc::debug( " at " ) + cc::u( u ) + - cc::debug( " with " ) + cc::j( joCall ) ); - cli.open( u ); - skutils::rest::data_t d = cli.call( joCall ); - if ( !d.err_s_.empty() ) { - if ( g_bVerboseLogging ) - clog( dev::VerbosityTrace, "snb" ) - << ( cc::info( "SKALE NETWORK BROWSER" ) + cc::error( " failed to call " ) + - cc::info( g_strContractMethodName ) + cc::error( ", got error: " ) + - cc::warn( d.err_s_ ) ); - throw std::runtime_error( - std::string( "Failed call to \"" ) + g_strContractMethodName + "\": " + d.err_s_ ); - } - if ( d.empty() ) { - if ( g_bVerboseLogging ) - clog( dev::VerbosityTrace, "snb" ) - << ( cc::info( "SKALE NETWORK BROWSER" ) + cc::error( " failed to call " ) + - cc::info( g_strContractMethodName ) + - cc::error( ", EMPTY data received" ) ); - throw std::runtime_error( std::string( "Failed call to \"" ) + g_strContractMethodName + - "\", EMPTY data received" ); - } - if ( g_bVerboseLogging ) - clog( dev::VerbosityTrace, "snb" ) - << ( cc::info( "SKALE NETWORK BROWSER" ) + cc::debug( " did called " ) + - cc::info( g_strContractMethodName ) + cc::debug( " with answer " ) + - cc::j( d.s_ ) ); - nlohmann::json joAnswer = nlohmann::json::parse( d.s_ ); - if ( joAnswer.count( "result" ) == 0 ) { - if ( g_bVerboseLogging ) - clog( dev::VerbosityTrace, "snb" ) - << ( cc::info( "SKALE NETWORK BROWSER" ) + cc::error( 
" failed to call " ) + - cc::info( g_strContractMethodName ) + cc::error( ", no " ) + - cc::warn( "result" ) + cc::error( " field provided" ) ); - throw std::runtime_error( std::string( "Failed call to \"" ) + g_strContractMethodName + - "\", no \"result\" field provided" ); - } - const nlohmann::json& joResult = joAnswer["result"]; - if ( !joResult.is_string() ) { - if ( g_bVerboseLogging ) - clog( dev::VerbosityTrace, "snb" ) - << ( cc::info( "SKALE NETWORK BROWSER" ) + cc::error( " failed to call " ) + - cc::info( g_strContractMethodName ) + cc::error( ", provided " ) + - cc::warn( "result" ) + cc::error( " field is not string type" ) ); - throw std::runtime_error( std::string( "Failed call to \"" ) + g_strContractMethodName + - "\", provided \"result\" field is not string type" ); - } - hash = dev::u256( joResult.get< std::string >() ); - } // block - // - if ( g_fnStopIndication() ) - return s_chain; - { // block - static const char g_strContractMethodName[] = "schains(bytes32)"; - // mapping (bytes32 => Schain) public schains; - // 0xb340c4b3db50a45804480331de552947d2c3df932cbfbc1edeacea1073b13f03 - // 0xb340c4b3 - // out: - // struct Schain { - // string name; - // address owner; - // uint indexInOwnerList; - // uint8 partOfNode; - // uint lifetime; - // uint startDate; - // uint startBlock; - // uint deposit; - // uint64 index; - // uint generation; - // address originator; - // } - // 0000 0000000000000000000000000000000000000000000000000000000000000160 "name" offset = 352 - // 0020 0000000000000000000000007aa5e36aa15e93d10f4f26357c30f052dacdde5f - // "owner":"0x7aa5E36AA15E93D10F4F26357C30F052DacDde5F" 0040 - // 0000000000000000000000000000000000000000000000000000000000000000 "indexInOwnerList":"0" - // 0060 0000000000000000000000000000000000000000000000000000000000000000 "partOfNode":"0" - // 0080 0000000000000000000000000000000000000000000000000000000000000005 "lifetime":"5" - // 00A0 0000000000000000000000000000000000000000000000000000000061e6d984 
"startDate" - // 00C0 000000000000000000000000000000000000000000000000000000000000006b "startBlock" - // 00E0 0000000000000000000000000000000000000000000000056bc75e2d63100000 - // "deposit":"100000000000000000000" 0100 - // 0000000000000000000000000000000000000000000000000000000000000000 index":"0" 0120 - // 0000000000000000000000000000000000000000000000000000000000000000 "generation":"0" 0140 - // 0000000000000000000000000000000000000000000000000000000000000000 - // "originator":"0x0000000000000000000000000000000000000000" 0160 - // 0000000000000000000000000000000000000000000000000000000000000007 length of "name" 0180 - // 426f623130303000000000000000000000000000000000000000000000000000 "name":"Bob1000" - nlohmann::json joCall = stat_create_basic_call(); - nlohmann::json joParamsItem = nlohmann::json::object(); - // joParamsItem["from"] = dev::address_to_js( addressFrom ); - joParamsItem["to"] = dev::address_to_js( addressSchainsInternal ); - joParamsItem["data"] = "0xb340c4b3" + stat_to_appendable_string( hash ); - joCall["params"].push_back( joParamsItem ); - joCall["params"].push_back( std::string( "latest" ) ); - skutils::rest::client cli; - cli.isVerboseInsideNetworkLayer_ = g_bVerboseLogging; - // cli.optsSSL_ = optsSSL; - if ( g_bVerboseLogging ) { - clog( dev::VerbosityTrace, "snb" ) - << ( cc::info( "SKALE NETWORK BROWSER" ) + cc::debug( " will call " ) + - cc::info( g_strContractMethodName ) + cc::debug( " at " ) + cc::u( u ) + - cc::debug( " with " ) + cc::j( joCall ) ); - } - cli.open( u ); - skutils::rest::data_t d = cli.call( joCall ); - if ( !d.err_s_.empty() ) { - if ( g_bVerboseLogging ) { - clog( dev::VerbosityTrace, "snb" ) - << ( cc::info( "SKALE NETWORK BROWSER" ) + cc::error( " failed to call " ) + - cc::info( g_strContractMethodName ) + cc::error( ", got error: " ) + - cc::warn( d.err_s_ ) ); - } - throw std::runtime_error( - std::string( "Failed call to \"" ) + g_strContractMethodName + "\": " + d.err_s_ ); - } - if ( d.empty() ) { - if ( 
g_bVerboseLogging ) { - clog( dev::VerbosityTrace, "snb" ) - << ( cc::info( "SKALE NETWORK BROWSER" ) + cc::error( " failed to call " ) + - cc::info( g_strContractMethodName ) + - cc::error( ", EMPTY data received" ) ); - } - throw std::runtime_error( std::string( "Failed call to \"" ) + g_strContractMethodName + - "\", EMPTY data received" ); - } - if ( g_bVerboseLogging ) { - clog( dev::VerbosityTrace, "snb" ) - << ( cc::info( "SKALE NETWORK BROWSER" ) + cc::debug( " did called " ) + - cc::info( g_strContractMethodName ) + cc::debug( " with answer " ) + - cc::j( d.s_ ) ); - } - nlohmann::json joAnswer = nlohmann::json::parse( d.s_ ); - if ( joAnswer.count( "result" ) == 0 ) { - if ( g_bVerboseLogging ) - clog( dev::VerbosityTrace, "snb" ) - << ( cc::info( "SKALE NETWORK BROWSER" ) + cc::error( " failed to call " ) + - cc::info( g_strContractMethodName ) + cc::error( ", no " ) + - cc::warn( "result" ) + cc::error( " field provided" ) ); - throw std::runtime_error( std::string( "Failed call to \"" ) + g_strContractMethodName + - "\", no \"result\" field provided" ); - } - const nlohmann::json& joResult = joAnswer["result"]; - if ( !joResult.is_string() ) { - if ( g_bVerboseLogging ) - clog( dev::VerbosityTrace, "snb" ) - << ( cc::info( "SKALE NETWORK BROWSER" ) + cc::error( " failed to call " ) + - cc::info( g_strContractMethodName ) + cc::error( ", provided " ) + - cc::warn( "result" ) + cc::error( " field is not string type" ) ); - throw std::runtime_error( std::string( "Failed call to \"" ) + g_strContractMethodName + - "\", provided \"result\" field is not string type" ); - } - std::string strResult = joResult.get< std::string >(); - vec256_t vec = stat_split_raw_answer( strResult ); - size_t i = 0; - s_chain.name = stat_extract_string( vec, i++ ); - s_chain.schain_id = dev::sha3( s_chain.name ); - s_chain.chainId = stat_compute_chain_id_from_schain_name( s_chain.name ); - s_chain.owner = vec[i++].u256; // address - s_chain.indexInOwnerList = vec[i++].n; // 
uint - s_chain.partOfNode = vec[i++].n; // uint8 - s_chain.lifetime = vec[i++].n; // uint - s_chain.startDate = vec[i++].n; // uint - s_chain.startBlock = vec[i++].u256; // uint - s_chain.deposit = vec[i++].u256; // uint - s_chain.index = vec[i++].n; // uint64 - s_chain.generation = vec[i++].n; // uint - s_chain.originator = vec[i++].u256; // address - } // block - // - // load s-chain parts - // - if ( g_fnStopIndication() ) - return s_chain; - { // block - static const char g_strContractMethodName[] = "getNodesInGroup(bytes32)"; - // 0xb70a4223305cdb661a25301e0dd3a7d6dce139327a8f2e1ffeea696adcf2f42e - // 0xb70a4223 - // out: - // 0000 0000000000000000000000000000000000000000000000000000000000000020 // array offset - // 0020 0000000000000000000000000000000000000000000000000000000000000002 // array length = 2 - // 0040 0000000000000000000000000000000000000000000000000000000000000001 // item[0] - // 0060 0000000000000000000000000000000000000000000000000000000000000000 // item[0] - nlohmann::json joCall = stat_create_basic_call(); - nlohmann::json joParamsItem = nlohmann::json::object(); - // joParamsItem["from"] = dev::address_to_js( addressFrom ); - joParamsItem["to"] = dev::address_to_js( addressSchainsInternal ); - joParamsItem["data"] = "0xb70a4223" + stat_to_appendable_string( s_chain.schain_id ); - joCall["params"].push_back( joParamsItem ); - joCall["params"].push_back( std::string( "latest" ) ); - skutils::rest::client cli; - cli.isVerboseInsideNetworkLayer_ = g_bVerboseLogging; - // cli.optsSSL_ = optsSSL; - if ( g_bVerboseLogging ) { - clog( dev::VerbosityTrace, "snb" ) - << ( cc::info( "SKALE NETWORK BROWSER" ) + cc::debug( " will call " ) + - cc::info( g_strContractMethodName ) + cc::debug( " at " ) + cc::u( u ) + - cc::debug( " with " ) + cc::j( joCall ) ); - } - cli.open( u ); - skutils::rest::data_t d = cli.call( joCall ); - if ( !d.err_s_.empty() ) { - if ( g_bVerboseLogging ) { - clog( dev::VerbosityTrace, "snb" ) - << ( cc::info( "SKALE NETWORK 
BROWSER" ) + cc::error( " failed to call " ) + - cc::info( g_strContractMethodName ) + cc::error( ", got error: " ) + - cc::warn( d.err_s_ ) ); - } - throw std::runtime_error( - std::string( "Failed call to \"" ) + g_strContractMethodName + "\": " + d.err_s_ ); - } - if ( d.empty() ) { - if ( g_bVerboseLogging ) { - clog( dev::VerbosityTrace, "snb" ) - << ( cc::info( "SKALE NETWORK BROWSER" ) + cc::error( " failed to call " ) + - cc::info( g_strContractMethodName ) + - cc::error( ", EMPTY data received" ) ); - } - throw std::runtime_error( std::string( "Failed call to \"" ) + g_strContractMethodName + - "\", EMPTY data received" ); - } - if ( g_bVerboseLogging ) { - clog( dev::VerbosityTrace, "snb" ) - << ( cc::info( "SKALE NETWORK BROWSER" ) + cc::debug( " did called " ) + - cc::info( g_strContractMethodName ) + cc::debug( " with answer " ) + - cc::j( d.s_ ) ); - } - nlohmann::json joAnswer = nlohmann::json::parse( d.s_ ); - if ( joAnswer.count( "result" ) == 0 ) { - if ( g_bVerboseLogging ) - clog( dev::VerbosityTrace, "snb" ) - << ( cc::info( "SKALE NETWORK BROWSER" ) + cc::error( " failed to call " ) + - cc::info( g_strContractMethodName ) + cc::error( ", no " ) + - cc::warn( "result" ) + cc::error( " field provided" ) ); - throw std::runtime_error( std::string( "Failed call to \"" ) + g_strContractMethodName + - "\", no \"result\" field provided" ); - } - const nlohmann::json& joResult = joAnswer["result"]; - if ( !joResult.is_string() ) { - if ( g_bVerboseLogging ) - clog( dev::VerbosityTrace, "snb" ) - << ( cc::info( "SKALE NETWORK BROWSER" ) + cc::error( " failed to call " ) + - cc::info( g_strContractMethodName ) + cc::error( ", provided " ) + - cc::warn( "result" ) + cc::error( " field is not string type" ) ); - throw std::runtime_error( std::string( "Failed call to \"" ) + g_strContractMethodName + - "\", provided \"result\" field is not string type" ); - } - if ( g_fnStopIndication() ) - return s_chain; - std::string strResult = joResult.get< 
std::string >(); - vec256_t vec = stat_split_raw_answer( strResult ); - vec256_t vecNodeIds = stat_extract_vector( vec, 0 ); - size_t idxNode, cntNodes = vecNodeIds.size(); - for ( idxNode = 0; idxNode < cntNodes; ++idxNode ) { - if ( g_fnStopIndication() ) - return s_chain; - const item256_t& node_id = vecNodeIds[idxNode]; - node_t node; - { // block - static const char g_strContractMethodName[] = "nodes(uint256)"; - // Node[] public nodes; - // 0x1c53c280643e9644acc64db8c4ceeb5d6e7c3ed526c08b82d73b7a30b16b3c27 - // 0x1c53c280 - // out: - // struct Node { - // string name; - // bytes4 ip; - // bytes4 publicIP; - // uint16 port; - // bytes32[2] publicKey; - // uint startBlock; - // uint lastRewardDate; - // uint finishTime; - // NodeStatus status; - // uint validatorId; - //} - // 0000 0000000000000000000000000000000000000000000000000000000000000120 offset - // "name" 0020 7f00000200000000000000000000000000000000000000000000000000000000 ip - // 0040 7f00000200000000000000000000000000000000000000000000000000000000 publicIP - // 0060 00000000000000000000000000000000000000000000000000000000000008d5 port 2261 - // 0080 000000000000000000000000000000000000000000000000000000000000006a ??? - // publicKey 00A0 0000000000000000000000000000000000000000000000000000000061e6d983 - // ??? startBlock 00C0 - // 0000000000000000000000000000000000000000000000000000000000000000 ??? - // lastRewardDate 00E0 - // 0000000000000000000000000000000000000000000000000000000000000000 - // ??? lastRewardDate 0100 - // 0000000000000000000000000000000000000000000000000000000000000001 ??? 
finishTime - // 0120 0000000000000000000000000000000000000000000000000000000000000004 length - // "name" 0140 4265617200000000000000000000000000000000000000000000000000000000 - // "name" value "Bear" - nlohmann::json joCall = stat_create_basic_call(); - nlohmann::json joParamsItem = nlohmann::json::object(); - // joParamsItem["from"] = dev::address_to_js( addressFrom ); - joParamsItem["to"] = dev::address_to_js( addressNodes ); - joParamsItem["data"] = "0x1c53c280" + stat_to_appendable_string( node_id.u256 ); - joCall["params"].push_back( joParamsItem ); - joCall["params"].push_back( std::string( "latest" ) ); - skutils::rest::client cli; - cli.isVerboseInsideNetworkLayer_ = g_bVerboseLogging; - // cli.optsSSL_ = optsSSL; - cli.open( u ); - if ( g_bVerboseLogging ) { - clog( dev::VerbosityTrace, "snb" ) - << ( cc::info( "SKALE NETWORK BROWSER" ) + cc::debug( " will call " ) + - cc::info( g_strContractMethodName ) + cc::debug( " at " ) + - cc::u( u ) + cc::debug( " with " ) + cc::j( joCall ) ); - } - skutils::rest::data_t d = cli.call( joCall ); - if ( !d.err_s_.empty() ) { - if ( g_bVerboseLogging ) { - clog( dev::VerbosityTrace, "snb" ) - << ( cc::info( "SKALE NETWORK BROWSER" ) + - cc::error( " failed to call " ) + - cc::info( g_strContractMethodName ) + - cc::error( ", got error: " ) + cc::warn( d.err_s_ ) ); - } - throw std::runtime_error( std::string( "Failed call to \"" ) + - g_strContractMethodName + "\": " + d.err_s_ ); - } - if ( d.empty() ) { - if ( g_bVerboseLogging ) { - clog( dev::VerbosityTrace, "snb" ) - << ( cc::info( "SKALE NETWORK BROWSER" ) + - cc::error( " failed to call " ) + - cc::info( g_strContractMethodName ) + - cc::error( ", EMPTY data received" ) ); - } - throw std::runtime_error( std::string( "Failed call to \"" ) + - g_strContractMethodName + "\", EMPTY data received" ); - } - if ( g_bVerboseLogging ) { - clog( dev::VerbosityTrace, "snb" ) - << ( cc::info( "SKALE NETWORK BROWSER" ) + cc::debug( " did called " ) + - cc::info( 
g_strContractMethodName ) + cc::debug( " with answer " ) + - cc::j( d.s_ ) ); - } - nlohmann::json joAnswer = nlohmann::json::parse( d.s_ ); - if ( joAnswer.count( "result" ) == 0 ) { - if ( g_bVerboseLogging ) - clog( dev::VerbosityTrace, "snb" ) - << ( cc::info( "SKALE NETWORK BROWSER" ) + - cc::error( " failed to call " ) + - cc::info( g_strContractMethodName ) + cc::error( ", no " ) + - cc::warn( "result" ) + cc::error( " field provided" ) ); - throw std::runtime_error( std::string( "Failed call to \"" ) + - g_strContractMethodName + - "\", no \"result\" field provided" ); - } - const nlohmann::json& joResult = joAnswer["result"]; - if ( !joResult.is_string() ) { - if ( g_bVerboseLogging ) - clog( dev::VerbosityTrace, "snb" ) - << ( cc::info( "SKALE NETWORK BROWSER" ) + - cc::error( " failed to call " ) + - cc::info( g_strContractMethodName ) + - cc::error( ", provided " ) + cc::warn( "result" ) + - cc::error( " field is not string type" ) ); - throw std::runtime_error( std::string( "Failed call to \"" ) + - g_strContractMethodName + - "\", provided \"result\" field is not string type" ); - } - std::string strResult = joResult.get< std::string >(); - vec256_t vec = stat_split_raw_answer( strResult ); - size_t i = 0; - node.node_id = node_id.u256; - node.name = stat_extract_string( vec, i++ ); - node.ip = stat_extract_ipv4( vec, i++ ); - node.publicIP = stat_extract_ipv4( vec, i++ ); - node.nPort = ( int ) vec[i++].n; - } // block - if ( g_fnStopIndication() ) - return s_chain; - { // block - static const char g_strContractMethodName[] = "getNodeDomainName(uint256)"; - // function getNodeDomainName(uint nodeIndex) - // 0xd31c48ede05d5ff086a9eedf48e201b69f7a1a854adcf2ac2d8af92bb7e848c2 - // 0xd31c48ed - // out: - // 0000 0000000000000000000000000000000000000000000000000000000000000020 // string - // offset 0020 0000000000000000000000000000000000000000000000000000000000000015 // - // string lengs 0040 - // 
746573742e646f6d61696e2e6e616d652e686572650000000000000000000000 // string data - nlohmann::json joCall = stat_create_basic_call(); - nlohmann::json joParamsItem = nlohmann::json::object(); - joParamsItem["from"] = dev::address_to_js( addressFrom ); - joParamsItem["to"] = dev::address_to_js( addressNodes ); - joParamsItem["data"] = "0xd31c48ed" + stat_to_appendable_string( node_id.u256 ); - joCall["params"].push_back( joParamsItem ); - joCall["params"].push_back( std::string( "latest" ) ); - skutils::rest::client cli; - cli.isVerboseInsideNetworkLayer_ = g_bVerboseLogging; - // cli.optsSSL_ = optsSSL; - if ( g_bVerboseLogging ) { - clog( dev::VerbosityTrace, "snb" ) - << ( cc::info( "SKALE NETWORK BROWSER" ) + cc::debug( " will call " ) + - cc::info( g_strContractMethodName ) + cc::debug( " at " ) + - cc::u( u ) + cc::debug( " with " ) + cc::j( joCall ) ); - } - cli.open( u ); - skutils::rest::data_t d = cli.call( joCall ); - if ( !d.err_s_.empty() ) { - if ( g_bVerboseLogging ) { - clog( dev::VerbosityTrace, "snb" ) - << ( cc::info( "SKALE NETWORK BROWSER" ) + - cc::error( " failed to call " ) + - cc::info( g_strContractMethodName ) + - cc::error( ", got error: " ) + cc::warn( d.err_s_ ) ); - } - throw std::runtime_error( std::string( "Failed call to \"" ) + - g_strContractMethodName + "\": " + d.err_s_ ); - } - if ( d.empty() ) { - if ( g_bVerboseLogging ) { - clog( dev::VerbosityTrace, "snb" ) - << ( cc::info( "SKALE NETWORK BROWSER" ) + - cc::error( " failed to call " ) + - cc::info( g_strContractMethodName ) + - cc::error( ", EMPTY data received" ) ); - } - throw std::runtime_error( std::string( "Failed call to \"" ) + - g_strContractMethodName + "\", EMPTY data received" ); - } - if ( g_bVerboseLogging ) { - clog( dev::VerbosityTrace, "snb" ) - << ( cc::info( "SKALE NETWORK BROWSER" ) + cc::debug( " did called " ) + - cc::info( g_strContractMethodName ) + cc::debug( " with answer " ) + - cc::j( d.s_ ) ); - } - nlohmann::json joAnswer = nlohmann::json::parse( 
d.s_ ); - if ( joAnswer.count( "result" ) == 0 ) { - if ( g_bVerboseLogging ) - clog( dev::VerbosityTrace, "snb" ) - << ( cc::info( "SKALE NETWORK BROWSER" ) + - cc::error( " failed to call " ) + - cc::info( g_strContractMethodName ) + cc::error( ", no " ) + - cc::warn( "result" ) + cc::error( " field provided" ) ); - throw std::runtime_error( std::string( "Failed call to \"" ) + - g_strContractMethodName + - "\", no \"result\" field provided" ); - } - const nlohmann::json& joResult = joAnswer["result"]; - if ( !joResult.is_string() ) { - if ( g_bVerboseLogging ) - clog( dev::VerbosityTrace, "snb" ) - << ( cc::info( "SKALE NETWORK BROWSER" ) + - cc::error( " failed to call " ) + - cc::info( g_strContractMethodName ) + - cc::error( ", provided " ) + cc::warn( "result" ) + - cc::error( " field is not string type" ) ); - throw std::runtime_error( std::string( "Failed call to \"" ) + - g_strContractMethodName + - "\", provided \"result\" field is not string type" ); - } - std::string strResult = joResult.get< std::string >(); - vec256_t vec = stat_split_raw_answer( strResult ); - size_t i = 0; - node.domainName = stat_extract_string( vec, i++ ); - } // block - if ( g_fnStopIndication() ) - return s_chain; - { // block - static const char g_strContractMethodName[] = "isNodeInMaintenance(uint256)"; - // function isNodeInMaintenance(uint nodeIndex) - // 0x5990e3cb693783f2ae688ffdd7d57079fc68f1648db65fd344b99e64a5c7fedf - // 0x5990e3cb - // out: - // 0000 0000000000000000000000000000000000000000000000000000000000000000 bool - nlohmann::json joCall = stat_create_basic_call(); - nlohmann::json joParamsItem = nlohmann::json::object(); - joParamsItem["from"] = dev::address_to_js( addressFrom ); - joParamsItem["to"] = dev::address_to_js( addressNodes ); - joParamsItem["data"] = "0x5990e3cb" + stat_to_appendable_string( node_id.u256 ); - joCall["params"].push_back( joParamsItem ); - joCall["params"].push_back( std::string( "latest" ) ); - skutils::rest::client cli; - 
cli.isVerboseInsideNetworkLayer_ = g_bVerboseLogging; - // cli.optsSSL_ = optsSSL; - if ( g_bVerboseLogging ) { - clog( dev::VerbosityTrace, "snb" ) - << ( cc::info( "SKALE NETWORK BROWSER" ) + cc::debug( " will call " ) + - cc::info( g_strContractMethodName ) + cc::debug( " at " ) + - cc::u( u ) + cc::debug( " with " ) + cc::j( joCall ) ); - } - cli.open( u ); - skutils::rest::data_t d = cli.call( joCall ); - if ( !d.err_s_.empty() ) { - if ( g_bVerboseLogging ) { - clog( dev::VerbosityTrace, "snb" ) - << ( cc::info( "SKALE NETWORK BROWSER" ) + - cc::error( " failed to call " ) + - cc::info( g_strContractMethodName ) + - cc::error( ", got error: " ) + cc::warn( d.err_s_ ) ); - } - throw std::runtime_error( std::string( "Failed call to \"" ) + - g_strContractMethodName + "\": " + d.err_s_ ); - } - if ( d.empty() ) { - if ( g_bVerboseLogging ) { - clog( dev::VerbosityTrace, "snb" ) - << ( cc::info( "SKALE NETWORK BROWSER" ) + - cc::error( " failed to call " ) + - cc::info( g_strContractMethodName ) + - cc::error( ", EMPTY data received" ) ); - } - throw std::runtime_error( std::string( "Failed call to \"" ) + - g_strContractMethodName + "\", EMPTY data received" ); - } - if ( g_bVerboseLogging ) { - clog( dev::VerbosityTrace, "snb" ) - << ( cc::info( "SKALE NETWORK BROWSER" ) + cc::debug( " did called " ) + - cc::info( g_strContractMethodName ) + cc::debug( " with answer " ) + - cc::j( d.s_ ) ); - } - nlohmann::json joAnswer = nlohmann::json::parse( d.s_ ); - if ( joAnswer.count( "result" ) == 0 ) { - if ( g_bVerboseLogging ) - clog( dev::VerbosityTrace, "snb" ) - << ( cc::info( "SKALE NETWORK BROWSER" ) + - cc::error( " failed to call " ) + - cc::info( g_strContractMethodName ) + cc::error( ", no " ) + - cc::warn( "result" ) + cc::error( " field provided" ) ); - throw std::runtime_error( std::string( "Failed call to \"" ) + - g_strContractMethodName + - "\", no \"result\" field provided" ); - } - const nlohmann::json& joResult = joAnswer["result"]; - if ( 
!joResult.is_string() ) { - if ( g_bVerboseLogging ) - clog( dev::VerbosityTrace, "snb" ) - << ( cc::info( "SKALE NETWORK BROWSER" ) + - cc::error( " failed to call " ) + - cc::info( g_strContractMethodName ) + - cc::error( ", provided " ) + cc::warn( "result" ) + - cc::error( " field is not string type" ) ); - throw std::runtime_error( std::string( "Failed call to \"" ) + - g_strContractMethodName + - "\", provided \"result\" field is not string type" ); - } - std::string strResult = joResult.get< std::string >(); - vec256_t vec = stat_split_raw_answer( strResult ); - node.isMaintenance = ( vec[0].u256.is_zero() ) ? false : true; - } // block - if ( g_fnStopIndication() ) - return s_chain; - vec256_t vecSChainIds; - { // block - static const char g_strContractMethodName[] = "getSchainHashesForNode(uint256)"; - // function getSchainHashesForNode(uint nodeIndex) - // 0x46660419a40b36978f951f5d6d936a614248a2628f5d1f05abce867aa8d189db - // 0x46660419 - // out: - // 0000 0000000000000000000000000000000000000000000000000000000000000020 // array - // offset 0020 0000000000000000000000000000000000000000000000000000000000000001 // - // array length 0040 - // 975a4814cff8b9fd85b48879dade195028650b0a23f339ca81bd3b1231f72974 - // // array item[0] - nlohmann::json joCall = stat_create_basic_call(); - nlohmann::json joParamsItem = nlohmann::json::object(); - joParamsItem["from"] = dev::address_to_js( addressFrom ); - joParamsItem["to"] = dev::address_to_js( addressSchainsInternal ); - joParamsItem["data"] = "0x46660419" + stat_to_appendable_string( node_id.u256 ); - joCall["params"].push_back( joParamsItem ); - joCall["params"].push_back( std::string( "latest" ) ); - skutils::rest::client cli; - cli.isVerboseInsideNetworkLayer_ = g_bVerboseLogging; - // cli.optsSSL_ = optsSSL; - if ( g_bVerboseLogging ) { - clog( dev::VerbosityTrace, "snb" ) - << ( cc::info( "SKALE NETWORK BROWSER" ) + cc::debug( " will call " ) + - cc::info( g_strContractMethodName ) + cc::debug( " at " ) + - 
cc::u( u ) + cc::debug( " with " ) + cc::j( joCall ) ); - } - cli.open( u ); - skutils::rest::data_t d = cli.call( joCall ); - if ( !d.err_s_.empty() ) { - if ( g_bVerboseLogging ) { - clog( dev::VerbosityTrace, "snb" ) - << ( cc::info( "SKALE NETWORK BROWSER" ) + - cc::error( " failed to call " ) + - cc::info( g_strContractMethodName ) + - cc::error( ", got error: " ) + cc::warn( d.err_s_ ) ); - } - throw std::runtime_error( std::string( "Failed call to \"" ) + - g_strContractMethodName + "\": " + d.err_s_ ); - } - if ( d.empty() ) { - if ( g_bVerboseLogging ) { - clog( dev::VerbosityTrace, "snb" ) - << ( cc::info( "SKALE NETWORK BROWSER" ) + - cc::error( " failed to call " ) + - cc::info( g_strContractMethodName ) + - cc::error( ", EMPTY data received" ) ); - } - throw std::runtime_error( std::string( "Failed call to \"" ) + - g_strContractMethodName + "\", EMPTY data received" ); - } - if ( g_bVerboseLogging ) { - clog( dev::VerbosityTrace, "snb" ) - << ( cc::info( "SKALE NETWORK BROWSER" ) + cc::debug( " did called " ) + - cc::info( g_strContractMethodName ) + cc::debug( " with answer " ) + - cc::j( d.s_ ) ); - } - nlohmann::json joAnswer = nlohmann::json::parse( d.s_ ); - if ( joAnswer.count( "result" ) == 0 ) { - if ( g_bVerboseLogging ) - clog( dev::VerbosityTrace, "snb" ) - << ( cc::info( "SKALE NETWORK BROWSER" ) + - cc::error( " failed to call " ) + - cc::info( g_strContractMethodName ) + cc::error( ", no " ) + - cc::warn( "result" ) + cc::error( " field provided" ) ); - throw std::runtime_error( std::string( "Failed call to \"" ) + - g_strContractMethodName + - "\", no \"result\" field provided" ); - } - const nlohmann::json& joResult = joAnswer["result"]; - if ( !joResult.is_string() ) { - if ( g_bVerboseLogging ) - clog( dev::VerbosityTrace, "snb" ) - << ( cc::info( "SKALE NETWORK BROWSER" ) + - cc::error( " failed to call " ) + - cc::info( g_strContractMethodName ) + - cc::error( ", provided " ) + cc::warn( "result" ) + - cc::error( " field is not 
string type" ) ); - throw std::runtime_error( std::string( "Failed call to \"" ) + - g_strContractMethodName + - "\", provided \"result\" field is not string type" ); - } - std::string strResult = joResult.get< std::string >(); - vec256_t vec = stat_split_raw_answer( strResult ); - if ( g_bVerboseLogging ) { - clog( dev::VerbosityTrace, "snb" ) - << ( cc::info( "SKALE NETWORK BROWSER" ) + - cc::debug( " pre-split vector dats for chain ID(s): " ) + - stat_list_raw_vec( vec ) ); - } - vecSChainIds = stat_extract_vector( vec, 0 ); - if ( g_bVerboseLogging ) { - clog( dev::VerbosityTrace, "snb" ) - << ( cc::info( "SKALE NETWORK BROWSER" ) + - cc::debug( " extracted vector of chain ID(s): " ) + - stat_list_raw_vec( vecSChainIds ) ); - } - } // block - node.schain_base_port = stat_get_schain_base_port_on_node( s_chain.schain_id, - vecSChainIds, // schain_ids - node.nPort // node_dict.base_port - ); - stat_calc_ports( node ); - stat_compute_endpoints( node ); - s_chain.vecNodes.push_back( node ); - } // for( idxNode = 0; idxNode < cntNodes; ++ idxNode ) - } // block - return s_chain; -} - -vec_s_chains_t load_schains( const skutils::url& u, const dev::u256& addressFrom, - const dev::u256& addressSchainsInternal, const dev::u256& addressNodes ) { - if ( g_bVerboseLogging ) { - clog( dev::VerbosityTrace, "snb" ) - << ( cc::info( "SKALE NETWORK BROWSER" ) + cc::debug( " will load all S-Chains" ) ); - } - vec_s_chains_t vec; - dev::u256 cntSChains = get_schains_count( u, addressFrom, addressSchainsInternal ); - for ( dev::u256 idxSChain; idxSChain < cntSChains; ++idxSChain ) { - if ( g_fnStopIndication() ) { - vec.clear(); - break; - } - s_chain_t s_chain = load_schain( - u, addressFrom, idxSChain, cntSChains, addressSchainsInternal, addressNodes ); - vec.push_back( s_chain ); - } - if ( g_bVerboseLogging ) { - clog( dev::VerbosityTrace, "snb" ) - << ( cc::info( "SKALE NETWORK BROWSER" ) + cc::debug( " did loaded all S-Chains" ) ); - } - return vec; -} - -nlohmann::json 
to_json( const node_t& node ) { - nlohmann::json jo = nlohmann::json::object(); - jo["id"] = stat_to_0x_string( node.node_id ); - jo["name"] = node.name; - jo["ip"] = node.ip; - jo["publicIP"] = node.publicIP; - jo["base_port"] = node.nPort; - jo["domain"] = node.domainName; - jo["isMaintenance"] = node.isMaintenance; - jo["schain_base_port"] = node.schain_base_port; - jo["http_endpoint_ip"] = node.http_endpoint_ip.str(); - jo["http_endpoint_domain"] = node.http_endpoint_domain.str(); - jo["https_endpoint_ip"] = node.https_endpoint_ip.str(); - jo["https_endpoint_domain"] = node.https_endpoint_domain.str(); - jo["ws_endpoint_ip"] = node.ws_endpoint_ip.str(); - jo["ws_endpoint_domain"] = node.ws_endpoint_domain.str(); - jo["wss_endpoint_ip"] = node.wss_endpoint_ip.str(); - jo["wss_endpoint_domain"] = node.wss_endpoint_domain.str(); - jo["info_http_endpoint_ip"] = node.info_http_endpoint_ip.str(); - jo["info_http_endpoint_domain"] = node.info_http_endpoint_domain.str(); - return jo; -} - -static nlohmann::json stat_to_json( const vec_nodes_t& vecNodes ) { - nlohmann::json jarr = nlohmann::json::array(); - vec_nodes_t::const_iterator itWalk = vecNodes.cbegin(), itEnd = vecNodes.cend(); - for ( ; itWalk != itEnd; ++itWalk ) { - const node_t& node = ( *itWalk ); - jarr.push_back( to_json( node ) ); - } - return jarr; -} - -nlohmann::json to_json( const s_chain_t& s_chain ) { - nlohmann::json jo = nlohmann::json::object(); - jo["name"] = s_chain.name; - jo["owner"] = stat_to_0x_string( s_chain.owner ); // address - jo["indexInOwnerList"] = s_chain.indexInOwnerList; // uint - jo["partOfNode"] = s_chain.partOfNode; // uint8 - jo["lifetime"] = s_chain.lifetime; // uint - jo["startDate"] = s_chain.startDate; // uint - jo["startBlock"] = stat_to_0x_string( s_chain.startBlock ); // uint - jo["deposit"] = stat_to_0x_string( s_chain.deposit ); // uint - jo["index"] = s_chain.index; // uint64 - jo["generation"] = s_chain.generation; // uint - jo["originator"] = stat_to_0x_string( 
s_chain.originator ); // address - jo["computed"] = nlohmann::json::object(); - jo["computed"]["schain_id"] = stat_to_0x_string( s_chain.schain_id ); // keccak256(name) - jo["computed"]["chainId"] = stat_to_0x_string( s_chain.chainId ); // part of schain_id - jo["computed"]["nodes"] = stat_to_json( s_chain.vecNodes ); - return jo; -} - -nlohmann::json to_json( const vec_s_chains_t& vec ) { - nlohmann::json jarr = nlohmann::json::array(); - vec_s_chains_t::const_iterator itWalk = vec.cbegin(), itEnd = vec.cend(); - for ( ; itWalk != itEnd; ++itWalk ) { - const s_chain_t& s_chain = ( *itWalk ); - jarr.push_back( to_json( s_chain ) ); - } - return jarr; -} - -static std::recursive_mutex g_mtx; -static const char g_queue_id[] = "skale-network-browser"; -static skutils::dispatch::job_id_t g_idDispatchJob; -static std::shared_ptr< skutils::json_config_file_accessor > g_json_config_file_accessor; -static skutils::dispatch::job_t g_dispatch_job; -static vec_s_chains_t g_last_cached; - -vec_s_chains_t refreshing_cached() { - vec_s_chains_t vec; - if ( !g_fnStopIndication() ) { - std::lock_guard lock( g_mtx ); - vec = g_last_cached; - } - return vec; -} - -bool stat_refresh_now( const skutils::url& u, const dev::u256& addressFrom, - const dev::u256& addressSchainsInternal, const dev::u256& addressNodes ) { - try { - if ( g_fnStopIndication() ) - return false; - if ( g_bVerboseLogging ) { - clog( dev::VerbosityTrace, "snb" ) << ( cc::info( "SKALE NETWORK BROWSER" ) + - cc::debug( " will perform cache refreshing" ) ); - } - vec_s_chains_t vec = load_schains( u, addressFrom, addressSchainsInternal, addressNodes ); - nlohmann::json jarr = to_json( vec ); - std::lock_guard lock( g_mtx ); - g_last_cached = vec; - clog( dev::VerbosityDebug, "snb" ) << ( cc::info( "SKALE NETWORK BROWSER" ) + - cc::debug( " cached data: " ) + cc::j( jarr ) ); - return true; - } catch ( std::exception& ex ) { - std::string strErrorDescription = ex.what(); - clog( dev::VerbosityError, "snb" ) - << ( 
cc::fatal( "SKALE NETWORK BROWSER FAILURE:" ) + - cc::error( " Failed to download " ) + cc::note( "SKALE NETWORK" ) + - cc::error( " browsing data: " ) + cc::warn( strErrorDescription ) ); - } catch ( ... ) { - std::string strErrorDescription = "unknown exception"; - clog( dev::VerbosityError, "snb" ) - << ( cc::fatal( "SKALE NETWORK BROWSER FAILURE:" ) + - cc::error( " Failed to download " ) + cc::note( "SKALE NETWORK" ) + - cc::error( " browsing data: " ) + cc::warn( strErrorDescription ) ); - } - std::lock_guard lock( g_mtx ); - return false; -} - -bool refreshing_start( const std::string& configPath, fn_stop_indication_t fnStopIndication ) { - std::lock_guard lock( g_mtx ); - refreshing_stop(); - g_fnStopIndication = - ( fnStopIndication != nullptr ) ? fnStopIndication : stat_fnNonStopIndication; - clog( dev::VerbosityTrace, "snb" ) - << ( cc::info( "SKALE NETWORK BROWSER" ) + - cc::debug( " refreshing(start) will prepare data to run..." ) ); - g_json_config_file_accessor.reset( new skutils::json_config_file_accessor( configPath ) ); - // - nlohmann::json joConfig = g_json_config_file_accessor->getConfigJSON(); - if ( joConfig.count( "skaleConfig" ) == 0 ) { - clog( dev::VerbosityError, "snb" ) - << ( cc::fatal( "SKALE NETWORK BROWSER FAILURE:" ) + - cc::error( " Error in config.json file, cannot find \"skaleConfig\"" ) ); - return false; - } - const nlohmann::json& joSkaleConfig = joConfig["skaleConfig"]; - clog( dev::VerbosityTrace, "snb" ) << ( cc::info( "SKALE NETWORK BROWSER" ) + - cc::debug( " refreshing(start) have entire config" ) ); - if ( joSkaleConfig.count( "nodeInfo" ) == 0 ) { - clog( dev::VerbosityError, "snb" ) - << ( cc::fatal( "SKALE NETWORK BROWSER FAILURE:" ) + - cc::error( - " Error in config.json file, cannot find \"skaleConfig\"/\"nodeInfo\"" ) ); - return false; - } - const nlohmann::json& joSkaleConfig_nodeInfo = joSkaleConfig["nodeInfo"]; - clog( dev::VerbosityTrace, "snb" ) - << ( cc::info( "SKALE NETWORK BROWSER" ) + cc::debug( " 
refreshing(start) have " ) + - cc::notice( "nodeInfo" ) + cc::debug( " in config" ) ); - // - if ( joSkaleConfig_nodeInfo.count( "skale-manager" ) == 0 ) { - clog( dev::VerbosityError, "snb" ) - << ( cc::fatal( "SKALE NETWORK BROWSER FAILURE:" ) + - cc::error( " Error in config.json file, cannot find " - "\"skaleConfig\"/\"nodeInfo\"/\"skale-manager\"" ) ); - return false; - } - const nlohmann::json& joSkaleConfig_nodeInfo_sm = joSkaleConfig_nodeInfo["skale-manager"]; - clog( dev::VerbosityTrace, "snb" ) - << ( cc::info( "SKALE NETWORK BROWSER" ) + cc::debug( " refreshing(start) have " ) + - cc::notice( "nodeInfo" ) + cc::debug( "/" ) + cc::notice( "skale-manager" ) + - cc::debug( " in config" ) ); - // - if ( joSkaleConfig_nodeInfo_sm.count( "SchainsInternal" ) == 0 ) { - clog( dev::VerbosityError, "snb" ) - << ( cc::fatal( "SKALE NETWORK BROWSER FAILURE:" ) + - cc::error( - " Error in config.json file, cannot find " - "\"skaleConfig\"/\"nodeInfo\"/\"skale-manager\"/\"SchainsInternal\"" ) ); - return false; - } - const nlohmann::json& joSkaleConfig_nodeInfo_sm_SchainsInternal = - joSkaleConfig_nodeInfo_sm["SchainsInternal"]; - clog( dev::VerbosityTrace, "snb" ) - << ( cc::info( "SKALE NETWORK BROWSER" ) + cc::debug( " refreshing(start) have " ) + - cc::notice( "nodeInfo" ) + cc::debug( "/" ) + cc::notice( "skale-manager" ) + - cc::debug( "/" ) + cc::notice( "SchainsInternal" ) + cc::debug( " in config" ) ); - if ( !joSkaleConfig_nodeInfo_sm_SchainsInternal.is_string() ) { - clog( dev::VerbosityError, "snb" ) - << ( cc::fatal( "SKALE NETWORK BROWSER FAILURE:" ) + - cc::error( " Error in config.json file, cannot find " - "\"skaleConfig\"/\"nodeInfo\"/\"skale-manager\"/\"SchainsInternal\" " - "as string value" ) ); - return false; - } - // - if ( joSkaleConfig_nodeInfo_sm.count( "Nodes" ) == 0 ) { - clog( dev::VerbosityError, "snb" ) - << ( cc::fatal( "SKALE NETWORK BROWSER FAILURE:" ) + - cc::error( " Error in config.json file, cannot find " - 
"\"skaleConfig\"/\"nodeInfo\"/\"skale-manager\"/\"Nodes\"" ) ); - return false; - } - const nlohmann::json& joSkaleConfig_nodeInfo_sm_Nodes = joSkaleConfig_nodeInfo_sm["Nodes"]; - clog( dev::VerbosityTrace, "snb" ) - << ( cc::info( "SKALE NETWORK BROWSER" ) + cc::debug( " refreshing(start) have " ) + - cc::notice( "nodeInfo" ) + cc::debug( "/" ) + cc::notice( "skale-manager" ) + - cc::debug( "/" ) + cc::notice( "Nodes" ) + cc::debug( " in config" ) ); - if ( !joSkaleConfig_nodeInfo_sm_Nodes.is_string() ) { - clog( dev::VerbosityError, "snb" ) - << ( cc::fatal( "SKALE NETWORK BROWSER FAILURE:" ) + - cc::error( " Error in config.json file, cannot find " - "\"skaleConfig\"/\"nodeInfo\"/\"skale-manager\"/\"Nodes\" as string " - "value" ) ); - return false; - } - // - if ( joSkaleConfig.count( "sChain" ) == 0 ) { - clog( dev::VerbosityError, "snb" ) - << ( cc::fatal( "SKALE NETWORK BROWSER FAILURE:" ) + - cc::error( - " Error in config.json file, cannot find \"skaleConfig\"/\"sChain\"" ) ); - return false; - } - const nlohmann::json& joSkaleConfig_sChain = joSkaleConfig["sChain"]; - clog( dev::VerbosityTrace, "snb" ) - << ( cc::info( "SKALE NETWORK BROWSER" ) + cc::debug( " refreshing(start) have " ) + - cc::notice( "sChain" ) + cc::debug( " in config" ) ); - // - std::string strAddressFrom; - if ( joSkaleConfig_sChain.count( "schainOwner" ) != 0 ) { - const nlohmann::json& joSkaleConfig_sChain_schainOwner = - joSkaleConfig_sChain["schainOwner"]; - clog( dev::VerbosityTrace, "snb" ) - << ( cc::info( "SKALE NETWORK BROWSER" ) + cc::debug( " refreshing(start) have " ) + - cc::notice( "sChain" ) + cc::debug( "/" ) + cc::notice( "schainOwner" ) + - cc::debug( " in config" ) ); - if ( joSkaleConfig_sChain_schainOwner.is_string() ) { - strAddressFrom = - skutils::tools::trim_copy( joSkaleConfig_sChain_schainOwner.get< std::string >() ); - clog( dev::VerbosityTrace, "snb" ) - << ( cc::info( "SKALE NETWORK BROWSER" ) + cc::debug( " refreshing(start) have " ) + - cc::notice( 
"sChain" ) + cc::debug( "/" ) + cc::notice( "schainOwner" ) + - cc::debug( " value " ) + cc::attention( strAddressFrom ) ); - } - } - size_t nIntervalSeconds = g_nRefreshIntervalInSeconds; - if ( joSkaleConfig_nodeInfo.count( "skale-network-browser-refresh" ) > 0 ) { - const nlohmann::json& joSkaleConfig_nodeInfo_refresh = - joSkaleConfig_nodeInfo["skale-network-browser-refresh"]; - clog( dev::VerbosityTrace, "snb" ) - << ( cc::info( "SKALE NETWORK BROWSER" ) + cc::debug( " refreshing(start) have " ) + - cc::notice( "nodeInfo" ) + cc::debug( "/" ) + - cc::notice( "skale-network-browser-refresh" ) + cc::debug( " in config" ) ); - if ( joSkaleConfig_nodeInfo_refresh.is_number() ) { - nIntervalSeconds = g_nRefreshIntervalInSeconds = - joSkaleConfig_nodeInfo_refresh.get< size_t >(); - clog( dev::VerbosityTrace, "snb" ) - << ( cc::info( "SKALE NETWORK BROWSER" ) + cc::debug( " refreshing(start) have " ) + - cc::notice( "nodeInfo" ) + cc::debug( "/" ) + - cc::notice( "skale-network-browser-refresh" ) + cc::debug( " value " ) + - cc::num10( nIntervalSeconds ) + cc::debug( "(in seconds)" ) ); - } - } - if ( joSkaleConfig_nodeInfo.count( "skale-network-browser-verbose" ) > 0 ) { - const nlohmann::json& joSkaleConfig_nodeInfo_verbose = - joSkaleConfig_nodeInfo["skale-network-browser-verbose"]; - clog( dev::VerbosityTrace, "snb" ) - << ( cc::info( "SKALE NETWORK BROWSER" ) + cc::debug( " refreshing(start) have " ) + - cc::notice( "nodeInfo" ) + cc::debug( "/" ) + - cc::notice( "skale-network-browser-verbose" ) + cc::debug( " in config" ) ); - if ( joSkaleConfig_nodeInfo_verbose.is_boolean() ) { - g_bVerboseLogging = joSkaleConfig_nodeInfo_verbose.get< bool >(); - clog( dev::VerbosityTrace, "snb" ) - << ( cc::info( "SKALE NETWORK BROWSER" ) + cc::debug( " refreshing(start) have " ) + - cc::notice( "nodeInfo" ) + cc::debug( "/" ) + - cc::notice( "skale-network-browser-verbose" ) + cc::debug( " value " ) + - cc::flag( g_bVerboseLogging ) + cc::debug( "(as boolean)" ) ); - } else 
if ( joSkaleConfig_nodeInfo_verbose.is_number() ) { - g_bVerboseLogging = - ( joSkaleConfig_nodeInfo_verbose.get< size_t >() != 0 ) ? true : false; - clog( dev::VerbosityTrace, "snb" ) - << ( cc::info( "SKALE NETWORK BROWSER" ) + cc::debug( " refreshing(start) have " ) + - cc::notice( "nodeInfo" ) + cc::debug( "/" ) + - cc::notice( "skale-network-browser-verbose" ) + cc::debug( " value " ) + - cc::flag( g_bVerboseLogging ) + cc::debug( "(as number)" ) ); - } else { - // g_bVerboseLogging = false; - clog( dev::VerbosityTrace, "snb" ) - << ( cc::info( "SKALE NETWORK BROWSER" ) + cc::debug( " refreshing(start) have " ) + - cc::notice( "nodeInfo" ) + cc::debug( "/" ) + - cc::notice( "skale-network-browser-verbose" ) + cc::debug( " value " ) + - cc::flag( g_bVerboseLogging ) + - cc::debug( "(as unparsed, left previous)" ) ); - } - } else { - // g_bVerboseLogging = false; - clog( dev::VerbosityTrace, "snb" ) - << ( cc::info( "SKALE NETWORK BROWSER" ) + cc::debug( " refreshing(start) have " ) + - cc::notice( "nodeInfo" ) + cc::debug( "/" ) + - cc::notice( "skale-network-browser-verbose" ) + cc::debug( " value " ) + - cc::flag( g_bVerboseLogging ) + cc::debug( "(as absent, left previous)" ) ); - } - std::string strAddressSchainsInternal, strAddressNodes; - try { - strAddressSchainsInternal = skutils::tools::trim_copy( - joSkaleConfig_nodeInfo_sm_SchainsInternal.get< std::string >() ); - } catch ( ... ) { - clog( dev::VerbosityError, "snb" ) - << ( cc::fatal( "SKALE NETWORK BROWSER FAILURE:" ) + - cc::error( " failed to get \"SchainsInternal\" contract address" ) ); - return false; - } - try { - strAddressNodes = - skutils::tools::trim_copy( joSkaleConfig_nodeInfo_sm_Nodes.get< std::string >() ); - } catch ( ... 
) { - clog( dev::VerbosityError, "snb" ) - << ( cc::fatal( "SKALE NETWORK BROWSER FAILURE:" ) + - cc::error( " failed to get \"Nodes\" contract address" ) ); - return false; - } - if ( strAddressFrom.empty() ) { - strAddressFrom = "0xaa0f3d9f62271ef8d668947af98e51487ba3f26b"; - clog( dev::VerbosityWarning, "snb" ) - << ( cc::warn( "SKALE NETWORK BROWSER WARNING:" ) + - cc::debug( "Using static address " ) + cc::info( strAddressFrom ) + - cc::debug( " for contract calls because no " ) + cc::info( "skaleConfig" ) + - cc::debug( "/" ) + cc::info( "sChain" ) + cc::debug( "/" ) + - cc::info( "schainOwner" ) + cc::debug( " value is provided" ) ); - } - if ( strAddressSchainsInternal.empty() ) { - clog( dev::VerbosityError, "snb" ) - << ( cc::fatal( "SKALE NETWORK BROWSER FAILURE:" ) + - cc::error( " Error in config.json file, cannot find " - "\"skaleConfig\"/\"nodeInfo\"/\"skale-manager\"/\"SchainsInternal\" " - "as non-empty string value" ) ); - return false; - } - if ( strAddressNodes.empty() ) { - clog( dev::VerbosityError, "snb" ) - << ( cc::fatal( "SKALE NETWORK BROWSER FAILURE:" ) + - cc::error( " Error in config.json file, cannot find " - "\"skaleConfig\"/\"nodeInfo\"/\"skale-manager\"/\"Nodes\" as " - "non-empty string value" ) ); - return false; - } - // - skutils::url u; - try { - u = g_json_config_file_accessor->getImaMainNetURL(); - if ( g_bVerboseLogging ) { - clog( dev::VerbosityTrace, "snb" ) - << ( cc::info( "SKALE NETWORK BROWSER" ) + cc::debug( " will use Main Net URL " ) + - cc::u( u ) ); - } - } catch ( ... 
) { - clog( dev::VerbosityError, "snb" ) << ( cc::fatal( "SKALE NETWORK BROWSER FAILURE:" ) + - cc::error( " Main Net URL is unknown" ) ); - return false; - } - // - dev::u256 addressFrom, addressSchainsInternal, addressNodes; - try { - addressFrom = dev::u256( strAddressFrom ); - if ( g_bVerboseLogging ) { - clog( dev::VerbosityTrace, "snb" ) - << ( cc::info( "SKALE NETWORK BROWSER" ) + - cc::debug( " will use \"from\" call address " ) + - cc::info( dev::address_to_js( addressFrom ) ) ); - } - addressSchainsInternal = dev::u256( strAddressSchainsInternal ); - if ( g_bVerboseLogging ) { - clog( dev::VerbosityTrace, "snb" ) - << ( cc::info( "SKALE NETWORK BROWSER" ) + - cc::debug( " will use \"SchainsInternal\" contract address " ) + - cc::info( dev::address_to_js( addressSchainsInternal ) ) ); - } - addressNodes = dev::u256( strAddressNodes ); - if ( g_bVerboseLogging ) { - clog( dev::VerbosityTrace, "snb" ) - << ( cc::info( "SKALE NETWORK BROWSER" ) + - cc::debug( " will use \"Nodes\" contract address " ) + - cc::info( dev::address_to_js( addressNodes ) ) ); - } - } catch ( ... ) { - clog( dev::VerbosityError, "snb" ) - << ( cc::fatal( "SKALE NETWORK BROWSER FAILURE:" ) + - cc::error( " failed to construct needed addresses" ) ); - return false; - } - try { - clog( dev::VerbosityTrace, "snb" ) << ( cc::info( "SKALE NETWORK BROWSER" ) + - cc::debug( " refreshing(initial) will run..." 
) ); - clock_t tt = clock(); - stat_refresh_now( u, addressFrom, addressSchainsInternal, addressNodes ); - tt = clock() - tt; - double lf_time_taken = ( ( double ) tt ) / CLOCKS_PER_SEC; // in seconds - clog( dev::VerbosityTrace, "snb" ) - << ( cc::info( "SKALE NETWORK BROWSER" ) + - cc::debug( " refreshing(initial) did finished, " ) + - cc::notice( skutils::tools::format( "%f", lf_time_taken ) ) + - cc::debug( " second(s) spent" ) ); - } catch ( std::exception& ex ) { - std::string strErrorDescription = ex.what(); - clog( dev::VerbosityError, "snb" ) << ( cc::fatal( "SKALE NETWORK BROWSER FAILURE:" ) + - cc::error( " refreshing(initial) exception: " ) + - cc::warn( strErrorDescription ) ); - } catch ( ... ) { - std::string strErrorDescription = "unknown exception"; - clog( dev::VerbosityError, "snb" ) << ( cc::fatal( "SKALE NETWORK BROWSER FAILURE:" ) + - cc::error( " refreshing(initial) exception: " ) + - cc::warn( strErrorDescription ) ); - } - clog( dev::VerbosityTrace, "snb" ) - << ( cc::info( "SKALE NETWORK BROWSER" ) + - cc::debug( " refreshing(start) will register dispatch/job..." ) ); - g_dispatch_job = [=]() -> void { - stat_refresh_now( u, addressFrom, addressSchainsInternal, addressNodes ); - }; - skutils::dispatch::repeat( g_queue_id, g_dispatch_job, - skutils::dispatch::duration_from_seconds( nIntervalSeconds ), &g_idDispatchJob ); - clog( dev::VerbosityTrace, "snb" ) - << ( cc::info( "SKALE NETWORK BROWSER" ) + - cc::debug( " refreshing(start) did registered dispatch/job" ) ); - return true; -} - -void refreshing_stop() { - std::lock_guard lock( g_mtx ); - if ( !g_idDispatchJob.empty() ) { - clog( dev::VerbosityTrace, "snb" ) - << ( cc::info( "SKALE NETWORK BROWSER" ) + - cc::debug( " refreshing(stop) will stop existing dispatch/job..." 
) ); - skutils::dispatch::stop( g_idDispatchJob ); - g_idDispatchJob.clear(); - clog( dev::VerbosityTrace, "snb" ) - << ( cc::info( "SKALE NETWORK BROWSER" ) + - cc::debug( " refreshing(stop) did stopped existing dispatch/job" ) ); - } - if ( g_json_config_file_accessor ) - g_json_config_file_accessor.reset(); - g_dispatch_job = skutils::dispatch::job_t(); // clear - clog( dev::VerbosityTrace, "snb" ) - << ( cc::info( "SKALE NETWORK BROWSER" ) + - cc::debug( " refreshing(stop) did cleared/forgot dispatch/job" ) ); -} - -vec_s_chains_t refreshing_do_now() { - std::lock_guard lock( g_mtx ); - if ( ( !g_idDispatchJob.empty() ) && g_json_config_file_accessor && g_dispatch_job ) { - clog( dev::VerbosityTrace, "snb" ) - << ( cc::info( "SKALE NETWORK BROWSER" ) + - cc::debug( " refreshing(now) will invoke existing dispatch/job..." ) ); - try { - clock_t tt = clock(); - g_dispatch_job(); - tt = clock() - tt; - double lf_time_taken = ( ( double ) tt ) / CLOCKS_PER_SEC; // in seconds - clog( dev::VerbosityTrace, "snb" ) - << ( cc::info( "SKALE NETWORK BROWSER" ) + - cc::debug( " refreshing(now) did invoked existing dispatch/job, " ) + - cc::notice( skutils::tools::format( "%f", lf_time_taken ) ) + - cc::debug( " second(s) spent" ) ); - } catch ( std::exception& ex ) { - std::string strErrorDescription = ex.what(); - clog( dev::VerbosityError, "snb" ) << ( cc::fatal( "SKALE NETWORK BROWSER FAILURE:" ) + - cc::error( " refreshing(now) exception: " ) + - cc::warn( strErrorDescription ) ); - } catch ( ... 
) { - std::string strErrorDescription = "unknown exception"; - clog( dev::VerbosityError, "snb" ) << ( cc::fatal( "SKALE NETWORK BROWSER FAILURE:" ) + - cc::error( " refreshing(now) exception: " ) + - cc::warn( strErrorDescription ) ); - } - } else { - clog( dev::VerbosityTrace, "snb" ) - << ( cc::fatal( "SKALE NETWORK BROWSER FAILURE: " ) + - cc::error( " refreshing(now) did skipped invoking existing dispatch/job, old " - "cached data will be returned and used" ) ); - } - return refreshing_cached(); -} - -skutils::url refreshing_pick_s_chain_url( const std::string& strSChainName ) { - if ( strSChainName.empty() ) - throw std::runtime_error( - "SKALE NETWORK BROWSER FAILURE: Cannot pick S-Chain URL by empty S-Chain name" ); - vec_s_chains_t vec = refreshing_cached(); - if ( vec.empty() ) - throw std::runtime_error( "SKALE NETWORK BROWSER FAILURE: Cannot pick S-Chain \"" + - strSChainName + "\" URL from empty cache" ); - const size_t cnt = vec.size(); - for ( size_t i = 0; i < cnt; ++i ) { - const s_chain_t& s_chain = vec[i]; - if ( s_chain.name == strSChainName ) { - const size_t cntNodes = s_chain.vecNodes.size(); - if ( cntNodes == 0 ) - throw std::runtime_error( "SKALE NETWORK BROWSER FAILURE: Cannot pick S-Chain \"" + - strSChainName + - "\" URL because there are no nodes in cache" ); - const size_t idxNode = rand() % cntNodes; - const node_t& node = s_chain.vecNodes[idxNode]; - return node.http_endpoint_ip; - } - } - throw std::runtime_error( "SKALE NETWORK BROWSER FAILURE: Cannot pick S-Chain \"" + - strSChainName + "\" URL because it's not in cache" ); -} - -} // namespace browser -} // namespace network -} // namespace skale diff --git a/libweb3jsonrpc/SkaleNetworkBrowser.h b/libweb3jsonrpc/SkaleNetworkBrowser.h deleted file mode 100644 index 66c094d73..000000000 --- a/libweb3jsonrpc/SkaleNetworkBrowser.h +++ /dev/null @@ -1,91 +0,0 @@ -#if ( !defined __SKALE_NETWORK_BROWSER_H ) -#define __SKALE_NETWORK_BROWSER_H 1 - -#include -#include - -#include - 
-#include -#include - -#include - -namespace skale { -namespace network { -namespace browser { - -extern bool g_bVerboseLogging; -extern size_t g_nRefreshIntervalInSeconds; - -struct node_t { - dev::u256 node_id; - std::string name, ip, publicIP; - int nPort = -1; // base port - // downloaded via other calls: - std::string domainName; - bool isMaintenance = false; - int schain_base_port = -1; - // computed ports: - int httpRpcPort = -1, httpsRpcPort = -1, wsRpcPort = -1, wssRpcPort = -1, infoHttpRpcPort = -1; - // computed endpoints: - skutils::url http_endpoint_ip, http_endpoint_domain, https_endpoint_ip, https_endpoint_domain, - ws_endpoint_ip, ws_endpoint_domain, wss_endpoint_ip, wss_endpoint_domain, - info_http_endpoint_ip, info_http_endpoint_domain; -}; // struct node_t - -typedef std::vector< node_t > vec_nodes_t; - -struct s_chain_t { - std::string name; - dev::u256 owner; // address - size_t indexInOwnerList; // uint - size_t partOfNode; // uint8 - size_t lifetime; // uint - size_t startDate; // uint - dev::u256 startBlock; // uint - dev::u256 deposit; // uint - size_t index; // uint64 - size_t generation; // uint - dev::u256 originator; // address - vec_nodes_t vecNodes; - // computed: - dev::h256 schain_id; // keccak256(name) - dev::u256 chainId; // part of schain_id -}; // struct s_chain_t - -typedef std::vector< s_chain_t > vec_s_chains_t; - -dev::u256 get_schains_count( const skutils::url& u, const dev::u256& addressFrom ); - -s_chain_t load_schain( const skutils::url& u, const dev::u256& addressFrom, - const dev::u256& idxSChain, const dev::u256& cntSChains, - const dev::u256& addressSchainsInternal, const dev::u256& addressNodes ); - -vec_s_chains_t load_schains( const skutils::url& u, const dev::u256& addressFrom, - const dev::u256& addressSchainsInternal, const dev::u256& addressNodes ); - -nlohmann::json to_json( const node_t& node ); - -nlohmann::json to_json( const s_chain_t& s_chain ); - -nlohmann::json to_json( const vec_s_chains_t& vec ); - 
-vec_s_chains_t refreshing_cached(); - -typedef bool ( *fn_stop_indication_t )(); - -bool refreshing_start( - const std::string& configPath, fn_stop_indication_t fnStopIndication = nullptr ); - -void refreshing_stop(); - -vec_s_chains_t refreshing_do_now(); - -skutils::url refreshing_pick_s_chain_url( const std::string& strSChainName ); - -} // namespace browser -} // namespace network -} // namespace skale - -#endif /// (!defined __SKALE_NETWORK_BROWSER_H) diff --git a/libweb3jsonrpc/SkaleStats.cpp b/libweb3jsonrpc/SkaleStats.cpp index 886769036..a79a01c47 100644 --- a/libweb3jsonrpc/SkaleStats.cpp +++ b/libweb3jsonrpc/SkaleStats.cpp @@ -62,8 +62,6 @@ #include #include -#include - ////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// @@ -129,1010 +127,116 @@ static void stat_check_rpc_call_error_and_throw( ////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// -namespace tracking { - -txn_entry::txn_entry() { - clear(); -} - -txn_entry::txn_entry( const dev::u256& hash ) { - clear(); - hash_ = hash; - setNowTimeStamp(); -} - -txn_entry::txn_entry( const txn_entry& other ) { - assign( other ); -} - -txn_entry::txn_entry( txn_entry&& other ) { - assign( other ); - other.clear(); -} -txn_entry::~txn_entry() { - clear(); -} - -bool txn_entry::operator!() const { - return empty() ? 
false : true; -} - -txn_entry& txn_entry::operator=( const txn_entry& other ) { - return assign( other ); -} - -bool txn_entry::operator==( const txn_entry& other ) const { - return ( compare( other ) == 0 ) ? true : false; -} -bool txn_entry::operator!=( const txn_entry& other ) const { - return ( compare( other ) != 0 ) ? true : false; -} -bool txn_entry::operator<( const txn_entry& other ) const { - return ( compare( other ) < 0 ) ? true : false; -} -bool txn_entry::operator<=( const txn_entry& other ) const { - return ( compare( other ) <= 0 ) ? true : false; -} -bool txn_entry::operator>( const txn_entry& other ) const { - return ( compare( other ) > 0 ) ? true : false; -} -bool txn_entry::operator>=( const txn_entry& other ) const { - return ( compare( other ) >= 0 ) ? true : false; -} - -bool txn_entry::operator==( const dev::u256& hash ) const { - return ( compare( hash ) == 0 ) ? true : false; -} -bool txn_entry::operator!=( const dev::u256& hash ) const { - return ( compare( hash ) != 0 ) ? true : false; -} -bool txn_entry::operator<( const dev::u256& hash ) const { - return ( compare( hash ) < 0 ) ? true : false; -} -bool txn_entry::operator<=( const dev::u256& hash ) const { - return ( compare( hash ) <= 0 ) ? true : false; -} -bool txn_entry::operator>( const dev::u256& hash ) const { - return ( compare( hash ) > 0 ) ? true : false; -} -bool txn_entry::operator>=( const dev::u256& hash ) const { - return ( compare( hash ) >= 0 ) ? 
true : false; -} - -bool txn_entry::empty() const { - if ( hash_ == 0 ) - return true; - return false; -} - -void txn_entry::clear() { - hash_ = 0; - ts_ = 0; -} - -txn_entry& txn_entry::assign( const txn_entry& other ) { - hash_ = other.hash_; - ts_ = other.ts_; - return ( *this ); -} - -int txn_entry::compare( const dev::u256& hash ) const { - if ( hash_ < hash ) - return -1; - if ( hash_ > hash ) - return 1; - return 0; -} - -int txn_entry::compare( const txn_entry& other ) const { - return compare( other.hash_ ); -} - -void txn_entry::setNowTimeStamp() { - ts_ = ::time( nullptr ); -} - -nlohmann::json txn_entry::toJSON() const { - nlohmann::json jo = nlohmann::json::object(); - jo["hash"] = dev::toJS( hash_ ); - jo["timestamp"] = ts_; - return jo; -} - -bool txn_entry::fromJSON( const nlohmann::json& jo ) { - if ( !jo.is_object() ) - return false; - try { - std::string strHash; - if ( jo.count( "hash" ) > 0 && jo["hash"].is_string() ) - strHash = jo["hash"].get< std::string >(); - else - throw std::runtime_error( - "txn_entry::fromJSON() failed because \"hash\" is must-have field of tracked TXN" ); - dev::u256 h = stat_str2u256( strHash ); - int ts = 0; - try { - if ( jo.count( "timestamp" ) > 0 && jo["timestamp"].is_number() ) - ts = jo["timestamp"].get< int >(); - } catch ( ... ) { - ts = 0; - } - hash_ = h; - ts_ = ts; - return true; - } catch ( ... 
) { - return false; - } -} - -////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// -////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// - -std::atomic_size_t txn_pending_tracker_system_impl::g_nMaxPendingTxns = 512; -std::string txn_pending_tracker_system_impl::g_strDispatchQueueID = "IMA-txn-tracker"; - -txn_pending_tracker_system_impl::txn_pending_tracker_system_impl( - const std::string& configPath, const std::string& strSgxWalletURL ) - : skutils::json_config_file_accessor( configPath ), strSgxWalletURL_( strSgxWalletURL ) {} - -txn_pending_tracker_system_impl::~txn_pending_tracker_system_impl() { - tracking_stop(); - clear(); -} - -bool txn_pending_tracker_system_impl::empty() const { - lock_type lock( mtx() ); - if ( !set_txns_.empty() ) - return false; - return true; -} -void txn_pending_tracker_system_impl::clear() { - lock_type lock( mtx() ); - set_txns_.clear(); - list_txns_.clear(); - tracking_auto_start_stop(); -} - -size_t txn_pending_tracker_system_impl::max_txns() const { - size_t cnt = g_nMaxPendingTxns; - return cnt; -} - -size_t txn_pending_tracker_system_impl::adjust_limits_impl( bool isEnableBroadcast ) { - const size_t nMax = max_txns(); - if ( nMax < 1 ) - return nMax; // no limits - size_t cnt = list_txns_.size(); - while ( cnt > nMax ) { - txn_entry txe = list_txns_.front(); - if ( !erase( txe.hash_, isEnableBroadcast ) ) - break; - cnt = list_txns_.size(); - } - tracking_auto_start_stop(); - cnt = list_txns_.size(); - return cnt; -} -size_t txn_pending_tracker_system_impl::adjust_limits( bool isEnableBroadcast ) { - lock_type lock( mtx() ); - size_t cnt = adjust_limits_impl( isEnableBroadcast ); - return cnt; -} - -bool txn_pending_tracker_system_impl::insert( txn_entry& txe, bool isEnableBroadcast ) { - lock_type lock( mtx() ); -#if ( defined 
__IMA_PTX_ENABLE_TRACKING_ON_THE_FLY ) - tracking_step(); -#endif // (defined __IMA_PTX_ENABLE_TRACKING_ON_THE_FLY) - set_txns_t::iterator itFindS = set_txns_.find( txe ), itEndS = set_txns_.end(); - if ( itFindS != itEndS ) - return false; - set_txns_.insert( txe ); - list_txns_.push_back( txe ); - on_txn_insert( txe, isEnableBroadcast ); - adjust_limits_impl( isEnableBroadcast ); - return true; -} -bool txn_pending_tracker_system_impl::insert( dev::u256 hash, bool isEnableBroadcast ) { - txn_entry txe( hash ); - return insert( txe, isEnableBroadcast ); -} - -bool txn_pending_tracker_system_impl::erase( txn_entry& txe, bool isEnableBroadcast ) { - return erase( txe.hash_, isEnableBroadcast ); -} -bool txn_pending_tracker_system_impl::erase( dev::u256 hash, bool isEnableBroadcast ) { - lock_type lock( mtx() ); - set_txns_t::iterator itFindS = set_txns_.find( hash ), itEndS = set_txns_.end(); - if ( itFindS == itEndS ) - return false; - txn_entry txe = ( *itFindS ); - set_txns_.erase( itFindS ); - list_txns_t::iterator itFindL = std::find( list_txns_.begin(), list_txns_.end(), hash ); - if ( itFindL != list_txns_.end() ) - list_txns_.erase( itFindL ); - on_txn_erase( txe, isEnableBroadcast ); -#if ( defined __IMA_PTX_ENABLE_TRACKING_ON_THE_FLY ) - tracking_step(); -#endif // (defined __IMA_PTX_ENABLE_TRACKING_ON_THE_FLY) - return true; -} - -bool txn_pending_tracker_system_impl::find( txn_entry& txe ) const { - return find( txe.hash_ ); -} -bool txn_pending_tracker_system_impl::find( const dev::u256& hash ) const { - lock_type lock( mtx() ); - //#if ( defined __IMA_PTX_ENABLE_TRACKING_ON_THE_FLY ) - // ( const_cast< txn_pending_tracker_system_impl* >( this ) )->tracking_step(); - //#endif // (defined __IMA_PTX_ENABLE_TRACKING_ON_THE_FLY) - set_txns_t::const_iterator itFindS = set_txns_.find( hash ), itEndS = set_txns_.cend(); - if ( itFindS == itEndS ) - return false; - return true; -} - -void txn_pending_tracker_system_impl::list_all( list_txns_t& lst ) const { - 
lst.clear(); - //#if ( defined __IMA_PTX_ENABLE_TRACKING_ON_THE_FLY ) - // ( const_cast< txn_pending_tracker_system_impl* >( this ) )->tracking_step(); - //#endif // (defined __IMA_PTX_ENABLE_TRACKING_ON_THE_FLY) - lock_type lock( mtx() ); - lst = list_txns_; -} - -void txn_pending_tracker_system_impl::on_txn_insert( - const txn_entry& txe, bool isEnableBroadcast ) { - tracking_auto_start_stop(); - if ( isEnableBroadcast ) - broadcast_txn_insert( txe ); -} -void txn_pending_tracker_system_impl::on_txn_erase( const txn_entry& txe, bool isEnableBroadcast ) { - tracking_auto_start_stop(); - if ( isEnableBroadcast ) - broadcast_txn_erase( txe ); -} +namespace rpc { -bool txn_pending_tracker_system_impl::broadcast_txn_sign_is_enabled( - const std::string& strWalletURL ) { +SkaleStats::SkaleStats( + const std::string& configPath, eth::Interface& _eth, const dev::eth::ChainParams& chainParams ) + : skutils::json_config_file_accessor( configPath ), chainParams_( chainParams ), m_eth( _eth ) { + nThisNodeIndex_ = findThisNodeIndex(); + // try { - nlohmann::json joConfig = getConfigJSON(); - if ( joConfig.count( "skaleConfig" ) == 0 ) - return false; - const nlohmann::json& joSkaleConfig = joConfig["skaleConfig"]; - if ( joSkaleConfig.count( "nodeInfo" ) == 0 ) - return false; - const nlohmann::json& joSkaleConfig_nodeInfo = joSkaleConfig["nodeInfo"]; - if ( joSkaleConfig_nodeInfo.count( "ecdsaKeyName" ) == 0 ) - return false; - if ( joSkaleConfig_nodeInfo.count( "wallets" ) == 0 ) - return false; - const nlohmann::json& joSkaleConfig_nodeInfo_wallets = joSkaleConfig_nodeInfo["wallets"]; - if ( joSkaleConfig_nodeInfo_wallets.count( "ima" ) == 0 ) - return false; - if ( strWalletURL.empty() ) - return false; - return true; - } catch ( ... 
) { - } - return false; + skutils::url urlMainNet = getImaMainNetURL(); + } catch ( const std::exception& ex ) { + clog( VerbosityInfo, std::string( "IMA disabled: " ) + ex.what() ); + } // catch } -std::string txn_pending_tracker_system_impl::broadcast_txn_sign_string( const char* strToSign ) { - std::string strBroadcastSignature; +int SkaleStats::findThisNodeIndex() { try { - // - // Check wallet URL and keyShareName for future use, - // fetch SSL options for SGX - // - skutils::url u; - skutils::http::SSL_client_options optsSSL; nlohmann::json joConfig = getConfigJSON(); - // if ( joConfig.count( "skaleConfig" ) == 0 ) throw std::runtime_error( "error in config.json file, cannot find \"skaleConfig\"" ); const nlohmann::json& joSkaleConfig = joConfig["skaleConfig"]; + // if ( joSkaleConfig.count( "nodeInfo" ) == 0 ) throw std::runtime_error( "error in config.json file, cannot find \"skaleConfig\"/\"nodeInfo\"" ); const nlohmann::json& joSkaleConfig_nodeInfo = joSkaleConfig["nodeInfo"]; - if ( joSkaleConfig_nodeInfo.count( "ecdsaKeyName" ) == 0 ) - throw std::runtime_error( - "error in config.json file, cannot find " - "\"skaleConfig\"/\"nodeInfo\"/\"ecdsaKeyName\"" ); - const nlohmann::json& joSkaleConfig_nodeInfo_ecdsaKeyName = - joSkaleConfig_nodeInfo["ecdsaKeyName"]; - std::string strEcdsaKeyName = joSkaleConfig_nodeInfo_ecdsaKeyName.get< std::string >(); - if ( joSkaleConfig_nodeInfo.count( "wallets" ) == 0 ) - throw std::runtime_error( - "error in config.json file, cannot find " - "\"skaleConfig\"/\"nodeInfo\"/\"wallets\"" ); - const nlohmann::json& joSkaleConfig_nodeInfo_wallets = joSkaleConfig_nodeInfo["wallets"]; - if ( joSkaleConfig_nodeInfo_wallets.count( "ima" ) == 0 ) - throw std::runtime_error( - "error in config.json file, cannot find " - "\"skaleConfig\"/\"nodeInfo\"/\"wallets\"/\"ima\"" ); - const nlohmann::json& joSkaleConfig_nodeInfo_wallets_ima = - joSkaleConfig_nodeInfo_wallets["ima"]; - const std::string strWalletURL = strSgxWalletURL_; - u = 
skutils::url( strWalletURL ); - if ( u.scheme().empty() || u.host().empty() ) - throw std::runtime_error( "bad wallet url" ); - // - // - try { - if ( joSkaleConfig_nodeInfo_wallets_ima.count( "caFile" ) > 0 ) - optsSSL.ca_file = skutils::tools::trim_copy( - joSkaleConfig_nodeInfo_wallets_ima["caFile"].get< std::string >() ); - } catch ( ... ) { - optsSSL.ca_file.clear(); - } - try { - if ( joSkaleConfig_nodeInfo_wallets_ima.count( "certFile" ) > 0 ) - optsSSL.client_cert = skutils::tools::trim_copy( - joSkaleConfig_nodeInfo_wallets_ima["certFile"].get< std::string >() ); - } catch ( ... ) { - optsSSL.client_cert.clear(); - } - try { - if ( joSkaleConfig_nodeInfo_wallets_ima.count( "keyFile" ) > 0 ) - optsSSL.client_key = skutils::tools::trim_copy( - joSkaleConfig_nodeInfo_wallets_ima["keyFile"].get< std::string >() ); - } catch ( ... ) { - optsSSL.client_key.clear(); - } - // - // - // - dev::u256 hashToSign = - dev::sha3( bytesConstRef( ( unsigned char* ) ( strToSign ? strToSign : "" ), - strToSign ? 
strlen( strToSign ) : 0 ) ); - std::string strHashToSign = dev::toJS( hashToSign ); - clog( VerbosityTrace, "IMA" ) - << ( cc::debug( "Did composeed IMA broadcast message hash " ) + - cc::info( strHashToSign ) + cc::debug( " to sign" ) ); // - // - // - nlohmann::json joCall = nlohmann::json::object(); - joCall["jsonrpc"] = "2.0"; - joCall["method"] = "ecdsaSignMessageHash"; - joCall["type"] = "ECDSASignReq"; - joCall["params"] = nlohmann::json::object(); - joCall["params"]["base"] = 16; - joCall["params"]["keyName"] = strEcdsaKeyName; - joCall["params"]["messageHash"] = strHashToSign; - clog( VerbosityTrace, "IMA" ) - << ( cc::debug( " Contacting " ) + cc::notice( "SGX Wallet" ) + - cc::debug( " server at " ) + cc::u( u ) ); - clog( VerbosityTrace, "IMA" ) - << ( cc::debug( " Will send " ) + cc::notice( "ECDSA sign query" ) + - cc::debug( " to wallet: " ) + cc::j( joCall ) ); - skutils::rest::client cli; - cli.optsSSL_ = optsSSL; - cli.open( u ); - skutils::rest::data_t d = cli.call( joCall ); - if ( !d.err_s_.empty() ) - throw std::runtime_error( - "failed to \"ecdsaSignMessageHash\" sign message(s) with wallet: " + d.err_s_ ); - if ( d.empty() ) + if ( joSkaleConfig.count( "sChain" ) == 0 ) throw std::runtime_error( - "failed to \"ecdsaSignMessageHash\" sign message(s) with wallet, EMPTY data " - "received" ); - const nlohmann::json joAnswer = dev::stat_parse_json_with_error_conversion( d.s_ ); - dev::stat_check_rpc_call_error_and_throw( joAnswer, "ecdsaSignMessageHash" ); - nlohmann::json joSignResult = - ( joAnswer.count( "result" ) > 0 ) ? 
joAnswer["result"] : joAnswer; - if ( joSignResult.count( "signature_r" ) == 0 || joSignResult.count( "signature_v" ) == 0 || - joSignResult.count( "signature_s" ) == 0 ) { + "error in config.json file, cannot find \"skaleConfig\"/\"sChain\"" ); + const nlohmann::json& joSkaleConfig_sChain = joSkaleConfig["sChain"]; + // + if ( joSkaleConfig_sChain.count( "nodes" ) == 0 ) throw std::runtime_error( - "Got \"ecdsaSignMessageHash\" bad answer without " - "\"signature_r\"+\"signature_s\"+\"signature_v\" fields, answer is \"" + - joAnswer.dump() + "\"" ); + "error in config.json file, cannot find \"skaleConfig\"/\"sChain\"/\"nodes\"" ); + const nlohmann::json& joSkaleConfig_sChain_nodes = joSkaleConfig_sChain["nodes"]; + // + int nID = joSkaleConfig_nodeInfo["nodeID"].get< int >(); + const nlohmann::json& jarrNodes = joSkaleConfig_sChain_nodes; + size_t i, cnt = jarrNodes.size(); + for ( i = 0; i < cnt; ++i ) { + const nlohmann::json& joNC = jarrNodes[i]; + try { + int nWalkID = joNC["nodeID"].get< int >(); + if ( nID == nWalkID ) + return joNC["schainIndex"].get< int >(); + } catch ( ... ) { + continue; + } } - clog( VerbosityTrace, "IMA" ) << ( cc::debug( " Got " ) + cc::notice( "ECDSA sign query" ) + - cc::debug( " result: " ) + cc::j( joSignResult ) ); - std::string r = joSignResult["signature_r"].get< std::string >(); - std::string v = joSignResult["signature_v"].get< std::string >(); - std::string s = joSignResult["signature_s"].get< std::string >(); - strBroadcastSignature = v + ":" + r.substr( 2 ) + ":" + s.substr( 2 ); - } catch ( const std::exception& ex ) { - clog( VerbosityTrace, "IMA" ) - << ( cc::fatal( "BROADCAST SIGN ERROR:" ) + " " + cc::warn( ex.what() ) ); - strBroadcastSignature = ""; } catch ( ... 
) { - clog( VerbosityTrace, "IMA" ) - << ( cc::fatal( "BROADCAST SIGN ERROR:" ) + " " + cc::warn( "unknown exception" ) ); - strBroadcastSignature = ""; } - return strBroadcastSignature; + return -1; } -std::string txn_pending_tracker_system_impl::broadcast_txn_compose_string( - const char* strActionName, const dev::u256& tx_hash ) { - std::string strToSign; - strToSign += strActionName ? strActionName : "N/A"; - strToSign += ":"; - strToSign += dev::toJS( tx_hash ); - return strToSign; -} +Json::Value SkaleStats::skale_stats() { + try { + nlohmann::json joStats = consumeSkaleStats(); + + // HACK Add stats from SkalePerformanceTracker + // TODO Why we need all this absatract infrastructure? + const dev::eth::Client* c = dynamic_cast< dev::eth::Client* const >( this->client() ); + if ( c ) { + nlohmann::json joTrace; + std::shared_ptr< SkaleHost > h = c->skaleHost(); -std::string txn_pending_tracker_system_impl::broadcast_txn_sign( - const char* strActionName, const dev::u256& tx_hash ) { - clog( VerbosityTrace, "IMA" ) << ( cc::debug( - "Will compose IMA broadcast message to sign from TX " ) + - cc::info( dev::toJS( tx_hash ) ) + - cc::debug( " and action name " ) + - cc::info( strActionName ) + cc::debug( "..." 
) ); - std::string strToSign = broadcast_txn_compose_string( strActionName, tx_hash ); - clog( VerbosityTrace, "IMA" ) << ( cc::debug( "Did composed IMA broadcast message to sign " ) + - cc::info( strToSign ) ); - std::string strBroadcastSignature = broadcast_txn_sign_string( strToSign.c_str() ); - clog( VerbosityTrace, "IMA" ) << ( cc::debug( "Got broadcast signature " ) + - cc::info( strBroadcastSignature ) ); - return strBroadcastSignature; + std::istringstream list( h->getDebugHandler()( "trace list" ) ); + std::string key; + while ( list >> key ) { + std::string count_str = h->getDebugHandler()( "trace count " + key ); + joTrace[key] = atoi( count_str.c_str() ); + } // while + + joStats["tracepoints"] = joTrace; + + } // if client + + std::string strStatsJson = joStats.dump(); + Json::Value ret; + Json::Reader().parse( strStatsJson, ret ); + return ret; + } catch ( Exception const& ) { + throw jsonrpc::JsonRpcException( exceptionToErrorMessage() ); + } catch ( const std::exception& ex ) { + throw jsonrpc::JsonRpcException( ex.what() ); + } } -std::string txn_pending_tracker_system_impl::broadcast_txn_get_ecdsa_public_key( int node_id ) { - std::string strEcdsaPublicKey; +Json::Value SkaleStats::skale_nodesRpcInfo() { try { nlohmann::json joConfig = getConfigJSON(); if ( joConfig.count( "skaleConfig" ) == 0 ) throw std::runtime_error( "error in config.json file, cannot find \"skaleConfig\"" ); const nlohmann::json& joSkaleConfig = joConfig["skaleConfig"]; + // if ( joSkaleConfig.count( "nodeInfo" ) == 0 ) throw std::runtime_error( "error in config.json file, cannot find \"skaleConfig\"/\"nodeInfo\"" ); - // const nlohmann::json& joSkaleConfig_nodeInfo = joSkaleConfig["nodeInfo"]; + const nlohmann::json& joSkaleConfig_nodeInfo = joSkaleConfig["nodeInfo"]; + // if ( joSkaleConfig.count( "sChain" ) == 0 ) throw std::runtime_error( "error in config.json file, cannot find \"skaleConfig\"/\"sChain\"" ); const nlohmann::json& joSkaleConfig_sChain = 
joSkaleConfig["sChain"]; + // if ( joSkaleConfig_sChain.count( "nodes" ) == 0 ) throw std::runtime_error( "error in config.json file, cannot find \"skaleConfig\"/\"sChain\"/\"nodes\"" ); const nlohmann::json& joSkaleConfig_sChain_nodes = joSkaleConfig_sChain["nodes"]; - for ( auto& joNode : joSkaleConfig_sChain_nodes ) { - if ( !joNode.is_object() ) - continue; - if ( joNode.count( "nodeID" ) == 0 ) - continue; - int walk_id = joNode["nodeID"].get< int >(); - if ( walk_id != node_id ) - continue; - if ( joNode.count( "publicKey" ) == 0 ) - continue; - strEcdsaPublicKey = - skutils::tools::trim_copy( joNode["publicKey"].get< std::string >() ); - if ( strEcdsaPublicKey.empty() ) - continue; - auto nLength = strEcdsaPublicKey.length(); - if ( nLength > 2 && strEcdsaPublicKey[0] == '0' && - ( strEcdsaPublicKey[1] == 'x' || strEcdsaPublicKey[1] == 'X' ) ) - strEcdsaPublicKey = strEcdsaPublicKey.substr( 2, nLength - 2 ); - break; - } - } catch ( ... ) { - strEcdsaPublicKey = ""; - } - return strEcdsaPublicKey; -} - -int txn_pending_tracker_system_impl::broadcast_txn_get_node_id() { - int node_id = 0; - try { - nlohmann::json joConfig = getConfigJSON(); // - if ( joConfig.count( "skaleConfig" ) == 0 ) - throw std::runtime_error( "error in config.json file, cannot find \"skaleConfig\"" ); - const nlohmann::json& joSkaleConfig = joConfig["skaleConfig"]; - if ( joSkaleConfig.count( "nodeInfo" ) == 0 ) - throw std::runtime_error( - "error in config.json file, cannot find \"skaleConfig\"/\"nodeInfo\"" ); - const nlohmann::json& joSkaleConfig_nodeInfo = joSkaleConfig["nodeInfo"]; - if ( joSkaleConfig_nodeInfo.count( "nodeID" ) == 0 ) - throw std::runtime_error( - "error in config.json file, cannot find " - "\"skaleConfig\"/\"nodeInfo\"/\"nodeID\"" ); - const nlohmann::json& joSkaleConfig_nodeInfo_nodeID = joSkaleConfig_nodeInfo["nodeID"]; - node_id = joSkaleConfig_nodeInfo_nodeID.get< int >(); - } catch ( ... 
) { - node_id = 0; - } - return node_id; -} - -bool txn_pending_tracker_system_impl::broadcast_txn_verify_signature( const char* strActionName, - const std::string& strBroadcastSignature, int node_id, const dev::u256& tx_hash ) { - bool isSignatureOK = false; - std::string strNextErrorType = "", strEcdsaPublicKey = "", - strHashToSign = ""; - try { - clog( VerbosityTrace, "IMA" ) - << ( cc::debug( "Will compose IMA broadcast message to verify from TX " ) + - cc::info( dev::toJS( tx_hash ) ) + cc::debug( " and action name " ) + - cc::info( strActionName ) + cc::debug( "..." ) ); - strNextErrorType = "compose verify string"; - std::string strToSign = broadcast_txn_compose_string( strActionName, tx_hash ); - clog( VerbosityTrace, "IMA" ) - << ( cc::debug( "Did composed IMA broadcast message to verify " ) + - cc::info( strToSign ) ); - strNextErrorType = "compose verify hash"; - dev::u256 hashToSign = dev::sha3( - bytesConstRef( ( unsigned char* ) ( ( !strToSign.empty() ) ? strToSign.c_str() : "" ), - strToSign.length() ) ); - strHashToSign = dev::toJS( hashToSign ); - clog( VerbosityTrace, "IMA" ) - << ( cc::debug( "Did composeed IMA broadcast message hash " ) + - cc::info( strHashToSign ) + cc::debug( " to verify" ) ); - // - strNextErrorType = "find node ECDSA public key"; - strEcdsaPublicKey = broadcast_txn_get_ecdsa_public_key( node_id ); - clog( VerbosityTrace, "IMA" ) - << ( cc::debug( "Will verify IMA broadcast ECDSA signature " ) + - cc::info( strBroadcastSignature ) + cc::debug( " from node ID " ) + - cc::num10( node_id ) + cc::debug( " using ECDSA public key " ) + - cc::info( strEcdsaPublicKey ) + cc::debug( " and message/hash " ) + - cc::info( strHashToSign ) + cc::debug( "..." 
) ); - strNextErrorType = "import node ECDSA public key"; - auto key = OpenSSLECDSAKey::importSGXPubKey( strEcdsaPublicKey ); - strNextErrorType = "encode TX hash"; - bytes v = dev::BMPBN::encode2vec< dev::u256 >( hashToSign, true ); - strNextErrorType = "do ECDSA signature verification"; - try { - key->verifySGXSig( strBroadcastSignature, ( const char* ) v.data() ); - isSignatureOK = true; - } catch ( ... ) { - isSignatureOK = false; - } - clog( VerbosityTrace, "IMA" ) - << ( cc::debug( "IMA broadcast ECDSA signature " ) + cc::info( strBroadcastSignature ) + - cc::debug( " verification from node ID " ) + cc::num10( node_id ) + - cc::debug( " using ECDSA public key " ) + cc::info( strEcdsaPublicKey ) + - cc::debug( " and message/hash " ) + cc::info( strHashToSign ) + - cc::debug( " is " ) + - ( isSignatureOK ? cc::success( "passed" ) : cc::fatal( "failed" ) ) ); - } catch ( const std::exception& ex ) { - isSignatureOK = false; - clog( VerbosityTrace, "IMA" ) - << ( cc::debug( "IMA broadcast ECDSA signature " ) + cc::info( strBroadcastSignature ) + - cc::debug( " verification from node ID " ) + cc::num10( node_id ) + - cc::debug( " using ECDSA public key " ) + cc::info( strEcdsaPublicKey ) + - cc::debug( " and message/hash " ) + cc::info( strHashToSign ) + - cc::debug( " is " ) + cc::fatal( "failed" ) + cc::debug( " during " ) + - cc::warn( strNextErrorType ) + cc::debug( ", exception: " ) + - cc::warn( ex.what() ) ); - } catch ( ... 
) { - isSignatureOK = false; - clog( VerbosityTrace, "IMA" ) - << ( cc::debug( "IMA broadcast ECDSA signature " ) + cc::info( strBroadcastSignature ) + - cc::debug( " verification from node ID " ) + cc::num10( node_id ) + - cc::debug( " using ECDSA public key " ) + cc::info( strEcdsaPublicKey ) + - cc::debug( " and message/hash " ) + cc::info( strHashToSign ) + - cc::debug( " is " ) + cc::fatal( "failed" ) + cc::debug( " during " ) + - cc::warn( strNextErrorType ) + cc::debug( ", unknown exception" ) ); - } - return isSignatureOK; -} - -void txn_pending_tracker_system_impl::broadcast_txn_insert( const txn_entry& txe ) { - std::string strLogPrefix = cc::deep_info( "IMA broadcast TXN insert" ); - dev::u256 tx_hash = txe.hash_; - nlohmann::json jo_tx = txe.toJSON(); - try { - size_t nOwnIndex = std::string::npos; - std::vector< std::string > vecURLs; - if ( !extract_s_chain_URL_infos( nOwnIndex, vecURLs ) ) - throw std::runtime_error( - "failed to extract S-Chain node information from config JSON" ); - nlohmann::json joParams = jo_tx; // copy - std::string strBroadcastSignature = broadcast_txn_sign( "insert", tx_hash ); - int nNodeID = broadcast_txn_get_node_id(); - if ( !strBroadcastSignature.empty() ) { - clog( VerbosityTrace, "IMA" ) - << ( strLogPrefix + " " + cc::debug( "Broadcast/insert signature from node iD " ) + - cc::num10( nNodeID ) + cc::debug( " is " ) + - cc::info( strBroadcastSignature ) ); - joParams["broadcastSignature"] = strBroadcastSignature; - joParams["broadcastFromNode"] = nNodeID; - } else - clog( VerbosityWarning, "IMA" ) - << ( strLogPrefix + " " + cc::warn( "Broadcast/insert signature from node iD " ) + - cc::num10( nNodeID ) + cc::warn( " is " ) + cc::error( "EMPTY" ) ); - clog( VerbosityTrace, "IMA" ) - << ( strLogPrefix + " " + cc::debug( "Will broadcast" ) + " " + - cc::info( "inserted TXN" ) + " " + cc::info( dev::toJS( tx_hash ) ) + - cc::debug( ": " ) + cc::j( joParams ) ); - size_t i, cnt = vecURLs.size(); - for ( i = 0; i < cnt; ++i 
) { - if ( i == nOwnIndex ) - continue; - std::string strURL = vecURLs[i]; - skutils::dispatch::async( g_strDispatchQueueID, [=]() -> void { - nlohmann::json joCall = nlohmann::json::object(); - joCall["jsonrpc"] = "2.0"; - joCall["method"] = "skale_imaBroadcastTxnInsert"; - joCall["params"] = joParams; - skutils::rest::client cli( strURL ); - skutils::rest::data_t d = cli.call( joCall ); - try { - if ( !d.err_s_.empty() ) - throw std::runtime_error( "empty broadcast answer, error is: " + d.err_s_ ); - if ( d.empty() ) - throw std::runtime_error( "empty broadcast answer, EMPTY data received" ); - nlohmann::json joAnswer = - dev::stat_parse_json_with_error_conversion( d.s_, true ); - if ( !joAnswer.is_object() ) - throw std::runtime_error( "malformed non-JSON-object broadcast answer" ); - clog( VerbosityTrace, "IMA" ) - << ( strLogPrefix + " " + cc::debug( "Did broadcast" ) + " " + - cc::info( "inserted TXN" ) + " " + cc::info( dev::toJS( tx_hash ) ) + - cc::debug( " and got answer: " ) + cc::j( joAnswer ) ); - } catch ( const std::exception& ex ) { - clog( VerbosityError, "IMA" ) - << ( strLogPrefix + " " + cc::fatal( "ERROR:" ) + - cc::error( " Transaction " ) + cc::info( dev::toJS( tx_hash ) ) + - cc::error( " to node " ) + cc::u( strURL ) + - cc::error( " broadcast failed: " ) + cc::warn( ex.what() ) ); - } catch ( ... ) { - clog( VerbosityError, "IMA" ) - << ( strLogPrefix + " " + cc::fatal( "ERROR:" ) + - cc::error( " Transaction " ) + cc::info( dev::toJS( tx_hash ) ) + - cc::error( " broadcast to node " ) + cc::u( strURL ) + - cc::error( " failed: " ) + cc::warn( "unknown exception" ) ); - } - } ); - } - } catch ( const std::exception& ex ) { - clog( VerbosityError, "IMA" ) - << ( strLogPrefix + " " + cc::fatal( "ERROR:" ) + cc::error( " Transaction " ) + - cc::info( dev::toJS( tx_hash ) ) + cc::error( " broadcast failed: " ) + - cc::warn( ex.what() ) ); - } catch ( ... 
) { - clog( VerbosityError, "IMA" ) - << ( strLogPrefix + " " + cc::fatal( "ERROR:" ) + cc::error( " Transaction " ) + - cc::info( dev::toJS( tx_hash ) ) + cc::error( " broadcast failed: " ) + - cc::warn( "unknown exception" ) ); - } -} -void txn_pending_tracker_system_impl::broadcast_txn_erase( const txn_entry& txe ) { - std::string strLogPrefix = cc::deep_info( "IMA broadcast TXN erase" ); - dev::u256 tx_hash = txe.hash_; - nlohmann::json jo_tx = txe.toJSON(); - try { - size_t nOwnIndex = std::string::npos; - std::vector< std::string > vecURLs; - if ( !extract_s_chain_URL_infos( nOwnIndex, vecURLs ) ) - throw std::runtime_error( - "failed to extract S-Chain node information from config JSON" ); - nlohmann::json joParams = jo_tx; // copy - std::string strBroadcastSignature = broadcast_txn_sign( "erase", tx_hash ); - int nNodeID = broadcast_txn_get_node_id(); - if ( !strBroadcastSignature.empty() ) { - clog( VerbosityTrace, "IMA" ) - << ( strLogPrefix + " " + cc::debug( "Broadcast/erase signature from node iD " ) + - cc::num10( nNodeID ) + cc::debug( " is " ) + - cc::info( strBroadcastSignature ) ); - joParams["broadcastSignature"] = strBroadcastSignature; - joParams["broadcastFromNode"] = nNodeID; - } else - clog( VerbosityWarning, "IMA" ) - << ( strLogPrefix + " " + cc::warn( "Broadcast/erase signature from node iD " ) + - cc::num10( nNodeID ) + cc::warn( " is " ) + cc::error( "EMPTY" ) ); - clog( VerbosityTrace, "IMA" ) - << ( strLogPrefix + " " + cc::debug( "Will broadcast" ) + " " + - cc::info( "erased TXN" ) + " " + cc::info( dev::toJS( tx_hash ) ) + - cc::debug( ": " ) + cc::j( joParams ) ); - size_t i, cnt = vecURLs.size(); - for ( i = 0; i < cnt; ++i ) { - if ( i == nOwnIndex ) - continue; - std::string strURL = vecURLs[i]; - skutils::dispatch::async( g_strDispatchQueueID, [=]() -> void { - nlohmann::json joCall = nlohmann::json::object(); - joCall["jsonrpc"] = "2.0"; - joCall["method"] = "skale_imaBroadcastTxnErase"; - joCall["params"] = joParams; - 
skutils::rest::client cli( strURL ); - skutils::rest::data_t d = cli.call( joCall ); - try { - if ( !d.err_s_.empty() ) - throw std::runtime_error( "empty broadcast answer, error is: " + d.err_s_ ); - if ( d.empty() ) - throw std::runtime_error( "empty broadcast answer, EMPTY data received" ); - nlohmann::json joAnswer = - dev::stat_parse_json_with_error_conversion( d.s_, true ); - if ( !joAnswer.is_object() ) - throw std::runtime_error( "malformed non-JSON-object broadcast answer" ); - clog( VerbosityTrace, "IMA" ) - << ( strLogPrefix + " " + cc::debug( "Did broadcast" ) + " " + - cc::info( "erased TXN" ) + " " + cc::info( dev::toJS( tx_hash ) ) + - cc::debug( " and got answer: " ) + cc::j( joAnswer ) ); - } catch ( const std::exception& ex ) { - clog( VerbosityError, "IMA" ) - << ( strLogPrefix + " " + cc::fatal( "ERROR:" ) + - cc::error( " Transaction " ) + cc::info( dev::toJS( tx_hash ) ) + - cc::error( " broadcast to node " ) + cc::u( strURL ) + - cc::error( " failed: " ) + cc::warn( ex.what() ) ); - } catch ( ... ) { - clog( VerbosityError, "IMA" ) - << ( strLogPrefix + " " + cc::fatal( "ERROR:" ) + - cc::error( " Transaction " ) + cc::info( dev::toJS( tx_hash ) ) + - cc::error( " to node " ) + cc::u( strURL ) + - cc::error( " broadcast failed: " ) + - cc::warn( "unknown exception" ) ); - } - } ); - } - } catch ( const std::exception& ex ) { - clog( VerbosityError, "IMA" ) - << ( strLogPrefix + " " + cc::fatal( "ERROR:" ) + cc::error( " Transaction " ) + - cc::info( dev::toJS( tx_hash ) ) + cc::error( " broadcast failed: " ) + - cc::warn( ex.what() ) ); - } catch ( ... 
) { - clog( VerbosityError, "IMA" ) - << ( strLogPrefix + " " + cc::fatal( "ERROR:" ) + cc::error( " Transaction " ) + - cc::info( dev::toJS( tx_hash ) ) + cc::error( " broadcast failed: " ) + - cc::warn( "unknown exception" ) ); - } -} - -std::atomic_size_t txn_pending_tracker_system_impl::g_nTrackingIntervalInSeconds = 90; - -size_t txn_pending_tracker_system_impl::tracking_interval_in_seconds() const { - return size_t( g_nTrackingIntervalInSeconds ); -} - -bool txn_pending_tracker_system_impl::is_tracking() const { - return bool( isTracking_ ); -} - -void txn_pending_tracker_system_impl::tracking_auto_start_stop() { - lock_type lock( mtx() ); - if ( list_txns_.size() == 0 ) { - tracking_stop(); - } else { - tracking_start(); - } -} - -void txn_pending_tracker_system_impl::tracking_step() { - try { - list_txns_t lst, lstMined; - list_all( lst ); - for ( const dev::tracking::txn_entry& txe : lst ) { - if ( !check_txn_is_mined( txe ) ) - break; - lstMined.push_back( txe ); - } - for ( const dev::tracking::txn_entry& txe : lstMined ) { - erase( txe.hash_, true ); - } - } catch ( std::exception const& ex ) { - cerror << "txn_pending_tracker_system_impl::tracking_step() exception: " << ex.what() - << "\n"; - } catch ( ... 
) { - cerror << "txn_pending_tracker_system_impl::tracking_step() unknown exception\n"; - } -} - -void txn_pending_tracker_system_impl::tracking_start() { -#if ( defined __IMA_PTX_ENABLE_TRACKING_PARALLEL ) - lock_type lock( mtx() ); - if ( is_tracking() ) - return; - skutils::dispatch::repeat( - g_strDispatchQueueID, [=]() -> void { tracking_step(); }, - skutils::dispatch::duration_from_seconds( tracking_interval_in_seconds() ), - &tracking_job_id_ ); - isTracking_ = true; -#endif // (defined __IMA_PTX_ENABLE_TRACKING_PARALLEL) -} - -void txn_pending_tracker_system_impl::tracking_stop() { -#if ( defined __IMA_PTX_ENABLE_TRACKING_PARALLEL ) - lock_type lock( mtx() ); - if ( !is_tracking() ) - return; - skutils::dispatch::stop( tracking_job_id_ ); - tracking_job_id_.clear(); - isTracking_ = false; -#endif // (defined __IMA_PTX_ENABLE_TRACKING_PARALLEL) -} - -bool txn_pending_tracker_system_impl::check_txn_is_mined( const txn_entry& txe ) { - return check_txn_is_mined( txe.hash_ ); -} - -bool txn_pending_tracker_system_impl::check_txn_is_mined( const dev::u256& hash ) { - try { - skutils::url urlMainNet = getImaMainNetURL(); - // - nlohmann::json jarr = nlohmann::json::array(); - jarr.push_back( dev::toJS( hash ) ); - nlohmann::json joCall = nlohmann::json::object(); - joCall["jsonrpc"] = "2.0"; - joCall["method"] = "eth_getTransactionReceipt"; - joCall["params"] = jarr; - skutils::rest::client cli( urlMainNet ); - skutils::rest::data_t d = cli.call( joCall ); - if ( !d.err_s_.empty() ) - throw std::runtime_error( - "Main Net call to \"eth_getTransactionReceipt\" failed: " + d.err_s_ ); - if ( d.empty() ) - throw std::runtime_error( - "Main Net call to \"eth_getTransactionReceipt\" failed, EMPTY data received" ); - const nlohmann::json joAnswer = dev::stat_parse_json_with_error_conversion( d.s_ ); - dev::stat_check_rpc_call_error_and_throw( joAnswer, "eth_getTransactionReceipt" ); - if ( joAnswer.count( "result" ) == 0 ) - throw std::runtime_error( - "Got 
\"eth_getTransactionReceipt\" bad answer without \"result\" field, answer is " - "\"" + - joAnswer.dump() + "\"" ); - nlohmann::json joReceipt = joAnswer["result"]; - if ( joReceipt.is_object() && joReceipt.count( "transactionHash" ) > 0 && - joReceipt.count( "blockNumber" ) > 0 && joReceipt.count( "gasUsed" ) > 0 ) - return true; - } catch ( std::exception const& ex ) { - cerror << "txn_pending_tracker_system_impl::check_txn_is_mined() exception: " << ex.what() - << "\n"; - } catch ( ... ) { - cerror << "txn_pending_tracker_system_impl::check_txn_is_mined() unknown exception\n"; - } - return false; -} - -////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// -////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// - -std::unique_ptr< txn_pending_tracker_system > txn_pending_tracker_system::g_ptr; - -txn_pending_tracker_system::txn_pending_tracker_system( - const std::string& configPath, const std::string& strSgxWalletURL ) - : txn_pending_tracker_system_impl( configPath, strSgxWalletURL ) {} - -txn_pending_tracker_system::~txn_pending_tracker_system() {} - -txn_pending_tracker_system& txn_pending_tracker_system::init( - const std::string& configPath, const std::string& strSgxWalletURL ) { - if ( !g_ptr ) - g_ptr = std::make_unique< txn_pending_tracker_system >( configPath, strSgxWalletURL ); - return ( *( g_ptr.get() ) ); -} -txn_pending_tracker_system& txn_pending_tracker_system::instance() { - if ( g_ptr ) - return ( *( g_ptr.get() ) ); - throw std::runtime_error( "no global instance for IMA pending TXN tracker initialized yet" ); -} - -}; // namespace tracking - -////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// 
-////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// - -namespace rpc { - -static std::string stat_guess_sgx_url_4_zmq( const std::string& strURL, bool isDisableZMQ ) { - if ( isDisableZMQ ) - return strURL; - if ( strURL.empty() ) - return strURL; - skutils::url u( strURL ); - u.scheme( "zmq" ); - u.port( "1031" ); - return u.str(); -} - -SkaleStats::SkaleStats( const std::string& configPath, eth::Interface& _eth, - const dev::eth::ChainParams& chainParams, bool isDisableZMQ ) - : skutils::json_config_file_accessor( configPath ), chainParams_( chainParams ), m_eth( _eth ) { - nThisNodeIndex_ = findThisNodeIndex(); - // - try { - skutils::url urlMainNet = getImaMainNetURL(); - } catch ( const std::exception& ex ) { - clog( VerbosityInfo, std::string( "IMA disabled: " ) + ex.what() ); - } // catch - dev::tracking::txn_pending_tracker_system::init( - configPath, stat_guess_sgx_url_4_zmq( chainParams.nodeInfo.sgxServerUrl, isDisableZMQ ) ); -} - -int SkaleStats::findThisNodeIndex() { - try { - nlohmann::json joConfig = getConfigJSON(); - if ( joConfig.count( "skaleConfig" ) == 0 ) - throw std::runtime_error( "error in config.json file, cannot find \"skaleConfig\"" ); - const nlohmann::json& joSkaleConfig = joConfig["skaleConfig"]; - // - if ( joSkaleConfig.count( "nodeInfo" ) == 0 ) - throw std::runtime_error( - "error in config.json file, cannot find \"skaleConfig\"/\"nodeInfo\"" ); - const nlohmann::json& joSkaleConfig_nodeInfo = joSkaleConfig["nodeInfo"]; - // - if ( joSkaleConfig.count( "sChain" ) == 0 ) - throw std::runtime_error( - "error in config.json file, cannot find \"skaleConfig\"/\"sChain\"" ); - const nlohmann::json& joSkaleConfig_sChain = joSkaleConfig["sChain"]; - // - if ( joSkaleConfig_sChain.count( "nodes" ) == 0 ) - throw std::runtime_error( - "error in config.json file, cannot find \"skaleConfig\"/\"sChain\"/\"nodes\"" ); - const nlohmann::json& 
joSkaleConfig_sChain_nodes = joSkaleConfig_sChain["nodes"]; - // - int nID = joSkaleConfig_nodeInfo["nodeID"].get< int >(); - const nlohmann::json& jarrNodes = joSkaleConfig_sChain_nodes; - size_t i, cnt = jarrNodes.size(); - for ( i = 0; i < cnt; ++i ) { - const nlohmann::json& joNC = jarrNodes[i]; - try { - int nWalkID = joNC["nodeID"].get< int >(); - if ( nID == nWalkID ) - return joNC["schainIndex"].get< int >(); - } catch ( ... ) { - continue; - } - } - } catch ( ... ) { - } - return -1; -} - -Json::Value SkaleStats::skale_stats() { - try { - nlohmann::json joStats = consumeSkaleStats(); - - // HACK Add stats from SkalePerformanceTracker - // TODO Why we need all this absatract infrastructure? - const dev::eth::Client* c = dynamic_cast< dev::eth::Client* const >( this->client() ); - if ( c ) { - nlohmann::json joTrace; - std::shared_ptr< SkaleHost > h = c->skaleHost(); - - std::istringstream list( h->getDebugHandler()( "trace list" ) ); - std::string key; - while ( list >> key ) { - std::string count_str = h->getDebugHandler()( "trace count " + key ); - joTrace[key] = atoi( count_str.c_str() ); - } // while - - joStats["tracepoints"] = joTrace; - - } // if client - - std::string strStatsJson = joStats.dump(); - Json::Value ret; - Json::Reader().parse( strStatsJson, ret ); - return ret; - } catch ( Exception const& ) { - throw jsonrpc::JsonRpcException( exceptionToErrorMessage() ); - } catch ( const std::exception& ex ) { - throw jsonrpc::JsonRpcException( ex.what() ); - } -} - -Json::Value SkaleStats::skale_nodesRpcInfo() { - try { - nlohmann::json joConfig = getConfigJSON(); - if ( joConfig.count( "skaleConfig" ) == 0 ) - throw std::runtime_error( "error in config.json file, cannot find \"skaleConfig\"" ); - const nlohmann::json& joSkaleConfig = joConfig["skaleConfig"]; - // - if ( joSkaleConfig.count( "nodeInfo" ) == 0 ) - throw std::runtime_error( - "error in config.json file, cannot find \"skaleConfig\"/\"nodeInfo\"" ); - const nlohmann::json& 
joSkaleConfig_nodeInfo = joSkaleConfig["nodeInfo"]; - // - if ( joSkaleConfig.count( "sChain" ) == 0 ) - throw std::runtime_error( - "error in config.json file, cannot find \"skaleConfig\"/\"sChain\"" ); - const nlohmann::json& joSkaleConfig_sChain = joSkaleConfig["sChain"]; - // - if ( joSkaleConfig_sChain.count( "nodes" ) == 0 ) - throw std::runtime_error( - "error in config.json file, cannot find \"skaleConfig\"/\"sChain\"/\"nodes\"" ); - const nlohmann::json& joSkaleConfig_sChain_nodes = joSkaleConfig_sChain["nodes"]; - // - nlohmann::json jo = nlohmann::json::object(); + nlohmann::json jo = nlohmann::json::object(); // nlohmann::json joThisNode = nlohmann::json::object(); joThisNode["thisNodeIndex"] = nThisNodeIndex_; // 1-based "schainIndex" @@ -1696,2034 +800,6 @@ skutils::url SkaleStats::pick_own_s_chain_url() { return u; } -Json::Value SkaleStats::skale_imaVerifyAndSign( const Json::Value& request ) { - std::string strLogPrefix = cc::deep_info( "IMA Verify+Sign" ); - std::string strSgxWalletURL = - dev::tracking::txn_pending_tracker_system::instance().url_sgx_wallet(); - bool bHaveQaInRequest = false; - nlohmann::json joQA = nlohmann::json::object(); - try { - if ( !isEnabledImaMessageSigning() ) - throw std::runtime_error( "IMA message signing feature is disabled on this instance" ); - nlohmann::json joConfig = getConfigJSON(); - Json::FastWriter fastWriter; - const std::string strRequest = fastWriter.write( request ); - const nlohmann::json joRequest = - dev::stat_parse_json_with_error_conversion( strRequest, true ); - strLogPrefix = cc::bright( "Startup" ) + " " + cc::deep_info( "IMA Verify+Sign" ); - clog( VerbosityDebug, "IMA" ) - << ( strLogPrefix + cc::debug( " Processing " ) + cc::notice( "IMA Verify and Sign" ) + - cc::debug( " request: " ) + cc::j( joRequest ) ); - // - if ( joRequest.count( "qa" ) > 0 ) { - bHaveQaInRequest = true; - joQA = joRequest["qa"]; - } - // - if ( joRequest.count( "direction" ) == 0 ) - throw std::runtime_error( 
"missing \"params\"/\"direction\" in call parameters" ); - const nlohmann::json& joDirection = joRequest["direction"]; - if ( !joDirection.is_string() ) - throw std::runtime_error( "bad value type of \"params\"/\"direction\" must be string" ); - const std::string strDirection = skutils::tools::to_upper( - skutils::tools::trim_copy( joDirection.get< std::string >() ) ); - if ( !( strDirection == "M2S" || strDirection == "S2M" || strDirection == "S2S" ) ) - throw std::runtime_error( - "value of \"params\"/\"direction\" must be \"M2S\" or \"S2M\" or \"S2S\"" ); - clog( VerbosityDebug, "IMA" ) - << ( strLogPrefix + cc::debug( " Message direction is " ) + cc::sunny( strDirection ) ); - // from now on strLogPrefix includes strDirection - strLogPrefix = cc::bright( strDirection ) + " " + cc::deep_info( "IMA Verify+Sign" ); - // - // - // Extract needed config.json parameters, ensure they are all present and valid - // - if ( joConfig.count( "skaleConfig" ) == 0 ) - throw std::runtime_error( "error in config.json file, cannot find \"skaleConfig\"" ); - const nlohmann::json& joSkaleConfig = joConfig["skaleConfig"]; - // - if ( joSkaleConfig.count( "nodeInfo" ) == 0 ) - throw std::runtime_error( - "error in config.json file, cannot find \"skaleConfig\"/\"nodeInfo\"" ); - const nlohmann::json& joSkaleConfig_nodeInfo = joSkaleConfig["nodeInfo"]; - // - bool bIsVerifyImaMessagesViaLogsSearch = ( strDirection == "M2S" ) ? 
true : false; - if ( joSkaleConfig_nodeInfo.count( "verifyImaMessagesViaLogsSearch" ) > 0 ) - bIsVerifyImaMessagesViaLogsSearch = - joSkaleConfig_nodeInfo["verifyImaMessagesViaLogsSearch"].get< bool >(); - // - // - if ( joSkaleConfig_nodeInfo.count( "imaMessageProxySChain" ) == 0 ) - throw std::runtime_error( - "error in config.json file, cannot find " - "\"skaleConfig\"/\"nodeInfo\"/\"imaMessageProxySChain\"" ); - const nlohmann::json& joAddressImaMessageProxySChain = - joSkaleConfig_nodeInfo["imaMessageProxySChain"]; - if ( !joAddressImaMessageProxySChain.is_string() ) - throw std::runtime_error( - "error in config.json file, bad type of value in " - "\"skaleConfig\"/\"nodeInfo\"/\"imaMessageProxySChain\"" ); - std::string strAddressImaMessageProxySChain = - joAddressImaMessageProxySChain.get< std::string >(); - if ( strAddressImaMessageProxySChain.empty() ) - throw std::runtime_error( - "error in config.json file, bad EMPTY value in " - "\"skaleConfig\"/\"nodeInfo\"/\"imaMessageProxySChain\"" ); - clog( VerbosityDebug, "IMA" ) - << ( strLogPrefix + cc::debug( " Using " ) + cc::notice( "IMA Message Proxy/S-Chain" ) + - cc::debug( " contract at address " ) + - cc::info( strAddressImaMessageProxySChain ) ); - const std::string strAddressImaMessageProxySChainLC = - skutils::tools::to_lower( strAddressImaMessageProxySChain ); - // - // - if ( joSkaleConfig_nodeInfo.count( "imaMessageProxyMainNet" ) == 0 ) - throw std::runtime_error( - "error in config.json file, cannot find " - "\"skaleConfig\"/\"nodeInfo\"/\"imaMessageProxyMainNet\"" ); - const nlohmann::json& joAddressImaMessageProxyMainNet = - joSkaleConfig_nodeInfo["imaMessageProxyMainNet"]; - if ( !joAddressImaMessageProxyMainNet.is_string() ) - throw std::runtime_error( - "error in config.json file, bad type of value in " - "\"skaleConfig\"/\"nodeInfo\"/\"imaMessageProxyMainNet\"" ); - std::string strAddressImaMessageProxyMainNet = - joAddressImaMessageProxyMainNet.get< std::string >(); - if ( 
strAddressImaMessageProxyMainNet.empty() ) - throw std::runtime_error( - "error in config.json file, bad EMPTY value in " - "\"skaleConfig\"/\"nodeInfo\"/\"imaMessageProxyMainNet\"" ); - clog( VerbosityDebug, "IMA" ) - << ( strLogPrefix + cc::debug( " Using " ) + cc::notice( "IMA Message Proxy/MainNet" ) + - cc::debug( " contract at address " ) + - cc::info( strAddressImaMessageProxyMainNet ) ); - const std::string strAddressImaMessageProxyMainNetLC = - skutils::tools::to_lower( strAddressImaMessageProxyMainNet ); - // - if ( joSkaleConfig.count( "sChain" ) == 0 ) - throw std::runtime_error( - "error in config.json file, cannot find " - "\"skaleConfig\"/\"sChain\"" ); - const nlohmann::json& joSkaleConfig_sChain = joSkaleConfig["sChain"]; - if ( joSkaleConfig_sChain.count( "schainName" ) == 0 ) - throw std::runtime_error( - "error in config.json file, cannot find " - "\"skaleConfig\"/\"sChain\"/\"schainName\"" ); - std::string strSChainName = joSkaleConfig_sChain["schainName"].get< std::string >(); - // - // - std::string strAddressImaMessageProxy = ( strDirection == "M2S" ) ? - strAddressImaMessageProxyMainNet : - strAddressImaMessageProxySChain; - std::string strAddressImaMessageProxyLC = ( strDirection == "M2S" ) ? 
- strAddressImaMessageProxyMainNetLC : - strAddressImaMessageProxySChainLC; - skutils::url urlMainNet = getImaMainNetURL(); - // - // - const nlohmann::json& joFromChainName = joRequest["srcChainName"]; - if ( !joFromChainName.is_string() ) - throw std::runtime_error( - "bad value type of \"params\"/\"srcChainName\" must be string" ); - const std::string strFromChainName = - skutils::tools::trim_copy( joFromChainName.get< std::string >() ); - if ( strFromChainName.empty() ) - throw std::runtime_error( - "bad value of \"params\"/\"srcChainName\" must be non-empty string" ); - - const nlohmann::json& joTargetChainName = joRequest["dstChainName"]; - if ( !joTargetChainName.is_string() ) - throw std::runtime_error( - "bad value type of \"params\"/\"dstChainName\" must be string" ); - const std::string strTargetChainName = - skutils::tools::trim_copy( joTargetChainName.get< std::string >() ); - if ( strTargetChainName.empty() ) - throw std::runtime_error( - "bad value of \"params\"/\"dstChainName\" must be non-empty string" ); - - skutils::url urlSourceChain; - if ( strDirection == "M2S" ) - urlSourceChain = urlMainNet; - else if ( strDirection == "S2M" ) { - // urlSourceChain = skale::network::browser::refreshing_pick_s_chain_url( strSChainName - // ); - urlSourceChain = pick_own_s_chain_url(); - } else if ( strDirection == "S2S" ) - urlSourceChain = - skale::network::browser::refreshing_pick_s_chain_url( strFromChainName ); - else - throw std::runtime_error( "unknown direction \"" + strDirection + "\"" ); - - // - if ( joSkaleConfig_nodeInfo.count( "wallets" ) == 0 ) - throw std::runtime_error( - "error in config.json file, cannot find " - "\"skaleConfig\"/\"nodeInfo\"/\"wallets\"" ); - const nlohmann::json& joSkaleConfig_nodeInfo_wallets = joSkaleConfig_nodeInfo["wallets"]; - // - if ( joSkaleConfig_nodeInfo_wallets.count( "ima" ) == 0 ) - throw std::runtime_error( - "error in config.json file, cannot find " - "\"skaleConfig\"/\"nodeInfo\"/\"wallets\"/\"ima\"" ); - 
const nlohmann::json& joSkaleConfig_nodeInfo_wallets_ima = - joSkaleConfig_nodeInfo_wallets["ima"]; - // - // Extract needed request arguments, ensure they are all present and valid - // - bool bOnlyVerify = false; - if ( joRequest.count( "onlyVerify" ) > 0 ) - bOnlyVerify = joRequest["onlyVerify"].get< bool >(); - if ( joRequest.count( "startMessageIdx" ) == 0 ) - throw std::runtime_error( - "missing \"messages\"/\"startMessageIdx\" in call parameters" ); - const nlohmann::json& joStartMessageIdx = joRequest["startMessageIdx"]; - if ( !joStartMessageIdx.is_number_unsigned() ) - throw std::runtime_error( - "bad value type of \"messages\"/\"startMessageIdx\" must be unsigned number" ); - const size_t nStartMessageIdx = joStartMessageIdx.get< size_t >(); - clog( VerbosityDebug, "IMA" ) - << ( strLogPrefix + " " + cc::notice( "Start message index" ) + cc::debug( " is " ) + - cc::size10( nStartMessageIdx ) ); - // - if ( joRequest.count( "srcChainName" ) == 0 ) - throw std::runtime_error( "missing \"messages\"/\"srcChainName\" in call parameters" ); - const nlohmann::json& joSrcChainName = joRequest["srcChainName"]; - if ( !joSrcChainName.is_string() ) - throw std::runtime_error( - "bad value type of \"messages\"/\"srcChainName\" must be string" ); - const std::string strSrcChainName = joSrcChainName.get< std::string >(); - if ( strSrcChainName.empty() ) - throw std::runtime_error( - "value of \"messages\"/\"srcChainName\" must be non-EMPTY string" ); - clog( VerbosityDebug, "IMA" ) << ( strLogPrefix + " " + cc::notice( "Source Chain Name" ) + - cc::debug( " is " ) + cc::info( strSrcChainName ) ); - // - if ( joRequest.count( "dstChainName" ) == 0 ) - throw std::runtime_error( "missing \"messages\"/\"dstChainName\" in call parameters" ); - const nlohmann::json& joDstChainName = joRequest["dstChainName"]; - if ( !joDstChainName.is_string() ) - throw std::runtime_error( - "bad value type of \"messages\"/\"dstChainName\" must be string" ); - const std::string 
strDstChainName = joDstChainName.get< std::string >(); - if ( strDstChainName.empty() ) - throw std::runtime_error( - "value of \"messages\"/\"dstChainName\" must be non-EMPTY string" ); - clog( VerbosityDebug, "IMA" ) - << ( strLogPrefix + " " + cc::notice( "Destination Chain Name" ) + cc::debug( " is " ) + - cc::info( strDstChainName ) ); - // - std::string strDstChainName_hex_32; - size_t tmp = 0; - for ( const char& c : strDstChainName ) { - strDstChainName_hex_32 += skutils::tools::format( "%02x", int( c ) ); - ++tmp; - if ( tmp == 32 ) - break; - } - while ( tmp < 32 ) { - strDstChainName_hex_32 += "00"; - ++tmp; - } - dev::u256 uDestinationChainID_32_max( "0x" + strDstChainName_hex_32 ); - // - if ( joRequest.count( "messages" ) == 0 ) - throw std::runtime_error( "missing \"messages\" in call parameters" ); - const nlohmann::json& jarrMessags = joRequest["messages"]; - if ( !jarrMessags.is_array() ) - throw std::runtime_error( "parameter \"messages\" must be array" ); - const size_t cntMessagesToSign = jarrMessags.size(); - if ( cntMessagesToSign == 0 ) - throw std::runtime_error( - "parameter \"messages\" is EMPTY array, nothing to verify and sign" ); - clog( VerbosityDebug, "IMA" ) - << ( strLogPrefix + cc::debug( " Composing summary message to sign from " ) + - cc::size10( cntMessagesToSign ) + - cc::debug( " message(s), IMA index of first message is " ) + - cc::size10( nStartMessageIdx ) + cc::debug( ", src chain name is " ) + - cc::info( strSrcChainName ) + cc::debug( ", dst chain name is " ) + - cc::info( strDstChainName ) + cc::debug( "(" ) + - cc::info( dev::toJS( uDestinationChainID_32_max ) ) + cc::debug( ")..." 
) ); - // - // - // Perform basic validation of arrived messages we will sign - // - for ( size_t idxMessage = 0; idxMessage < cntMessagesToSign; ++idxMessage ) { - const nlohmann::json& joMessageToSign = jarrMessags[idxMessage]; - if ( !joMessageToSign.is_object() ) - throw std::runtime_error( - "parameter \"messages\" must be array containing message objects" ); - // each message in array looks like - // { - // "amount": joValues.amount, - // "data": joValues.data, - // "destinationContract": joValues.dstContract, - // "sender": joValues.srcContract, - // "to": joValues.to - // } - // if ( joMessageToSign.count( "amount" ) == 0 ) - // throw std::runtime_error( - // "parameter \"messages\" contains message object without field \"amount\"" ); - if ( joMessageToSign.count( "data" ) == 0 || ( !joMessageToSign["data"].is_string() ) || - joMessageToSign["data"].get< std::string >().empty() ) - throw std::runtime_error( - "parameter \"messages\" contains message object without field \"data\"" ); - if ( joMessageToSign.count( "destinationContract" ) == 0 || - ( !joMessageToSign["destinationContract"].is_string() ) || - joMessageToSign["destinationContract"].get< std::string >().empty() ) - throw std::runtime_error( - "parameter \"messages\" contains message object without field " - "\"destinationContract\"" ); - if ( joMessageToSign.count( "sender" ) == 0 || - ( !joMessageToSign["sender"].is_string() ) || - joMessageToSign["sender"].get< std::string >().empty() ) - throw std::runtime_error( - "parameter \"messages\" contains message object without field \"sender\"" ); - const std::string strData = joMessageToSign["data"].get< std::string >(); - if ( strData.empty() ) - throw std::runtime_error( - "parameter \"messages\" contains message object with EMPTY field " - "\"data\"" ); - } - // - // Check wallet URL and keyShareName for future use, - // fetch SSL options for SGX - // - skutils::url u; - skutils::http::SSL_client_options optsSSL; - const std::string strWalletURL 
= strSgxWalletURL; - u = skutils::url( strWalletURL ); - if ( u.scheme().empty() || u.host().empty() ) - throw std::runtime_error( "bad SGX wallet url" ); - // - // - try { - if ( joSkaleConfig_nodeInfo_wallets_ima.count( "caFile" ) > 0 ) - optsSSL.ca_file = skutils::tools::trim_copy( - joSkaleConfig_nodeInfo_wallets_ima["caFile"].get< std::string >() ); - } catch ( ... ) { - optsSSL.ca_file.clear(); - clog( VerbosityDebug, "IMA" ) - << ( strLogPrefix + cc::fatal( "CRICICAL ERROR:" ) + " " + - cc::error( "SGX Wallet CA file path was not loaded from settings" ) ); - } - if ( isExposeAllDebugInfo_ ) - clog( VerbosityDebug, "IMA" ) << ( strLogPrefix + cc::debug( " SGX Wallet CA file " ) + - cc::info( optsSSL.ca_file ) ); - try { - if ( joSkaleConfig_nodeInfo_wallets_ima.count( "certFile" ) > 0 ) - optsSSL.client_cert = skutils::tools::trim_copy( - joSkaleConfig_nodeInfo_wallets_ima["certFile"].get< std::string >() ); - } catch ( ... ) { - optsSSL.client_cert.clear(); - clog( VerbosityDebug, "IMA" ) << ( strLogPrefix + cc::fatal( "CRICICAL ERROR:" ) + " " + - cc::error( "SGX Wallet client certificate file path " - "was not loaded from settings" ) ); - } - if ( isExposeAllDebugInfo_ ) - clog( VerbosityDebug, "IMA" ) - << ( strLogPrefix + cc::debug( " SGX Wallet client certificate file " ) + - cc::info( optsSSL.client_cert ) ); - try { - if ( joSkaleConfig_nodeInfo_wallets_ima.count( "keyFile" ) > 0 ) - optsSSL.client_key = skutils::tools::trim_copy( - joSkaleConfig_nodeInfo_wallets_ima["keyFile"].get< std::string >() ); - } catch ( ... 
) { - optsSSL.client_key.clear(); - clog( VerbosityDebug, "IMA" ) - << ( strLogPrefix + cc::fatal( "CRICICAL ERROR:" ) + " " + - cc::error( - "SGX Wallet client key file path was not loaded from settings" ) ); - } - if ( isExposeAllDebugInfo_ ) - clog( VerbosityDebug, "IMA" ) - << ( strLogPrefix + cc::debug( " SGX Wallet client key file " ) + - cc::info( optsSSL.client_key ) ); - const std::string keyShareName = - ( joSkaleConfig_nodeInfo_wallets_ima.count( "keyShareName" ) > 0 ) ? - joSkaleConfig_nodeInfo_wallets_ima["keyShareName"].get< std::string >() : - ""; - if ( keyShareName.empty() ) - throw std::runtime_error( - "error in config.json file, cannot find valid value for " - "\"skaleConfig\"/\"nodeInfo\"/\"wallets\"/\"keyShareName\" parameter" ); - // - // - // Walk through all messages, parse and validate data of each message, then verify each - // message present in contract events - // - dev::bytes vecComputeMessagesHash; - // - // append sha3 of source chain name into vecComputeMessagesHash - std::string sxx_FromChainName = dev::sha3( strFromChainName ).hex(); - std::string sh = stat_ensure_have_0x_at_start( sxx_FromChainName ); - clog( VerbosityDebug, "IMA" ) - << ( strLogPrefix + cc::debug( " Appending hash " ) + cc::info( sh ) + - cc::debug( " of source chain name " ) + cc::attention( strFromChainName ) + - cc::debug( " into bytes to BLS-sign" ) ); - stat_append_hash_str_2_vec( vecComputeMessagesHash, sh ); - // append first message nonce into vecComputeMessagesHash - dev::u256 uStartMessageIdx( nStartMessageIdx ); - clog( VerbosityDebug, "IMA" ) - << ( strLogPrefix + cc::debug( " Appending start message index " ) + - cc::size10( nStartMessageIdx ) + cc::debug( "/" ) + - cc::notice( dev::toJS( uStartMessageIdx ) ) + - cc::debug( " into bytes to BLS-sign" ) ); - stat_append_u256_2_vec( vecComputeMessagesHash, uStartMessageIdx ); - // re-compute hash of vecComputeMessagesHash - clog( VerbosityDebug, "IMA" ) - << ( strLogPrefix + cc::debug( " Accumulated 
vector " ) + - cc::binary_singleline( ( void* ) vecComputeMessagesHash.data(), - vecComputeMessagesHash.size(), "" ) ); - stat_re_compute_vec_2_h256vec( vecComputeMessagesHash ); - clog( VerbosityDebug, "IMA" ) - << ( strLogPrefix + cc::debug( " Computed hash from vector " ) + - cc::binary_singleline( ( void* ) vecComputeMessagesHash.data(), - vecComputeMessagesHash.size(), "" ) ); - // - for ( size_t idxMessage = 0; idxMessage < cntMessagesToSign; ++idxMessage ) { - const nlohmann::json& joMessageToSign = jarrMessags[idxMessage]; - const std::string strMessageSender = - skutils::tools::trim_copy( joMessageToSign["sender"].get< std::string >() ); - const std::string strMessageSenderLC = - skutils::tools::to_lower( skutils::tools::trim_copy( strMessageSender ) ); - const dev::u256 uMessageSender( strMessageSenderLC ); - const std::string strMessageData = joMessageToSign["data"].get< std::string >(); - const std::string strMessageData_linear_LC = skutils::tools::to_lower( - skutils::tools::trim_copy( skutils::tools::replace_all_copy( - strMessageData, std::string( "0x" ), std::string( "" ) ) ) ); - const std::string strDestinationContract = skutils::tools::trim_copy( - joMessageToSign["destinationContract"].get< std::string >() ); - const dev::u256 uDestinationContract( strDestinationContract ); - const bytes vecBytes = dev::jsToBytes( strMessageData, dev::OnFailed::Throw ); - const size_t cntMessageBytes = vecBytes.size(); - const size_t cntPortions32 = cntMessageBytes / 32; - const size_t cntRestPart = cntMessageBytes % 32; - size_t nMessageTypeCode = size_t( std::string::npos ); - if ( cntMessageBytes > 32 ) { - dev::u256 messageTypeCode = - BMPBN::decode_inv< dev::u256 >( vecBytes.data() + 0, 32 ); - nMessageTypeCode = messageTypeCode.convert_to< size_t >(); - } - clog( VerbosityDebug, "IMA" ) - << ( strLogPrefix + cc::debug( " Walking through IMA message " ) + - cc::size10( idxMessage ) + cc::debug( " of " ) + - cc::size10( cntMessagesToSign ) + cc::debug( " 
with size " ) + - cc::size10( cntMessageBytes ) + cc::debug( ", " ) + - cc::size10( cntPortions32 ) + cc::debug( " of 32-byte portions and " ) + - cc::size10( cntRestPart ) + - cc::debug( " bytes rest part, message typecode is " ) + - cc::num10( nMessageTypeCode ) - // + cc::debug( ", message binary data is:\n" ) + - // cc::binary_table( ( void* ) vecBytes.data(), vecBytes.size(), 32 ) - ); - // - // - // event OutgoingMessage( - // bytes32 indexed dstChainHash, - // uint256 indexed msgCounter, - // address indexed srcContract, - // address dstContract, - // bytes data - // ); - static const std::string strSignature_event_OutgoingMessage( - "OutgoingMessage(bytes32,uint256,address,address,bytes)" ); - static const std::string strTopic_event_OutgoingMessage = - dev::toJS( dev::sha3( strSignature_event_OutgoingMessage ) ); - static const dev::u256 uTopic_event_OutgoingMessage( strTopic_event_OutgoingMessage ); - // - const std::string strTopic_dstChainHash = dev::toJS( dev::sha3( strDstChainName ) ); - const dev::u256 uTopic_dstChainHash( strTopic_dstChainHash ); - static const size_t nPaddoingZeroesForUint256 = 64; - const std::string strTopic_msgCounter = - skutils::tools::to_lower( dev::BMPBN::toHexStringWithPadding< dev::u256 >( - dev::u256( nStartMessageIdx + idxMessage ), nPaddoingZeroesForUint256 ) ); - const dev::u256 uTopic_msgCounter( strTopic_msgCounter ); - nlohmann::json jarrTopic_dstChainHash = nlohmann::json::array(); - nlohmann::json jarrTopic_msgCounter = nlohmann::json::array(); - jarrTopic_dstChainHash.push_back( strTopic_dstChainHash ); - jarrTopic_msgCounter.push_back( strTopic_msgCounter ); - if ( bIsVerifyImaMessagesViaLogsSearch ) { - clog( VerbosityDebug, "IMA" ) - << ( strLogPrefix + " " + - cc::debug( "Will use contract event based verification of IMA " - "message(s)" ) ); - std::function< dev::u256() > do_getBlockNumber = [&]() -> dev::u256 { - if ( strDirection == "M2S" || strDirection == "S2S" ) { - try { - nlohmann::json joCall = 
nlohmann::json::object(); - joCall["jsonrpc"] = "2.0"; - joCall["method"] = "eth_blockNumber"; - joCall["params"] = nlohmann::json::array(); - skutils::rest::client cli( urlSourceChain ); - skutils::rest::data_t d = cli.call( joCall ); - if ( !d.err_s_.empty() ) - throw std::runtime_error( - "Main Net call to \"eth_blockNumber\" failed: " + d.err_s_ ); - if ( d.empty() ) - throw std::runtime_error( - "Main Net call to \"eth_blockNumber\" failed, EMPTY data " - "received" ); - const nlohmann::json joAnswer = - dev::stat_parse_json_with_error_conversion( d.s_ ); - dev::stat_check_rpc_call_error_and_throw( joAnswer, "eth_blockNumber" ); - if ( joAnswer.count( "result" ) == 0 ) - throw std::runtime_error( - "Got \"eth_blockNumber\" bad answer without \"result\" field, " - "answer is \"" + - joAnswer.dump() + "\"" ); - nlohmann::json joMainNetBlockNumber = joAnswer["result"]; - if ( joMainNetBlockNumber.is_string() ) { - dev::u256 uBN = - dev::u256( joMainNetBlockNumber.get< std::string >() ); - return uBN; - } else if ( joMainNetBlockNumber.is_number() ) { - dev::u256 uBN = dev::u256( joMainNetBlockNumber.get< uint64_t >() ); - return uBN; - } - throw std::runtime_error( - "Main Net call to eth_blockNumber failed, bad data returned: " + - joMainNetBlockNumber.dump() ); - } catch ( ... 
) { - } - } // if ( strDirection == "M2S" || strDirection == "S2S" ) - else { - dev::u256 uBN = this->client()->number(); - return uBN; - } // else from if ( strDirection == "M2S" || strDirection == "S2S" ) - dev::u256 uBN = dev::u256( "0" ); - return uBN; - }; /// do_getBlockNumber - std::function< nlohmann::json( dev::u256, dev::u256 ) > do_logs_search = - [&]( dev::u256 uBlockFrom, dev::u256 uBlockTo ) -> nlohmann::json { - // - // - // - // Forming eth_getLogs query similar to web3's getPastEvents, see details here: - // https://solidity.readthedocs.io/en/v0.4.24/abi-spec.html - // Here is example - // { - // "address": "0x4c6ad417e3bf7f3d623bab87f29e119ef0f28059", - // "fromBlock": "0x0", - // "toBlock": "latest", - // "topics": - // ["0xa701ebe76260cb49bb2dc03cf8cf6dacbc4c59a5d615c4db34a7dfdf36e6b6dc", - // ["0x8d646f556e5d9d6f1edcf7a39b77f5ac253776eb34efcfd688aacbee518efc26"], - // ["0x0000000000000000000000000000000000000000000000000000000000000010"], - // null - // ] - // } - // - nlohmann::json jarrTopics = nlohmann::json::array(); - jarrTopics.push_back( strTopic_event_OutgoingMessage ); - jarrTopics.push_back( jarrTopic_dstChainHash ); - jarrTopics.push_back( jarrTopic_msgCounter ); - // jarrTopics.push_back( nullptr ); - nlohmann::json joLogsQuery = nlohmann::json::object(); - joLogsQuery["address"] = strAddressImaMessageProxy; - joLogsQuery["fromBlock"] = dev::toJS( uBlockFrom ); - joLogsQuery["toBlock"] = dev::toJS( uBlockTo ); - joLogsQuery["topics"] = jarrTopics; - nlohmann::json jarrLogsQuery = nlohmann::json::array(); - jarrLogsQuery.push_back( joLogsQuery ); - clog( VerbosityDebug, "IMA" ) - << ( strLogPrefix + cc::debug( " Will search " ) + - cc::info( ( strDirection == "M2S" ) ? 
"Main NET" : "S-Chain" ) + - cc::debug( " logs from block " ) + - cc::info( dev::toJS( uBlockFrom ) ) + cc::debug( " to block " ) + - cc::info( dev::toJS( uBlockTo ) ) + - cc::debug( " by executing logs search query: " ) + - cc::j( joLogsQuery ) ); - // - // - nlohmann::json jarrFoundLogRecords; - if ( strDirection == "M2S" || strDirection == "S2S" ) { - nlohmann::json joCall = nlohmann::json::object(); - joCall["jsonrpc"] = "2.0"; - joCall["method"] = "eth_getLogs"; - joCall["params"] = jarrLogsQuery; - skutils::rest::client cli( urlSourceChain ); - skutils::rest::data_t d = cli.call( joCall ); - if ( !d.err_s_.empty() ) - throw std::runtime_error( - "Main Net call to \"eth_getLogs\" failed: " + d.err_s_ ); - if ( d.empty() ) - throw std::runtime_error( - "Main Net call to \"eth_getLogs\" failed, EMPTY data received" ); - const nlohmann::json joAnswer = - dev::stat_parse_json_with_error_conversion( d.s_ ); - dev::stat_check_rpc_call_error_and_throw( joAnswer, "eth_getLogs" ); - if ( joAnswer.count( "result" ) == 0 ) - throw std::runtime_error( - "Got \"eth_getLogs\" bad answer without \"result\" field, answer " - "is \"" + - joAnswer.dump() + "\"" ); - jarrFoundLogRecords = joAnswer["result"]; - } // if ( strDirection == "M2S" || strDirection == "S2S" ) - else { - Json::Value jvLogsQuery; - Json::Reader().parse( joLogsQuery.dump(), jvLogsQuery ); - Json::Value jvLogs = dev::toJson( - this->client()->logs( dev::eth::toLogFilter( jvLogsQuery ) ) ); - jarrFoundLogRecords = dev::stat_parse_json_with_error_conversion( - Json::FastWriter().write( jvLogs ), true ); - } // else from if ( strDirection == "M2S" || strDirection == "S2S" ) - clog( VerbosityDebug, "IMA" ) - << ( strLogPrefix + cc::debug( " Got " ) + - cc::info( ( strDirection == "M2S" ) ? 
"Main NET" : "S-Chain" ) + - cc::debug( " (" ) + cc::sunny( strDirection ) + - cc::debug( ") logs search from block " ) + - cc::info( dev::toJS( uBlockFrom ) ) + cc::debug( " to block " ) + - cc::info( dev::toJS( uBlockTo ) ) + cc::debug( " results: " ) + - cc::j( jarrFoundLogRecords ) ); - return jarrFoundLogRecords; - }; /// do_logs_search - static const int64_t g_nCountOfBlocksInIterativeStep = 1000; - static const int64_t g_nMaxBlockScanIterationsInAllRange = 5000; - std::function< nlohmann::json( dev::u256 ) > do_logs_search_iterative = - [&]( dev::u256 uBlockFrom ) -> nlohmann::json { - dev::u256 nLatestBlockNumber = do_getBlockNumber(); - dev::u256 uBlockTo = nLatestBlockNumber; - if ( g_nCountOfBlocksInIterativeStep <= 0 || - g_nMaxBlockScanIterationsInAllRange <= 0 ) { - clog( VerbosityDebug, "IMA" ) - << ( cc::fatal( "IMPORTANT NOTICE:" ) + " " + cc::warn( "Will skip " ) + - cc::attention( "iterative" ) + - cc::warn( " events scan in block range from " ) + - cc::info( dev::toJS( uBlockFrom ) ) + cc::warn( " to " ) + - cc::info( dev::toJS( uBlockTo ) ) + - cc::warn( " because it's " ) + cc::error( "DISABLED" ) ); - return do_logs_search( uBlockFrom, uBlockTo ); - } - if ( ( nLatestBlockNumber / g_nCountOfBlocksInIterativeStep ) > - g_nMaxBlockScanIterationsInAllRange ) { - clog( VerbosityDebug, "IMA" ) - << ( cc::fatal( "IMPORTANT NOTICE:" ) + " " + cc::warn( "Will skip " ) + - cc::attention( "iterative" ) + - cc::warn( " scan and use scan in block range from " ) + - cc::info( dev::toJS( uBlockFrom ) ) + cc::warn( " to " ) + - cc::info( dev::toJS( uBlockTo ) ) ); - return do_logs_search( uBlockFrom, uBlockTo ); - } - clog( VerbosityDebug, "IMA" ) - << ( cc::debug( "Iterative scan in " ) + - cc::info( dev::toJS( uBlockFrom ) ) + cc::debug( "/" ) + - cc::info( dev::toJS( uBlockTo ) ) + cc::debug( " block range..." 
) ); - clog( VerbosityDebug, "IMA" ) - << ( cc::debug( "Iterative scan up to latest block " ) + - cc::attention( "#" ) + cc::info( dev::toJS( uBlockTo ) ) + - cc::debug( " assumed instead of " ) + cc::attention( "latest" ) ); - dev::u256 idxBlockSubRangeFrom = uBlockFrom; - for ( ; true; ) { - dev::u256 idxBlockSubRangeTo = - idxBlockSubRangeFrom + g_nCountOfBlocksInIterativeStep; - if ( idxBlockSubRangeTo > uBlockTo ) - idxBlockSubRangeTo = uBlockTo; - try { - clog( VerbosityDebug, "IMA" ) - << ( cc::debug( "Iterative scan of " ) + - cc::info( dev::toJS( idxBlockSubRangeFrom ) ) + - cc::debug( "/" ) + - cc::info( dev::toJS( idxBlockSubRangeTo ) ) + - cc::debug( " block sub-range in " ) + - cc::info( dev::toJS( uBlockFrom ) ) + cc::debug( "/" ) + - cc::info( dev::toJS( uBlockTo ) ) + - cc::debug( " block range..." ) ); - nlohmann::json joAllEventsInBlock = - do_logs_search( idxBlockSubRangeFrom, idxBlockSubRangeTo ); - if ( joAllEventsInBlock.is_array() && joAllEventsInBlock.size() > 0 ) { - clog( VerbosityDebug, "IMA" ) - << ( cc::success( "Result of " ) + - cc::attention( "iterative" ) + - cc::success( " scan in " ) + - cc::info( dev::toJS( uBlockFrom ) ) + - cc::success( "/" ) + cc::info( dev::toJS( uBlockTo ) ) + - cc::success( " block range is: " ) + - cc::j( joAllEventsInBlock ) ); - return joAllEventsInBlock; - } - } catch ( const std::exception& ex ) { - clog( VerbosityError, "IMA" ) - << ( strLogPrefix + " " + cc::fatal( "FAILED:" ) + " " + - cc::error( "Iterative scan of " ) + - cc::info( dev::toJS( idxBlockSubRangeFrom ) ) + - cc::error( "/" ) + - cc::info( dev::toJS( idxBlockSubRangeTo ) ) + - cc::error( " block sub-range in " ) + - cc::info( dev::toJS( uBlockFrom ) ) + cc::error( "/" ) + - cc::info( dev::toJS( uBlockTo ) ) + - cc::error( " block range, error:" ) + " " + - cc::warn( ex.what() ) ); - } catch ( ... 
) { - clog( VerbosityError, "IMA" ) - << ( strLogPrefix + " " + cc::fatal( "FAILED:" ) + " " + - cc::error( "Iterative scan of " ) + - cc::info( dev::toJS( idxBlockSubRangeFrom ) ) + - cc::error( "/" ) + - cc::info( dev::toJS( idxBlockSubRangeTo ) ) + - cc::error( " block sub-range in " ) + - cc::info( dev::toJS( uBlockFrom ) ) + cc::error( "/" ) + - cc::info( dev::toJS( uBlockTo ) ) + - cc::error( " block range, error:" ) + " " + - cc::warn( "unknown exception" ) ); - } - idxBlockSubRangeFrom = idxBlockSubRangeTo; - if ( idxBlockSubRangeFrom == uBlockTo ) - break; - } - clog( VerbosityDebug, "IMA" ) - << ( cc::debug( "Result of " ) + cc::attention( "iterative" ) + - cc::debug( " scan in " ) + cc::info( dev::toJS( uBlockFrom ) ) + - cc::debug( "/" ) + cc::info( dev::toJS( uBlockTo ) ) + - cc::debug( " block range is " ) + cc::warn( "EMPTY" ) ); - nlohmann::json jarrFoundLogRecords = nlohmann::json::array(); - return jarrFoundLogRecords; - }; /// do_logs_search_iterative - typedef std::list< dev::u256 > plan_list_t; - std::function< plan_list_t( dev::u256 ) > create_progressive_events_scan_plan = - []( dev::u256 nLatestBlockNumber ) -> plan_list_t { - // assume Main Net mines 60 txns per second for one account - // approximately 10x larger then real - const dev::u256 txns_in_1_minute( 60 ); - const dev::u256 txns_in_1_hour( txns_in_1_minute * 60 ); - const dev::u256 txns_in_1_day( txns_in_1_hour * 24 ); - const dev::u256 txns_in_1_week( txns_in_1_day * 7 ); - const dev::u256 txns_in_1_month( txns_in_1_day * 31 ); - const dev::u256 txns_in_1_year( txns_in_1_day * 366 ); - plan_list_t a_plan; - if ( nLatestBlockNumber > txns_in_1_day ) - a_plan.push_back( nLatestBlockNumber - txns_in_1_day ); - if ( nLatestBlockNumber > txns_in_1_week ) - a_plan.push_back( nLatestBlockNumber - txns_in_1_week ); - if ( nLatestBlockNumber > txns_in_1_month ) - a_plan.push_back( nLatestBlockNumber - txns_in_1_month ); - if ( nLatestBlockNumber > txns_in_1_year ) - a_plan.push_back( 
nLatestBlockNumber - txns_in_1_year ); - a_plan.push_back( dev::u256( 0 ) ); - return a_plan; - }; /// create_progressive_events_scan_plan() - std::function< nlohmann::json( dev::u256 ) > do_logs_search_progressive = - [&]( dev::u256 uLatestBlockNumber ) -> nlohmann::json { - clog( VerbosityDebug, "IMA" ) - << ( strLogPrefix + cc::debug( " Will progressive search " ) + - cc::info( ( strDirection == "M2S" ) ? "Main NET" : "S-Chain" ) + - cc::debug( " (" ) + cc::sunny( strDirection ) + - cc::debug( ") logs..." ) ); - const plan_list_t a_plan = - create_progressive_events_scan_plan( uLatestBlockNumber ); - plan_list_t::const_iterator itPlanWalk = a_plan.cbegin(), itPlanEnd = - a_plan.cend(); - for ( ; itPlanWalk != itPlanEnd; ++itPlanWalk ) { - dev::u256 uBlockFrom = ( *itPlanWalk ); - try { - nlohmann::json jarrFoundLogRecords = - do_logs_search_iterative( uBlockFrom ); - if ( jarrFoundLogRecords.is_array() && - jarrFoundLogRecords.size() > 0 ) { - clog( VerbosityWarning, "IMA" ) - << ( strLogPrefix + cc::success( " Progressive " ) + - cc::info( ( strDirection == "M2S" ) ? "Main NET" : - "S-Chain" ) + - cc::success( " logs search from block " ) + - cc::info( dev::toJS( uBlockFrom ) ) + - cc::success( " to block " ) + cc::info( "latest" ) + - cc::success( " finished with " ) + - cc::j( jarrFoundLogRecords ) ); - return jarrFoundLogRecords; - } - clog( VerbosityWarning, "IMA" ) - << ( strLogPrefix + cc::warn( " Progressive " ) + - cc::info( - ( strDirection == "M2S" ) ? "Main NET" : "S-Chain" ) + - cc::warn( " logs search finished with " ) + - cc::error( "EMPTY" ) + cc::warn( " result: " ) + - cc::j( jarrFoundLogRecords ) ); - } catch ( const std::exception& ex ) { - clog( VerbosityWarning, "IMA" ) - << ( strLogPrefix + cc::warn( " Progressive " ) + - cc::info( - ( strDirection == "M2S" ) ? 
"Main NET" : "S-Chain" ) + - cc::warn( " logs search from block " ) + - cc::info( dev::toJS( uBlockFrom ) ) + - cc::warn( " to block " ) + cc::info( "latest" ) + - cc::warn( " error: " ) + cc::error( ex.what() ) ); - continue; - } catch ( ... ) { - clog( VerbosityWarning, "IMA" ) - << ( strLogPrefix + cc::warn( " Progressive " ) + - cc::info( - ( strDirection == "M2S" ) ? "Main NET" : "S-Chain" ) + - cc::warn( " logs search from block " ) + - cc::info( dev::toJS( uBlockFrom ) ) + - cc::warn( " to block " ) + cc::info( "latest" ) + - cc::warn( " error: " ) + cc::error( "unknown error" ) ); - continue; - } - } // for ( ; itPlanWalk != itPlanEnd; ++itPlanWalk ) - nlohmann::json jarrFoundLogRecords = nlohmann::json::array(); - return jarrFoundLogRecords; - }; /// do_logs_search_progressive() - - bool isDefaultProgressiveLogsSearchNeeded = true, haveEffectiveBlockNo = false; - dev::u256 uBlockOptimized( 0 ); - nlohmann::json jarrFoundLogRecords = nlohmann::json::array(); - if ( joMessageToSign.count( "savedBlockNumberForOptimizations" ) > 0 ) { - if ( joMessageToSign["savedBlockNumberForOptimizations"].is_string() ) { - uBlockOptimized = - dev::u256( joMessageToSign["savedBlockNumberForOptimizations"] - .get< std::string >() ); - haveEffectiveBlockNo = true; - } else if ( joMessageToSign["savedBlockNumberForOptimizations"].is_number() ) { - uBlockOptimized = dev::u256( - joMessageToSign["savedBlockNumberForOptimizations"].get< uint64_t >() ); - haveEffectiveBlockNo = true; - } - } - if ( haveEffectiveBlockNo ) { - jarrFoundLogRecords = do_logs_search( uBlockOptimized, uBlockOptimized ); - if ( jarrFoundLogRecords.is_array() && jarrFoundLogRecords.size() > 0 ) { - isDefaultProgressiveLogsSearchNeeded = false; - clog( VerbosityDebug, "IMA" ) - << ( cc::success( "Result of " ) + - cc::attention( "successful effective scan" ) + - cc::success( " in block " ) + - cc::info( dev::toJS( uBlockOptimized ) ) + - cc::success( " is: " ) + cc::j( jarrFoundLogRecords ) ); - } else { - 
isDefaultProgressiveLogsSearchNeeded = true; - clog( VerbosityDebug, "IMA" ) - << ( cc::warn( "Nothing was found using " ) + - cc::attention( "effective scan" ) + cc::warn( " in block " ) + - cc::info( dev::toJS( uBlockOptimized ) ) + - cc::warn( ", will use default progressive search algorithm" ) ); - } - } // if( haveEffectiveBlockNo ) - else { - isDefaultProgressiveLogsSearchNeeded = true; - clog( VerbosityDebug, "IMA" ) - << ( cc::warn( "Skipped " ) + cc::attention( "effective scan" ) + - cc::warn( ", will use default progressive search algorithm" ) ); - } // else from if( haveEffectiveBlockNo ) - if ( isDefaultProgressiveLogsSearchNeeded ) - jarrFoundLogRecords = do_logs_search_progressive( do_getBlockNumber() ); - - /* example of jarrFoundLogRecords value: - [{ - "address": "0x4c6ad417e3bf7f3d623bab87f29e119ef0f28059", - - "blockHash": - "0x4bcb4bba159b42d1d3dd896a563ca426140fe9d5d1b4e0ed8f3472a681b0f5ea", - - "blockNumber": 82640, - - "data": - "0x00000000000000000000000000000000000000000000000000000000000000c000000000000000000000000088a5edcf315599ade5b6b4cc0991a23bf9e88f650000000000000000000000007aa5e36aa15e93d10f4f26357c30f052dacdde5f0000000000000000000000000000000000000000000000000de0b6b3a76400000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000074d61696e6e65740000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000010100000000000000000000000000000000000000000000000000000000000000", - - "logIndex": 0, - - "polarity": true, - - "topics": - ["0xa701ebe76260cb49bb2dc03cf8cf6dacbc4c59a5d615c4db34a7dfdf36e6b6dc", - "0x8d646f556e5d9d6f1edcf7a39b77f5ac253776eb34efcfd688aacbee518efc26", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x000000000000000000000000c2fe505c79c82bb8cef48709816480ff6e1e0379"], - - "transactionHash": - 
"0x8013af1333055df9f291a58d2da58c912b5326972b1c981b73b854625e904c91", - - "transactionIndex": 0, - - "type": "mined" - }] */ - bool bIsVerified = false; - if ( jarrFoundLogRecords.is_array() && jarrFoundLogRecords.size() > 0 ) - bIsVerified = true; - if ( !bIsVerified ) { - throw std::runtime_error( "IMA message " + - std::to_string( nStartMessageIdx + idxMessage ) + - " verification failed - not found in logs" ); - } - // - // - // Find transaction, simlar to call tp eth_getTransactionByHash - // - bool bTransactionWasFound = false; - size_t idxFoundLogRecord = 0, cntFoundLogRecords = jarrFoundLogRecords.size(); - for ( idxFoundLogRecord = 0; idxFoundLogRecord < cntFoundLogRecords; - ++idxFoundLogRecord ) { - const nlohmann::json& joFoundLogRecord = jarrFoundLogRecords[idxFoundLogRecord]; - if ( joFoundLogRecord.count( "transactionHash" ) == 0 ) - continue; // bad log record??? this should never happen - const nlohmann::json& joTransactionHash = joFoundLogRecord["transactionHash"]; - if ( !joTransactionHash.is_string() ) - continue; // bad log record??? this should never happen - const std::string strTransactionHash = joTransactionHash.get< std::string >(); - if ( strTransactionHash.empty() ) - continue; // bad log record??? this should never happen - clog( VerbosityDebug, "IMA" ) - << ( strLogPrefix + cc::debug( " Analyzing transaction " ) + - cc::notice( strTransactionHash ) + cc::debug( "..." 
) ); - nlohmann::json joTransaction; - try { - if ( strDirection == "M2S" || strDirection == "S2S" ) { - nlohmann::json jarrParams = nlohmann::json::array(); - jarrParams.push_back( strTransactionHash ); - nlohmann::json joCall = nlohmann::json::object(); - joCall["jsonrpc"] = "2.0"; - joCall["method"] = "eth_getTransactionByHash"; - joCall["params"] = jarrParams; - skutils::rest::client cli( urlSourceChain ); - skutils::rest::data_t d = cli.call( joCall ); - if ( !d.err_s_.empty() ) - throw std::runtime_error( - "Main Net call to \"eth_getTransactionByHash\" failed: " + - d.err_s_ ); - if ( d.empty() ) - throw std::runtime_error( - "Main Net call to \"eth_getTransactionByHash\" failed, EMPTY " - "data " - "received" ); - const nlohmann::json joAnswer = - dev::stat_parse_json_with_error_conversion( d.s_ ); - dev::stat_check_rpc_call_error_and_throw( - joAnswer, "eth_getTransactionByHash" ); - if ( joAnswer.count( "result" ) == 0 ) - throw std::runtime_error( - "Got \"eth_getTransactionByHash\" bad answer without " - "\"result\" field, answer is \"" + - joAnswer.dump() + "\"" ); - joTransaction = joAnswer["result"]; - } else { - Json::Value jvTransaction; - h256 h = dev::jsToFixed< 32 >( strTransactionHash ); - if ( !this->client()->isKnownTransaction( h ) ) - jvTransaction = Json::Value( Json::nullValue ); - else - jvTransaction = toJson( this->client()->localisedTransaction( h ) ); - joTransaction = dev::stat_parse_json_with_error_conversion( - Json::FastWriter().write( jvTransaction ), true ); - } // else from if ( strDirection == "M2S" ) - } catch ( const std::exception& ex ) { - clog( VerbosityError, "IMA" ) - << ( strLogPrefix + " " + cc::fatal( "FATAL:" ) + - cc::error( " Transaction verification failed: " ) + - cc::warn( ex.what() ) ); - continue; - } catch ( ... 
) { - clog( VerbosityError, "IMA" ) - << ( strLogPrefix + " " + cc::fatal( "FATAL:" ) + - cc::error( " Transaction verification failed: " ) + - cc::warn( "unknown exception" ) ); - continue; - } - clog( VerbosityDebug, "IMA" ) - << ( strLogPrefix + cc::debug( " Reviewing transaction:" ) + - cc::j( joTransaction ) + cc::debug( "..." ) ); - // - // - // Find more transaction details, simlar to call tp - // eth_getTransactionReceipt - // - /* Receipt should look like: - { - "blockHash": - "0x995cb104795b28c16f3be075fbf08afd69753a6c1b16df3758e570342fd3dadf", - "blockNumber": 115508, - "contractAddress": - "0x1bbde22a5d43d59883c1befd474eff2ec51519d2", - "cumulativeGasUsed": "0xf055", - "gasUsed": "0xf055", - "logs": [{ - "address": - "0xfd02fc34219dc1dc923127062543c9522373d895", - "blockHash": - "0x995cb104795b28c16f3be075fbf08afd69753a6c1b16df3758e570342fd3dadf", - "blockNumber": 115508, - "data": - "0x0000000000000000000000000000000000000000000000000de0b6b3a7640000", - "logIndex": 0, "polarity": false, "topics": - ["0xddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef", - "0x00000000000000000000000066c5a87f4a49dd75e970055a265e8dd5c3f8f852", - "0x0000000000000000000000000000000000000000000000000000000000000000"], - "transactionHash": - "0xab241b07a2b7a8a59aafb5e25fdc5750a8c195ee42b3503e65ff737c514dde71", - "transactionIndex": 0, - "type": "mined" - }, { - "address": - "0x4c6ad417e3bf7f3d623bab87f29e119ef0f28059", - "blockHash": - "0x995cb104795b28c16f3be075fbf08afd69753a6c1b16df3758e570342fd3dadf", - "blockNumber": 115508, - "data": - 
"0x00000000000000000000000000000000000000000000000000000000000000c000000000000000000000000088a5edcf315599ade5b6b4cc0991a23bf9e88f650000000000000000000000007aa5e36aa15e93d10f4f26357c30f052dacdde5f0000000000000000000000000000000000000000000000000de0b6b3a76400000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000074d61696e6e65740000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000010100000000000000000000000000000000000000000000000000000000000000", - "logIndex": 1, - "polarity": false, - "topics": - ["0xa701ebe76260cb49bb2dc03cf8cf6dacbc4c59a5d615c4db34a7dfdf36e6b6dc", - "0x8d646f556e5d9d6f1edcf7a39b77f5ac253776eb34efcfd688aacbee518efc26", - "0x0000000000000000000000000000000000000000000000000000000000000021", - "0x000000000000000000000000c2fe505c79c82bb8cef48709816480ff6e1e0379"], - "transactionHash": - "0xab241b07a2b7a8a59aafb5e25fdc5750a8c195ee42b3503e65ff737c514dde71", - "transactionIndex": 0, - "type": "mined" - }], - "logsBloom": - "0x00000000000000000000000000200000000000000000000000000800000000020000000000000000000000000400000000000000000000000000000000100000000000000000000000000008000000000000000000000000000000000000020000000400020000400000000000000800000000000000000000000010040000000000000000000000000000000000000000000000000040000000000000000000000040000080000000000000000000000000001800000000200000000000000000000002000000000800000000000000000000000000000000020000200020000000000000000004000000000000000000000000000000003000000000000000", - "status": "1", - "transactionHash": - "0xab241b07a2b7a8a59aafb5e25fdc5750a8c195ee42b3503e65ff737c514dde71", - "transactionIndex": 0 - } - - The last log record in receipt abaove contains "topics" and - "data" field we - should verify by comparing fields of IMA message - */ - // - nlohmann::json joTransactionReceipt; - try { - if ( 
strDirection == "M2S" || strDirection == "S2S" ) { - nlohmann::json jarrParams = nlohmann::json::array(); - jarrParams.push_back( strTransactionHash ); - nlohmann::json joCall = nlohmann::json::object(); - joCall["jsonrpc"] = "2.0"; - joCall["method"] = "eth_getTransactionReceipt"; - joCall["params"] = jarrParams; - skutils::rest::client cli( urlSourceChain ); - skutils::rest::data_t d = cli.call( joCall ); - if ( !d.err_s_.empty() ) - throw std::runtime_error( - "Main Net call to \"eth_getTransactionReceipt\" failed: " + - d.err_s_ ); - if ( d.empty() ) - throw std::runtime_error( - "Main Net call to \"eth_getTransactionReceipt\" failed, EMPTY " - "data " - "received" ); - const nlohmann::json joAnswer = - dev::stat_parse_json_with_error_conversion( d.s_ ); - dev::stat_check_rpc_call_error_and_throw( - joAnswer, "eth_getTransactionReceipt" ); - if ( joAnswer.count( "result" ) == 0 ) - throw std::runtime_error( - "Got \"eth_getTransactionReceipt\" bad answer without " - "\"result\" field, answer is \"" + - joAnswer.dump() + "\"" ); - joTransactionReceipt = joAnswer["result"]; - } else { - Json::Value jvTransactionReceipt; - const h256 h = dev::jsToFixed< 32 >( strTransactionHash ); - if ( !this->client()->isKnownTransaction( h ) ) - jvTransactionReceipt = Json::Value( Json::nullValue ); - else - jvTransactionReceipt = dev::eth::toJson( - this->client()->localisedTransactionReceipt( h ) ); - joTransactionReceipt = dev::stat_parse_json_with_error_conversion( - Json::FastWriter().write( jvTransactionReceipt ), true ); - } // else from if ( strDirection == "M2S" ) - } catch ( const std::exception& ex ) { - clog( VerbosityError, "IMA" ) - << ( strLogPrefix + " " + cc::fatal( "FATAL:" ) + - cc::error( " Receipt verification failed: " ) + - cc::warn( ex.what() ) ); - continue; - } catch ( ... 
) { - clog( VerbosityError, "IMA" ) - << ( strLogPrefix + " " + cc::fatal( "FATAL:" ) + - cc::error( " Receipt verification failed: " ) + - cc::warn( "unknown exception" ) ); - continue; - } - clog( VerbosityDebug, "IMA" ) - << ( strLogPrefix + cc::debug( " Reviewing transaction receipt:" ) + - cc::j( joTransactionReceipt ) + cc::debug( "..." ) ); - if ( joTransactionReceipt.count( "logs" ) == 0 ) - continue; // ??? - const nlohmann::json& jarrLogsReceipt = joTransactionReceipt["logs"]; - if ( !jarrLogsReceipt.is_array() ) - continue; // ??? - bool bReceiptVerified = false; - size_t idxReceiptLogRecord = 0, cntReceiptLogRecords = jarrLogsReceipt.size(); - for ( idxReceiptLogRecord = 0; idxReceiptLogRecord < cntReceiptLogRecords; - ++idxReceiptLogRecord ) { - const nlohmann::json& joReceiptLogRecord = - jarrLogsReceipt[idxReceiptLogRecord]; - clog( VerbosityDebug, "IMA" ) - << ( strLogPrefix + cc::debug( " Reviewing TX receipt record:" ) + - cc::j( joReceiptLogRecord ) + cc::debug( "..." ) ); - if ( joReceiptLogRecord.count( "address" ) == 0 || - ( !joReceiptLogRecord["address"].is_string() ) ) { - clog( VerbosityDebug, "IMA" ) - << ( strLogPrefix + - cc::warn( " TX receipt record is skipped because " ) + - cc::info( "address" ) + cc::warn( " field is not found" ) ); - continue; - } - const std::string strReceiptLogRecord = - joReceiptLogRecord["address"].get< std::string >(); - if ( strReceiptLogRecord.empty() ) { - clog( VerbosityDebug, "IMA" ) - << ( strLogPrefix + - cc::warn( " TX receipt record is skipped because " ) + - cc::info( "address" ) + cc::warn( " field is EMPTY" ) ); - continue; - } - const std::string strReceiptLogRecordLC = - skutils::tools::to_lower( strReceiptLogRecord ); - if ( strAddressImaMessageProxyLC != strReceiptLogRecordLC ) { - clog( VerbosityDebug, "IMA" ) - << ( strLogPrefix + - cc::warn( " TX receipt record is skipped because " ) + - cc::info( "address" ) + - cc::warn( " field is not equal to " ) + - cc::notice( 
strAddressImaMessageProxyLC ) ); - continue; - } - // - // find needed entries in "topics" - if ( joReceiptLogRecord.count( "topics" ) == 0 || - ( !joReceiptLogRecord["topics"].is_array() ) ) { - clog( VerbosityDebug, "IMA" ) - << ( strLogPrefix + - cc::warn( " TX receipt record is skipped because " ) + - cc::info( "topics" ) + - cc::warn( " array field is not found" ) ); - continue; - } - bool bTopicSignatureFound = false, bTopicMsgCounterFound = false, - bTopicDstChainHashFound = false; - const nlohmann::json& jarrReceiptTopics = joReceiptLogRecord["topics"]; - size_t idxReceiptTopic = 0, cntReceiptTopics = jarrReceiptTopics.size(); - for ( idxReceiptTopic = 0; idxReceiptTopic < cntReceiptTopics; - ++idxReceiptTopic ) { - const nlohmann::json& joReceiptTopic = - jarrReceiptTopics[idxReceiptTopic]; - if ( !joReceiptTopic.is_string() ) - continue; - const dev::u256 uTopic( joReceiptTopic.get< std::string >() ); - if ( uTopic == uTopic_event_OutgoingMessage ) - bTopicSignatureFound = true; - if ( uTopic == uTopic_msgCounter ) - bTopicMsgCounterFound = true; - if ( uTopic == uTopic_dstChainHash ) - bTopicDstChainHashFound = true; - } - if ( !bTopicSignatureFound ) { - clog( VerbosityDebug, "IMA" ) - << ( strLogPrefix + - cc::warn( " TX receipt record is skipped because " ) + - cc::info( "topics" ) + - cc::warn( " array field does not contain signature" ) ); - continue; - } - if ( !bTopicMsgCounterFound ) { - clog( VerbosityDebug, "IMA" ) - << ( strLogPrefix + - cc::warn( " TX receipt record is skipped because " ) + - cc::info( "topics" ) + - cc::warn( - " array field does not contain message counter" ) ); - continue; - } - if ( !bTopicDstChainHashFound ) { - clog( VerbosityDebug, "IMA" ) - << ( strLogPrefix + - cc::warn( " TX receipt record is skipped because " ) + - cc::info( "topics" ) + - cc::warn( " array field does not contain destination chain " - "hash" ) ); - continue; - } - // - // analyze "data" - if ( joReceiptLogRecord.count( "data" ) == 0 || - ( 
!joReceiptLogRecord["data"].is_string() ) ) { - clog( VerbosityDebug, "IMA" ) - << ( strLogPrefix + - cc::warn( " TX receipt record is skipped because " ) + - cc::info( "data" ) + cc::warn( " field is not found" ) ); - continue; - } - const std::string strData = joReceiptLogRecord["data"].get< std::string >(); - if ( strData.empty() ) { - clog( VerbosityDebug, "IMA" ) - << ( strLogPrefix + - cc::warn( " TX receipt record is skipped because " ) + - cc::info( "data" ) + cc::warn( " field is EMPTY" ) ); - continue; - } - const std::string strDataLC_linear = skutils::tools::trim_copy( - skutils::tools::replace_all_copy( skutils::tools::to_lower( strData ), - std::string( "0x" ), std::string( "" ) ) ); - const size_t nDataLength = strDataLC_linear.size(); - if ( strDataLC_linear.find( strMessageData_linear_LC ) == - std::string::npos ) { - clog( VerbosityDebug, "IMA" ) - << ( strLogPrefix + - cc::warn( " TX receipt record is skipped because " ) + - cc::info( "data" ) + cc::warn( " field is not equal to " ) + - cc::notice( strMessageData_linear_LC ) ); - continue; // no IMA messahe data - } - // std::set< std::string > setChunksLC; - std::set< dev::u256 > setChunksU256; - static const size_t nChunkSize = 64; - const size_t cntChunks = nDataLength / nChunkSize + - ( ( ( nDataLength % nChunkSize ) != 0 ) ? 1 : 0 ); - for ( size_t idxChunk = 0; idxChunk < cntChunks; ++idxChunk ) { - const size_t nChunkStart = idxChunk * nChunkSize; - size_t nChunkEnd = nChunkStart + nChunkSize; - if ( nChunkEnd > nDataLength ) - nChunkEnd = nDataLength; - const size_t nChunkSize = nChunkEnd - nChunkStart; - const std::string strChunk = - strDataLC_linear.substr( nChunkStart, nChunkSize ); - clog( VerbosityDebug, "IMA" ) - << ( strLogPrefix + cc::debug( " chunk " ) + - cc::info( strChunk ) ); - try { - const dev::u256 uChunk( "0x" + strChunk ); - // setChunksLC.insert( strChunk ); - setChunksU256.insert( uChunk ); - } catch ( ... 
) { - clog( VerbosityDebug, "IMA" ) - << ( strLogPrefix + cc::debug( " skipped chunk " ) ); - continue; - } - } - if ( setChunksU256.find( uDestinationContract ) == setChunksU256.end() ) { - clog( VerbosityDebug, "IMA" ) - << ( strLogPrefix + - cc::warn( " TX receipt record is skipped because " ) + - cc::info( "data" ) + - cc::warn( " chunks does not contain destination contract " - "address" ) ); - continue; - } - bReceiptVerified = true; - clog( VerbosityDebug, "IMA" ) - << ( strLogPrefix + " " + cc::notice( "Notice:" ) + " " + - cc::success( " Transaction " ) + - cc::notice( strTransactionHash ) + - cc::success( " receipt was verified, success" ) ); - break; - } // for ( idxReceiptLogRecord = 0; idxReceiptLogRecord < cntReceiptLogRecords; - // ++idxReceiptLogRecord ) - if ( !bReceiptVerified ) { - clog( VerbosityDebug, "IMA" ) - << ( strLogPrefix + " " + cc::notice( "Notice:" ) + " " + - cc::attention( " Skipping transaction " ) + - cc::notice( strTransactionHash ) + - cc::attention( " because no appropriate receipt was found" ) ); - continue; - } - // - // - clog( VerbosityDebug, "IMA" ) - << ( strLogPrefix + cc::success( " Found transaction for IMA message " ) + - cc::size10( nStartMessageIdx + idxMessage ) + cc::success( ": " ) + - cc::j( joTransaction ) ); - bTransactionWasFound = true; - break; - } - if ( !bTransactionWasFound ) { - clog( VerbosityDebug, "IMA" ) - << ( strLogPrefix + " " + - cc::error( "No transaction was found in logs for IMA message " ) + - cc::size10( nStartMessageIdx + idxMessage ) + cc::error( "." ) ); - throw std::runtime_error( "No transaction was found in logs for IMA message " + - std::to_string( nStartMessageIdx + idxMessage ) ); - } // if ( !bTransactionWasFound ) - clog( VerbosityDebug, "IMA" ) - << ( strLogPrefix + cc::success( " Success, IMA message " ) + - cc::size10( nStartMessageIdx + idxMessage ) + - cc::success( " was found in logs." 
) ); - } // if( bIsVerifyImaMessagesViaLogsSearch ) - // - // - if ( !bOnlyVerify ) { - clog( VerbosityDebug, "IMA" ) - << ( strLogPrefix + cc::debug( " Appending message sender address " ) + - cc::notice( dev::toJS( uMessageSender ) ) + - cc::debug( " to summary vector" ) ); - stat_append_address_2_vec( vecComputeMessagesHash, uMessageSender ); - clog( VerbosityDebug, "IMA" ) - << ( strLogPrefix + cc::debug( " Appending destination contract address " ) + - cc::notice( dev::toJS( uDestinationContract ) ) + - cc::debug( " to summary vector" ) ); - stat_append_address_2_vec( vecComputeMessagesHash, uDestinationContract ); - bytes v = dev::fromHex( strMessageData, dev::WhenError::DontThrow ); - // stat_array_invert( v.data(), v.size() ); // do not invert byte order data field - // (see SKALE-3554 for details) - clog( VerbosityDebug, "IMA" ) - << ( strLogPrefix + cc::debug( " Appending message data " ) + - cc::binary_singleline( ( void* ) v.data(), v.size(), "" ) + - cc::debug( " to summary vector" ) ); - vecComputeMessagesHash.insert( vecComputeMessagesHash.end(), v.begin(), v.end() ); - // re-compute hash of vecComputeMessagesHash - clog( VerbosityDebug, "IMA" ) - << ( strLogPrefix + cc::debug( " Accumulated vector " ) + - cc::binary_singleline( ( void* ) vecComputeMessagesHash.data(), - vecComputeMessagesHash.size(), "" ) ); - stat_re_compute_vec_2_h256vec( vecComputeMessagesHash ); - clog( VerbosityDebug, "IMA" ) - << ( strLogPrefix + cc::debug( " Computed hash from vector for message " ) + - cc::num10( idxMessage ) + cc::debug( " is " ) + - cc::binary_singleline( ( void* ) vecComputeMessagesHash.data(), - vecComputeMessagesHash.size(), "" ) ); - } // if ( !bOnlyVerify ) - } // for ( size_t idxMessage = 0; idxMessage < cntMessagesToSign; ++idxMessage ) { - - if ( !bOnlyVerify ) { - const std::string sh = stat_bytes_2_hex_string( - vecComputeMessagesHash ); // there is no "0x" prefix at start of return value - clog( VerbosityDebug, "IMA" ) - << ( strLogPrefix + 
cc::debug( " Got hash to sign " ) + cc::info( sh ) ); - // - // If we are here, then all IMA messages are valid - // Perform call to wallet to sign messages - // - clog( VerbosityDebug, "IMA" ) - << ( strLogPrefix + cc::debug( " Calling wallet to sign " ) + cc::notice( sh ) + - cc::debug( " composed from " ) + - cc::binary_singleline( ( void* ) vecComputeMessagesHash.data(), - vecComputeMessagesHash.size(), "" ) + - cc::debug( "..." ) ); - // - nlohmann::json jo = nlohmann::json::object(); - // - nlohmann::json joCall = nlohmann::json::object(); - joCall["jsonrpc"] = "2.0"; - joCall["method"] = "blsSignMessageHash"; - if ( u.scheme() == "zmq" ) - joCall["type"] = "BLSSignReq"; - joCall["params"] = nlohmann::json::object(); - joCall["params"]["keyShareName"] = keyShareName; - joCall["params"]["messageHash"] = sh; // there is no "0x" prefix at start - joCall["params"]["n"] = joSkaleConfig_nodeInfo_wallets_ima["n"]; - joCall["params"]["t"] = joSkaleConfig_nodeInfo_wallets_ima["t"]; - joCall["params"]["signerIndex"] = nThisNodeIndex_; // 1-based - clog( VerbosityDebug, "IMA" ) - << ( strLogPrefix + cc::debug( " Contacting " ) + cc::notice( "SGX Wallet" ) + - cc::debug( " server at " ) + cc::u( u ) ); - clog( VerbosityDebug, "IMA" ) << ( strLogPrefix + cc::debug( " Will send " ) + - cc::notice( "messages sign query" ) + - cc::debug( " to wallet: " ) + cc::j( joCall ) ); - skutils::rest::client cli; - cli.optsSSL_ = optsSSL; - cli.open( u ); - skutils::rest::data_t d = cli.call( joCall ); - if ( !d.err_s_.empty() ) - throw std::runtime_error( - "failed to \"blsSignMessageHash\" sign message(s) with wallet: " + d.err_s_ ); - if ( d.empty() ) - throw std::runtime_error( - "failed to \"blsSignMessageHash\" sign message(s) with wallet, EMPTY data " - "received" ); - const nlohmann::json joAnswer = dev::stat_parse_json_with_error_conversion( d.s_ ); - dev::stat_check_rpc_call_error_and_throw( joAnswer, "blsSignMessageHash" ); - nlohmann::json joSignResult = - ( 
joAnswer.count( "result" ) > 0 ) ? joAnswer["result"] : joAnswer; - jo["signResult"] = joSignResult; - // - // Done, provide result to caller - // - if ( bHaveQaInRequest ) - jo["qa"] = joQA; - std::string s = jo.dump(); - clog( VerbosityDebug, "IMA" ) - << ( strLogPrefix + cc::success( " Success, got messages " ) + - cc::notice( "sign result" ) + cc::success( " from wallet: " ) + - cc::j( joSignResult ) ); - Json::Value ret; - Json::Reader().parse( s, ret ); - return ret; - } // if ( !bOnlyVerify ) - else { - nlohmann::json jo = nlohmann::json::object(); - jo["success"] = true; - std::string s = jo.dump(); - clog( VerbosityDebug, "IMA" ) - << ( strLogPrefix + cc::success( " Success, verification passed" ) ); - Json::Value ret; - Json::Reader().parse( s, ret ); - return ret; - } // else from if ( !bOnlyVerify ) - } catch ( Exception const& ex ) { - clog( VerbosityError, "IMA" ) - << ( strLogPrefix + " " + cc::fatal( "FATAL:" ) + - cc::error( " Exception while processing " ) + cc::info( "IMA Verify and Sign" ) + - cc::error( ", exception information: " ) + cc::warn( ex.what() ) ); - throw jsonrpc::JsonRpcException( exceptionToErrorMessage() ); - } catch ( const std::exception& ex ) { - clog( VerbosityError, "IMA" ) - << ( strLogPrefix + " " + cc::fatal( "FATAL:" ) + - cc::error( " Exception while processing " ) + cc::info( "IMA Verify and Sign" ) + - cc::error( ", exception information: " ) + cc::warn( ex.what() ) ); - throw jsonrpc::JsonRpcException( ex.what() ); - } catch ( ... 
) { - clog( VerbosityError, "IMA" ) - << ( strLogPrefix + " " + cc::fatal( "FATAL:" ) + - cc::error( " Exception while processing " ) + cc::info( "IMA Verify and Sign" ) + - cc::error( ", exception information: " ) + cc::warn( "unknown exception" ) ); - throw jsonrpc::JsonRpcException( "unknown exception" ); - } -} // skale_imaVerifyAndSign() - -Json::Value SkaleStats::skale_imaBSU256( const Json::Value& request ) { - std::string strLogPrefix = cc::deep_info( "IMA BLS Sign U256" ); - std::string strSgxWalletURL = - dev::tracking::txn_pending_tracker_system::instance().url_sgx_wallet(); - try { - // if ( !isEnabledImaMessageSigning() ) - // throw std::runtime_error( "IMA message signing feature is disabled on this instance" - // ); - Json::FastWriter fastWriter; - const std::string strRequest = fastWriter.write( request ); - const nlohmann::json joRequest = - dev::stat_parse_json_with_error_conversion( strRequest, true ); - clog( VerbosityDebug, "IMA" ) - << ( strLogPrefix + cc::debug( " Processing " ) + cc::notice( "sign" ) + - cc::debug( " request: " ) + cc::j( joRequest ) ); - // - std::string strReason; - if ( joRequest.count( "reason" ) > 0 ) - strReason = skutils::tools::trim_copy( joRequest["reason"].get< std::string >() ); - if ( strReason.empty() ) - strReason = "<<>>"; - clog( VerbosityDebug, "IMA" ) - << ( strLogPrefix + cc::debug( " Sign reason description is: " ) + - cc::info( strReason ) ); - // - if ( joRequest.count( "valueToSign" ) == 0 ) - throw std::runtime_error( "missing \"valueToSign\" in call parameters" ); - const nlohmann::json& joValueToSign = joRequest["valueToSign"]; - if ( !joValueToSign.is_string() ) - throw std::runtime_error( "bad value type of \"valueToSign\" must be string" ); - std::string strValueToSign = - skutils::tools::trim_copy( joValueToSign.get< std::string >() ); - if ( strValueToSign.empty() ) - throw std::runtime_error( "value of \"valueToSign\" must be non-EMPTY string" ); - if ( strValueToSign.length() >= 2 && - ( !( 
strValueToSign[0] == '0' && - ( strValueToSign[1] == 'x' || strValueToSign[1] == 'X' ) ) ) ) - strValueToSign = "0x" + strValueToSign; - clog( VerbosityDebug, "IMA" ) - << ( strLogPrefix + " " + cc::notice( "U256 value" ) + cc::debug( " to sign is " ) + - cc::info( strValueToSign ) ); - dev::u256 uValueToSign( strValueToSign ); - // - nlohmann::json joConfig = getConfigJSON(); - if ( joConfig.count( "skaleConfig" ) == 0 ) - throw std::runtime_error( "error in config.json file, cannot find \"skaleConfig\"" ); - const nlohmann::json& joSkaleConfig = joConfig["skaleConfig"]; - if ( joSkaleConfig.count( "nodeInfo" ) == 0 ) - throw std::runtime_error( - "error in config.json file, cannot find \"skaleConfig\"/\"nodeInfo\"" ); - const nlohmann::json& joSkaleConfig_nodeInfo = joSkaleConfig["nodeInfo"]; - if ( joSkaleConfig_nodeInfo.count( "ecdsaKeyName" ) == 0 ) - throw std::runtime_error( - "error in config.json file, cannot find " - "\"skaleConfig\"/\"nodeInfo\"/\"ecdsaKeyName\"" ); - const nlohmann::json& joSkaleConfig_nodeInfo_wallets = joSkaleConfig_nodeInfo["wallets"]; - if ( joSkaleConfig_nodeInfo_wallets.count( "ima" ) == 0 ) - throw std::runtime_error( - "error in config.json file, cannot find " - "\"skaleConfig\"/\"nodeInfo\"/\"wallets\"/\"ima\"" ); - const nlohmann::json& joSkaleConfig_nodeInfo_wallets_ima = - joSkaleConfig_nodeInfo_wallets["ima"]; - // - // Check wallet URL and keyShareName for future use, - // fetch SSL options for SGX - // - skutils::url u; - skutils::http::SSL_client_options optsSSL; - const std::string strWalletURL = strSgxWalletURL; - u = skutils::url( strWalletURL ); - if ( u.scheme().empty() || u.host().empty() ) - throw std::runtime_error( "bad SGX wallet url" ); - // - // - try { - if ( joSkaleConfig_nodeInfo_wallets_ima.count( "caFile" ) > 0 ) - optsSSL.ca_file = skutils::tools::trim_copy( - joSkaleConfig_nodeInfo_wallets_ima["caFile"].get< std::string >() ); - } catch ( ... 
) { - optsSSL.ca_file.clear(); - } - clog( VerbosityDebug, "IMA" ) - << ( strLogPrefix + cc::debug( " SGX Wallet CA file " ) + cc::info( optsSSL.ca_file ) ); - try { - if ( joSkaleConfig_nodeInfo_wallets_ima.count( "certFile" ) > 0 ) - optsSSL.client_cert = skutils::tools::trim_copy( - joSkaleConfig_nodeInfo_wallets_ima["certFile"].get< std::string >() ); - } catch ( ... ) { - optsSSL.client_cert.clear(); - } - clog( VerbosityDebug, "IMA" ) - << ( strLogPrefix + cc::debug( " SGX Wallet client certificate file " ) + - cc::info( optsSSL.client_cert ) ); - try { - if ( joSkaleConfig_nodeInfo_wallets_ima.count( "keyFile" ) > 0 ) - optsSSL.client_key = skutils::tools::trim_copy( - joSkaleConfig_nodeInfo_wallets_ima["keyFile"].get< std::string >() ); - } catch ( ... ) { - optsSSL.client_key.clear(); - } - clog( VerbosityDebug, "IMA" ) - << ( strLogPrefix + cc::debug( " SGX Wallet client key file " ) + - cc::info( optsSSL.client_key ) ); - const std::string keyShareName = - ( joSkaleConfig_nodeInfo_wallets_ima.count( "keyShareName" ) > 0 ) ? 
- joSkaleConfig_nodeInfo_wallets_ima["keyShareName"].get< std::string >() : - ""; - if ( keyShareName.empty() ) - throw std::runtime_error( - "error in config.json file, cannot find valid value for " - "\"skaleConfig\"/\"nodeInfo\"/\"wallets\"/\"keyShareName\" parameter" ); - // - // compute hash of u256 value - // - bytes v = dev::BMPBN::encode2vec< dev::u256 >( uValueToSign, true ); - stat_array_align_right( v, 32 ); - const dev::h256 h = dev::sha3( v ); - std::string sh = h.hex(); - sh = stat_remove_0x_from_start( sh ); // there is no "0x" prefix at start - clog( VerbosityDebug, "IMA" ) - << ( strLogPrefix + cc::debug( " Got hash to sign " ) + cc::info( sh ) ); - // - nlohmann::json jo = nlohmann::json::object(); - // - nlohmann::json joCall = nlohmann::json::object(); - joCall["jsonrpc"] = "2.0"; - joCall["method"] = "blsSignMessageHash"; - if ( u.scheme() == "zmq" ) - joCall["type"] = "BLSSignReq"; - joCall["params"] = nlohmann::json::object(); - joCall["params"]["keyShareName"] = keyShareName; - joCall["params"]["messageHash"] = sh; // there is no "0x" prefix at start - joCall["params"]["n"] = joSkaleConfig_nodeInfo_wallets_ima["n"]; - joCall["params"]["t"] = joSkaleConfig_nodeInfo_wallets_ima["t"]; - joCall["params"]["signerIndex"] = nThisNodeIndex_; // 1-based - clog( VerbosityDebug, "IMA" ) - << ( strLogPrefix + cc::debug( " Contacting " ) + cc::notice( "SGX Wallet" ) + - cc::debug( " server at " ) + cc::u( u ) ); - clog( VerbosityDebug, "IMA" ) - << ( strLogPrefix + cc::debug( " Will send " ) + cc::notice( "u256 value sign query" ) + - cc::debug( " to wallet: " ) + cc::j( joCall ) ); - skutils::rest::client cli; - cli.optsSSL_ = optsSSL; - cli.open( u ); - skutils::rest::data_t d = cli.call( joCall ); - if ( !d.err_s_.empty() ) - throw std::runtime_error( - "failed to \"blsSignMessageHash\"/u256 value with wallet: " + d.err_s_ ); - if ( d.empty() ) - throw std::runtime_error( - "failed to \"blsSignMessageHash\"/u256 value with wallet, EMPTY data received" 
); - const nlohmann::json joAnswer = dev::stat_parse_json_with_error_conversion( d.s_ ); - dev::stat_check_rpc_call_error_and_throw( joAnswer, "blsSignMessageHash/u256" ); - nlohmann::json joSignResult = - ( joAnswer.count( "result" ) > 0 ) ? joAnswer["result"] : joAnswer; - jo["signResult"] = joSignResult; - // - // Done, provide result to caller - // - std::string s = jo.dump(); - clog( VerbosityDebug, "IMA" ) - << ( strLogPrefix + cc::success( " Success, got u256 value " ) + - cc::notice( "sign result" ) + cc::success( " from wallet: " ) + - cc::j( joSignResult ) ); - Json::Value ret; - Json::Reader().parse( s, ret ); - return ret; - } catch ( Exception const& ex ) { - clog( VerbosityError, "IMA" ) - << ( strLogPrefix + " " + cc::fatal( "FATAL:" ) + - cc::error( " Exception while processing " ) + cc::info( "IMA BLS Sign U256" ) + - cc::error( ", exception information: " ) + cc::warn( ex.what() ) ); - throw jsonrpc::JsonRpcException( exceptionToErrorMessage() ); - } catch ( const std::exception& ex ) { - clog( VerbosityError, "IMA" ) - << ( strLogPrefix + " " + cc::fatal( "FATAL:" ) + - cc::error( " Exception while processing " ) + cc::info( "IMA BLS Sign U256" ) + - cc::error( ", exception information: " ) + cc::warn( ex.what() ) ); - throw jsonrpc::JsonRpcException( ex.what() ); - } catch ( ... 
) { - clog( VerbosityError, "IMA" ) - << ( strLogPrefix + " " + cc::fatal( "FATAL:" ) + - cc::error( " Exception while processing " ) + cc::info( "IMA BLS Sign U256" ) + - cc::error( ", exception information: " ) + cc::warn( "unknown exception" ) ); - throw jsonrpc::JsonRpcException( "unknown exception" ); - } -} // skale_imaBSU256() - - -Json::Value SkaleStats::skale_imaBroadcastTxnInsert( const Json::Value& request ) { - std::string strLogPrefix = cc::deep_info( "IMA broadcast TXN insert" ); - std::string strSgxWalletURL = - dev::tracking::txn_pending_tracker_system::instance().url_sgx_wallet(); - try { - Json::FastWriter fastWriter; - const std::string strRequest = fastWriter.write( request ); - const nlohmann::json joRequest = - dev::stat_parse_json_with_error_conversion( strRequest, true ); - clog( VerbosityDebug, "IMA" ) - << ( strLogPrefix + " " + cc::debug( "Got external broadcast/insert request " ) + - cc::j( joRequest ) ); - // - dev::tracking::txn_entry txe; - if ( !txe.fromJSON( joRequest ) ) - throw std::runtime_error( - std::string( "failed to construct tracked IMA TXN entry from " ) + - joRequest.dump() ); - if ( dev::tracking::txn_pending_tracker_system::instance().broadcast_txn_sign_is_enabled( - strSgxWalletURL ) ) { - if ( joRequest.count( "broadcastSignature" ) == 0 ) - throw std::runtime_error( - "IMA broadcast/insert call without \"broadcastSignature\" field specified" ); - if ( joRequest.count( "broadcastFromNode" ) == 0 ) - throw std::runtime_error( - "IMA broadcast/insert call without \"broadcastFromNode\" field specified" ); - std::string strBroadcastSignature = - joRequest["broadcastSignature"].get< std::string >(); - int node_id = joRequest["broadcastFromNode"].get< int >(); - if ( !dev::tracking::txn_pending_tracker_system::instance() - .broadcast_txn_verify_signature( - "insert", strBroadcastSignature, node_id, txe.hash_ ) ) - throw std::runtime_error( "IMA broadcast/insert signature verification failed" ); - } - bool wasInserted = - 
dev::tracking::txn_pending_tracker_system::instance().insert( txe, false ); - // - nlohmann::json jo = nlohmann::json::object(); - jo["success"] = wasInserted; - std::string s = jo.dump(); - clog( VerbosityDebug, "IMA" ) - << ( strLogPrefix + " " + - ( wasInserted ? cc::success( "Inserted new" ) : cc::warn( "Skipped new" ) ) + - " " + cc::notice( "broadcasted" ) + cc::debug( " IMA TXN " ) + - cc::info( dev::toJS( txe.hash_ ) ) ); - Json::Value ret; - Json::Reader().parse( s, ret ); - return ret; - } catch ( Exception const& ex ) { - clog( VerbosityError, "IMA" ) - << ( strLogPrefix + " " + cc::fatal( "FATAL:" ) + - cc::error( " Exception while processing " ) + - cc::info( "IMA broadcast TXN insert" ) + - cc::error( ", exception information: " ) + cc::warn( ex.what() ) ); - throw jsonrpc::JsonRpcException( exceptionToErrorMessage() ); - } catch ( const std::exception& ex ) { - clog( VerbosityError, "IMA" ) - << ( strLogPrefix + " " + cc::fatal( "FATAL:" ) + - cc::error( " Exception while processing " ) + - cc::info( "IMA broadcast TXN insert" ) + - cc::error( ", exception information: " ) + cc::warn( ex.what() ) ); - throw jsonrpc::JsonRpcException( ex.what() ); - } catch ( ... 
) { - clog( VerbosityError, "IMA" ) - << ( strLogPrefix + " " + cc::fatal( "FATAL:" ) + - cc::error( " Exception while processing " ) + - cc::info( "IMA broadcast TXN insert" ) + - cc::error( ", exception information: " ) + cc::warn( "unknown exception" ) ); - throw jsonrpc::JsonRpcException( "unknown exception" ); - } -} - -Json::Value SkaleStats::skale_imaBroadcastTxnErase( const Json::Value& request ) { - std::string strLogPrefix = cc::deep_info( "IMA broadcast TXN erase" ); - std::string strSgxWalletURL = - dev::tracking::txn_pending_tracker_system::instance().url_sgx_wallet(); - try { - Json::FastWriter fastWriter; - const std::string strRequest = fastWriter.write( request ); - const nlohmann::json joRequest = - dev::stat_parse_json_with_error_conversion( strRequest, true ); - clog( VerbosityDebug, "IMA" ) - << ( strLogPrefix + " " + cc::debug( "Got external broadcast/erase request " ) + - cc::j( joRequest ) ); - // - dev::tracking::txn_entry txe; - if ( !txe.fromJSON( joRequest ) ) - throw std::runtime_error( - std::string( "failed to construct tracked IMA TXN entry from " ) + - joRequest.dump() ); - if ( dev::tracking::txn_pending_tracker_system::instance().broadcast_txn_sign_is_enabled( - strSgxWalletURL ) ) { - if ( joRequest.count( "broadcastSignature" ) == 0 ) - throw std::runtime_error( - "IMA broadcast/erase call without \"broadcastSignature\" field specified" ); - if ( joRequest.count( "broadcastFromNode" ) == 0 ) - throw std::runtime_error( - "IMA broadcast/erase call without \"broadcastFromNode\" field specified" ); - std::string strBroadcastSignature = - joRequest["broadcastSignature"].get< std::string >(); - int node_id = joRequest["broadcastFromNode"].get< int >(); - if ( !dev::tracking::txn_pending_tracker_system::instance() - .broadcast_txn_verify_signature( - "erase", strBroadcastSignature, node_id, txe.hash_ ) ) - throw std::runtime_error( "IMA broadcast/erase signature verification failed" ); - } - bool wasErased = 
dev::tracking::txn_pending_tracker_system::instance().erase( txe, false ); - // - nlohmann::json jo = nlohmann::json::object(); - jo["success"] = wasErased; - std::string s = jo.dump(); - clog( VerbosityDebug, "IMA" ) - << ( strLogPrefix + " " + - ( wasErased ? cc::success( "Erased existing" ) : - cc::warn( "Skipped erasing" ) ) + - " " + cc::notice( "broadcasted" ) + cc::debug( " IMA TXN " ) + - cc::info( dev::toJS( txe.hash_ ) ) ); - Json::Value ret; - Json::Reader().parse( s, ret ); - return ret; - } catch ( Exception const& ex ) { - clog( VerbosityError, "IMA" ) - << ( strLogPrefix + " " + cc::fatal( "FATAL:" ) + - cc::error( " Exception while processing " ) + - cc::info( "IMA broadcast TXN erase" ) + - cc::error( ", exception information: " ) + cc::warn( ex.what() ) ); - throw jsonrpc::JsonRpcException( exceptionToErrorMessage() ); - } catch ( const std::exception& ex ) { - clog( VerbosityError, "IMA" ) - << ( strLogPrefix + " " + cc::fatal( "FATAL:" ) + - cc::error( " Exception while processing " ) + - cc::info( "IMA broadcast TXN erase" ) + - cc::error( ", exception information: " ) + cc::warn( ex.what() ) ); - throw jsonrpc::JsonRpcException( ex.what() ); - } catch ( ... 
) { - clog( VerbosityError, "IMA" ) - << ( strLogPrefix + " " + cc::fatal( "FATAL:" ) + - cc::error( " Exception while processing " ) + - cc::info( "IMA broadcast TXN erase" ) + - cc::error( ", exception information: " ) + cc::warn( "unknown exception" ) ); - throw jsonrpc::JsonRpcException( "unknown exception" ); - } -} - -Json::Value SkaleStats::skale_imaTxnInsert( const Json::Value& request ) { - std::string strLogPrefix = cc::deep_info( "IMA TXN insert" ); - try { - Json::FastWriter fastWriter; - const std::string strRequest = fastWriter.write( request ); - const nlohmann::json joRequest = - dev::stat_parse_json_with_error_conversion( strRequest, true ); - // - dev::tracking::txn_entry txe; - if ( !txe.fromJSON( joRequest ) ) - throw std::runtime_error( - std::string( "failed to construct tracked IMA TXN entry from " ) + - joRequest.dump() ); - bool wasInserted = - dev::tracking::txn_pending_tracker_system::instance().insert( txe, true ); - // - nlohmann::json jo = nlohmann::json::object(); - jo["success"] = wasInserted; - std::string s = jo.dump(); - clog( VerbosityDebug, "IMA" ) - << ( strLogPrefix + " " + - ( wasInserted ? 
cc::success( "Inserted new" ) : cc::warn( "Skipped new" ) ) + - " " + cc::notice( "reported" ) + cc::debug( " IMA TXN " ) + - cc::info( dev::toJS( txe.hash_ ) ) ); - Json::Value ret; - Json::Reader().parse( s, ret ); - return ret; - } catch ( Exception const& ex ) { - clog( VerbosityError, "IMA" ) - << ( strLogPrefix + " " + cc::fatal( "FATAL:" ) + - cc::error( " Exception while processing " ) + cc::info( "IMA TXN insert" ) + - cc::error( ", exception information: " ) + cc::warn( ex.what() ) ); - throw jsonrpc::JsonRpcException( exceptionToErrorMessage() ); - } catch ( const std::exception& ex ) { - clog( VerbosityError, "IMA" ) - << ( strLogPrefix + " " + cc::fatal( "FATAL:" ) + - cc::error( " Exception while processing " ) + cc::info( "IMA TXN insert" ) + - cc::error( ", exception information: " ) + cc::warn( ex.what() ) ); - throw jsonrpc::JsonRpcException( ex.what() ); - } catch ( ... ) { - clog( VerbosityError, "IMA" ) - << ( strLogPrefix + " " + cc::fatal( "FATAL:" ) + - cc::error( " Exception while processing " ) + cc::info( "IMA TXN insert" ) + - cc::error( ", exception information: " ) + cc::warn( "unknown exception" ) ); - throw jsonrpc::JsonRpcException( "unknown exception" ); - } -} - -Json::Value SkaleStats::skale_imaTxnErase( const Json::Value& request ) { - std::string strLogPrefix = cc::deep_info( "IMA TXN erase" ); - try { - Json::FastWriter fastWriter; - const std::string strRequest = fastWriter.write( request ); - const nlohmann::json joRequest = - dev::stat_parse_json_with_error_conversion( strRequest, true ); - // - dev::tracking::txn_entry txe; - if ( !txe.fromJSON( joRequest ) ) - throw std::runtime_error( - std::string( "failed to construct tracked IMA TXN entry from " ) + - joRequest.dump() ); - bool wasErased = dev::tracking::txn_pending_tracker_system::instance().erase( txe, true ); - // - nlohmann::json jo = nlohmann::json::object(); - jo["success"] = wasErased; - std::string s = jo.dump(); - clog( VerbosityDebug, "IMA" ) - << ( 
strLogPrefix + " " + - ( wasErased ? cc::success( "Erased existing" ) : - cc::warn( "Skipped erasing" ) ) + - " " + cc::notice( "reported" ) + cc::debug( " IMA TXN " ) + - cc::info( dev::toJS( txe.hash_ ) ) ); - Json::Value ret; - Json::Reader().parse( s, ret ); - return ret; - } catch ( Exception const& ex ) { - clog( VerbosityError, "IMA" ) - << ( strLogPrefix + " " + cc::fatal( "FATAL:" ) + - cc::error( " Exception while processing " ) + cc::info( "IMA TXN erase" ) + - cc::error( ", exception information: " ) + cc::warn( ex.what() ) ); - throw jsonrpc::JsonRpcException( exceptionToErrorMessage() ); - } catch ( const std::exception& ex ) { - clog( VerbosityError, "IMA" ) - << ( strLogPrefix + " " + cc::fatal( "FATAL:" ) + - cc::error( " Exception while processing " ) + cc::info( "IMA TXN erase" ) + - cc::error( ", exception information: " ) + cc::warn( ex.what() ) ); - throw jsonrpc::JsonRpcException( ex.what() ); - } catch ( ... ) { - clog( VerbosityError, "IMA" ) - << ( strLogPrefix + " " + cc::fatal( "FATAL:" ) + - cc::error( " Exception while processing " ) + cc::info( "IMA TXN erase" ) + - cc::error( ", exception information: " ) + cc::warn( "unknown exception" ) ); - throw jsonrpc::JsonRpcException( "unknown exception" ); - } -} - -Json::Value SkaleStats::skale_imaTxnClear( const Json::Value& /*request*/ ) { - std::string strLogPrefix = cc::deep_info( "IMA TXN clear" ); - try { - dev::tracking::txn_pending_tracker_system::instance().clear(); - // - nlohmann::json jo = nlohmann::json::object(); - jo["success"] = true; - std::string s = jo.dump(); - clog( VerbosityDebug, "IMA" ) << ( strLogPrefix + " " + cc::success( "Cleared all" ) + " " + - cc::notice( "reported" ) + cc::debug( " IMA TXNs" ) ); - Json::Value ret; - Json::Reader().parse( s, ret ); - return ret; - } catch ( Exception const& ex ) { - clog( VerbosityError, "IMA" ) - << ( strLogPrefix + " " + cc::fatal( "FATAL:" ) + - cc::error( " Exception while processing " ) + cc::info( "IMA TXN clear" ) + - 
cc::error( ", exception information: " ) + cc::warn( ex.what() ) ); - throw jsonrpc::JsonRpcException( exceptionToErrorMessage() ); - } catch ( const std::exception& ex ) { - clog( VerbosityError, "IMA" ) - << ( strLogPrefix + " " + cc::fatal( "FATAL:" ) + - cc::error( " Exception while processing " ) + cc::info( "IMA TXN clear" ) + - cc::error( ", exception information: " ) + cc::warn( ex.what() ) ); - throw jsonrpc::JsonRpcException( ex.what() ); - } catch ( ... ) { - clog( VerbosityError, "IMA" ) - << ( strLogPrefix + " " + cc::fatal( "FATAL:" ) + - cc::error( " Exception while processing " ) + cc::info( "IMA TXN clear" ) + - cc::error( ", exception information: " ) + cc::warn( "unknown exception" ) ); - throw jsonrpc::JsonRpcException( "unknown exception" ); - } -} - -Json::Value SkaleStats::skale_imaTxnFind( const Json::Value& request ) { - std::string strLogPrefix = cc::deep_info( "IMA TXN find" ); - try { - Json::FastWriter fastWriter; - const std::string strRequest = fastWriter.write( request ); - const nlohmann::json joRequest = - dev::stat_parse_json_with_error_conversion( strRequest, true ); - // - dev::tracking::txn_entry txe; - if ( !txe.fromJSON( joRequest ) ) - throw std::runtime_error( - std::string( "failed to construct tracked IMA TXN entry from " ) + - joRequest.dump() ); - bool wasFound = dev::tracking::txn_pending_tracker_system::instance().find( txe ); - // - nlohmann::json jo = nlohmann::json::object(); - jo["success"] = wasFound; - std::string s = jo.dump(); - clog( VerbosityDebug, "IMA" ) - << ( strLogPrefix + " " + - ( wasFound ? 
cc::success( "Found" ) : cc::warn( "Not found" ) ) + - cc::debug( " IMA TXN " ) + cc::info( dev::toJS( txe.hash_ ) ) ); - Json::Value ret; - Json::Reader().parse( s, ret ); - return ret; - } catch ( Exception const& ex ) { - clog( VerbosityError, "IMA" ) - << ( strLogPrefix + " " + cc::fatal( "FATAL:" ) + - cc::error( " Exception while processing " ) + cc::info( "IMA TXN find" ) + - cc::error( ", exception information: " ) + cc::warn( ex.what() ) ); - throw jsonrpc::JsonRpcException( exceptionToErrorMessage() ); - } catch ( const std::exception& ex ) { - clog( VerbosityError, "IMA" ) - << ( strLogPrefix + " " + cc::fatal( "FATAL:" ) + - cc::error( " Exception while processing " ) + cc::info( "IMA TXN find" ) + - cc::error( ", exception information: " ) + cc::warn( ex.what() ) ); - throw jsonrpc::JsonRpcException( ex.what() ); - } catch ( ... ) { - clog( VerbosityError, "IMA" ) - << ( strLogPrefix + " " + cc::fatal( "FATAL:" ) + - cc::error( " Exception while processing " ) + cc::info( "IMA TXN find" ) + - cc::error( ", exception information: " ) + cc::warn( "unknown exception" ) ); - throw jsonrpc::JsonRpcException( "unknown exception" ); - } -} - -Json::Value SkaleStats::skale_imaTxnListAll( const Json::Value& /*request*/ ) { - std::string strLogPrefix = cc::deep_info( "IMA TXN list-all" ); - try { - dev::tracking::txn_pending_tracker_system_impl::list_txns_t lst; - dev::tracking::txn_pending_tracker_system::instance().list_all( lst ); - nlohmann::json jarr = nlohmann::json::array(); - for ( const dev::tracking::txn_entry& txe : lst ) { - jarr.push_back( txe.toJSON() ); - } - // - nlohmann::json jo = nlohmann::json::object(); - jo["success"] = true; - jo["allTrackedTXNs"] = jarr; - std::string s = jo.dump(); - clog( VerbosityDebug, "IMA" ) << ( strLogPrefix + " " + cc::debug( "Listed " ) + - cc::size10( lst.size() ) + cc::debug( " IMA TXN(s)" ) ); - Json::Value ret; - Json::Reader().parse( s, ret ); - return ret; - } catch ( Exception const& ex ) { - clog( 
VerbosityError, "IMA" ) - << ( strLogPrefix + " " + cc::fatal( "FATAL:" ) + - cc::error( " Exception while processing " ) + cc::info( "IMA TXN list-all" ) + - cc::error( ", exception information: " ) + cc::warn( ex.what() ) ); - throw jsonrpc::JsonRpcException( exceptionToErrorMessage() ); - } catch ( const std::exception& ex ) { - clog( VerbosityError, "IMA" ) - << ( strLogPrefix + " " + cc::fatal( "FATAL:" ) + - cc::error( " Exception while processing " ) + cc::info( "IMA TXN list-all" ) + - cc::error( ", exception information: " ) + cc::warn( ex.what() ) ); - throw jsonrpc::JsonRpcException( ex.what() ); - } catch ( ... ) { - clog( VerbosityError, "IMA" ) - << ( strLogPrefix + " " + cc::fatal( "FATAL:" ) + - cc::error( " Exception while processing " ) + cc::info( "IMA TXN list-all" ) + - cc::error( ", exception information: " ) + cc::warn( "unknown exception" ) ); - throw jsonrpc::JsonRpcException( "unknown exception" ); - } -} - -Json::Value SkaleStats::skale_browseEntireNetwork( const Json::Value& /*request*/ ) { - std::string strLogPrefix = cc::deep_info( "BROWSE/NOW SKALE NETWORK" ); - try { - clog( dev::VerbosityTrace, "snb" ) - << ( strLogPrefix + " " + cc::notice( "SKALE NETWORK BROWSER" ) + - cc::debug( " incoming refreshing(now) call to " ) + - cc::bright( "skale_browseEntireNetwork" ) + cc::debug( "..." 
) ); - clock_t tt = clock(); - skale::network::browser::vec_s_chains_t vec = skale::network::browser::refreshing_do_now(); - tt = clock() - tt; - double lf_time_taken = ( ( double ) tt ) / CLOCKS_PER_SEC; // in seconds - clog( dev::VerbosityTrace, "snb" ) - << ( strLogPrefix + " " + cc::notice( "SKALE NETWORK BROWSER" ) + - cc::debug( " refreshing(now) done, " ) + - cc::notice( skutils::tools::format( "%f", lf_time_taken ) ) + - cc::debug( " second(s) spent" ) ); - nlohmann::json jo = skale::network::browser::to_json( vec ); - clog( dev::VerbosityTrace, "snb" ) - << ( strLogPrefix + " " + cc::notice( "SKALE NETWORK BROWSER" ) + - cc::debug( " refreshing(now) result is: " ) + cc::j( jo ) ); - std::string s = jo.dump(); - Json::Value ret; - Json::Reader().parse( s, ret ); - clog( dev::VerbosityTrace, "snb" ) - << ( strLogPrefix + " " + cc::notice( "SKALE NETWORK BROWSER" ) + - cc::debug( " refreshing(now) result is ready to sent back to client/caller" ) ); - return ret; - } catch ( Exception const& ex ) { - clog( VerbosityError, "IMA" ) - << ( strLogPrefix + " " + cc::fatal( "FATAL:" ) + - cc::error( " Exception while processing " ) + - cc::info( "skale_browseEntireNetwork" ) + - cc::error( ", exception information: " ) + cc::warn( ex.what() ) ); - throw jsonrpc::JsonRpcException( exceptionToErrorMessage() ); - } catch ( const std::exception& ex ) { - clog( VerbosityError, "IMA" ) - << ( strLogPrefix + " " + cc::fatal( "FATAL:" ) + - cc::error( " Exception while processing " ) + - cc::info( "skale_browseEntireNetwork" ) + - cc::error( ", exception information: " ) + cc::warn( ex.what() ) ); - throw jsonrpc::JsonRpcException( ex.what() ); - } catch ( ... 
) { - clog( VerbosityError, "IMA" ) - << ( strLogPrefix + " " + cc::fatal( "FATAL:" ) + - cc::error( " Exception while processing " ) + - cc::info( "skale_browseEntireNetwork" ) + - cc::error( ", exception information: " ) + cc::warn( "unknown exception" ) ); - throw jsonrpc::JsonRpcException( "unknown exception" ); - } -} - -Json::Value SkaleStats::skale_cachedEntireNetwork( const Json::Value& /*request*/ ) { - std::string strLogPrefix = cc::deep_info( "CACHED/FETCH SKALE NETWORK" ); - try { - clog( dev::VerbosityTrace, "snb" ) - << ( strLogPrefix + " " + cc::notice( "SKALE NETWORK BROWSER" ) + - cc::debug( " incoming refreshing(cached) call to " ) + - cc::bright( "skale_cachedEntireNetwork" ) + cc::debug( "..." ) ); - clock_t tt = clock(); - skale::network::browser::vec_s_chains_t vec = skale::network::browser::refreshing_cached(); - tt = clock() - tt; - double lf_time_taken = ( ( double ) tt ) / CLOCKS_PER_SEC; // in seconds - clog( dev::VerbosityTrace, "snb" ) - << ( strLogPrefix + " " + cc::notice( "SKALE NETWORK BROWSER" ) + - cc::debug( " refreshing(cached) done, " ) + - cc::notice( skutils::tools::format( "%f", lf_time_taken ) ) + - cc::debug( " second(s) spent" ) ); - nlohmann::json jo = skale::network::browser::to_json( vec ); - clog( dev::VerbosityTrace, "snb" ) - << ( strLogPrefix + " " + cc::notice( "SKALE NETWORK BROWSER" ) + - cc::debug( " refreshing(cached) result is: " ) + cc::j( jo ) ); - std::string s = jo.dump(); - Json::Value ret; - Json::Reader().parse( s, ret ); - clog( dev::VerbosityTrace, "snb" ) - << ( strLogPrefix + " " + cc::notice( "SKALE NETWORK BROWSER" ) + - cc::debug( - " refreshing(cached) result is ready to sent back to client/caller" ) ); - return ret; - } catch ( Exception const& ex ) { - clog( VerbosityError, "IMA" ) - << ( strLogPrefix + " " + cc::fatal( "FATAL:" ) + - cc::error( " Exception while processing " ) + - cc::info( "skale_cachedEntireNetwork" ) + - cc::error( ", exception information: " ) + cc::warn( ex.what() ) ); 
- throw jsonrpc::JsonRpcException( exceptionToErrorMessage() ); - } catch ( const std::exception& ex ) { - clog( VerbosityError, "IMA" ) - << ( strLogPrefix + " " + cc::fatal( "FATAL:" ) + - cc::error( " Exception while processing " ) + - cc::info( "skale_cachedEntireNetwork" ) + - cc::error( ", exception information: " ) + cc::warn( ex.what() ) ); - throw jsonrpc::JsonRpcException( ex.what() ); - } catch ( ... ) { - clog( VerbosityError, "IMA" ) - << ( strLogPrefix + " " + cc::fatal( "FATAL:" ) + - cc::error( " Exception while processing " ) + - cc::info( "skale_cachedEntireNetwork" ) + - cc::error( ", exception information: " ) + cc::warn( "unknown exception" ) ); - throw jsonrpc::JsonRpcException( "unknown exception" ); - } -} - }; // namespace rpc }; // namespace dev diff --git a/libweb3jsonrpc/SkaleStats.h b/libweb3jsonrpc/SkaleStats.h index c586b9aae..2d2e1e497 100644 --- a/libweb3jsonrpc/SkaleStats.h +++ b/libweb3jsonrpc/SkaleStats.h @@ -71,153 +71,6 @@ class Interface; ////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// -namespace tracking { - -class txn_entry { -public: - dev::u256 hash_; - time_t ts_; // second accuracy used here - txn_entry(); - txn_entry( const dev::u256& hash ); - txn_entry( const txn_entry& other ); - txn_entry( txn_entry&& other ); - ~txn_entry(); - bool operator!() const; - txn_entry& operator=( const txn_entry& other ); - bool operator==( const txn_entry& other ) const; - bool operator!=( const txn_entry& other ) const; - bool operator<( const txn_entry& other ) const; - bool operator<=( const txn_entry& other ) const; - bool operator>( const txn_entry& other ) const; - bool operator>=( const txn_entry& other ) const; - bool operator==( const dev::u256& hash ) const; - bool operator!=( 
const dev::u256& hash ) const; - bool operator<( const dev::u256& hash ) const; - bool operator<=( const dev::u256& hash ) const; - bool operator>( const dev::u256& hash ) const; - bool operator>=( const dev::u256& hash ) const; - bool empty() const; - void clear(); - txn_entry& assign( const txn_entry& other ); - int compare( const dev::u256& hash ) const; - int compare( const txn_entry& other ) const; - void setNowTimeStamp(); - nlohmann::json toJSON() const; - bool fromJSON( const nlohmann::json& jo ); -}; /// class txn_entry - -////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// -////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// - -class txn_pending_tracker_system_impl : public skutils::json_config_file_accessor { -public: - typedef std::list< txn_entry > list_txns_t; - typedef std::set< txn_entry > set_txns_t; - -private: - list_txns_t list_txns_; - set_txns_t set_txns_; - -protected: - const std::string strSgxWalletURL_; - -public: - static std::atomic_size_t g_nMaxPendingTxns; - static std::string g_strDispatchQueueID; - txn_pending_tracker_system_impl( - const std::string& configPath, const std::string& strSgxWalletURL ); - txn_pending_tracker_system_impl( const txn_pending_tracker_system_impl& ) = delete; - txn_pending_tracker_system_impl( txn_pending_tracker_system_impl&& ) = delete; - virtual ~txn_pending_tracker_system_impl(); - txn_pending_tracker_system_impl& operator=( const txn_pending_tracker_system_impl& ) = delete; - txn_pending_tracker_system_impl& operator=( txn_pending_tracker_system_impl&& ) = delete; - // - typedef skutils::multithreading::recursive_mutex_type mutex_type; - typedef std::lock_guard< mutex_type > lock_type; - - std::string url_sgx_wallet() const { return strSgxWalletURL_; } - -private: - mutable mutex_type mtx_; - -public: - 
mutex_type& mtx() const { return mtx_; } - // - bool empty() const; - virtual void clear(); - virtual size_t max_txns() const; - -private: - size_t adjust_limits_impl( bool isEnableBroadcast ); - -public: - size_t adjust_limits( bool isEnableBroadcast ); - bool insert( txn_entry& txe, bool isEnableBroadcast ); - bool insert( dev::u256 hash, bool isEnableBroadcast ); - bool erase( txn_entry& txe, bool isEnableBroadcast ); - bool erase( dev::u256 hash, bool isEnableBroadcast ); - bool find( txn_entry& txe ) const; - bool find( const dev::u256& hash ) const; - void list_all( list_txns_t& lst ) const; - // - virtual void on_txn_insert( const txn_entry& txe, bool isEnableBroadcast ); - virtual void on_txn_erase( const txn_entry& txe, bool isEnableBroadcast ); - - bool broadcast_txn_sign_is_enabled( const std::string& strWalletURL ); - -private: - std::string broadcast_txn_sign_string( const char* strToSign ); - std::string broadcast_txn_compose_string( const char* strActionName, const dev::u256& tx_hash ); - std::string broadcast_txn_sign( const char* strActionName, const dev::u256& tx_hash ); - std::string broadcast_txn_get_ecdsa_public_key( int node_id ); - int broadcast_txn_get_node_id(); - -public: - bool broadcast_txn_verify_signature( const char* strActionName, - const std::string& strBroadcastSignature, int node_id, const dev::u256& tx_hash ); - -public: - virtual void broadcast_txn_insert( const txn_entry& txe ); - virtual void broadcast_txn_erase( const txn_entry& txe ); - -private: - std::atomic_bool isTracking_ = false; - skutils::dispatch::job_id_t tracking_job_id_; - -public: - static std::atomic_size_t g_nTrackingIntervalInSeconds; - size_t tracking_interval_in_seconds() const; - bool is_tracking() const; - void tracking_auto_start_stop(); - void tracking_step(); - void tracking_start(); - void tracking_stop(); - // - bool check_txn_is_mined( const txn_entry& txe ); - bool check_txn_is_mined( const dev::u256& hash ); -}; /// class 
txn_pending_tracker_system_impl - -////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// -////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// - -class txn_pending_tracker_system : public txn_pending_tracker_system_impl { - static std::unique_ptr< txn_pending_tracker_system > g_ptr; - -public: - txn_pending_tracker_system( const std::string& configPath, const std::string& strSgxWalletURL ); - txn_pending_tracker_system( const txn_pending_tracker_system_impl& ) = delete; - txn_pending_tracker_system( txn_pending_tracker_system_impl&& ) = delete; - virtual ~txn_pending_tracker_system(); - static txn_pending_tracker_system& init( - const std::string& configPath, const std::string& strSgxWalletURL ); - static txn_pending_tracker_system& instance(); -}; /// class txn_pending_tracker_system - -}; // namespace tracking - -////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// -////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// - namespace rpc { /** @@ -235,7 +88,7 @@ class SkaleStats : public dev::rpc::SkaleStatsFace, bool isExposeAllDebugInfo_ = false; SkaleStats( const std::string& configPath, eth::Interface& _eth, - const dev::eth::ChainParams& chainParams, bool isDisableZMQ ); + const dev::eth::ChainParams& chainParams ); virtual RPCModules implementedModules() const override { return RPCModules{ RPCModule{ "skaleStats", "1.0" } }; @@ -246,18 +99,6 @@ class SkaleStats : public dev::rpc::SkaleStatsFace, virtual Json::Value skale_stats() override; virtual Json::Value skale_nodesRpcInfo() override; virtual Json::Value skale_imaInfo() override; - virtual Json::Value 
skale_imaVerifyAndSign( const Json::Value& request ) override; - virtual Json::Value skale_imaBSU256( const Json::Value& request ) override; - virtual Json::Value skale_imaBroadcastTxnInsert( const Json::Value& request ) override; - virtual Json::Value skale_imaBroadcastTxnErase( const Json::Value& request ) override; - virtual Json::Value skale_imaTxnInsert( const Json::Value& request ) override; - virtual Json::Value skale_imaTxnErase( const Json::Value& request ) override; - virtual Json::Value skale_imaTxnClear( const Json::Value& request ) override; - virtual Json::Value skale_imaTxnFind( const Json::Value& request ) override; - virtual Json::Value skale_imaTxnListAll( const Json::Value& request ) override; - - virtual Json::Value skale_browseEntireNetwork( const Json::Value& request ) override; - virtual Json::Value skale_cachedEntireNetwork( const Json::Value& request ) override; protected: eth::Interface* client() const { return &m_eth; } diff --git a/libweb3jsonrpc/SkaleStatsFace.h b/libweb3jsonrpc/SkaleStatsFace.h index c9d9be557..6dbdbc575 100644 --- a/libweb3jsonrpc/SkaleStatsFace.h +++ b/libweb3jsonrpc/SkaleStatsFace.h @@ -22,41 +22,6 @@ class SkaleStatsFace : public ServerInterface< SkaleStatsFace > { this->bindAndAddMethod( jsonrpc::Procedure( "skale_imaInfo", jsonrpc::PARAMS_BY_POSITION, jsonrpc::JSON_STRING, NULL ), &dev::rpc::SkaleStatsFace::skale_imaInfoI ); - this->bindAndAddMethod( jsonrpc::Procedure( "skale_imaVerifyAndSign", - jsonrpc::PARAMS_BY_POSITION, jsonrpc::JSON_STRING, NULL ), - &dev::rpc::SkaleStatsFace::skale_imaVerifyAndSignI ); - this->bindAndAddMethod( jsonrpc::Procedure( "skale_imaBSU256", jsonrpc::PARAMS_BY_POSITION, - jsonrpc::JSON_STRING, NULL ), - &dev::rpc::SkaleStatsFace::skale_imaBSU256I ); - this->bindAndAddMethod( jsonrpc::Procedure( "skale_imaBroadcastTxnInsert", - jsonrpc::PARAMS_BY_POSITION, jsonrpc::JSON_STRING, NULL ), - &dev::rpc::SkaleStatsFace::skale_imaBroadcastTxnInsertI ); - this->bindAndAddMethod( 
jsonrpc::Procedure( "skale_imaBroadcastTxnErase", - jsonrpc::PARAMS_BY_POSITION, jsonrpc::JSON_STRING, NULL ), - &dev::rpc::SkaleStatsFace::skale_imaBroadcastTxnEraseI ); - this->bindAndAddMethod( jsonrpc::Procedure( "skale_imaTxnInsert", - jsonrpc::PARAMS_BY_POSITION, jsonrpc::JSON_STRING, NULL ), - &dev::rpc::SkaleStatsFace::skale_imaTxnInsertI ); - this->bindAndAddMethod( jsonrpc::Procedure( "skale_imaTxnErase", - jsonrpc::PARAMS_BY_POSITION, jsonrpc::JSON_STRING, NULL ), - &dev::rpc::SkaleStatsFace::skale_imaTxnEraseI ); - this->bindAndAddMethod( jsonrpc::Procedure( "skale_imaTxnClear", - jsonrpc::PARAMS_BY_POSITION, jsonrpc::JSON_STRING, NULL ), - &dev::rpc::SkaleStatsFace::skale_imaTxnClearI ); - this->bindAndAddMethod( jsonrpc::Procedure( "skale_imaTxnFind", jsonrpc::PARAMS_BY_POSITION, - jsonrpc::JSON_STRING, NULL ), - &dev::rpc::SkaleStatsFace::skale_imaTxnFindI ); - this->bindAndAddMethod( jsonrpc::Procedure( "skale_imaTxnListAll", - jsonrpc::PARAMS_BY_POSITION, jsonrpc::JSON_STRING, NULL ), - &dev::rpc::SkaleStatsFace::skale_imaTxnListAllI ); - - - this->bindAndAddMethod( jsonrpc::Procedure( "skale_browseEntireNetwork", - jsonrpc::PARAMS_BY_POSITION, jsonrpc::JSON_STRING, NULL ), - &dev::rpc::SkaleStatsFace::skale_browseEntireNetworkI ); - this->bindAndAddMethod( jsonrpc::Procedure( "skale_cachedEntireNetwork", - jsonrpc::PARAMS_BY_POSITION, jsonrpc::JSON_STRING, NULL ), - &dev::rpc::SkaleStatsFace::skale_cachedEntireNetworkI ); } inline virtual void skale_statsI( const Json::Value& request, Json::Value& response ) { @@ -71,62 +36,11 @@ class SkaleStatsFace : public ServerInterface< SkaleStatsFace > { ( void ) request; response = this->skale_imaInfo(); } - inline virtual void skale_imaVerifyAndSignI( - const Json::Value& request, Json::Value& response ) { - response = this->skale_imaVerifyAndSign( request ); - } - inline virtual void skale_imaBSU256I( const Json::Value& request, Json::Value& response ) { - response = this->skale_imaBSU256( request ); - } 
- inline virtual void skale_imaBroadcastTxnInsertI( - const Json::Value& request, Json::Value& response ) { - response = this->skale_imaBroadcastTxnInsert( request ); - } - inline virtual void skale_imaBroadcastTxnEraseI( - const Json::Value& request, Json::Value& response ) { - response = this->skale_imaBroadcastTxnErase( request ); - } - inline virtual void skale_imaTxnInsertI( const Json::Value& request, Json::Value& response ) { - response = this->skale_imaTxnInsert( request ); - } - inline virtual void skale_imaTxnEraseI( const Json::Value& request, Json::Value& response ) { - response = this->skale_imaTxnErase( request ); - } - inline virtual void skale_imaTxnClearI( const Json::Value& request, Json::Value& response ) { - response = this->skale_imaTxnClear( request ); - } - inline virtual void skale_imaTxnFindI( const Json::Value& request, Json::Value& response ) { - response = this->skale_imaTxnFind( request ); - } - inline virtual void skale_imaTxnListAllI( const Json::Value& request, Json::Value& response ) { - response = this->skale_imaTxnListAll( request ); - } - - inline virtual void skale_browseEntireNetworkI( - const Json::Value& request, Json::Value& response ) { - response = this->skale_browseEntireNetwork( request ); - } - inline virtual void skale_cachedEntireNetworkI( - const Json::Value& request, Json::Value& response ) { - response = this->skale_cachedEntireNetwork( request ); - } - virtual Json::Value skale_stats() = 0; virtual Json::Value skale_nodesRpcInfo() = 0; virtual Json::Value skale_imaInfo() = 0; - virtual Json::Value skale_imaVerifyAndSign( const Json::Value& request ) = 0; - virtual Json::Value skale_imaBSU256( const Json::Value& request ) = 0; - virtual Json::Value skale_imaBroadcastTxnInsert( const Json::Value& request ) = 0; - virtual Json::Value skale_imaBroadcastTxnErase( const Json::Value& request ) = 0; - virtual Json::Value skale_imaTxnInsert( const Json::Value& request ) = 0; - virtual Json::Value skale_imaTxnErase( const 
Json::Value& request ) = 0; - virtual Json::Value skale_imaTxnClear( const Json::Value& request ) = 0; - virtual Json::Value skale_imaTxnFind( const Json::Value& request ) = 0; - virtual Json::Value skale_imaTxnListAll( const Json::Value& request ) = 0; - virtual Json::Value skale_browseEntireNetwork( const Json::Value& request ) = 0; - virtual Json::Value skale_cachedEntireNetwork( const Json::Value& request ) = 0; }; /// class SkaleStatsFace } // namespace rpc diff --git a/libweb3jsonrpc/skaleStats.json b/libweb3jsonrpc/skaleStats.json index cebb2e43e..99cb5d889 100644 --- a/libweb3jsonrpc/skaleStats.json +++ b/libweb3jsonrpc/skaleStats.json @@ -3,15 +3,6 @@ { "name": "skale_nodesRpcInfo", "params": [], "order": [], "returns": {}}, { "name": "skale_browseEntireNetwork", "params": [], "order": [], "returns": {}}, { "name": "skale_cachedEntireNetwork", "params": [], "order": [], "returns": {}}, -{ "name": "skale_imaInfo", "params": [], "order": [], "returns": {}}, -{ "name": "skale_imaVerifyAndSign", "params": [], "order": [], "returns": {}}, -{ "name": "skale_imaBSU256", "params": [], "order": [], "returns": {}}, -{ "name": "skale_imaBroadcastTxnInsert", "params": [], "order": [], "returns": {}}, -{ "name": "skale_imaBroadcastTxnErase", "params": [], "order": [], "returns": {}}, -{ "name": "skale_imaTxnInsert", "params": [], "order": [], "returns": {}}, -{ "name": "skale_imaTxnErase", "params": [], "order": [], "returns": {}}, -{ "name": "skale_imaTxnClear", "params": [], "order": [], "returns": {}}, -{ "name": "skale_imaTxnFind", "params": [], "order": [], "returns": {}}, -{ "name": "skale_imaTxnListAll", "params": [], "order": [], "returns": {}} +{ "name": "skale_imaInfo", "params": [], "order": [], "returns": {}} ] diff --git a/skaled/CMakeLists.txt b/skaled/CMakeLists.txt index 11209cad8..593119d59 100644 --- a/skaled/CMakeLists.txt +++ b/skaled/CMakeLists.txt @@ -25,8 +25,8 @@ target_link_libraries( devcore Boost::program_options skutils - 
"${DEPS_INSTALL_ROOT}/lib/liblzma.a" "${DEPS_INSTALL_ROOT}/lib/libunwind.a" + "${DEPS_INSTALL_ROOT}/lib/liblzma.a" pthread idn2 batched-io diff --git a/skaled/main.cpp b/skaled/main.cpp index 3315ca09b..8118b017e 100644 --- a/skaled/main.cpp +++ b/skaled/main.cpp @@ -57,6 +57,7 @@ #include #include +#include #include #include @@ -70,7 +71,6 @@ #include #include #include -#include #include #include #include @@ -251,7 +251,6 @@ void downloadSnapshot( unsigned block_number, std::shared_ptr< SnapshotManager > try { clog( VerbosityInfo, "downloadSnapshot" ) << cc::normal( "Will download snapshot from " ) << cc::u( strURLWeb3 ) << std::endl; - ; try { bool isBinaryDownload = true; @@ -311,10 +310,6 @@ void downloadSnapshot( unsigned block_number, std::shared_ptr< SnapshotManager > } //// HACK END //// - snapshotManager->restoreSnapshot( block_number ); - std::cout << cc::success( "Snapshot restore success for block " ) - << cc::u( to_string( block_number ) ) << std::endl; - } catch ( ... ) { std::throw_with_nested( std::runtime_error( cc::fatal( "FATAL:" ) + " " + @@ -324,6 +319,215 @@ void downloadSnapshot( unsigned block_number, std::shared_ptr< SnapshotManager > fs::remove( saveTo ); } +std::array< std::string, 4 > getBLSPublicKeyToVerifySnapshot( const ChainParams& chainParams ) { + std::array< std::string, 4 > arrayCommonPublicKey; + bool isRotationtrigger = true; + if ( chainParams.sChain.nodeGroups.size() > 1 ) { + if ( ( uint64_t ) time( NULL ) >= + chainParams.sChain.nodeGroups[chainParams.sChain.nodeGroups.size() - 2].finishTs ) { + isRotationtrigger = false; + } + } else { + isRotationtrigger = false; + } + if ( isRotationtrigger ) { + arrayCommonPublicKey = + chainParams.sChain.nodeGroups[chainParams.sChain.nodeGroups.size() - 2].blsPublicKey; + } else { + arrayCommonPublicKey = chainParams.sChain.nodeGroups.back().blsPublicKey; + } + + return arrayCommonPublicKey; +} + +unsigned getBlockToDownladSnapshot( const dev::eth::sChainNode& nodeInfo ) { + 
std::string blockNumber_url = std::string( "http://" ) + std::string( nodeInfo.ip ) + + std::string( ":" ) + + ( nodeInfo.port + 3 ).convert_to< std::string >(); + + clog( VerbosityInfo, "getBlockToDownladSnapshot" ) + << cc::notice( "Asking node " ) << cc::p( nodeInfo.sChainIndex.str() ) << ' ' + << cc::notice( blockNumber_url ) << cc::notice( " for latest snapshot block number." ); + + unsigned blockNumber = getLatestSnapshotBlockNumber( blockNumber_url ); + clog( VerbosityInfo, "getBlockToDownladSnapshot" ) + << cc::notice( "Latest Snapshot Block Number" ) + cc::debug( " is: " ) + << cc::p( std::to_string( blockNumber ) ) << " (from " << blockNumber_url << ")"; + + return blockNumber; +} + +std::pair< std::vector< std::string >, std::pair< dev::h256, libff::alt_bn128_G1 > > +voteForSnapshotHash( + std::unique_ptr< SnapshotHashAgent >& snapshotHashAgent, unsigned blockNumber ) { + std::pair< dev::h256, libff::alt_bn128_G1 > votedHash; + std::vector< std::string > listUrlsToDownload; + try { + listUrlsToDownload = snapshotHashAgent->getNodesToDownloadSnapshotFrom( blockNumber ); + clog( VerbosityInfo, "voteForSnapshotHash" ) + << cc::notice( "Got urls to download snapshot from " ) + << cc::p( std::to_string( listUrlsToDownload.size() ) ) << cc::notice( " nodes " ); + + if ( listUrlsToDownload.size() == 0 ) + return { listUrlsToDownload, votedHash }; + + votedHash = snapshotHashAgent->getVotedHash(); + + return { listUrlsToDownload, votedHash }; + } catch ( std::exception& ex ) { + std::throw_with_nested( std::runtime_error( + cc::error( "Exception while collecting snapshot hash from other skaleds " ) ) ); + } +} + +bool checkLocalSnapshot( std::shared_ptr< SnapshotManager >& snapshotManager, unsigned blockNumber, + const dev::h256& votedHash ) { + try { + if ( snapshotManager->isSnapshotHashPresent( blockNumber ) ) { + clog( VerbosityInfo, "checkLocalSnapshot" ) + << "Snapshot for block " << blockNumber << " already present locally"; + + dev::h256 calculated_hash 
= snapshotManager->getSnapshotHash( blockNumber ); + + if ( calculated_hash == votedHash ) { + clog( VerbosityInfo, "checkLocalSnapshot" ) << cc::notice( + "Will delete all snapshots except" + std::to_string( blockNumber ) ); + snapshotManager->cleanupButKeepSnapshot( blockNumber ); + snapshotManager->restoreSnapshot( blockNumber ); + std::cout << cc::success( "Snapshot restore success for block " ) + << cc::u( to_string( blockNumber ) ) << std::endl; + return true; + } else { + clog( VerbosityWarning, "checkLocalSnapshot" ) + << cc::warn( "Snapshot is present locally but its hash is different" ); + } + } // if present + } catch ( const std::exception& ex ) { + // usually snapshot absent exception + clog( VerbosityInfo, "checkLocalSnapshot" ) << dev::nested_exception_what( ex ); + } + + return false; +} + +bool tryDownloadSnapshot( std::shared_ptr< SnapshotManager >& snapshotManager, + const ChainParams& chainParams, const std::vector< std::string >& listUrlsToDownload, + const std::pair< dev::h256, libff::alt_bn128_G1 >& votedHash, unsigned blockNumber, + bool isRegularSnapshot ) { + clog( VerbosityInfo, "tryDownloadSnapshot" ) + << cc::notice( "Will cleanup data dir and snapshots dir if needed" ); + if ( isRegularSnapshot ) + snapshotManager->cleanup(); + + bool successfullDownload = false; + + size_t n_found = listUrlsToDownload.size(); + + size_t shift = rand() % n_found; + + for ( size_t cnt = 0; cnt < n_found && !successfullDownload; ++cnt ) + try { + size_t i = ( shift + cnt ) % n_found; + + std::string urlToDownloadSnapshot; + urlToDownloadSnapshot = listUrlsToDownload[i]; + + downloadSnapshot( blockNumber, snapshotManager, urlToDownloadSnapshot, chainParams ); + + try { + snapshotManager->computeSnapshotHash( blockNumber, true ); + } catch ( const std::exception& ) { + std::throw_with_nested( + std::runtime_error( cc::fatal( "FATAL:" ) + " " + + cc::error( "Exception while computing snapshot hash " ) ) ); + } + + dev::h256 calculated_hash = 
snapshotManager->getSnapshotHash( blockNumber ); + + if ( calculated_hash == votedHash.first ) { + successfullDownload = true; + if ( isRegularSnapshot ) { + snapshotManager->restoreSnapshot( blockNumber ); + std::cout << cc::success( "Snapshot restore success for block " ) + << cc::u( to_string( blockNumber ) ) << std::endl; + } + return successfullDownload; + } else { + clog( VerbosityWarning, "tryDownloadSnapshot" ) + << cc::notice( + "Downloaded snapshot with incorrect hash! Incoming " + "hash " ) + << cc::notice( votedHash.first.hex() ) + << cc::notice( " is not equal to calculated hash " ) + << cc::notice( calculated_hash.hex() ) << cc::notice( "Will try again" ); + if ( isRegularSnapshot ) + snapshotManager->cleanup(); + else + snapshotManager->removeSnapshot( 0 ); + } + } catch ( const std::exception& ex ) { + // just retry + clog( VerbosityWarning, "tryDownloadSnapshot" ) << dev::nested_exception_what( ex ); + } // for download url + return false; +} + +void downloadAndProccessSnapshot( std::shared_ptr< SnapshotManager >& snapshotManager, + const ChainParams& chainParams, bool requireSnapshotMajority, + const std::string& ipToDownloadSnapshotFrom, bool isRegularSnapshot ) { + std::array< std::string, 4 > arrayCommonPublicKey = + getBLSPublicKeyToVerifySnapshot( chainParams ); + + bool successfullDownload = false; + + for ( size_t idx = 0; idx < chainParams.sChain.nodes.size() && !successfullDownload; ++idx ) + try { + if ( !requireSnapshotMajority && + std::string( chainParams.sChain.nodes[idx].ip ) != ipToDownloadSnapshotFrom ) + continue; + + if ( chainParams.nodeInfo.id == chainParams.sChain.nodes[idx].id ) + continue; + + unsigned blockNumber = 0; + if ( isRegularSnapshot ) + blockNumber = getBlockToDownladSnapshot( chainParams.sChain.nodes[idx] ); + + std::unique_ptr< SnapshotHashAgent > snapshotHashAgent( new SnapshotHashAgent( + chainParams, arrayCommonPublicKey, ipToDownloadSnapshotFrom ) ); + + libff::init_alt_bn128_params(); + std::pair< 
dev::h256, libff::alt_bn128_G1 > votedHash; + std::vector< std::string > listUrlsToDownload; + std::tie( listUrlsToDownload, votedHash ) = + voteForSnapshotHash( snapshotHashAgent, blockNumber ); + + if ( listUrlsToDownload.empty() ) { + if ( !isRegularSnapshot ) + return; + clog( VerbosityWarning, "downloadAndProccessSnapshot" ) + << cc::warn( "No nodes to download from - will skip " + std::to_string( idx ) ); + continue; + } + + successfullDownload = + checkLocalSnapshot( snapshotManager, blockNumber, votedHash.first ); + if ( successfullDownload ) + break; + + successfullDownload = tryDownloadSnapshot( snapshotManager, chainParams, + listUrlsToDownload, votedHash, blockNumber, isRegularSnapshot ); + } catch ( std::exception& ex ) { + clog( VerbosityWarning, "downloadAndProccessSnapshot" ) + << cc::warn( "Exception while trying to set up snapshot: " ) + << cc::warn( dev::nested_exception_what( ex ) ); + } // for blockNumber_url + + if ( !successfullDownload ) { + throw std::runtime_error( "FATAL: tried to download snapshot from everywhere!" ); + } +} + } // namespace static const std::list< std::pair< std::string, std::string > > @@ -342,133 +546,6 @@ get_machine_ip_addresses_6() { // first-interface name, second-address static std::unique_ptr< Client > g_client; unique_ptr< ModularServer<> > g_jsonrpcIpcServer; -static volatile bool g_bStopActionsStarted = false; -static volatile bool g_bStopActionsComplete = false; - -static void stat_handle_stop_actions() { - if ( g_bStopActionsStarted ) - return; - g_bStopActionsStarted = true; - std::thread( [&]() { - skale::network::browser::refreshing_stop(); - /* - if ( g_jsonrpcIpcServer.get() ) { - std::cerr << ( "\n" + cc::fatal( "SIGNAL-HANDLER:" ) + " " + - cc::error( "Will stop RPC server now..." 
) + "\n\n" ); - g_jsonrpcIpcServer->StopListening(); - g_jsonrpcIpcServer.reset( nullptr ); - std::cerr << ( "\n" + cc::fatal( "SIGNAL-HANDLER:" ) + " " + - cc::error( "Did stopped RPC server" ) + "\n\n" ); - } - */ - if ( g_client ) { - std::cerr << ( "\n" + cc::fatal( "SIGNAL-HANDLER:" ) + " " + - cc::error( "Will stop client now..." ) + "\n\n" ); - g_client->stopWorking(); - std::cerr << ( "\n" + cc::fatal( "SIGNAL-HANDLER:" ) + " " + - cc::error( "Did stopped client" ) + "\n\n" ); - } - g_bStopActionsComplete = true; - } ).detach(); -} - -static void stat_wait_stop_actions_complete() { - if ( g_bStopActionsComplete ) - return; - std::cerr << ( "\n" + cc::fatal( "SIGNAL-HANDLER:" ) + " " + - cc::error( "Will wait for stop actions complete..." ) + "\n\n" ); - while ( !g_bStopActionsComplete ) - std::this_thread::sleep_for( std::chrono::milliseconds( 100 ) ); - std::cerr << ( "\n" + cc::fatal( "SIGNAL-HANDLER:" ) + " " + - cc::error( "Done waiting for stop actions] complete" ) + "\n\n" ); -} - -static void stat_init_common_signal_handling() { - skutils::signal::init_common_signal_handling( []( int nSignalNo ) -> void { - std::string strMessagePrefix = skutils::signal::g_bStop ? - cc::error( "\nStop flag was already raised on. " ) + - cc::fatal( "WILL FORCE TERMINATE." ) + - cc::error( " Caught (second) signal. " ) : - cc::error( "\nCaught (first) signal. 
" ); - std::cerr << strMessagePrefix << cc::error( skutils::signal::signal2str( nSignalNo ) ) - << "\n\n"; - std::cerr.flush(); - - switch ( nSignalNo ) { - case SIGINT: - case SIGTERM: - case SIGHUP: - // exit normally - // just fall through - break; - - case SIGSTOP: - case SIGTSTP: - case SIGPIPE: - // ignore - return; - break; - - case SIGQUIT: - // exit immediately - _exit( ExitHandler::ec_termninated_by_signal ); - break; - - default: - // abort signals - std::cout << "\n" << skutils::signal::generate_stack_trace() << "\n"; - std::cout.flush(); - - break; - } // switch - - stat_handle_stop_actions(); - - // try to exit nicely - then abort - if ( !skutils::signal::g_bStop ) { - static volatile bool g_bSelfKillStarted = false; - if ( !g_bSelfKillStarted ) { - g_bSelfKillStarted = true; - std::thread( [nSignalNo]() { - std::cerr << ( "\n" + cc::fatal( "SELF-KILL:" ) + " " + - cc::error( "Will sleep " ) + - cc::size10( ExitHandler::KILL_TIMEOUT ) + - cc::error( " seconds before force exit..." ) + "\n\n" ); - std::cerr.flush(); - sleep( ExitHandler::KILL_TIMEOUT ); - std::cerr << ( "\n" + cc::fatal( "SELF-KILL:" ) + " " + - cc::error( "Will force exit after sleeping " ) + - cc::size10( ExitHandler::KILL_TIMEOUT ) + - cc::error( " second(s)" ) + "\n\n" ); - std::cerr.flush(); - - // TODO deduplicate this with main() before return - ExitHandler::exit_code_t ec = ExitHandler::requestedExitCode(); - if ( ec == ExitHandler::ec_success ) { - if ( nSignalNo != SIGINT && nSignalNo != SIGTERM ) - ec = ExitHandler::ec_failure; - } - - _exit( ec ); - } ).detach(); - } // if( ! g_bSelfKillStarted ) - } // if ( !skutils::signal::g_bStop ) - - // nice exit here: - - if ( skutils::signal::g_bStop ) { - std::cerr << ( "\n" + cc::fatal( "SIGNAL-HANDLER:" ) + " " + - cc::error( "Will force exit now..." 
) + "\n\n" ); - _exit( 13 ); - } - - skutils::signal::g_bStop = true; - skutils::signal::g_nStopSignal = nSignalNo; - - dev::ExitHandler::exitHandler( nSignalNo ); - } ); -} - int main( int argc, char** argv ) try { cc::_on_ = false; cc::_max_value_size_ = 2048; @@ -476,7 +553,7 @@ int main( int argc, char** argv ) try { BlockHeader::useTimestampHack = false; srand( time( nullptr ) ); setCLocale(); - stat_init_common_signal_handling(); // ensure initialized + skutils::signal::init_common_signal_handling( ExitHandler::exitHandler ); bool isExposeAllDebugInfo = false; // Init secp256k1 context by calling one of the functions. @@ -733,14 +810,6 @@ int main( int argc, char** argv ) try { #endif addClientOption( "sgx-url", po::value< string >()->value_name( "" ), "SGX server url" ); - addClientOption( "sgx-url-no-zmq", "Disable automatic use of ZMQ protocol for SGX\n" ); - - addClientOption( "skale-network-browser-verbose", - "Turn on very detailed logging in SKALE NETWORK BROWSER\n" ); - addClientOption( "skale-network-browser-refresh", - po::value< size_t >()->value_name( "" ), - "Refresh time(in seconds) which SKALE NETWORK BROWSER will re-load all S-Chain " - "descriptions from Skale Manager" ); // skale - snapshot download command addClientOption( "download-snapshot", po::value< string >()->value_name( "" ), @@ -956,9 +1025,7 @@ int main( int argc, char** argv ) try { } std::cout << cc::bright( "skaled " ) << cc::sunny( Version ) << "\n" - << cc::bright( "client " ) << clientVersionColorized() << "\n" - << cc::debug( "Recent build intent is " ) - << cc::info( "5029, SKALE NETWORK BROWSER improvements" ) << "\n"; + << cc::bright( "client " ) << clientVersionColorized() << "\n"; std::cout.flush(); version(); @@ -1034,7 +1101,6 @@ int main( int argc, char** argv ) try { std::shared_ptr< StatusAndControl > statusAndControl = std::make_shared< StatusAndControlFile >( boost::filesystem::path( configPath ).remove_filename() ); - ExitHandler::statusAndControl = 
statusAndControl; // for now, leave previous values in file (for case of crash) if ( vm.count( "main-net-url" ) ) { @@ -1343,6 +1409,8 @@ int main( int argc, char** argv ) try { unsigned c_transactionQueueSize = 100000; unsigned c_futureTransactionQueueSize = 16000; + unsigned c_transactionQueueSizeBytes = 12322916; + unsigned c_futureTransactionQueueSizeBytes = 24645833; if ( chainConfigParsed ) { try { @@ -1388,6 +1456,22 @@ int main( int argc, char** argv ) try { } catch ( ... ) { } + try { + if ( joConfig["skaleConfig"]["nodeInfo"].count( "transactionQueueLimitBytes" ) ) + c_transactionQueueSizeBytes = + joConfig["skaleConfig"]["nodeInfo"]["transactionQueueLimitBytes"] + .get< unsigned >(); + } catch ( ... ) { + } + + try { + if ( joConfig["skaleConfig"]["nodeInfo"].count( "futureTransactionQueueLimitBytes" ) ) + c_futureTransactionQueueSizeBytes = + joConfig["skaleConfig"]["nodeInfo"]["futureTransactionQueueLimitBytes"] + .get< unsigned >(); + } catch ( ... ) { + } + try { if ( joConfig["skaleConfig"]["nodeInfo"].count( "maxOpenLeveldbFiles" ) ) dev::db::c_maxOpenLeveldbFiles = @@ -1478,18 +1562,6 @@ int main( int argc, char** argv ) try { strURL = u.str(); chainParams.nodeInfo.sgxServerUrl = strURL; } - bool isDisableZMQ = false; - if ( vm.count( "sgx-url-no-zmq" ) ) { - isDisableZMQ = true; - } - - if ( vm.count( "skale-network-browser-verbose" ) ) { - skale::network::browser::g_bVerboseLogging = true; - } - if ( vm.count( "skale-network-browser-refresh" ) ) { - skale::network::browser::g_nRefreshIntervalInSeconds = - vm["skale-network-browser-refresh"].as< size_t >(); - } std::shared_ptr< SharedSpace > sharedSpace; if ( vm.count( "shared-space-path" ) ) { @@ -1519,7 +1591,7 @@ int main( int argc, char** argv ) try { // auto mostRecentBlocksDBPath = (getDataDir() / ( "blocks_" + chainParams.nodeInfo.id.str() // + ".db" )) / "1.db"; - snapshotManager.reset( new SnapshotManager( getDataDir(), + snapshotManager.reset( new SnapshotManager( chainParams, 
getDataDir(), { BlockChain::getChainDirName( chainParams ), "filestorage", "prices_" + chainParams.nodeInfo.id.str() + ".db", "blocks_" + chainParams.nodeInfo.id.str() + ".db"/*, @@ -1527,10 +1599,14 @@ int main( int argc, char** argv ) try { sharedSpace ? sharedSpace->getPath() : "" ) ); } - if ( chainParams.nodeInfo.syncNode && !chainParams.nodeInfo.syncFromCatchup ) { + bool downloadGenesisForSyncNode = false; + if ( chainParams.nodeInfo.syncNode ) { auto bc = BlockChain( chainParams, getDataDir() ); if ( bc.number() == 0 ) { downloadSnapshotFlag = true; + if ( chainParams.nodeInfo.syncFromCatchup ) { + downloadGenesisForSyncNode = true; + } } } @@ -1551,167 +1627,39 @@ int main( int argc, char** argv ) try { if ( sharedSpace ) sharedSpace_lock.reset( new std::lock_guard< SharedSpace >( *sharedSpace ) ); - std::array< std::string, 4 > arrayCommonPublicKey; - bool isRotationtrigger = true; - if ( chainParams.sChain.nodeGroups.size() > 1 ) { - if ( ( uint64_t ) time( NULL ) >= - chainParams.sChain.nodeGroups[chainParams.sChain.nodeGroups.size() - 2] - .finishTs ) { - isRotationtrigger = false; + try { + if ( !downloadGenesisForSyncNode ) + downloadAndProccessSnapshot( snapshotManager, chainParams, requireSnapshotMajority, + ipToDownloadSnapshotFrom, true ); + else { + try { + downloadAndProccessSnapshot( snapshotManager, chainParams, + requireSnapshotMajority, ipToDownloadSnapshotFrom, false ); + snapshotManager->restoreSnapshot( 0 ); + } catch ( SnapshotManager::SnapshotAbsent& ) { + clog( VerbosityWarning, "main" ) + << cc::warn( "Snapshot for 0 block is not found" ); + } } - } else { - isRotationtrigger = false; - } - if ( isRotationtrigger ) { - arrayCommonPublicKey = - chainParams.sChain.nodeGroups[chainParams.sChain.nodeGroups.size() - 2] - .blsPublicKey; - } else { - arrayCommonPublicKey = chainParams.sChain.nodeGroups.back().blsPublicKey; - } - - bool successfullDownload = false; - for ( size_t idx = 0; idx < chainParams.sChain.nodes.size() && 
!successfullDownload; ++idx ) + // if we dont have 0 snapshot yet try { - if ( !requireSnapshotMajority && - std::string( chainParams.sChain.nodes[idx].ip ) != ipToDownloadSnapshotFrom ) - continue; - - if ( chainParams.nodeInfo.id == chainParams.sChain.nodes[idx].id ) - continue; - - std::string blockNumber_url = - std::string( "http://" ) + std::string( chainParams.sChain.nodes[idx].ip ) + - std::string( ":" ) + - ( chainParams.sChain.nodes[idx].port + 3 ).convert_to< std::string >(); - + snapshotManager->isSnapshotHashPresent( 0 ); + } catch ( SnapshotManager::SnapshotAbsent& ex ) { + // sleep before send skale_getSnapshot again - will receive error clog( VerbosityInfo, "main" ) - << cc::notice( "Asking node " ) << cc::p( std::to_string( idx ) ) << ' ' - << cc::notice( blockNumber_url ) - << cc::notice( " for latest snapshot block number." ); - - unsigned blockNumber = getLatestSnapshotBlockNumber( blockNumber_url ); - clog( VerbosityInfo, "main" ) - << cc::notice( "Latest Snapshot Block Number" ) + cc::debug( " is: " ) - << cc::p( std::to_string( blockNumber ) ) << " (from " << blockNumber_url - << ")"; - - SnapshotHashAgent snapshotHashAgent( - chainParams, arrayCommonPublicKey, ipToDownloadSnapshotFrom ); + << cc::warn( "Will sleep for 60 seconds before downloading 0 snapshot" ); + sleep( 60 ); - libff::init_alt_bn128_params(); - std::pair< dev::h256, libff::alt_bn128_G1 > voted_hash; - std::vector< std::string > list_urls_to_download; - try { - list_urls_to_download = - snapshotHashAgent.getNodesToDownloadSnapshotFrom( blockNumber ); - clog( VerbosityInfo, "main" ) - << cc::notice( "Got urls to download snapshot from " ) - << cc::p( std::to_string( list_urls_to_download.size() ) ) - << cc::notice( " nodes " ); - - if ( list_urls_to_download.size() == 0 ) { - clog( VerbosityWarning, "main" ) << cc::warn( - "No nodes to download from - will skip " + blockNumber_url ); - continue; - } - - if ( blockNumber == 0 ) { - successfullDownload = true; - break; - } else 
- voted_hash = snapshotHashAgent.getVotedHash(); - - } catch ( std::exception& ex ) { - std::throw_with_nested( std::runtime_error( cc::error( - "Exception while collecting snapshot hash from other skaleds " ) ) ); - } - - try { - if ( snapshotManager->isSnapshotHashPresent( blockNumber ) ) { - clog( VerbosityInfo, "main" ) - << "Snapshot for block " << blockNumber << " already present locally"; - - dev::h256 calculated_hash; - calculated_hash = snapshotManager->getSnapshotHash( blockNumber ); - - if ( calculated_hash == voted_hash.first ) { - clog( VerbosityInfo, "main" ) - << cc::notice( "Will delete all snapshots except" + - std::to_string( blockNumber ) ); - snapshotManager->cleanupButKeepSnapshot( blockNumber ); - clog( VerbosityInfo, "main" ) - << cc::notice( "Will delete all snapshots except" + - std::to_string( blockNumber ) ); - snapshotManager->restoreSnapshot( blockNumber ); - successfullDownload = true; - break; - } else { - clog( VerbosityWarning, "main" ) << cc::warn( - "Snapshot is present locally but its hash is different" ); - } - } // if present - } catch ( const std::exception& ex ) { - // usually snapshot absent exception - clog( VerbosityInfo, "main" ) << dev::nested_exception_what( ex ); - } + downloadAndProccessSnapshot( snapshotManager, chainParams, requireSnapshotMajority, + ipToDownloadSnapshotFrom, false ); + } - clog( VerbosityInfo, "main" ) - << cc::notice( "Will cleanup data dir and snapshots dir" ); - snapshotManager->cleanup(); - - size_t n_found = list_urls_to_download.size(); - - size_t shift = rand() % n_found; - - for ( size_t cnt = 0; cnt < n_found && !successfullDownload; ++cnt ) - try { - size_t i = ( shift + cnt ) % n_found; - - std::string urlToDownloadSnapshot; - urlToDownloadSnapshot = list_urls_to_download[i]; - - downloadSnapshot( - blockNumber, snapshotManager, urlToDownloadSnapshot, chainParams ); - - try { - snapshotManager->computeSnapshotHash( blockNumber, true ); - } catch ( const std::exception& ) { - 
std::throw_with_nested( std::runtime_error( - cc::fatal( "FATAL:" ) + " " + - cc::error( "Exception while computing snapshot hash " ) ) ); - } - - dev::h256 calculated_hash = snapshotManager->getSnapshotHash( blockNumber ); - - if ( calculated_hash == voted_hash.first ) - successfullDownload = true; - else { - clog( VerbosityWarning, "main" ) - << cc::notice( - "Downloaded snapshot with incorrect hash! Incoming " - "hash " ) - << cc::notice( voted_hash.first.hex() ) - << cc::notice( " is not equal to calculated hash " ) - << cc::notice( calculated_hash.hex() ) - << cc::notice( "Will try again" ); - snapshotManager->cleanup(); - } - } catch ( const std::exception& ex ) { - // just retry - clog( VerbosityWarning, "main" ) << dev::nested_exception_what( ex ); - } // for download url - - } catch ( std::exception& ex ) { - clog( VerbosityWarning, "main" ) - << cc::warn( "Exception while trying to set up snapshot: " ) - << cc::warn( dev::nested_exception_what( ex ) ); - } // for blockNumber_url - - if ( !successfullDownload ) { - throw std::runtime_error( "FATAL: tried to download snapshot from everywhere!" ); + } catch ( std::exception& ) { + std::throw_with_nested( std::runtime_error( + cc::error( " Fatal error in downloadAndProccessSnapshot! 
Will exit " ) ) ); } + } // if --download-snapshot statusAndControl->setSubsystemRunning( StatusAndControl::SnapshotDownloader, false ); @@ -1807,14 +1755,14 @@ int main( int argc, char** argv ) try { g_client.reset( new eth::EthashClient( chainParams, ( int ) chainParams.networkID, shared_ptr< GasPricer >(), snapshotManager, instanceMonitor, getDataDir(), withExisting, - TransactionQueue::Limits{ - c_transactionQueueSize, c_futureTransactionQueueSize } ) ); + TransactionQueue::Limits{ c_transactionQueueSize, c_futureTransactionQueueSize, + c_transactionQueueSizeBytes, c_futureTransactionQueueSizeBytes } ) ); } else if ( chainParams.sealEngineName == NoProof::name() ) { g_client.reset( new eth::Client( chainParams, ( int ) chainParams.networkID, shared_ptr< GasPricer >(), snapshotManager, instanceMonitor, getDataDir(), withExisting, - TransactionQueue::Limits{ - c_transactionQueueSize, c_futureTransactionQueueSize } ) ); + TransactionQueue::Limits{ c_transactionQueueSize, c_futureTransactionQueueSize, + c_transactionQueueSizeBytes, c_futureTransactionQueueSizeBytes } ) ); } else BOOST_THROW_EXCEPTION( ChainParamsInvalid() << errinfo_comment( "Unknown seal engine: " + chainParams.sealEngineName ) ); @@ -2024,8 +1972,7 @@ int main( int argc, char** argv ) try { auto pWeb3Face = new rpc::Web3( clientVersion() ); auto pEthFace = new rpc::Eth( configPath.string(), *g_client, *accountHolder.get() ); auto pSkaleFace = new rpc::Skale( *g_client, sharedSpace ); - auto pSkaleStatsFace = - new rpc::SkaleStats( configPath.string(), *g_client, chainParams, isDisableZMQ ); + auto pSkaleStatsFace = new rpc::SkaleStats( configPath.string(), *g_client, chainParams ); pSkaleStatsFace->isExposeAllDebugInfo_ = isExposeAllDebugInfo; auto pPersonalFace = bEnabledAPIs_personal ? new rpc::Personal( keyManager, *accountHolder, *g_client ) : @@ -2815,7 +2762,7 @@ int main( int argc, char** argv ) try { << cc::warn( "Enabling programmatic shutdown via Web3..." 
); dev::rpc::Skale::enableWeb3Shutdown( true ); dev::rpc::Skale::onShutdownInvoke( - []() { ExitHandler::exitHandler( SIGABRT, ExitHandler::ec_web3_request ); } ); + []() { ExitHandler::exitHandler( -1, ExitHandler::ec_web3_request ); } ); clog( VerbosityWarning, "main" ) << cc::warn( "Done, programmatic shutdown via Web3 is enabled" ); } else { @@ -2826,9 +2773,6 @@ int main( int argc, char** argv ) try { << cc::debug( "Done, programmatic shutdown via Web3 is disabled" ); } - skale::network::browser::refreshing_start( - configPath.string(), []() -> bool { return g_bStopActionsStarted; } ); - dev::setThreadName( "main" ); if ( g_client ) { unsigned int n = g_client->blockChain().details().number; @@ -2840,7 +2784,14 @@ int main( int argc, char** argv ) try { this_thread::sleep_for( chrono::milliseconds( 1000 ) ); } - skale::network::browser::refreshing_stop(); + if ( statusAndControl ) { + statusAndControl->setExitState( StatusAndControl::StartAgain, + ( ExitHandler::requestedExitCode() != ExitHandler::ec_success ) ); + statusAndControl->setExitState( StatusAndControl::StartFromSnapshot, + ( ExitHandler::requestedExitCode() == ExitHandler::ec_state_root_mismatch ) ); + statusAndControl->setExitState( StatusAndControl::ClearDataDir, + ( ExitHandler::requestedExitCode() == ExitHandler::ec_state_root_mismatch ) ); + } // if if ( g_jsonrpcIpcServer.get() ) { g_jsonrpcIpcServer->StopListening(); @@ -2860,9 +2811,6 @@ int main( int argc, char** argv ) try { ( basename + ".html" ).c_str(), ( basename + ".csv" ).c_str(), nullptr ); MicroProfileShutdown(); - stat_handle_stop_actions(); - stat_wait_stop_actions_complete(); - // clog( VerbosityDebug, "main" ) << cc::debug( "Stopping task dispatcher..." 
); // skutils::dispatch::shutdown(); // clog( VerbosityDebug, "main" ) << cc::debug( "Done, task dispatcher stopped" ); diff --git a/storage_benchmark/CMakeLists.txt b/storage_benchmark/CMakeLists.txt index 1684ebe56..8180d42dc 100644 --- a/storage_benchmark/CMakeLists.txt +++ b/storage_benchmark/CMakeLists.txt @@ -20,8 +20,8 @@ target_link_libraries( historic skutils devcore - "${DEPS_INSTALL_ROOT}/lib/liblzma.a" "${DEPS_INSTALL_ROOT}/lib/libunwind.a" + "${DEPS_INSTALL_ROOT}/lib/liblzma.a" ) #target_include_directories(evm_benchmark PRIVATE ../utils) diff --git a/test/historicstate/hardhat/contracts/Lock.sol b/test/historicstate/hardhat/contracts/Lock.sol index 917513266..14762d32e 100644 --- a/test/historicstate/hardhat/contracts/Lock.sol +++ b/test/historicstate/hardhat/contracts/Lock.sol @@ -1,12 +1,8 @@ // SPDX-License-Identifier: UNLICENSED pragma solidity ^0.8.9; -import "@openzeppelin/contracts-upgradeable/proxy/utils/Initializable.sol"; - - - -contract Lock is Initializable { +contract Lock { uint public totalSupply; mapping(address => uint) public balanceOf; mapping(address => mapping(address => uint)) public allowance; @@ -15,7 +11,7 @@ contract Lock is Initializable { uint8 public decimals; bool private initialized; - uint constant ARRAY_SIZE = 1000; + uint constant ARRAY_SIZE = 10000; uint256[ARRAY_SIZE] balance; uint256 counter; mapping(uint256 => uint256) public writeMap; @@ -26,13 +22,19 @@ contract Lock is Initializable { event Approval(address indexed owner, address indexed spender, uint value); - function transfer(address recipient, uint amount) external returns (bool) { - balanceOf[msg.sender] -= amount; - balanceOf[recipient] += amount; - emit Transfer(msg.sender, recipient, amount); - return true; + function writeValues() external { + for (uint256 i = 0; i < 10000; i++) { + // Code to be executed in each iteration + balance[i] = i; + } + } + function die(address payable recipient) external { + selfdestruct(recipient); + } + + function 
approve(address spender, uint amount) external returns (bool) { allowance[msg.sender][spender] = amount; emit Approval(msg.sender, spender, amount); diff --git a/test/historicstate/hardhat/scripts/deploy.js b/test/historicstate/hardhat/scripts/deploy.js index aafb8073a..606bbdd78 100644 --- a/test/historicstate/hardhat/scripts/deploy.js +++ b/test/historicstate/hardhat/scripts/deploy.js @@ -121,18 +121,18 @@ async function deployContractsProxy() { //const lockedAmount = hre.ethers.utils.parseEther("10"); - console.log(`Testing deploy transfer`); + console.log(`Contract deploy`); + const Lock = await hre.ethers.getContractFactory("Lock"); - const lock = await hre.upgrades.deployProxy(Lock); + const lock = await Lock.deploy(); + lockContract = await lock.deployed(); deployBn = await hre.ethers.provider.getBlockNumber(); - console.log(`Lock deployed to ${lockContract.address} at block ${deployBn}`); + console.log(`Contract deployed to ${lockContract.address} at block ${deployBn}`); - previousBlock = await waitUntilNextBlock(); - previousBlock = await waitUntilNextBlock(); // b = await lockContract.balanceOf(OWNER_ADDRESS, {blockTag : previousBlock}); // owner = await lockContract.owner({blockTag : previousBlock}); @@ -141,21 +141,23 @@ async function deployContractsProxy() { // CHECK(b == INITIAL_MINT) - console.log(`Now testing transfer`); + console.log(`Now writing 10,000 values into the state`); - transferReceipt = await lockContract.transfer("0x690b9a9e9aa1c9db991c7721a92d351db4fac990", 0x02); + transferReceipt = await lockContract.writeValues(); await transferReceipt.wait(); previousBlock = await waitUntilNextBlock(); - owner = await lockContract.owner({blockTag : previousBlock}); - console.log(`Contract owner is ${owner}`); + console.log(`Now testing self-destruct`); + + transferReceipt2 = await lockContract.die("0x690b9a9e9aa1c9db991c7721a92d351db4fac990"); + await transferReceipt2.wait(); - b = await lockContract.balanceOf(OWNER_ADDRESS, {blockTag : 
previousBlock}); + console.log(`Successfully self destructed`); + + previousBlock = await waitUntilNextBlock(); - console.log(`Balance after transfer ${b}`); - CHECK(b == INITIAL_MINT - 0x02) console.log(`PASSED!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!`); diff --git a/test/tools/libtesteth/Options.cpp b/test/tools/libtesteth/Options.cpp index eb52f90a4..d7c787b0e 100644 --- a/test/tools/libtesteth/Options.cpp +++ b/test/tools/libtesteth/Options.cpp @@ -23,12 +23,20 @@ #include #include #include + +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wdeprecated-copy" +#pragma GCC diagnostic ignored "-Waddress" +#pragma GCC diagnostic ignored "-Wnonnull-compare" + #include #include #include #include #include +#pragma GCC diagnostic pop + using namespace std; using namespace dev::test; using namespace dev::db; diff --git a/test/tools/libtesteth/TestHelper.cpp b/test/tools/libtesteth/TestHelper.cpp index 97f548786..7384cff09 100644 --- a/test/tools/libtesteth/TestHelper.cpp +++ b/test/tools/libtesteth/TestHelper.cpp @@ -119,7 +119,7 @@ void simulateMining( Client& client, size_t numBlocks, const dev::Address &addre for ( size_t blockNumber = 0; blockNumber < numBlocks; ++blockNumber ) { reward += client.sealEngine()->blockReward( blockNumber ); } - state.addBalance( client.author(), reward ); + state.addBalance( address, reward ); state.commit(); const auto balanceAfter = client.balanceAt( address ); balanceAfter > balanceBefore; // make compiler happy diff --git a/test/tools/libtesteth/TestHelper.h b/test/tools/libtesteth/TestHelper.h index f57986481..4976ae4c5 100644 --- a/test/tools/libtesteth/TestHelper.h +++ b/test/tools/libtesteth/TestHelper.h @@ -27,6 +27,12 @@ #include #include +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wdeprecated-copy" +#pragma GCC diagnostic ignored "-Waddress" +#pragma GCC diagnostic ignored "-Wnonnull-compare" +#pragma GCC diagnostic ignored "-Wsign-compare" + #include #include #include @@ -44,6 +50,8 
@@ #include #include +#pragma GCC diagnostic pop + namespace skale { class State; } diff --git a/test/tools/libtestutils/Common.h b/test/tools/libtestutils/Common.h index 4438f5a62..0992db394 100644 --- a/test/tools/libtestutils/Common.h +++ b/test/tools/libtestutils/Common.h @@ -29,9 +29,16 @@ #include +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wdeprecated-copy" +#pragma GCC diagnostic ignored "-Wsign-compare" +#pragma GCC diagnostic ignored "-Wnonnull-compare" + #include #include +#pragma GCC diagnostic pop + namespace dev { namespace test { diff --git a/test/unittests/libethereum/ClientTest.cpp b/test/unittests/libethereum/ClientTest.cpp index 8bf0240e1..c49302cad 100644 --- a/test/unittests/libethereum/ClientTest.cpp +++ b/test/unittests/libethereum/ClientTest.cpp @@ -30,6 +30,7 @@ #include #include #include +#include using namespace std; using namespace dev; @@ -261,7 +262,7 @@ class TestClientSnapshotsFixture : public TestOutputHelperFixture, public Fixtur // ), dir, // dir, chainParams, WithExisting::Kill, {"eth"}, testingMode ) ); std::shared_ptr< SnapshotManager > mgr; - mgr.reset( new SnapshotManager( m_tmpDir.path(), { BlockChain::getChainDirName( chainParams ), "vol2", "filestorage"} ) ); + mgr.reset( new SnapshotManager( chainParams, m_tmpDir.path(), { BlockChain::getChainDirName( chainParams ), "vol2", "filestorage"} ) ); // boost::filesystem::create_directory( // m_tmpDir.path() / "vol1" / "12041" ); // boost::filesystem::create_directory( @@ -998,6 +999,8 @@ BOOST_AUTO_TEST_CASE( ClientSnapshotsTest, *boost::unit_test::precondition( dev: BOOST_REQUIRE( testClient->getLatestSnapshotBlockNumer() == -1 ); + BOOST_REQUIRE( testClient->getSnapshotHash( 0 ) != dev::h256() ); + BOOST_REQUIRE( testClient->mineBlocks( 1 ) ); testClient->importTransactionsAsBlock( diff --git a/test/unittests/libethereum/SkaleHost.cpp b/test/unittests/libethereum/SkaleHost.cpp index 2069b3a91..07b744a50 100644 --- a/test/unittests/libethereum/SkaleHost.cpp 
+++ b/test/unittests/libethereum/SkaleHost.cpp @@ -14,12 +14,15 @@ #include #include #include +#include #include #include #include +#include +#include #include @@ -81,7 +84,7 @@ class ConsensusTestStub : public ConsensusInterface { } uint64_t submitOracleRequest( const string& /*_spec*/, string& - /*_receipt*/, string& ) override { + /*_receipt*/, string& /*error*/) override { return 0; } @@ -124,6 +127,9 @@ struct SkaleHostFixture : public TestOutputHelperFixture { chainParams.nodeInfo.port = chainParams.nodeInfo.port6 = rand_port; chainParams.sChain.nodes[0].port = chainParams.sChain.nodes[0].port6 = rand_port; + // not 0-timestamp genesis - to test patch + chainParams.timestamp = 1; + if( params.count("multiTransactionMode") && stoi( params.at( "multiTransactionMode" ) ) ) chainParams.sChain.multiTransactionMode = true; @@ -223,7 +229,15 @@ struct SkaleHostFixture : public TestOutputHelperFixture { BOOST_FIXTURE_TEST_SUITE( SkaleHostSuite, SkaleHostFixture ) //, *boost::unit_test::disabled() ) -BOOST_AUTO_TEST_CASE( validTransaction ) { +auto skipInvalidTransactionsVariants = boost::unit_test::data::make({false, true}); + +BOOST_DATA_TEST_CASE( validTransaction, skipInvalidTransactionsVariants, skipInvalidTransactionsFlag ) { + + if(skipInvalidTransactionsFlag){ + const_cast(client->chainParams()).sChain.skipInvalidTransactionsPatchTimestamp = 1; + } + SkipInvalidTransactionsPatch::setTimestamp(client->chainParams().sChain.skipInvalidTransactionsPatchTimestamp); + auto senderAddress = coinbase.address(); auto receiver = KeyPair::create(); @@ -260,14 +274,20 @@ BOOST_AUTO_TEST_CASE( validTransaction ) { REQUIRE_BALANCE_DECREASE( senderAddress, value + gasPrice * 21000 ); } -// Transaction should be IGNORED during execution +// Transaction should be IGNORED or EXCLUDED during execution (depending on skipInvalidTransactionsFlag) // Proposer should be penalized // 1 Small amount of random bytes // 2 110 random bytes // 3 110 bytes of semi-correct RLP 
-BOOST_AUTO_TEST_CASE( transactionRlpBad +BOOST_DATA_TEST_CASE( transactionRlpBad, skipInvalidTransactionsVariants, skipInvalidTransactionsFlag // , *boost::unit_test::precondition( dev::test::run_not_express ) ) { + + if(skipInvalidTransactionsFlag){ + const_cast(client->chainParams()).sChain.skipInvalidTransactionsPatchTimestamp = 1; + } + SkipInvalidTransactionsPatch::setTimestamp(client->chainParams().sChain.skipInvalidTransactionsPatchTimestamp); + auto senderAddress = coinbase.address(); bytes small_tx1 = bytes(); @@ -290,7 +310,13 @@ BOOST_AUTO_TEST_CASE( transactionRlpBad 1U ) ); REQUIRE_BLOCK_INCREASE( 1 ); - REQUIRE_BLOCK_SIZE( 1, 3 ); + + if(skipInvalidTransactionsFlag){ + REQUIRE_BLOCK_SIZE( 1, 0 ); + } + else{ + REQUIRE_BLOCK_SIZE( 1, 3 ); + } REQUIRE_NONCE_INCREASE( senderAddress, 0 ); REQUIRE_BALANCE_DECREASE( senderAddress, 0 ); @@ -299,31 +325,33 @@ BOOST_AUTO_TEST_CASE( transactionRlpBad Transactions txns = client->transactions( 1 ); // cerr << toJson( txns ); - REQUIRE_BLOCK_TRANSACTION( 1, 0, txns[0].sha3() ); - REQUIRE_BLOCK_TRANSACTION( 1, 1, txns[1].sha3() ); - REQUIRE_BLOCK_TRANSACTION( 1, 2, txns[2].sha3() ); + if(!skipInvalidTransactionsFlag){ + REQUIRE_BLOCK_TRANSACTION( 1, 0, txns[0].sha3() ); + REQUIRE_BLOCK_TRANSACTION( 1, 1, txns[1].sha3() ); + REQUIRE_BLOCK_TRANSACTION( 1, 2, txns[2].sha3() ); - // check also receipts and locations - size_t i = 0; - for ( const Transaction& tx : txns ) { - Transaction tx2 = client->transaction( tx.sha3() ); - LocalisedTransaction lt = client->localisedTransaction( tx.sha3() ); - LocalisedTransactionReceipt lr = client->localisedTransactionReceipt( tx.sha3() ); + // check also receipts and locations + size_t i = 0; + for ( const Transaction& tx : txns ) { + Transaction tx2 = client->transaction( tx.sha3() ); + LocalisedTransaction lt = client->localisedTransaction( tx.sha3() ); + LocalisedTransactionReceipt lr = client->localisedTransactionReceipt( tx.sha3() ); - BOOST_REQUIRE_EQUAL( tx2, tx ); + 
BOOST_REQUIRE_EQUAL( tx2, tx ); - BOOST_REQUIRE_EQUAL( lt, tx ); - BOOST_REQUIRE_EQUAL( lt.blockNumber(), 1 ); - BOOST_REQUIRE_EQUAL( lt.blockHash(), client->hashFromNumber( 1 ) ); - BOOST_REQUIRE_EQUAL( lt.transactionIndex(), i ); + BOOST_REQUIRE_EQUAL( lt, tx ); + BOOST_REQUIRE_EQUAL( lt.blockNumber(), 1 ); + BOOST_REQUIRE_EQUAL( lt.blockHash(), client->hashFromNumber( 1 ) ); + BOOST_REQUIRE_EQUAL( lt.transactionIndex(), i ); - BOOST_REQUIRE_EQUAL( lr.hash(), tx.sha3() ); - BOOST_REQUIRE_EQUAL( lr.blockNumber(), lt.blockNumber() ); - BOOST_REQUIRE_EQUAL( lr.blockHash(), lt.blockHash() ); - BOOST_REQUIRE_EQUAL( lr.transactionIndex(), i ); + BOOST_REQUIRE_EQUAL( lr.hash(), tx.sha3() ); + BOOST_REQUIRE_EQUAL( lr.blockNumber(), lt.blockNumber() ); + BOOST_REQUIRE_EQUAL( lr.blockHash(), lt.blockHash() ); + BOOST_REQUIRE_EQUAL( lr.transactionIndex(), i ); - ++i; - } // for + ++i; + } // for + } } class VrsHackedTransaction : public Transaction { @@ -334,12 +362,18 @@ class VrsHackedTransaction : public Transaction { } }; -// Transaction should be IGNORED during execution +// Transaction should be IGNORED during execution or absent if skipInvalidTransactionsFlag // Proposer should be penalized // zero signature -BOOST_AUTO_TEST_CASE( transactionSigZero +BOOST_DATA_TEST_CASE( transactionSigZero, skipInvalidTransactionsVariants, skipInvalidTransactionsFlag // , *boost::unit_test::precondition( dev::test::run_not_express ) ) { + + if(skipInvalidTransactionsFlag){ + const_cast(client->chainParams()).sChain.skipInvalidTransactionsPatchTimestamp = 1; + } + SkipInvalidTransactionsPatch::setTimestamp(client->chainParams().sChain.skipInvalidTransactionsPatchTimestamp); + auto senderAddress = coinbase.address(); auto receiver = KeyPair::create(); @@ -369,21 +403,32 @@ BOOST_AUTO_TEST_CASE( transactionSigZero stub->createBlock( ConsensusExtFace::transactions_vector{stream.out()}, utcTime(), 1U ) ); REQUIRE_BLOCK_INCREASE( 1 ); - REQUIRE_BLOCK_SIZE( 1, 1 ); - h256 txHash = sha3( 
stream.out() ); - REQUIRE_BLOCK_TRANSACTION( 1, 0, txHash ); + if(skipInvalidTransactionsFlag){ + REQUIRE_BLOCK_SIZE( 1, 0 ); + } + else { + REQUIRE_BLOCK_SIZE( 1, 1 ); + h256 txHash = sha3( stream.out() ); + REQUIRE_BLOCK_TRANSACTION( 1, 0, txHash ); + } REQUIRE_NONCE_INCREASE( senderAddress, 0 ); REQUIRE_BALANCE_DECREASE( senderAddress, 0 ); } -// Transaction should be IGNORED during execution +// Transaction should be IGNORED during execution or absent if skipInvalidTransactionsFlag // Proposer should be penalized // corrupted signature -BOOST_AUTO_TEST_CASE( transactionSigBad +BOOST_DATA_TEST_CASE( transactionSigBad, skipInvalidTransactionsVariants, skipInvalidTransactionsFlag // , *boost::unit_test::precondition( dev::test::run_not_express ) ) { + + if(skipInvalidTransactionsFlag){ + const_cast(client->chainParams()).sChain.skipInvalidTransactionsPatchTimestamp = 1; + } + SkipInvalidTransactionsPatch::setTimestamp(client->chainParams().sChain.skipInvalidTransactionsPatchTimestamp); + auto senderAddress = coinbase.address(); auto receiver = KeyPair::create(); @@ -412,21 +457,33 @@ BOOST_AUTO_TEST_CASE( transactionSigBad stub->createBlock( ConsensusExtFace::transactions_vector{data}, utcTime(), 1U ) ); REQUIRE_BLOCK_INCREASE( 1 ); - REQUIRE_BLOCK_SIZE( 1, 1 ); - h256 txHash = sha3( data ); - REQUIRE_BLOCK_TRANSACTION( 1, 0, txHash ); + + if(skipInvalidTransactionsFlag){ + REQUIRE_BLOCK_SIZE( 1, 0 ); + } + else { + REQUIRE_BLOCK_SIZE( 1, 1 ); + h256 txHash = sha3( data ); + REQUIRE_BLOCK_TRANSACTION( 1, 0, txHash ); + } REQUIRE_NONCE_INCREASE( senderAddress, 0 ); REQUIRE_BALANCE_DECREASE( senderAddress, 0 ); } -// Transaction should be IGNORED during execution +// Transaction should be IGNORED during execution or absent if skipInvalidTransactionsFlag // Proposer should be penalized // gas < min_gas -BOOST_AUTO_TEST_CASE( transactionGasIncorrect +BOOST_DATA_TEST_CASE( transactionGasIncorrect, skipInvalidTransactionsVariants, skipInvalidTransactionsFlag // , 
*boost::unit_test::precondition( dev::test::run_not_express ) ) { + + if(skipInvalidTransactionsFlag){ + const_cast(client->chainParams()).sChain.skipInvalidTransactionsPatchTimestamp = 1; + } + SkipInvalidTransactionsPatch::setTimestamp(client->chainParams().sChain.skipInvalidTransactionsPatchTimestamp); + auto senderAddress = coinbase.address(); auto receiver = KeyPair::create(); @@ -454,8 +511,14 @@ BOOST_AUTO_TEST_CASE( transactionGasIncorrect stub->createBlock( ConsensusExtFace::transactions_vector{stream.out()}, utcTime(), 1U ) ); REQUIRE_BLOCK_INCREASE( 1 ); - REQUIRE_BLOCK_SIZE( 1, 1 ); - REQUIRE_BLOCK_TRANSACTION( 1, 0, txHash ); + + if(skipInvalidTransactionsFlag){ + REQUIRE_BLOCK_SIZE( 1, 0 ); + } + else { + REQUIRE_BLOCK_SIZE( 1, 1 ); + REQUIRE_BLOCK_TRANSACTION( 1, 0, txHash ); + } REQUIRE_NONCE_INCREASE( senderAddress, 0 ); REQUIRE_BALANCE_DECREASE( senderAddress, 0 ); @@ -465,9 +528,15 @@ BOOST_AUTO_TEST_CASE( transactionGasIncorrect // Sender should be charged for gas consumed // Proposer should NOT be penalized // transaction exceedes it's gas limit -BOOST_AUTO_TEST_CASE( transactionGasNotEnough +BOOST_DATA_TEST_CASE( transactionGasNotEnough, skipInvalidTransactionsVariants, skipInvalidTransactionsFlag // , *boost::unit_test::precondition( dev::test::run_not_express ) ) { + + if(skipInvalidTransactionsFlag){ + const_cast(client->chainParams()).sChain.skipInvalidTransactionsPatchTimestamp = 1; + } + SkipInvalidTransactionsPatch::setTimestamp(client->chainParams().sChain.skipInvalidTransactionsPatchTimestamp); + auto senderAddress = coinbase.address(); auto receiver = KeyPair::create(); @@ -519,11 +588,16 @@ BOOST_AUTO_TEST_CASE( transactionGasNotEnough } -// Transaction should be IGNORED during execution +// Transaction should be IGNORED during execution or absent if skipInvalidTransactionsFlag // Proposer should be penalized // nonce too big -BOOST_AUTO_TEST_CASE( transactionNonceBig, - *boost::unit_test::precondition( dev::test::run_not_express ) 
) { +BOOST_DATA_TEST_CASE( transactionNonceBig, skipInvalidTransactionsVariants, skipInvalidTransactionsFlag ) { + + if(skipInvalidTransactionsFlag){ + const_cast(client->chainParams()).sChain.skipInvalidTransactionsPatchTimestamp = 1; + } + SkipInvalidTransactionsPatch::setTimestamp(client->chainParams().sChain.skipInvalidTransactionsPatchTimestamp); + auto senderAddress = coinbase.address(); auto receiver = KeyPair::create(); @@ -551,19 +625,31 @@ BOOST_AUTO_TEST_CASE( transactionNonceBig, stub->createBlock( ConsensusExtFace::transactions_vector{stream.out()}, utcTime(), 1U ) ); REQUIRE_BLOCK_INCREASE( 1 ); - REQUIRE_BLOCK_SIZE( 1, 1 ); - REQUIRE_BLOCK_TRANSACTION( 1, 0, txHash ); + + if(skipInvalidTransactionsFlag){ + REQUIRE_BLOCK_SIZE( 1, 0 ); + } + else { + REQUIRE_BLOCK_SIZE( 1, 1 ); + REQUIRE_BLOCK_TRANSACTION( 1, 0, txHash ); + } REQUIRE_NONCE_INCREASE( senderAddress, 0 ); REQUIRE_BALANCE_DECREASE( senderAddress, 0 ); } -// Transaction should be IGNORED during execution +// Transaction should be IGNORED during execution or absent if skipInvalidTransactionsFlag // Proposer should be penalized // nonce too small -BOOST_AUTO_TEST_CASE( transactionNonceSmall +BOOST_DATA_TEST_CASE( transactionNonceSmall, skipInvalidTransactionsVariants, skipInvalidTransactionsFlag //, *boost::unit_test::precondition( dev::test::run_not_express ) ) { + + if(skipInvalidTransactionsFlag){ + const_cast(client->chainParams()).sChain.skipInvalidTransactionsPatchTimestamp = 1; + } + SkipInvalidTransactionsPatch::setTimestamp(client->chainParams().sChain.skipInvalidTransactionsPatchTimestamp); + auto senderAddress = coinbase.address(); auto receiver = KeyPair::create(); @@ -605,18 +691,29 @@ BOOST_AUTO_TEST_CASE( transactionNonceSmall stub->createBlock( ConsensusExtFace::transactions_vector{stream2.out()}, utcTime(), 2U ) ); REQUIRE_BLOCK_INCREASE( 1 ); - REQUIRE_BLOCK_SIZE( 2, 1 ); - REQUIRE_BLOCK_TRANSACTION( 2, 0, txHash ); + + if(skipInvalidTransactionsFlag){ + REQUIRE_BLOCK_SIZE( 
2, 0 ); + } + else { + REQUIRE_BLOCK_SIZE( 2, 1 ); + REQUIRE_BLOCK_TRANSACTION( 2, 0, txHash ); + } REQUIRE_NONCE_INCREASE( senderAddress, 0 ); REQUIRE_BALANCE_DECREASE( senderAddress, 0 ); } -// Transaction should be IGNORED during execution +// Transaction should be IGNORED during execution or absent if skipInvalidTransactionsFlag // Proposer should be penalized // not enough cash -BOOST_AUTO_TEST_CASE( transactionBalanceBad, - *boost::unit_test::precondition( dev::test::run_not_express ) ) { +BOOST_DATA_TEST_CASE( transactionBalanceBad, skipInvalidTransactionsVariants, skipInvalidTransactionsFlag ) { + + if(skipInvalidTransactionsFlag){ + const_cast(client->chainParams()).sChain.skipInvalidTransactionsPatchTimestamp = 1; + } + SkipInvalidTransactionsPatch::setTimestamp(client->chainParams().sChain.skipInvalidTransactionsPatchTimestamp); + auto senderAddress = coinbase.address(); auto receiver = KeyPair::create(); @@ -644,19 +741,31 @@ BOOST_AUTO_TEST_CASE( transactionBalanceBad, stub->createBlock( ConsensusExtFace::transactions_vector{stream.out()}, utcTime(), 1U ) ); REQUIRE_BLOCK_INCREASE( 1 ); - REQUIRE_BLOCK_SIZE( 1, 1 ); - REQUIRE_BLOCK_TRANSACTION( 1, 0, txHash ); + + if(skipInvalidTransactionsFlag){ + REQUIRE_BLOCK_SIZE( 1, 0 ); + } + else { + REQUIRE_BLOCK_SIZE( 1, 1 ); + REQUIRE_BLOCK_TRANSACTION( 1, 0, txHash ); + } REQUIRE_NONCE_INCREASE( senderAddress, 0 ); REQUIRE_BALANCE_DECREASE( senderAddress, 0 ); } -// Transaction should be IGNORED during execution +// Transaction should be IGNORED during execution or absent if skipInvalidTransactionsFlag // Proposer should be penalized // transaction goes beyond block gas limit -BOOST_AUTO_TEST_CASE( transactionGasBlockLimitExceeded +BOOST_DATA_TEST_CASE( transactionGasBlockLimitExceeded, skipInvalidTransactionsVariants, skipInvalidTransactionsFlag // , *boost::unit_test::precondition( dev::test::run_not_express ) ) { + + if(skipInvalidTransactionsFlag){ + 
const_cast(client->chainParams()).sChain.skipInvalidTransactionsPatchTimestamp = 1; + } + SkipInvalidTransactionsPatch::setTimestamp(client->chainParams().sChain.skipInvalidTransactionsPatchTimestamp); + auto senderAddress = coinbase.address(); auto receiver = KeyPair::create(); @@ -696,9 +805,18 @@ BOOST_AUTO_TEST_CASE( transactionGasBlockLimitExceeded BOOST_REQUIRE_EQUAL( client->number(), 1 ); REQUIRE_BLOCK_INCREASE( 1 ); - REQUIRE_BLOCK_SIZE( 1, 2 ); - REQUIRE_BLOCK_TRANSACTION( 1, 0, txHash1 ); - REQUIRE_BLOCK_TRANSACTION( 1, 1, txHash2 ); + + if(skipInvalidTransactionsFlag){ + REQUIRE_BLOCK_SIZE( 1, 1 ); + + REQUIRE_BLOCK_TRANSACTION( 1, 0, txHash1 ); + } + else { + REQUIRE_BLOCK_SIZE( 1, 2 ); + + REQUIRE_BLOCK_TRANSACTION( 1, 0, txHash1 ); + REQUIRE_BLOCK_TRANSACTION( 1, 1, txHash2 ); + } REQUIRE_NONCE_INCREASE( senderAddress, 1 ); REQUIRE_BALANCE_DECREASE( senderAddress, 10000 * dev::eth::szabo ); // only 1st! diff --git a/test/unittests/libethereum/TransactionQueue.cpp b/test/unittests/libethereum/TransactionQueue.cpp index 2ed1aba99..2036e9520 100644 --- a/test/unittests/libethereum/TransactionQueue.cpp +++ b/test/unittests/libethereum/TransactionQueue.cpp @@ -111,20 +111,22 @@ BOOST_AUTO_TEST_CASE( tqPriority ) { BOOST_CHECK( ( Transactions{tx2, tx0, tx1, tx3, tx5, tx4} ) == txq.topTransactions( 256 ) ); txq.drop( tx0.sha3() ); - BOOST_CHECK( ( Transactions{tx2, tx1, tx3, tx5, tx4} ) == txq.topTransactions( 256 ) ); + // prev BOOST_CHECK( ( Transactions{tx2, tx1, tx3, tx5, tx4} ) == txq.topTransactions( 256 ) ); + // now tx4 has nonce increase 1, and goes lower then tx5 and tx3 + BOOST_CHECK( ( Transactions{tx2, tx1, tx4, tx3, tx5} ) == txq.topTransactions( 256 ) ); txq.drop( tx1.sha3() ); - BOOST_CHECK( ( Transactions{tx2, tx3, tx5, tx4} ) == txq.topTransactions( 256 ) ); + BOOST_CHECK( ( Transactions{tx2, tx4, tx3, tx5} ) == txq.topTransactions( 256 ) ); txq.drop( tx5.sha3() ); - BOOST_CHECK( ( Transactions{tx2, tx3, tx4} ) == txq.topTransactions( 256 
) ); + BOOST_CHECK( ( Transactions{tx2, tx4, tx3} ) == txq.topTransactions( 256 ) ); Transaction tx6( 0, gasCostMed, gas, dest, bytes(), 20, sender1 ); txq.import( tx6 ); - BOOST_CHECK( ( Transactions{tx2, tx3, tx4, tx6} ) == txq.topTransactions( 256 ) ); + BOOST_CHECK( ( Transactions{tx2, tx4, tx3, tx6} ) == txq.topTransactions( 256 ) ); Transaction tx7( 0, gasCostHigh, gas, dest, bytes(), 2, sender2 ); txq.import( tx7 ); // deterministic signature: hash of tx5 and tx7 will be same - BOOST_CHECK( ( Transactions{tx2, tx3, tx4, tx6} ) == txq.topTransactions( 256 ) ); + BOOST_CHECK( ( Transactions{tx2, tx4, tx3, tx6} ) == txq.topTransactions( 256 ) ); } BOOST_AUTO_TEST_CASE( tqNonceChange ) { @@ -167,7 +169,9 @@ BOOST_AUTO_TEST_CASE( tqNonceChange ) { std::cout << tx.from() << " " << tx.nonce() << std::endl; } // expected BAD result [tx10], [tx11, tx23], [tx12, tx22], [tx13] !!! - BOOST_REQUIRE( ( Transactions{tx10, tx11, tx22, tx23, tx12, tx13 } ) == top6 ); + // prev without sort BOOST_REQUIRE( ( Transactions{tx10, tx11, tx22, tx23, tx12, tx13 } ) == top6 ); + // with sort: + BOOST_REQUIRE( ( Transactions{tx10, tx22, tx11, tx23, tx12, tx13 } ) == top6 ); } BOOST_AUTO_TEST_CASE( tqFuture ) { @@ -510,6 +514,43 @@ BOOST_AUTO_TEST_CASE( tqLimit ) { BOOST_REQUIRE( topTr.size() == 1 ); // 1 imported transaction } +BOOST_AUTO_TEST_CASE( tqLimitBytes ) { + TransactionQueue tq( 100, 100, 250, 250 ); + + unsigned maxTxCount = 250 / TestTransaction::defaultTransaction( 1 ).transaction().rlp().size(); + + TestTransaction testTransaction = TestTransaction::defaultTransaction( 2 ); + ImportResult res = tq.import( testTransaction.transaction(), IfDropped::Ignore, true ); + BOOST_REQUIRE( res == ImportResult::Success ); + + testTransaction = TestTransaction::defaultTransaction( 3 ); + res = tq.import( testTransaction.transaction(), IfDropped::Ignore, true ); + BOOST_REQUIRE( res == ImportResult::Success ); + + BOOST_REQUIRE( tq.status().current == 0 ); + + BOOST_REQUIRE( 
tq.status().future == maxTxCount ); + + testTransaction = TestTransaction::defaultTransaction( 4 ); + res = tq.import( testTransaction.transaction(), IfDropped::Ignore, true ); + BOOST_REQUIRE( res == ImportResult::Success ); + + BOOST_REQUIRE( tq.status().current == 0 ); + + BOOST_REQUIRE( tq.status().future == maxTxCount ); + + for ( size_t i = 1; i < 10; i++ ) { + if (i == 2 || i == 3) + continue; + testTransaction = TestTransaction::defaultTransaction( i ); + res = tq.import( testTransaction.transaction() ); + BOOST_REQUIRE( res == ImportResult::Success ); + } + + BOOST_REQUIRE( tq.status().current == maxTxCount ); + BOOST_REQUIRE( tq.status().future == 0 ); +} + BOOST_AUTO_TEST_CASE( tqEqueue ) { TransactionQueue tq; TestTransaction testTransaction = TestTransaction::defaultTransaction(); diff --git a/test/unittests/libskale/HashSnapshot.cpp b/test/unittests/libskale/HashSnapshot.cpp index 564f9dc41..77ca9f4fe 100644 --- a/test/unittests/libskale/HashSnapshot.cpp +++ b/test/unittests/libskale/HashSnapshot.cpp @@ -283,7 +283,7 @@ struct SnapshotHashingFixture : public TestOutputHelperFixture, public FixtureCo // "eth tests", tempDir.path(), "", chainParams, WithExisting::Kill, {"eth"}, // true ) ); - mgr.reset( new SnapshotManager( boost::filesystem::path( BTRFS_DIR_PATH ), + mgr.reset( new SnapshotManager( chainParams, boost::filesystem::path( BTRFS_DIR_PATH ), {BlockChain::getChainDirName( chainParams ), "filestorage"} ) ); boost::filesystem::create_directory( diff --git a/test/unittests/libskale/SnapshotManager.cpp b/test/unittests/libskale/SnapshotManager.cpp index b3d46dc1a..a50951bf0 100644 --- a/test/unittests/libskale/SnapshotManager.cpp +++ b/test/unittests/libskale/SnapshotManager.cpp @@ -160,7 +160,7 @@ BOOST_AUTO_TEST_SUITE( BtrfsTestSuite, BOOST_FIXTURE_TEST_CASE( SimplePositiveTest, BtrfsFixture, *boost::unit_test::precondition( dev::test::run_not_express ) ) { - SnapshotManager mgr( fs::path( BTRFS_DIR_PATH ), {"vol1", "vol2"} ); + 
SnapshotManager mgr( dev::eth::ChainParams(), fs::path( BTRFS_DIR_PATH ), {"vol1", "vol2"} ); // add files 1 fs::create_directory( fs::path( BTRFS_DIR_PATH ) / "vol1" / "d11" ); @@ -168,7 +168,7 @@ BOOST_FIXTURE_TEST_CASE( SimplePositiveTest, BtrfsFixture, BOOST_REQUIRE( fs::exists( fs::path( BTRFS_DIR_PATH ) / "vol1" / "d11" ) ); BOOST_REQUIRE( fs::exists( fs::path( BTRFS_DIR_PATH ) / "vol2" / "d21" ) ); - auto latest0 = mgr.getLatestSnasphots(); + auto latest0 = mgr.getLatestSnapshots(); std::pair< int, int > expected0 { 0, 0 }; BOOST_REQUIRE( latest0 == expected0 ); @@ -183,7 +183,7 @@ BOOST_FIXTURE_TEST_CASE( SimplePositiveTest, BtrfsFixture, BOOST_REQUIRE( fs::exists( fs::path( BTRFS_DIR_PATH ) / "vol1" / "d12" ) ); BOOST_REQUIRE( !fs::exists( fs::path( BTRFS_DIR_PATH ) / "vol2" / "d21" ) ); - auto latest1 = mgr.getLatestSnasphots(); + auto latest1 = mgr.getLatestSnapshots(); std::pair< int, int > expected1 { 0, 1 }; BOOST_REQUIRE( latest1 == expected1 ); @@ -215,12 +215,12 @@ BOOST_FIXTURE_TEST_CASE( SimplePositiveTest, BtrfsFixture, BOOST_REQUIRE( fs::exists( fs::path( BTRFS_DIR_PATH ) / "vol1" / "d12" ) ); BOOST_REQUIRE( !fs::exists( fs::path( BTRFS_DIR_PATH ) / "vol2" / "d21" ) ); - auto latest2 = mgr.getLatestSnasphots(); + auto latest2 = mgr.getLatestSnapshots(); std::pair< int, int > expected2 { 1, 2 }; BOOST_REQUIRE( latest2 == expected2 ); mgr.doSnapshot( 3 ); - auto latest3 = mgr.getLatestSnasphots(); + auto latest3 = mgr.getLatestSnapshots(); std::pair< int, int > expected3 { 2, 3 }; BOOST_REQUIRE( latest3 == expected3 ); @@ -231,14 +231,14 @@ BOOST_FIXTURE_TEST_CASE( SimplePositiveTest, BtrfsFixture, BOOST_FIXTURE_TEST_CASE( NoBtrfsTest, NoBtrfsFixture, *boost::unit_test::precondition( dev::test::run_not_express ) ) { - BOOST_REQUIRE_THROW( SnapshotManager mgr( fs::path( BTRFS_DIR_PATH ), {"vol1", "vol2"} ), + BOOST_REQUIRE_THROW( SnapshotManager mgr( dev::eth::ChainParams(), fs::path( BTRFS_DIR_PATH ), {"vol1", "vol2"} ), 
SnapshotManager::CannotPerformBtrfsOperation ); } BOOST_FIXTURE_TEST_CASE( BadPathTest, BtrfsFixture, *boost::unit_test::precondition( dev::test::run_not_express ) ) { BOOST_REQUIRE_EXCEPTION( - SnapshotManager mgr( fs::path( BTRFS_DIR_PATH ) / "_invalid", {"vol1", "vol2"} ), + SnapshotManager mgr( dev::eth::ChainParams(), fs::path( BTRFS_DIR_PATH ) / "_invalid", {"vol1", "vol2"} ), SnapshotManager::InvalidPath, [this]( const SnapshotManager::InvalidPath& ex ) -> bool { return ex.path == fs::path( BTRFS_DIR_PATH ) / "_invalid"; } ); @@ -267,17 +267,17 @@ BOOST_FIXTURE_TEST_CASE( InaccessiblePathTest, BtrfsFixture, dropRoot(); - BOOST_REQUIRE_EXCEPTION( SnapshotManager mgr( fs::path( BTRFS_DIR_PATH ) / "_no_w", {"vol1"} ), + BOOST_REQUIRE_EXCEPTION( SnapshotManager mgr( dev::eth::ChainParams(), fs::path( BTRFS_DIR_PATH ) / "_no_w", {"vol1"} ), SnapshotManager::CannotCreate, [this]( const SnapshotManager::CannotCreate& ex ) -> bool { return ex.path == fs::path( BTRFS_DIR_PATH ) / "_no_w" / "snapshots"; } ); - BOOST_REQUIRE_EXCEPTION( SnapshotManager mgr( fs::path( BTRFS_DIR_PATH ) / "_no_x", {"vol1"} ), + BOOST_REQUIRE_EXCEPTION( SnapshotManager mgr( dev::eth::ChainParams(), fs::path( BTRFS_DIR_PATH ) / "_no_x", {"vol1"} ), SnapshotManager::CannotCreate, [this]( const SnapshotManager::CannotCreate& ex ) -> bool { return ex.path == fs::path( BTRFS_DIR_PATH ) / "_no_x" / "snapshots"; } ); - BOOST_REQUIRE_EXCEPTION( SnapshotManager mgr( fs::path( BTRFS_DIR_PATH ) / "_no_r", {"vol1"} ), + BOOST_REQUIRE_EXCEPTION( SnapshotManager mgr( dev::eth::ChainParams(), fs::path( BTRFS_DIR_PATH ) / "_no_r", {"vol1"} ), SnapshotManager::CannotCreate, [this]( const SnapshotManager::CannotCreate& ex ) -> bool { return ex.path == fs::path( BTRFS_DIR_PATH ) / "_no_x" / "snapshots"; } ); @@ -285,7 +285,7 @@ BOOST_FIXTURE_TEST_CASE( InaccessiblePathTest, BtrfsFixture, BOOST_FIXTURE_TEST_CASE( SnapshotTest, BtrfsFixture, *boost::unit_test::precondition( dev::test::run_not_express ) ) { - 
SnapshotManager mgr( fs::path( BTRFS_DIR_PATH ), {"vol1", "vol2"} ); + SnapshotManager mgr( dev::eth::ChainParams(), fs::path( BTRFS_DIR_PATH ), {"vol1", "vol2"} ); BOOST_REQUIRE_NO_THROW( mgr.doSnapshot( 2 ) ); BOOST_REQUIRE_THROW( mgr.doSnapshot( 2 ), SnapshotManager::SnapshotPresent ); @@ -314,7 +314,7 @@ BOOST_FIXTURE_TEST_CASE( SnapshotTest, BtrfsFixture, BOOST_FIXTURE_TEST_CASE( RestoreTest, BtrfsFixture, *boost::unit_test::precondition( dev::test::run_not_express ) ) { - SnapshotManager mgr( fs::path( BTRFS_DIR_PATH ), {"vol1", "vol2"} ); + SnapshotManager mgr( dev::eth::ChainParams(), fs::path( BTRFS_DIR_PATH ), {"vol1", "vol2"} ); BOOST_REQUIRE_THROW( mgr.restoreSnapshot( 2 ), SnapshotManager::SnapshotAbsent ); @@ -330,7 +330,7 @@ BOOST_FIXTURE_TEST_CASE( RestoreTest, BtrfsFixture, BOOST_FIXTURE_TEST_CASE( DiffTest, BtrfsFixture, *boost::unit_test::precondition( dev::test::run_not_express ) ) { - SnapshotManager mgr( fs::path( BTRFS_DIR_PATH ), {"vol1", "vol2"} ); + SnapshotManager mgr( dev::eth::ChainParams(), fs::path( BTRFS_DIR_PATH ), {"vol1", "vol2"} ); mgr.doSnapshot( 2 ); fs::create_directory( fs::path( BTRFS_DIR_PATH ) / "vol1" / "dir" ); mgr.doSnapshot( 4 ); @@ -363,7 +363,7 @@ BOOST_FIXTURE_TEST_CASE( DiffTest, BtrfsFixture, BOOST_FIXTURE_TEST_CASE( ImportTest, BtrfsFixture, *boost::unit_test::precondition( dev::test::run_not_express ) ) { - SnapshotManager mgr( fs::path( BTRFS_DIR_PATH ), {"vol1", "vol2"} ); + SnapshotManager mgr( dev::eth::ChainParams(), fs::path( BTRFS_DIR_PATH ), {"vol1", "vol2"} ); BOOST_REQUIRE_THROW( mgr.importDiff( 8 ), SnapshotManager::InvalidPath ); @@ -400,7 +400,7 @@ BOOST_FIXTURE_TEST_CASE( ImportTest, BtrfsFixture, BOOST_FIXTURE_TEST_CASE( SnapshotRotationTest, BtrfsFixture, *boost::unit_test::precondition( dev::test::run_not_express ) ) { - SnapshotManager mgr( fs::path( BTRFS_DIR_PATH ), {"vol1", "vol2"} ); + SnapshotManager mgr( dev::eth::ChainParams(), fs::path( BTRFS_DIR_PATH ), {"vol1", "vol2"} ); 
BOOST_REQUIRE_NO_THROW( mgr.doSnapshot( 1 ) ); sleep( 1 ); @@ -421,7 +421,7 @@ BOOST_FIXTURE_TEST_CASE( SnapshotRotationTest, BtrfsFixture, BOOST_FIXTURE_TEST_CASE( DiffRotationTest, BtrfsFixture, *boost::unit_test::precondition( dev::test::run_not_express ) ) { - SnapshotManager mgr( fs::path( BTRFS_DIR_PATH ), {"vol1", "vol2"} ); + SnapshotManager mgr( dev::eth::ChainParams(), fs::path( BTRFS_DIR_PATH ), {"vol1", "vol2"} ); fs::path diff12 = mgr.getDiffPath( 2 ); { @@ -451,7 +451,7 @@ BOOST_FIXTURE_TEST_CASE( DiffRotationTest, BtrfsFixture, BOOST_FIXTURE_TEST_CASE( RemoveSnapshotTest, BtrfsFixture, *boost::unit_test::precondition( dev::test::run_not_express ) ) { - SnapshotManager mgr( fs::path( BTRFS_DIR_PATH ), {"vol1", "vol2"} ); + SnapshotManager mgr( dev::eth::ChainParams(), fs::path( BTRFS_DIR_PATH ), {"vol1", "vol2"} ); mgr.doSnapshot( 1 ); mgr.doSnapshot( 2 ); @@ -469,7 +469,7 @@ BOOST_FIXTURE_TEST_CASE( RemoveSnapshotTest, BtrfsFixture, BOOST_FIXTURE_TEST_CASE( CleanupTest, BtrfsFixture, *boost::unit_test::precondition( dev::test::run_not_express ) ) { - SnapshotManager mgr( fs::path( BTRFS_DIR_PATH ), {"vol1", "vol2"} ); + SnapshotManager mgr( dev::eth::ChainParams(), fs::path( BTRFS_DIR_PATH ), {"vol1", "vol2"} ); mgr.doSnapshot( 1 ); mgr.doSnapshot( 2 ); diff --git a/test/unittests/libweb3core/LevelDBHash.cpp b/test/unittests/libweb3core/LevelDBHash.cpp index 0b42d06f4..67dde80cb 100644 --- a/test/unittests/libweb3core/LevelDBHash.cpp +++ b/test/unittests/libweb3core/LevelDBHash.cpp @@ -14,7 +14,7 @@ BOOST_AUTO_TEST_CASE( hash ) { std::unique_ptr< dev::db::LevelDB > db( new dev::db::LevelDB( td.path() ) ); BOOST_REQUIRE( db ); - for ( size_t i = 0; i < 1234567; ++i ) { + for ( size_t i = 0; i < 123; ++i ) { std::string key = std::to_string( 43 + i ); std::string value = std::to_string( i ); db->insert( dev::db::Slice(key), dev::db::Slice(value) ); @@ -28,7 +28,7 @@ BOOST_AUTO_TEST_CASE( hash ) { std::unique_ptr< dev::db::LevelDB > db_copy( new 
dev::db::LevelDB( td.path() ) ); BOOST_REQUIRE( db_copy ); - for ( size_t i = 0; i < 1234567; ++i ) { + for ( size_t i = 0; i < 123; ++i ) { std::string key = std::to_string( 43 + i ); std::string value = std::to_string( i ); db_copy->insert( dev::db::Slice(key), dev::db::Slice(value) ); @@ -44,7 +44,7 @@ BOOST_AUTO_TEST_CASE( hash ) { std::unique_ptr< dev::db::LevelDB > db_diff( new dev::db::LevelDB( td.path() ) ); BOOST_REQUIRE( db_diff ); - for ( size_t i = 0; i < 1234567; ++i ) { + for ( size_t i = 0; i < 123; ++i ) { std::string key = std::to_string( 42 + i ); std::string value = std::to_string( i ); db_diff->insert( dev::db::Slice(key), dev::db::Slice(value) ); diff --git a/test/unittests/libweb3jsonrpc/WebThreeStubClient.cpp b/test/unittests/libweb3jsonrpc/WebThreeStubClient.cpp index f6ba70ea2..597104bf2 100644 --- a/test/unittests/libweb3jsonrpc/WebThreeStubClient.cpp +++ b/test/unittests/libweb3jsonrpc/WebThreeStubClient.cpp @@ -1322,3 +1322,23 @@ Json::Value WebThreeStubClient::debug_traceCall( throw jsonrpc::JsonRpcException( jsonrpc::Errors::ERROR_CLIENT_INVALID_RESPONSE, result.toStyledString() ); } + +Json::Value WebThreeStubClient::debug_doStateDbCompaction() { + Json::Value p; + Json::Value result = this->CallMethod( "debug_doStateDbCompaction", p ); + if ( result.isUInt64() ) + return result.asUInt64(); + else + throw jsonrpc::JsonRpcException( + jsonrpc::Errors::ERROR_CLIENT_INVALID_RESPONSE, result.toStyledString() ); +} + +Json::Value WebThreeStubClient::debug_doBlocksDbCompaction() { + Json::Value p; + Json::Value result = this->CallMethod( "debug_doBlocksDbCompaction", p ); + if ( result.isUInt64() ) + return result.asUInt64(); + else + throw jsonrpc::JsonRpcException( + jsonrpc::Errors::ERROR_CLIENT_INVALID_RESPONSE, result.toStyledString() ); +} diff --git a/test/unittests/libweb3jsonrpc/WebThreeStubClient.h b/test/unittests/libweb3jsonrpc/WebThreeStubClient.h index 02990d90d..d6acd634c 100644 --- 
a/test/unittests/libweb3jsonrpc/WebThreeStubClient.h +++ b/test/unittests/libweb3jsonrpc/WebThreeStubClient.h @@ -158,6 +158,8 @@ class WebThreeStubClient : public jsonrpc::Client { const std::string& param1, const Json::Value& param2 ) noexcept( false ); Json::Value debug_traceCall( const Json::Value& param1, const std::string& param2, const Json::Value& param3 ) noexcept( false ); + Json::Value debug_doStateDbCompaction() noexcept( false ); + Json::Value debug_doBlocksDbCompaction() noexcept( false ); }; #endif // JSONRPC_CPP_STUB_WEBTHREESTUBCLIENT_H_ diff --git a/test/unittests/libweb3jsonrpc/jsonrpc.cpp b/test/unittests/libweb3jsonrpc/jsonrpc.cpp index 2e7e84d03..5ef568135 100644 --- a/test/unittests/libweb3jsonrpc/jsonrpc.cpp +++ b/test/unittests/libweb3jsonrpc/jsonrpc.cpp @@ -78,7 +78,8 @@ static std::string const c_genesisConfigString = "EIP158ForkBlock": "0x00", "byzantiumForkBlock": "0x00", "constantinopleForkBlock": "0x00", - "skaleDisableChainIdCheck": true + "skaleDisableChainIdCheck": true, + "externalGasDifficulty": "0x1" }, "genesis": { "author" : "0x2adc25665018aa1fe0e6bc666dac8fc2697ff9ba", @@ -110,7 +111,8 @@ static std::string const c_genesisConfigString = { "nodeID": 1112, "ip": "127.0.0.1", "basePort": )"+std::to_string( rand_port ) + R"(, "schainIndex" : 1, "publicKey": "0xfa"} ], "revertableFSPatchTimestamp": 0, - "contractStorageZeroValuePatchTimestamp": 0 + "contractStorageZeroValuePatchTimestamp": 0, + "powCheckPatchTimestamp": 1 } }, "accounts": { @@ -793,22 +795,23 @@ BOOST_AUTO_TEST_CASE( simple_contract ) { result, "0x0000000000000000000000000000000000000000000000000000000000000007" ); } +/* // As block rotation is not exact now - let's use approximate comparisons #define REQUIRE_APPROX_EQUAL(a, b) BOOST_REQUIRE(4*(a) > 3*(b) && 4*(a) < 5*(b)) -BOOST_AUTO_TEST_CASE( logs_range ) { +BOOST_AUTO_TEST_CASE( logs_range, *boost::unit_test::disabled() ) { JsonRpcFixture fixture; dev::eth::simulateMining( *( fixture.client ), 1 ); -/* -pragma 
solidity >=0.4.10 <0.7.0; -contract Logger{ - fallback() external payable { - log2(bytes32(block.number+1), bytes32(block.number), "dimalit"); - } -} -*/ +//pragma solidity >=0.4.10 <0.7.0; + +//contract Logger{ +// fallback() external payable { +// log2(bytes32(block.number+1), bytes32(block.number), "dimalit"); +// } +//} + string bytecode = "6080604052348015600f57600080fd5b50607d80601d6000396000f3fe60806040527f64696d616c69740000000000000000000000000000000000000000000000000043600102600143016001026040518082815260200191505060405180910390a200fea2646970667358221220ecafb98cd573366a37976cb7a4489abe5389d1b5989cd7b7136c8eb0c5ba0b5664736f6c63430006000033"; @@ -963,6 +966,7 @@ contract Logger{ BOOST_REQUIRE_EQUAL(res["blockNumber"], "0x200"); BOOST_REQUIRE_EQUAL(res["to"], contractAddress); } +*/ BOOST_AUTO_TEST_CASE( deploy_contract_from_owner ) { JsonRpcFixture fixture( c_genesisConfigString ); @@ -1802,6 +1806,65 @@ contract Logger{ BOOST_REQUIRE_EQUAL(logs.size(), 24); } +BOOST_AUTO_TEST_CASE( estimate_gas_low_gas_txn ) { + JsonRpcFixture fixture; + dev::eth::simulateMining( *( fixture.client ), 10 ); + + auto senderAddress = fixture.coinbase.address(); + +/* +// SPDX-License-Identifier: None +pragma solidity ^0.6.0; + +contract TestEstimateGas { + uint256[256] number; + uint256 counter = 0; + + function store(uint256 x) public { + number[counter] = x; + counter += 1; + } + + function clear(uint256 pos) public { + number[pos] = 0; + } +} +*/ + + string bytecode = 
"608060405260006101005534801561001657600080fd5b50610104806100266000396000f3fe6080604052348015600f57600080fd5b506004361060325760003560e01c80636057361d146037578063c0fe1af8146062575b600080fd5b606060048036036020811015604b57600080fd5b8101908080359060200190929190505050608d565b005b608b60048036036020811015607657600080fd5b810190808035906020019092919050505060b8565b005b806000610100546101008110609e57fe5b018190555060016101006000828254019250508190555050565b60008082610100811060c657fe5b01819055505056fea26469706673582212206c8da972693a5b8c9bf59c197c4a0c554e9f51abd20047572c9c19125b533d2964736f6c634300060c0033"; + + Json::Value create; + create["code"] = bytecode; + create["gas"] = "180000"; // TODO or change global default of 90000? + + string deployHash = fixture.rpcClient->eth_sendTransaction( create ); + dev::eth::mineTransaction( *( fixture.client ), 1 ); + + Json::Value deployReceipt = fixture.rpcClient->eth_getTransactionReceipt( deployHash ); + string contractAddress = deployReceipt["contractAddress"].asString(); + + Json::Value txStore1; // call store(1) + txStore1["to"] = contractAddress; + txStore1["data"] = "0x6057361d0000000000000000000000000000000000000000000000000000000000000001"; + txStore1["from"] = toJS( senderAddress ); + txStore1["gasPrice"] = fixture.rpcClient->eth_gasPrice(); + string txHash = fixture.rpcClient->eth_call( txStore1, "latest" ); + + Json::Value estimateGasCall; // call clear(0) + estimateGasCall["to"] = contractAddress; + estimateGasCall["data"] = "0xc0fe1af80000000000000000000000000000000000000000000000000000000000000000"; + estimateGasCall["from"] = toJS( senderAddress ); + estimateGasCall["gasPrice"] = fixture.rpcClient->eth_gasPrice(); + string estimatedGas = fixture.rpcClient->eth_estimateGas( estimateGasCall ); + + dev::bytes data = dev::jsToBytes( estimateGasCall["data"].asString() ); + + BOOST_REQUIRE( dev::jsToU256( estimatedGas ) > dev::eth::TransactionBase::baseGasRequired( + false, &data, 
fixture.client->chainParams().scheduleForBlockNumber( + fixture.client->number() ) ) ); + BOOST_REQUIRE( dev::jsToU256( estimatedGas ) == 21871 ); +} + BOOST_AUTO_TEST_CASE( storage_limit_contract ) { JsonRpcFixture fixture; dev::eth::simulateMining( *( fixture.client ), 10 ); @@ -2253,6 +2316,43 @@ BOOST_AUTO_TEST_CASE( oracle, *boost::unit_test::disabled() ) { }*/ +BOOST_AUTO_TEST_CASE( doDbCompactionDebugCall ) { + JsonRpcFixture fixture; + + fixture.rpcClient->debug_doStateDbCompaction(); + + fixture.rpcClient->debug_doBlocksDbCompaction(); +} + +BOOST_AUTO_TEST_CASE( powTxnGasLimit ) { + JsonRpcFixture fixture(c_genesisConfigString, false, false, true, false); + + // mine blocks without transactions + dev::eth::simulateMining( *( fixture.client ), 2000000 ); + + string senderAddress = toJS(fixture.coinbase.address()); + + Json::Value txPOW1; + txPOW1["to"] = "0x0000000000000000000000000000000000000033"; + txPOW1["from"] = senderAddress; + txPOW1["gas"] = "100000"; + txPOW1["gasPrice"] = "0xa449dcaf2bca14e6bd0ac650eed9555008363002b2fc3a4c8422b7a9525a8135"; // gas 200k + txPOW1["value"] = 1; + string txHash = fixture.rpcClient->eth_sendTransaction( txPOW1 ); + dev::eth::mineTransaction( *( fixture.client ), 1 ); + + Json::Value receipt1 = fixture.rpcClient->eth_getTransactionReceipt( txHash ); + BOOST_REQUIRE( receipt1["status"] == string( "0x1" ) ); + + Json::Value txPOW2; + txPOW2["to"] = "0x0000000000000000000000000000000000000033"; + txPOW2["from"] = senderAddress; + txPOW2["gas"] = "100000"; + txPOW2["gasPrice"] = "0xc5002ab03e1e7e196b3d0ffa9801e783fcd48d4c6d972f1389ab63f4e2d0bef0"; // gas 1m + txPOW2["value"] = 100; + BOOST_REQUIRE_THROW( fixture.rpcClient->eth_sendTransaction( txPOW2 ), jsonrpc::JsonRpcException ); // block gas limit reached + } + BOOST_AUTO_TEST_CASE( EIP1898Calls ) { JsonRpcFixture fixture; @@ -2765,6 +2865,133 @@ BOOST_AUTO_TEST_CASE( mtm_import_future_txs ) { // BOOST_REQUIRE( !mtm ); // } +// historic node shall ignore invalid 
transactions in block +BOOST_AUTO_TEST_CASE( skip_invalid_transactions ) { + JsonRpcFixture fixture( c_genesisConfigString, true, true, false, true ); + dev::eth::simulateMining( *( fixture.client ), 1 ); // 2 Ether + + cout << "Balance: " << fixture.rpcClient->eth_getBalance(fixture.accountHolder->allAccounts()[0].hex(), "latest") << endl; + + // 1 import 1 transaction to increase block number + // also send 1 eth to account2 + // TODO repair mineMoney function! (it asserts) + Json::Value txJson; + txJson["from"] = fixture.coinbase.address().hex(); + txJson["gas"] = "200000"; + txJson["gasPrice"] = "5000000000000"; + txJson["to"] = fixture.account2.address().hex(); + txJson["value"] = "1000000000000000000"; + + txJson["nonce"] = "0"; + TransactionSkeleton ts1 = toTransactionSkeleton( txJson ); + ts1 = fixture.client->populateTransactionWithDefaults( ts1 ); + pair< bool, Secret > ar1 = fixture.accountHolder->authenticate( ts1 ); + Transaction tx1( ts1, ar1.second ); + fixture.client->importTransaction( tx1 ); + + // 1 eth left (returned to author) + dev::eth::mineTransaction(*(fixture.client), 1); + cout << "Balance2: " << fixture.rpcClient->eth_getBalance(fixture.accountHolder->allAccounts()[0].hex(), "latest") << endl; + + // 2 import 4 transactions with money for 1st, 2nd, and 3rd + + // require full 1 Ether for gas+value + txJson["gas"] = "100000"; + txJson["nonce"] = "1"; + txJson["value"] = "500000000000000000";// take 0.5 eth out + ts1 = toTransactionSkeleton( txJson ); + ts1 = fixture.client->populateTransactionWithDefaults( ts1 ); + ar1 = fixture.accountHolder->authenticate( ts1 ); + tx1 = Transaction( ts1, ar1.second ); + + txJson["nonce"] = "2"; + TransactionSkeleton ts2 = toTransactionSkeleton( txJson ); + ts2 = fixture.client->populateTransactionWithDefaults( ts2 ); + pair< bool, Secret > ar2 = fixture.accountHolder->authenticate( ts2 ); + Transaction tx2( ts2, ar2.second ); + + txJson["from"] = fixture.account2.address().hex(); + txJson["nonce"] = 
"0"; + txJson["value"] = "0"; + txJson["gasPrice"] = "20000000000"; + txJson["gas"] = "53000"; + TransactionSkeleton ts3 = toTransactionSkeleton( txJson ); + ts3 = fixture.client->populateTransactionWithDefaults( ts3 ); + pair< bool, Secret > ar3 = fixture.accountHolder->authenticate( ts3 ); + Transaction tx3( ts3, ar3.second ); + + txJson["nonce"] = "1"; + TransactionSkeleton ts4 = toTransactionSkeleton( txJson ); + ts3 = fixture.client->populateTransactionWithDefaults( ts4 ); + pair< bool, Secret > ar4 = fixture.accountHolder->authenticate( ts4 ); + Transaction tx4( ts3, ar3.second ); + + h256 h4 = fixture.client->importTransaction( tx4 ); // ok + h256 h2 = fixture.client->importTransaction( tx2 ); // invalid + h256 h3 = fixture.client->importTransaction( tx3 ); // ok + h256 h1 = fixture.client->importTransaction( tx1 ); // ok + + dev::eth::mineTransaction(*(fixture.client), 1); + cout << "Balance3: " << fixture.rpcClient->eth_getBalance(fixture.accountHolder->allAccounts()[0].hex(), "latest") << endl; + + (void)h1; + (void)h2; + (void)h3; + (void)h4; + +#ifdef HISTORIC_STATE + // 3 check that historic node sees only 3 txns + + // 1 Block + Json::Value block = fixture.rpcClient->eth_getBlockByNumber("latest", "false"); + + BOOST_REQUIRE_EQUAL(block["transactions"].size(), 3); + BOOST_REQUIRE_EQUAL(block["transactions"][0]["transactionIndex"], "0x0"); + BOOST_REQUIRE_EQUAL(block["transactions"][1]["transactionIndex"], "0x1"); + BOOST_REQUIRE_EQUAL(block["transactions"][2]["transactionIndex"], "0x2"); + + // 2 receipts + Json::Value r1,r3,r4; + BOOST_REQUIRE_NO_THROW(r1 = fixture.rpcClient->eth_getTransactionReceipt(toJS(h1))); + BOOST_REQUIRE_THROW (fixture.rpcClient->eth_getTransactionReceipt(toJS(h2)), jsonrpc::JsonRpcException); + BOOST_REQUIRE_NO_THROW(r3 = fixture.rpcClient->eth_getTransactionReceipt(toJS(h3))); + BOOST_REQUIRE_NO_THROW(r4 = fixture.rpcClient->eth_getTransactionReceipt(toJS(h4))); + + BOOST_REQUIRE_EQUAL(r1["transactionIndex"], "0x0"); + 
BOOST_REQUIRE_EQUAL(r3["transactionIndex"], "0x1"); + BOOST_REQUIRE_EQUAL(r4["transactionIndex"], "0x2"); + + // 3 transaction by index + Json::Value t0 = fixture.rpcClient->eth_getTransactionByBlockNumberAndIndex("latest", "0"); + Json::Value t1 = fixture.rpcClient->eth_getTransactionByBlockNumberAndIndex("latest", "1"); + Json::Value t2 = fixture.rpcClient->eth_getTransactionByBlockNumberAndIndex("latest", "2"); + + BOOST_REQUIRE_EQUAL(jsToFixed<32>(t0["hash"].asString()), h1); + BOOST_REQUIRE_EQUAL(jsToFixed<32>(t1["hash"].asString()), h3); + BOOST_REQUIRE_EQUAL(jsToFixed<32>(t2["hash"].asString()), h4); + + string bh = r1["blockHash"].asString(); + + t0 = fixture.rpcClient->eth_getTransactionByBlockHashAndIndex(bh, "0"); + t1 = fixture.rpcClient->eth_getTransactionByBlockHashAndIndex(bh, "1"); + t2 = fixture.rpcClient->eth_getTransactionByBlockHashAndIndex(bh, "2"); + + BOOST_REQUIRE_EQUAL(jsToFixed<32>(t0["hash"].asString()), h1); + BOOST_REQUIRE_EQUAL(jsToFixed<32>(t1["hash"].asString()), h3); + BOOST_REQUIRE_EQUAL(jsToFixed<32>(t2["hash"].asString()), h4); + + // 4 transaction by hash + BOOST_REQUIRE_THROW (fixture.rpcClient->eth_getTransactionByHash(toJS(h2)), jsonrpc::JsonRpcException); + + // 5 transaction count + Json::Value cnt = fixture.rpcClient->eth_getBlockTransactionCountByNumber("latest"); + BOOST_REQUIRE_EQUAL(cnt.asString(), "0x3"); + cnt = fixture.rpcClient->eth_getBlockTransactionCountByHash(bh); + BOOST_REQUIRE_EQUAL(cnt.asString(), "0x3"); +#endif +} + + BOOST_FIXTURE_TEST_SUITE( RestrictedAddressSuite, RestrictedAddressFixture ) BOOST_AUTO_TEST_CASE( direct_call ) { @@ -2967,4 +3194,73 @@ BOOST_AUTO_TEST_CASE( uncached_filestorage ) { BOOST_AUTO_TEST_SUITE_END() +BOOST_FIXTURE_TEST_SUITE( GappedCacheSuite, JsonRpcFixture ) + +#ifdef HISTORIC_STATE + +BOOST_AUTO_TEST_CASE( test_blocks ) { + dev::rpc::_detail::GappedTransactionIndexCache cache(10, *client); + BOOST_REQUIRE_EQUAL(cache.realBlockTransactionCount(LatestBlock), 0); + 
BOOST_REQUIRE_EQUAL(cache.realBlockTransactionCount(PendingBlock), 0); + BOOST_REQUIRE_EQUAL(cache.realBlockTransactionCount(999999999), 0); +} + +BOOST_AUTO_TEST_CASE( test_transactions ) { + + simulateMining(*client, 1, Address("0xf6c2a4ba2350e58a45916a03d0faa70dcc5dcfbf")); + + dev::rpc::_detail::GappedTransactionIndexCache cache(10, *client); + + Transaction invalid( + fromHex("0x0011223344556677889900112233445566778899001122334455667788990011223344556677889900112233" + "445566778899001122334455667788990011223344556677889900112233445566778899001122334455667788" + "990011223344556677889900112233445566778899" ), + CheckTransaction::None, true ); + + Transaction valid( + fromHex( "0xf86c808504a817c80083015f90943d7112ee86223baf0a506b9d2a77595cbbba51d1872386f26fc10000801ca0655757fd0650a65a373c48a4dc0f3d6ac5c3831aa0cc2cb863a5909dc6c25f72a071882ee8633466a243c0ea64dadb3120c1ca7a5cc7433c6c0b1c861a85322265" ), + CheckTransaction::None ); + valid.checkOutExternalGas( 1 ); + + client->importTransactionsAsBlock(Transactions{invalid, valid}, 1); + + BOOST_REQUIRE_EQUAL(cache.realBlockTransactionCount(LatestBlock), 2); + BOOST_REQUIRE_EQUAL(cache.gappedBlockTransactionCount(LatestBlock), 1); + BOOST_REQUIRE_EQUAL(cache.realIndexFromGapped(LatestBlock, 0), 1); + BOOST_REQUIRE_EQUAL(cache.gappedIndexFromReal(LatestBlock, 1), 0); + BOOST_REQUIRE_THROW(cache.gappedIndexFromReal(LatestBlock, 0), std::out_of_range); + BOOST_REQUIRE_EQUAL(cache.transactionPresent(LatestBlock, 0), false); + BOOST_REQUIRE_EQUAL(cache.transactionPresent(LatestBlock, 1), true); +} + +BOOST_AUTO_TEST_CASE( test_exceptions ) { + + simulateMining(*client, 1, Address("0xf6c2a4ba2350e58a45916a03d0faa70dcc5dcfbf")); + + dev::rpc::_detail::GappedTransactionIndexCache cache(10, *client); + + Transaction invalid( + fromHex("0x0011223344556677889900112233445566778899001122334455667788990011223344556677889900112233" + "445566778899001122334455667788990011223344556677889900112233445566778899001122334455667788" + 
"990011223344556677889900112233445566778899" ), + CheckTransaction::None, true ); + + Transaction valid( + fromHex( "0xf86c808504a817c80083015f90943d7112ee86223baf0a506b9d2a77595cbbba51d1872386f26fc10000801ca0655757fd0650a65a373c48a4dc0f3d6ac5c3831aa0cc2cb863a5909dc6c25f72a071882ee8633466a243c0ea64dadb3120c1ca7a5cc7433c6c0b1c861a85322265" ), + CheckTransaction::None ); + valid.checkOutExternalGas( 1 ); + + client->importTransactionsAsBlock(Transactions{invalid, valid}, 1); + + BOOST_REQUIRE_THROW(cache.realIndexFromGapped(LatestBlock, 1), std::out_of_range); + BOOST_REQUIRE_THROW(cache.realIndexFromGapped(LatestBlock, 2), std::out_of_range); + BOOST_REQUIRE_THROW(cache.gappedIndexFromReal(LatestBlock, 2), std::out_of_range); + BOOST_REQUIRE_THROW(cache.gappedIndexFromReal(LatestBlock, 0), std::out_of_range); + BOOST_REQUIRE_THROW(cache.transactionPresent(LatestBlock, 2), std::out_of_range); +} + +#endif + +BOOST_AUTO_TEST_SUITE_END() + BOOST_AUTO_TEST_SUITE_END()