diff --git a/.github/workflows/functional-tests.yml b/.github/workflows/functional-tests.yml index 027486dec..d09ff82a3 100644 --- a/.github/workflows/functional-tests.yml +++ b/.github/workflows/functional-tests.yml @@ -24,7 +24,7 @@ with: token: ${{ secrets.PERSONAL_ACCESS_TOKEN }} repository: skalenetwork/skale-ci-integration_tests - ref: master + ref: v3.20.0 submodules: recursive - name: Set up Node uses: actions/setup-node@v3.4.0 diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index ca1114248..4c4b8d943 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -169,7 +169,7 @@ jobs: sudo rm -rf /tmp/tests/* cd build/test export NO_NTP_CHECK=1 - export NO_ULIMIT_CHECK=1 + export NO_ULIMIT_CHECK=1 function run_test() { ./testeth --report_level=detailed -t "$1" -- --express && touch "/tmp/tests/${1}Passed"; } run_test TransitionTests run_test TransactionTests diff --git a/VERSION b/VERSION index 0b3135213..eb9b76c9f 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -3.19.3 +3.20.0 diff --git a/cmake/EthUtils.cmake b/cmake/EthUtils.cmake index 27afe8682..14256b00a 100644 --- a/cmake/EthUtils.cmake +++ b/cmake/EthUtils.cmake @@ -57,7 +57,7 @@ macro(eth_add_test NAME) add_custom_target("test.${NAME}" DEPENDS testeth WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR} - COMMAND ${CMAKE_COMMAND} -DETH_TEST_NAME="${NAME}" -DCTEST_COMMAND="${CTEST_COMMAND}" -P "${ETH_SCRIPTS_DIR}/runtest.cmake" + COMMAND ${CMAKE_COMMAND} -DETH_TEST_NAME="${NAME}" -DCTEST_COMMAND="${CMAKE_CTEST_COMMAND}" -P "${ETH_SCRIPTS_DIR}/runtest.cmake" ) endmacro() diff --git a/deps/build.sh b/deps/build.sh index 664c42448..ce0e030c7 100755 --- a/deps/build.sh +++ b/deps/build.sh @@ -1158,16 +1158,20 @@ then # echo -e "${COLOR_INFO}configuring it${COLOR_DOTS}...${COLOR_RESET}" cd libuv - eval ./autogen.sh - eval ./configure "${CONF_CROSSCOMPILING_OPTS_GENERIC}" --enable-static --disable-shared --with-pic --prefix="$INSTALL_ROOT" "${CONF_DEBUG_OPTIONS}" + # eval ./autogen.sh + # eval ./configure "${CONF_CROSSCOMPILING_OPTS_GENERIC}" --enable-static --disable-shared --with-pic --prefix="$INSTALL_ROOT" "${CONF_DEBUG_OPTIONS}" #--with-sysroot=="$INSTALL_ROOT" - cd .. + mkdir build && cd build + eval "$CMAKE" "${CMAKE_CROSSCOMPILING_OPTS}" -DCMAKE_INSTALL_PREFIX="$INSTALL_ROOT" -DCMAKE_BUILD_TYPE="$TOP_CMAKE_BUILD_TYPE" \ + -DBUILD_SHARED_LIBS=OFF -DLIBUV_BUILD_SHARED=OFF\ + .. + cd ../.. fi echo -e "${COLOR_INFO}building it${COLOR_DOTS}...${COLOR_RESET}" - cd libuv + cd libuv/build eval "$MAKE" "${PARALLEL_MAKE_OPTIONS}" eval "$MAKE" "${PARALLEL_MAKE_OPTIONS}" install - cd .. + cd ../.. 
cd "$SOURCES_ROOT" else echo -e "${COLOR_SUCCESS}SKIPPED${COLOR_RESET}" @@ -1391,13 +1395,21 @@ then cd boost_1_68_0 echo -e "${COLOR_INFO}configuring and building it${COLOR_DOTS}...${COLOR_RESET}" eval ./bootstrap.sh --prefix="$INSTALL_ROOT" --with-libraries=atomic,context,filesystem,program_options,regex,system,thread,date_time,iostreams + + if [ "$DEBUG" = "1" ]; then + variant=debug + else + variant=release + fi + if [ ${ARCH} = "arm" ] then sed -i -e 's#using gcc ;#using gcc : arm : /usr/local/toolchains/gcc7.2-arm/bin/arm-linux-gnueabihf-g++ ;#g' project-config.jam - eval ./b2 "${CONF_CROSSCOMPILING_OPTS_BOOST}" cxxflags=-fPIC cflags=-fPIC "${PARALLEL_MAKE_OPTIONS}" --prefix="$INSTALL_ROOT" --layout=system variant=debug link=static threading=multi install + eval ./b2 "${CONF_CROSSCOMPILING_OPTS_BOOST}" cxxflags=-fPIC cflags=-fPIC "${PARALLEL_MAKE_OPTIONS}" --prefix="$INSTALL_ROOT" --layout=system variant=$variant link=static threading=multi install else - eval ./b2 cxxflags=-fPIC cflags=-fPIC "${PARALLEL_MAKE_OPTIONS}" --prefix="$INSTALL_ROOT" --layout=system variant=debug link=static threading=multi install + eval ./b2 cxxflags=-fPIC cflags=-fPIC "${PARALLEL_MAKE_OPTIONS}" --prefix="$INSTALL_ROOT" --layout=system variant=$variant link=static threading=multi install fi + cd .. cd "$SOURCES_ROOT" else @@ -2082,6 +2094,7 @@ then eval tar -xzf folly-from-git.tar.gz fi echo -e "${COLOR_INFO}fixing it${COLOR_DOTS}...${COLOR_RESET}" + sed -i 's/list(APPEND FOLLY_LINK_LIBRARIES ${LIBUNWIND_LIBRARIES})/list(APPEND FOLLY_LINK_LIBRARIES ${LIBUNWIND_LIBRARIES} lzma)/' ./folly/CMake/folly-deps.cmake sed -i 's/google::InstallFailureFunction(abort);/google::InstallFailureFunction( reinterpret_cast < google::logging_fail_func_t > ( abort ) );/g' ./folly/folly/init/Init.cpp echo -e "${COLOR_INFO}configuring it${COLOR_DOTS}...${COLOR_RESET}" cd folly @@ -2091,6 +2104,8 @@ then -DBOOST_ROOT="$INSTALL_ROOT" -DBOOST_LIBRARYDIR="$INSTALL_ROOT/lib" -DBoost_NO_WARN_NEW_VERSIONS=1 -DBoost_DEBUG=ON \ -DBUILD_SHARED_LIBS=OFF \ -DBUILD_TESTS=OFF -DBUILD_BROKEN_TESTS=OFF -DBUILD_HANGING_TESTS=OFF -DBUILD_SLOW_TESTS=OFF \ + -DCMAKE_INCLUDE_PATH="${INSTALL_ROOT}/include" \ + -DCMAKE_LIBRARY_PATH="${INSTALL_ROOT}/lib" \ .. cd .. 
else @@ -2100,6 +2115,9 @@ then cd build2 eval "$MAKE" "${PARALLEL_MAKE_OPTIONS}" eval "$MAKE" "${PARALLEL_MAKE_OPTIONS}" install + if [ "$DEBUG" = "0" ]; then + eval strip --strip-debug "${INSTALL_ROOT}"/lib/libfolly*.a + fi cd "$SOURCES_ROOT" else echo -e "${COLOR_SUCCESS}SKIPPED${COLOR_RESET}" diff --git a/libethcore/ChainOperationParams.cpp b/libethcore/ChainOperationParams.cpp index 4f11713a9..26eedfe39 100644 --- a/libethcore/ChainOperationParams.cpp +++ b/libethcore/ChainOperationParams.cpp @@ -82,8 +82,10 @@ EVMSchedule const ChainOperationParams::makeEvmSchedule( result = EIP158Schedule; else if ( _workingBlockNumber >= EIP150ForkBlock ) result = EIP150Schedule; + else if ( _workingBlockNumber >= homesteadForkBlock ) + return HomesteadSchedule; else - result = HomesteadSchedule; + return FrontierSchedule; // 2 based on previous - decide by timestamp if ( PushZeroPatch::isEnabledWhen( _committedBlockTimestamp ) ) diff --git a/libethcore/ChainOperationParams.h b/libethcore/ChainOperationParams.h index 1383b84b8..9af1e8d5e 100644 --- a/libethcore/ChainOperationParams.h +++ b/libethcore/ChainOperationParams.h @@ -52,7 +52,10 @@ class PrecompiledContract { u256 const& _blockNumber ) const { return m_cost( _in, _chainParams, _blockNumber ); } - std::pair< bool, bytes > execute( bytesConstRef _in ) const { return m_execute( _in ); } + std::pair< bool, bytes > execute( + bytesConstRef _in, skale::OverlayFS* _overlayFS = nullptr ) const { + return m_execute( _in, _overlayFS ); + } u256 const& startingBlock() const { return m_startingBlock; } @@ -270,9 +273,9 @@ struct ChainOperationParams { Address const& _a, bytesConstRef _in, u256 const& _blockNumber ) const { return precompiled.at( _a ).cost( _in, *this, _blockNumber ); } - std::pair< bool, bytes > executePrecompiled( - Address const& _a, bytesConstRef _in, u256 const& ) const { - return precompiled.at( _a ).execute( _in ); + std::pair< bool, bytes > executePrecompiled( Address const& _a, bytesConstRef _in, u256 const&, + skale::OverlayFS* _overlayFS = nullptr ) const { + return precompiled.at( _a ).execute( _in, _overlayFS ); } bool precompiledExecutionAllowedFrom( Address const& _a, Address const& _from, bool _readOnly ) const { diff --git a/libethcore/EVMSchedule.h b/libethcore/EVMSchedule.h index b9aec53b5..75bd93381 100644 --- a/libethcore/EVMSchedule.h +++ b/libethcore/EVMSchedule.h @@ -94,6 +94,8 @@ struct EVMSchedule { }; static const EVMSchedule DefaultSchedule = EVMSchedule(); +// Used only in GeneralStateTests --all tests +static const EVMSchedule FrontierSchedule = EVMSchedule( false, false, 21000 ); static const EVMSchedule HomesteadSchedule = EVMSchedule( true, true, 53000 ); static const EVMSchedule EIP150Schedule = [] { diff --git a/libethereum/Client.cpp b/libethereum/Client.cpp index 8c01d137f..b44638e89 100644 --- a/libethereum/Client.cpp +++ b/libethereum/Client.cpp @@ -799,12 +799,12 @@ void Client::onPostStateChanged() { void Client::startSealing() { if ( m_wouldSeal == true ) return; - LOG( m_logger ) << cc::notice( "Client::startSealing: " ) << author(); + LOG( m_logger ) << "Client::startSealing: " << author(); if ( author() ) { m_wouldSeal = true; m_signalled.notify_all(); } else - LOG( m_logger ) << cc::warn( "You need to set an author in order to seal!" 
); + LOG( m_logger ) << "You need to set an author in order to seal!"; } void Client::rejigSealing() { @@ -812,24 +812,24 @@ void Client::rejigSealing() { if ( sealEngine()->shouldSeal( this ) ) { m_wouldButShouldnot = false; - LOG( m_loggerDetail ) << cc::notice( "Rejigging seal engine..." ); + LOG( m_loggerDetail ) << "Rejigging seal engine..."; DEV_WRITE_GUARDED( x_working ) { if ( m_working.isSealed() ) { - LOG( m_logger ) << cc::notice( "Tried to seal sealed block..." ); + LOG( m_logger ) << "Tried to seal sealed block..."; return; } // TODO is that needed? we have "Generating seal on" below - LOG( m_loggerDetail ) << cc::notice( "Starting to seal block" ) << " " - << cc::warn( "#" ) << cc::num10( m_working.info().number() ); + LOG( m_loggerDetail ) << "Starting to seal block" + << " #" << m_working.info().number(); - // TODO Deduplicate code! + // TODO Deduplicate code dev::h256 stateRootToSet; if ( m_snapshotAgent->getLatestSnapshotBlockNumer() > 0 ) { - dev::h256 state_root_hash = this->m_snapshotAgent->getSnapshotHash( + dev::h256 stateRootHash = this->m_snapshotAgent->getSnapshotHash( m_snapshotAgent->getLatestSnapshotBlockNumer() ); - stateRootToSet = state_root_hash; + stateRootToSet = stateRootHash; } - // propagate current! + // propagate current else if ( this->number() > 0 ) { stateRootToSet = blockInfo( this->hashFromNumber( this->number() ) ).stateRoot(); @@ -847,15 +847,15 @@ void Client::rejigSealing() { if ( wouldSeal() ) { sealEngine()->onSealGenerated( [=]( bytes const& _header ) { - LOG( m_logger ) << cc::success( "Block sealed" ) << " " << cc::warn( "#" ) - << cc::num10( BlockHeader( _header, HeaderData ).number() ); + LOG( m_logger ) << "Block sealed" + << " #" << BlockHeader( _header, HeaderData ).number(); if ( this->submitSealed( _header ) ) m_onBlockSealed( _header ); else - LOG( m_logger ) << cc::error( "Submitting block failed..." ); + LOG( m_logger ) << "Submitting block failed..."; } ); - ctrace << cc::notice( "Generating seal on " ) << m_sealingInfo.hash( WithoutSeal ) - << " " << cc::warn( "#" ) << cc::num10( m_sealingInfo.number() ); + ctrace << "Generating seal on " << m_sealingInfo.hash( WithoutSeal ) << " #" + << m_sealingInfo.number(); sealEngine()->generateSeal( m_sealingInfo ); } } else @@ -868,24 +868,24 @@ void Client::rejigSealing() { void Client::sealUnconditionally( bool submitToBlockChain ) { m_wouldButShouldnot = false; - LOG( m_loggerDetail ) << cc::notice( "Rejigging seal engine..." ); + LOG( m_loggerDetail ) << "Rejigging seal engine..."; DEV_WRITE_GUARDED( x_working ) { if ( m_working.isSealed() ) { - LOG( m_logger ) << cc::notice( "Tried to seal sealed block..." ); + LOG( m_logger ) << "Tried to seal sealed block..."; return; } // TODO is that needed? we have "Generating seal on" below - LOG( m_loggerDetail ) << cc::notice( "Starting to seal block" ) << " " << cc::warn( "#" ) - << cc::num10( m_working.info().number() ); - // latest hash is really updated after NEXT snapshot already started hash computation! - // TODO Deduplicate code! 
+ LOG( m_loggerDetail ) << "Starting to seal block" + << " #" << m_working.info().number(); + // latest hash is really updated after NEXT snapshot already started hash computation + // TODO Deduplicate code dev::h256 stateRootToSet; if ( m_snapshotAgent->getLatestSnapshotBlockNumer() > 0 ) { - dev::h256 state_root_hash = this->m_snapshotAgent->getSnapshotHash( + dev::h256 stateRootHash = this->m_snapshotAgent->getSnapshotHash( m_snapshotAgent->getLatestSnapshotBlockNumer() ); - stateRootToSet = state_root_hash; + stateRootToSet = stateRootHash; } - // propagate current! + // propagate current else if ( this->number() > 0 ) { stateRootToSet = blockInfo( this->hashFromNumber( this->number() ) ).stateRoot(); } else { diff --git a/libethereum/Executive.cpp b/libethereum/Executive.cpp index 242245ba7..4de49515d 100644 --- a/libethereum/Executive.cpp +++ b/libethereum/Executive.cpp @@ -331,11 +331,8 @@ bool Executive::call( CallParameters const& _p, u256 const& _gasPrice, Address c m_gas = ( u256 )( _p.gas - g ); bytes output; bool success; - // dev::eth::g_state = m_s.delegateWrite(); - dev::eth::g_overlayFS = m_s.fs(); - tie( success, output ) = - m_chainParams.executePrecompiled( _p.codeAddress, _p.data, m_envInfo.number() ); - // m_s = dev::eth::g_state.delegateWrite(); + tie( success, output ) = m_chainParams.executePrecompiled( + _p.codeAddress, _p.data, m_envInfo.number(), m_s.fs().get() ); size_t outputSize = output.size(); m_output = owning_bytes_ref{ std::move( output ), 0, outputSize }; if ( !success ) { diff --git a/libethereum/Precompiled.cpp b/libethereum/Precompiled.cpp index ecbf7f770..bfcff6b7c 100644 --- a/libethereum/Precompiled.cpp +++ b/libethereum/Precompiled.cpp @@ -60,7 +60,6 @@ namespace eth { std::shared_ptr< skutils::json_config_file_accessor > g_configAccesssor; std::shared_ptr< SkaleHost > g_skaleHost; -std::shared_ptr< skale::OverlayFS > g_overlayFS; }; // namespace eth }; // namespace dev @@ -281,7 +280,10 @@ boost::filesystem::path getFileStorageDir( const Address& _address ) { } // TODO: check file name and file existance -ETH_REGISTER_PRECOMPILED( createFile )( bytesConstRef _in ) { +ETH_REGISTER_FS_PRECOMPILED( createFile )( bytesConstRef _in, skale::OverlayFS* _overlayFS ) { + if ( !_overlayFS ) + throw runtime_error( "_overlayFS is nullptr " ); + try { auto rawAddress = _in.cropped( 12, 20 ).toBytes(); std::string address; @@ -297,14 +299,14 @@ ETH_REGISTER_PRECOMPILED( createFile )( bytesConstRef _in ) { const fs::path filePath( rawFilename ); const fs::path fsDirectoryPath = getFileStorageDir( Address( address ) ); if ( !fs::exists( fsDirectoryPath ) ) { - g_overlayFS->createDirectory( fsDirectoryPath.string() ); + _overlayFS->createDirectory( fsDirectoryPath.string() ); } const fs::path fsFilePath = fsDirectoryPath / filePath.parent_path(); if ( filePath.filename().extension() == "._hash" ) { throw std::runtime_error( "createFile() failed because _hash extension is not allowed" ); } - g_overlayFS->createFile( ( fsFilePath / filePath.filename() ).string(), fileSize ); + _overlayFS->createFile( ( fsFilePath / filePath.filename() ).string(), fileSize ); u256 code = 1; bytes response = toBigEndian( code ); @@ -322,7 +324,10 @@ ETH_REGISTER_PRECOMPILED( createFile )( bytesConstRef _in ) { return { false, response }; } -ETH_REGISTER_PRECOMPILED( uploadChunk )( bytesConstRef _in ) { +ETH_REGISTER_FS_PRECOMPILED( uploadChunk )( bytesConstRef _in, skale::OverlayFS* _overlayFS ) { + if ( !_overlayFS ) + throw runtime_error( "_overlayFS is nullptr " ); + try { auto 
rawAddress = _in.cropped( 12, 20 ).toBytes(); std::string address; @@ -349,7 +354,7 @@ ETH_REGISTER_PRECOMPILED( uploadChunk )( bytesConstRef _in ) { const _byte_* data = _in.cropped( 128 + filenameBlocksCount * UINT256_SIZE, dataLength ).data(); - g_overlayFS->writeChunk( filePath.string(), position, dataLength, data ); + _overlayFS->writeChunk( filePath.string(), position, dataLength, data ); u256 code = 1; bytes response = toBigEndian( code ); @@ -451,7 +456,10 @@ ETH_REGISTER_PRECOMPILED( getFileSize )( bytesConstRef _in ) { return { false, response }; } -ETH_REGISTER_PRECOMPILED( deleteFile )( bytesConstRef _in ) { +ETH_REGISTER_FS_PRECOMPILED( deleteFile )( bytesConstRef _in, skale::OverlayFS* _overlayFS ) { + if ( !_overlayFS ) + throw runtime_error( "_overlayFS is nullptr " ); + try { auto rawAddress = _in.cropped( 12, 20 ).toBytes(); std::string address; @@ -462,8 +470,8 @@ ETH_REGISTER_PRECOMPILED( deleteFile )( bytesConstRef _in ) { const fs::path filePath = getFileStorageDir( Address( address ) ) / filename; - g_overlayFS->deleteFile( filePath.string() ); - g_overlayFS->deleteFile( filePath.string() + "._hash" ); + _overlayFS->deleteFile( filePath.string() ); + _overlayFS->deleteFile( filePath.string() + "._hash" ); u256 code = 1; bytes response = toBigEndian( code ); @@ -481,7 +489,10 @@ ETH_REGISTER_PRECOMPILED( deleteFile )( bytesConstRef _in ) { return { false, response }; } -ETH_REGISTER_PRECOMPILED( createDirectory )( bytesConstRef _in ) { +ETH_REGISTER_FS_PRECOMPILED( createDirectory )( bytesConstRef _in, skale::OverlayFS* _overlayFS ) { + if ( !_overlayFS ) + throw runtime_error( "_overlayFS is nullptr " ); + try { auto rawAddress = _in.cropped( 12, 20 ).toBytes(); std::string address; @@ -491,7 +502,7 @@ ETH_REGISTER_PRECOMPILED( createDirectory )( bytesConstRef _in ) { convertBytesToString( _in, 32, directoryPath, directoryPathLength ); const fs::path absolutePath = getFileStorageDir( Address( address ) ) / directoryPath; - g_overlayFS->createDirectory( absolutePath.string() ); + _overlayFS->createDirectory( absolutePath.string() ); u256 code = 1; bytes response = toBigEndian( code ); @@ -509,7 +520,10 @@ ETH_REGISTER_PRECOMPILED( createDirectory )( bytesConstRef _in ) { return { false, response }; } -ETH_REGISTER_PRECOMPILED( deleteDirectory )( bytesConstRef _in ) { +ETH_REGISTER_FS_PRECOMPILED( deleteDirectory )( bytesConstRef _in, skale::OverlayFS* _overlayFS ) { + if ( !_overlayFS ) + throw runtime_error( "_overlayFS is nullptr " ); + try { auto rawAddress = _in.cropped( 12, 20 ).toBytes(); std::string address; @@ -525,8 +539,8 @@ ETH_REGISTER_PRECOMPILED( deleteDirectory )( bytesConstRef _in ) { const std::string absolutePathStr = absolutePath.string(); - g_overlayFS->deleteFile( absolutePathStr + "._hash" ); - g_overlayFS->deleteDirectory( absolutePath.string() ); + _overlayFS->deleteFile( absolutePathStr + "._hash" ); + _overlayFS->deleteDirectory( absolutePath.string() ); u256 code = 1; bytes response = toBigEndian( code ); @@ -544,7 +558,8 @@ ETH_REGISTER_PRECOMPILED( deleteDirectory )( bytesConstRef _in ) { return { false, response }; } -ETH_REGISTER_PRECOMPILED( calculateFileHash )( bytesConstRef _in ) { +ETH_REGISTER_FS_PRECOMPILED( calculateFileHash ) +( bytesConstRef _in, skale::OverlayFS* _overlayFS ) { try { auto rawAddress = _in.cropped( 12, 20 ).toBytes(); std::string address; @@ -560,7 +575,7 @@ ETH_REGISTER_PRECOMPILED( calculateFileHash )( bytesConstRef _in ) { throw std::runtime_error( "calculateFileHash() failed because file does not exist" ); } 
- g_overlayFS->calculateFileHash( filePath.string() ); + _overlayFS->calculateFileHash( filePath.string() ); u256 code = 1; bytes response = toBigEndian( code ); @@ -1057,30 +1072,6 @@ ETH_REGISTER_PRECOMPILED( getBlockRandom )( bytesConstRef ) { } ETH_REGISTER_PRECOMPILED( addBalance )( [[maybe_unused]] bytesConstRef _in ) { - /* - try { - auto rawAddress = _in.cropped( 0, 20 ).toBytes(); - std::string address; - boost::algorithm::hex( rawAddress.begin(), rawAddress.end(), back_inserter( address ) ); - auto add = parseBigEndianRightPadded( _in, 20, 32 ); - - auto value = u256( add ); - - g_state.addBalance( Address( address ), value ); - - dev::u256 code = 1; - bytes response = toBigEndian( code ); - return {true, response}; - } catch ( std::exception& ex ) { - std::string strError = ex.what(); - if ( strError.empty() ) - strError = "exception without description"; - LOG( getLogger( VerbosityError ) ) - << "Exception in precompiled/addBalance(): " << strError << "\n"; - } catch ( ... ) { - LOG( getLogger( VerbosityError ) ) << "Unknown exception in precompiled/addBalance()\n"; - } - */ dev::u256 code = 0; bytes response = toBigEndian( code ); return { false, response }; // 1st false - means bad error occur @@ -1111,47 +1102,4 @@ ETH_REGISTER_PRECOMPILED( getIMABLSPublicKey )( bytesConstRef ) { return { false, response }; // 1st false - means bad error occur } -// ETH_REGISTER_PRECOMPILED( convertUint256ToString )( bytesConstRef _in ) { -// try { -// auto rawValue = _in.cropped( 0, 32 ).toBytes(); -// std::string strValue = ""; -// boost::algorithm::hex( rawValue.begin(), rawValue.end(), back_inserter( strValue ) ); -// bytes response = stat_string_to_bytes_with_length( strValue ); -// return {true, response}; -// } catch ( std::exception& ex ) { -// std::string strError = ex.what(); -// if ( strError.empty() ) -// strError = "exception without description"; -// LOG( getLogger( VerbosityError ) ) -// << "Exception in precompiled/convertUint256ToString(): " << strError << "\n"; -// } catch ( ... ) { -// LOG( getLogger( VerbosityError ) ) -// << "Unknown exception in precompiled/convertUint256ToString()\n"; -// } -// u256 code = 0; -// bytes response = toBigEndian( code ); -// return {false, response}; // 1st false - means bad error occur -//} -// ETH_REGISTER_PRECOMPILED( convertAddressToString )( bytesConstRef _in ) { -// try { -// auto rawAddress = _in.cropped( 12, 20 ).toBytes(); -// std::string strValue = ""; -// boost::algorithm::hex( rawAddress.begin(), rawAddress.end(), back_inserter( strValue ) ); -// bytes response = stat_string_to_bytes_with_length( strValue ); -// return {true, response}; -// } catch ( std::exception& ex ) { -// std::string strError = ex.what(); -// if ( strError.empty() ) -// strError = "exception without description"; -// LOG( getLogger( VerbosityError ) ) -// << "Exception in precompiled/convertAddressToString(): " << strError << "\n"; -// } catch ( ... 
) { -// LOG( getLogger( VerbosityError ) ) -// << "Unknown exception in precompiled/convertAddressToString()\n"; -// } -// u256 code = 0; -// bytes response = toBigEndian( code ); -// return {false, response}; // 1st false - means bad error occur -//} - } // namespace diff --git a/libethereum/Precompiled.h b/libethereum/Precompiled.h index cab79e312..cc0219dc3 100644 --- a/libethereum/Precompiled.h +++ b/libethereum/Precompiled.h @@ -51,11 +51,26 @@ namespace eth { extern std::shared_ptr< skutils::json_config_file_accessor > g_configAccesssor; extern std::shared_ptr< SkaleHost > g_skaleHost; extern skale::State g_state; -extern std::shared_ptr< skale::OverlayFS > g_overlayFS; struct ChainOperationParams; -using PrecompiledExecutor = std::function< std::pair< bool, bytes >( bytesConstRef _in ) >; +// allow call both with overlayFS and without it +class PrecompiledExecutor { +public: + std::pair< bool, bytes > operator()( + bytesConstRef _in, skale::OverlayFS* _overlayFS = nullptr ) const { + return proxy( _in, _overlayFS ); + } + PrecompiledExecutor() {} + PrecompiledExecutor( const std::function< std::pair< bool, bytes >( + bytesConstRef _in, skale::OverlayFS* _overlayFS ) >& _func ) + : proxy( _func ) {} + +private: + std::function< std::pair< bool, bytes >( bytesConstRef _in, skale::OverlayFS* _overlayFS ) > + proxy; +}; + using PrecompiledPricer = std::function< bigint( bytesConstRef _in, ChainOperationParams const& _chainParams, u256 const& _blockNumber ) >; @@ -98,13 +113,26 @@ class PrecompiledRegistrar { static PrecompiledRegistrar* s_this; }; +// ignore _overlayFS param and call registered function with 1 parameter // TODO: unregister on unload with a static object. #define ETH_REGISTER_PRECOMPILED( Name ) \ static std::pair< bool, bytes > __eth_registerPrecompiledFunction##Name( bytesConstRef _in ); \ static PrecompiledExecutor __eth_registerPrecompiledFactory##Name = \ ::dev::eth::PrecompiledRegistrar::registerExecutor( \ - #Name, &__eth_registerPrecompiledFunction##Name ); \ + #Name, PrecompiledExecutor( \ + []( bytesConstRef _in, skale::OverlayFS* ) -> std::pair< bool, bytes > { \ + return __eth_registerPrecompiledFunction##Name( _in ); \ + } ) ); \ + static std::pair< bool, bytes > __eth_registerPrecompiledFunction##Name + +#define ETH_REGISTER_FS_PRECOMPILED( Name ) \ + static std::pair< bool, bytes > __eth_registerPrecompiledFunction##Name( \ + bytesConstRef _in, skale::OverlayFS* _overlayFS ); \ + static PrecompiledExecutor __eth_registerPrecompiledFactory##Name = \ + ::dev::eth::PrecompiledRegistrar::registerExecutor( \ + #Name, PrecompiledExecutor( &__eth_registerPrecompiledFunction##Name ) ); \ static std::pair< bool, bytes > __eth_registerPrecompiledFunction##Name + #define ETH_REGISTER_PRECOMPILED_PRICER( Name ) \ static bigint __eth_registerPricerFunction##Name( \ bytesConstRef _in, ChainOperationParams const& _chainParams, u256 const& _blockNumber ); \ diff --git a/libethereum/SkaleHost.cpp b/libethereum/SkaleHost.cpp index b2d05bd99..0f925c7bb 100644 --- a/libethereum/SkaleHost.cpp +++ b/libethereum/SkaleHost.cpp @@ -601,8 +601,7 @@ void SkaleHost::createBlock( const ConsensusExtFace::transactions_vector& _appro << stCurrent.hex(); // FATAL if mismatch in non-default - if ( _winningNodeIndex != 0 && dev::h256::Arith( stCurrent ) != _stateRoot && - !this->m_client.chainParams().nodeInfo.syncNode ) { + if ( _winningNodeIndex != 0 && dev::h256::Arith( stCurrent ) != _stateRoot ) { LOG( m_errorLogger ) << "FATAL STATE ROOT MISMATCH ERROR: current state root " << 
dev::h256::Arith( stCurrent ).str() << " is not equal to arrived state root " << _stateRoot.str() diff --git a/libethereum/Transaction.h b/libethereum/Transaction.h index 0f6d5d754..1aecefaaa 100644 --- a/libethereum/Transaction.h +++ b/libethereum/Transaction.h @@ -127,7 +127,7 @@ class Transaction : public TransactionBase { void ignoreExternalGas() { m_externalGasIsChecked = true; - m_externalGas = 0; + m_externalGas.reset(); } private: diff --git a/libskale/SnapshotHashAgent.cpp b/libskale/SnapshotHashAgent.cpp index 83797bce8..4afc006c7 100644 --- a/libskale/SnapshotHashAgent.cpp +++ b/libskale/SnapshotHashAgent.cpp @@ -33,60 +33,61 @@ #include #include -SnapshotHashAgent::SnapshotHashAgent( const dev::eth::ChainParams& chain_params, +SnapshotHashAgent::SnapshotHashAgent( const dev::eth::ChainParams& chainParams, const std::array< std::string, 4 >& common_public_key, - const std::string& ipToDownloadSnapshotFrom ) - : chain_params_( chain_params ), - n_( chain_params.sChain.nodes.size() ), - ipToDownloadSnapshotFrom_( ipToDownloadSnapshotFrom ) { + const std::string& urlToDownloadSnapshotFrom ) + : chainParams_( chainParams ), + n_( chainParams.sChain.nodes.size() ), + urlToDownloadSnapshotFrom_( urlToDownloadSnapshotFrom ) { this->hashes_.resize( n_ ); this->signatures_.resize( n_ ); this->public_keys_.resize( n_ ); - this->is_received_.resize( n_ ); + this->isReceived_.resize( n_ ); for ( size_t i = 0; i < n_; ++i ) { - this->is_received_[i] = false; + this->isReceived_[i] = false; } this->bls_.reset( new libBLS::Bls( ( 2 * this->n_ + 1 ) / 3, this->n_ ) ); - common_public_key_.X.c0 = libff::alt_bn128_Fq( common_public_key[0].c_str() ); - common_public_key_.X.c1 = libff::alt_bn128_Fq( common_public_key[1].c_str() ); - common_public_key_.Y.c0 = libff::alt_bn128_Fq( common_public_key[2].c_str() ); - common_public_key_.Y.c1 = libff::alt_bn128_Fq( common_public_key[3].c_str() ); - common_public_key_.Z = libff::alt_bn128_Fq2::one(); - if ( ( common_public_key_.X == libff::alt_bn128_Fq2::zero() && - common_public_key_.Y == libff::alt_bn128_Fq2::one() ) || - !common_public_key_.is_well_formed() ) { + commonPublicKey_.X.c0 = libff::alt_bn128_Fq( common_public_key[0].c_str() ); + commonPublicKey_.X.c1 = libff::alt_bn128_Fq( common_public_key[1].c_str() ); + commonPublicKey_.Y.c0 = libff::alt_bn128_Fq( common_public_key[2].c_str() ); + commonPublicKey_.Y.c1 = libff::alt_bn128_Fq( common_public_key[3].c_str() ); + commonPublicKey_.Z = libff::alt_bn128_Fq2::one(); + if ( ( commonPublicKey_.X == libff::alt_bn128_Fq2::zero() && + commonPublicKey_.Y == libff::alt_bn128_Fq2::one() ) || + !commonPublicKey_.is_well_formed() ) { // zero or corrupted public key was provided in command line this->readPublicKeyFromConfig(); } } void SnapshotHashAgent::readPublicKeyFromConfig() { - this->common_public_key_.X.c0 = - libff::alt_bn128_Fq( chain_params_.nodeInfo.commonBLSPublicKeys[0].c_str() ); - this->common_public_key_.X.c1 = - libff::alt_bn128_Fq( chain_params_.nodeInfo.commonBLSPublicKeys[1].c_str() ); - this->common_public_key_.Y.c0 = - libff::alt_bn128_Fq( chain_params_.nodeInfo.commonBLSPublicKeys[2].c_str() ); - this->common_public_key_.Y.c1 = - libff::alt_bn128_Fq( chain_params_.nodeInfo.commonBLSPublicKeys[3].c_str() ); - this->common_public_key_.Z = libff::alt_bn128_Fq2::one(); + this->commonPublicKey_.X.c0 = + libff::alt_bn128_Fq( chainParams_.nodeInfo.commonBLSPublicKeys[0].c_str() ); + this->commonPublicKey_.X.c1 = + libff::alt_bn128_Fq( chainParams_.nodeInfo.commonBLSPublicKeys[1].c_str() ); + 
this->commonPublicKey_.Y.c0 = + libff::alt_bn128_Fq( chainParams_.nodeInfo.commonBLSPublicKeys[2].c_str() ); + this->commonPublicKey_.Y.c1 = + libff::alt_bn128_Fq( chainParams_.nodeInfo.commonBLSPublicKeys[3].c_str() ); + this->commonPublicKey_.Z = libff::alt_bn128_Fq2::one(); } size_t SnapshotHashAgent::verifyAllData() const { size_t verified = 0; for ( size_t i = 0; i < this->n_; ++i ) { - if ( this->chain_params_.nodeInfo.id == this->chain_params_.sChain.nodes[i].id ) { + if ( this->chainParams_.nodeInfo.id == this->chainParams_.sChain.nodes.at( i ).id ) { continue; } - if ( this->is_received_[i] ) { + if ( this->isReceived_.at( i ) ) { bool is_verified = false; libff::inhibit_profiling_info = true; try { - is_verified = this->bls_->Verification( - std::make_shared< std::array< uint8_t, 32 > >( this->hashes_[i].asArray() ), - this->signatures_[i], this->public_keys_[i] ); + is_verified = + this->bls_->Verification( std::make_shared< std::array< uint8_t, 32 > >( + this->hashes_.at( i ).asArray() ), + this->signatures_.at( i ), this->public_keys_.at( i ) ); } catch ( std::exception& ex ) { cerror << ex.what(); } @@ -107,284 +108,263 @@ size_t SnapshotHashAgent::verifyAllData() const { bool SnapshotHashAgent::voteForHash() { std::map< dev::h256, size_t > map_hash; - if ( 3 * this->verifyAllData() < 2 * this->n_ + 1 && ipToDownloadSnapshotFrom_.empty() ) { + if ( 3 * this->verifyAllData() < 2 * this->n_ + 1 && urlToDownloadSnapshotFrom_.empty() ) { return false; } - const std::lock_guard< std::mutex > lock( this->hashes_mutex ); + const std::lock_guard< std::mutex > lock( this->hashesMutex ); for ( size_t i = 0; i < this->n_; ++i ) { - if ( this->chain_params_.nodeInfo.id == this->chain_params_.sChain.nodes[i].id ) { + if ( this->chainParams_.nodeInfo.id == this->chainParams_.sChain.nodes.at( i ).id ) { continue; } - map_hash[this->hashes_[i]] += 1; + map_hash[this->hashes_.at( i )] += 1; } std::map< dev::h256, size_t >::iterator it; - if ( ipToDownloadSnapshotFrom_.empty() ) { - it = std::find_if( - map_hash.begin(), map_hash.end(), [this]( const std::pair< dev::h256, size_t > p ) { - return 3 * p.second > 2 * this->n_; - } ); - cnote << "Snapshot hash is: " << ( *it ).first << " .Verifying it...\n"; - - if ( it == map_hash.end() ) { - throw NotEnoughVotesException( "note enough votes to choose hash" ); - return false; - } else { - std::vector< size_t > idx; - std::vector< libff::alt_bn128_G1 > signatures; - for ( size_t i = 0; i < this->n_; ++i ) { - if ( this->chain_params_.nodeInfo.id == this->chain_params_.sChain.nodes[i].id ) { - continue; - } + it = std::find_if( map_hash.begin(), map_hash.end(), + [this]( const std::pair< dev::h256, size_t > p ) { return 3 * p.second > 2 * this->n_; } ); + cnote << "Snapshot hash is: " << ( *it ).first << ". 
Verifying it..."; - if ( this->hashes_[i] == ( *it ).first ) { - this->nodes_to_download_snapshot_from_.push_back( i ); - idx.push_back( i + 1 ); - signatures.push_back( this->signatures_[i] ); - } + if ( it == map_hash.end() ) { + throw NotEnoughVotesException( "not enough votes to choose hash" ); + return false; + } else { + std::vector< size_t > idx; + std::vector< libff::alt_bn128_G1 > signatures; + for ( size_t i = 0; i < this->n_; ++i ) { + if ( this->chainParams_.nodeInfo.id == this->chainParams_.sChain.nodes.at( i ).id ) { + continue; + } - std::vector< libff::alt_bn128_Fr > lagrange_coeffs; - libff::alt_bn128_G1 common_signature; - try { - lagrange_coeffs = - libBLS::ThresholdUtils::LagrangeCoeffs( idx, ( 2 * this->n_ + 1 ) / 3 ); - common_signature = this->bls_->SignatureRecover( signatures, lagrange_coeffs ); - } catch ( libBLS::ThresholdUtils::IncorrectInput& ex ) { - cerror << cc::error( - "Exception while recovering common signature from other skaleds: " ) - << cc::warn( ex.what() ) << std::endl; - } catch ( libBLS::ThresholdUtils::IsNotWellFormed& ex ) { - cerror << cc::error( - "Exception while recovering common signature from other skaleds: " ) - << cc::warn( ex.what() ) << std::endl; + if ( this->hashes_.at( i ) == ( *it ).first ) { + this->nodesToDownloadSnapshotFrom_.push_back( i ); + idx.push_back( i + 1 ); + signatures.push_back( this->signatures_.at( i ) ); } + } - bool is_verified = false; + std::vector< libff::alt_bn128_Fr > lagrange_coeffs; + libff::alt_bn128_G1 common_signature; + try { + lagrange_coeffs = + libBLS::ThresholdUtils::LagrangeCoeffs( idx, ( 2 * this->n_ + 1 ) / 3 ); + common_signature = this->bls_->SignatureRecover( signatures, lagrange_coeffs ); + } catch ( libBLS::ThresholdUtils::IncorrectInput& ex ) { + cerror << "Exception while recovering common signature from other skaleds: " + << ex.what(); + } catch ( libBLS::ThresholdUtils::IsNotWellFormed& ex ) { + cerror << "Exception while recovering common signature from other skaleds: " + << ex.what(); + } + + bool is_verified = false; + try { + libff::inhibit_profiling_info = true; + is_verified = this->bls_->Verification( + std::make_shared< std::array< uint8_t, 32 > >( ( *it ).first.asArray() ), + common_signature, this->commonPublicKey_ ); + } catch ( libBLS::ThresholdUtils::IsNotWellFormed& ex ) { + cerror << "Exception while verifying common signature from other skaleds: " + << ex.what(); + } + + if ( !is_verified ) { + cerror << "Common BLS signature wasn't verified, probably using incorrect " + "common public key specified in command line. 
Trying again with " + "common public key from config"; + + libff::alt_bn128_G2 commonPublicKey_from_config; + commonPublicKey_from_config.X.c0 = + libff::alt_bn128_Fq( this->chainParams_.nodeInfo.commonBLSPublicKeys[0].c_str() ); + commonPublicKey_from_config.X.c1 = + libff::alt_bn128_Fq( this->chainParams_.nodeInfo.commonBLSPublicKeys[1].c_str() ); + commonPublicKey_from_config.Y.c0 = + libff::alt_bn128_Fq( this->chainParams_.nodeInfo.commonBLSPublicKeys[2].c_str() ); + commonPublicKey_from_config.Y.c1 = + libff::alt_bn128_Fq( this->chainParams_.nodeInfo.commonBLSPublicKeys[3].c_str() ); + commonPublicKey_from_config.Z = libff::alt_bn128_Fq2::one(); + std::cout << "NEW BLS COMMON PUBLIC KEY:\n"; + commonPublicKey_from_config.print_coordinates(); try { - libff::inhibit_profiling_info = true; is_verified = this->bls_->Verification( std::make_shared< std::array< uint8_t, 32 > >( ( *it ).first.asArray() ), - common_signature, this->common_public_key_ ); + common_signature, commonPublicKey_from_config ); } catch ( libBLS::ThresholdUtils::IsNotWellFormed& ex ) { - cerror << cc::error( - "Exception while verifying common signature from other skaleds: " ) - << cc::warn( ex.what() ) << std::endl; + cerror << "Exception while verifying common signature from other skaleds: " + << ex.what(); } if ( !is_verified ) { - cerror << cc::error( - "Common BLS signature wasn't verified, probably using incorrect " - "common public key specified in command line. Trying again with " - "common public key from config" ) - << std::endl; - - libff::alt_bn128_G2 common_public_key_from_config; - common_public_key_from_config.X.c0 = libff::alt_bn128_Fq( - this->chain_params_.nodeInfo.commonBLSPublicKeys[0].c_str() ); - common_public_key_from_config.X.c1 = libff::alt_bn128_Fq( - this->chain_params_.nodeInfo.commonBLSPublicKeys[1].c_str() ); - common_public_key_from_config.Y.c0 = libff::alt_bn128_Fq( - this->chain_params_.nodeInfo.commonBLSPublicKeys[2].c_str() ); - common_public_key_from_config.Y.c1 = libff::alt_bn128_Fq( - this->chain_params_.nodeInfo.commonBLSPublicKeys[3].c_str() ); - common_public_key_from_config.Z = libff::alt_bn128_Fq2::one(); - std::cout << "NEW BLS COMMON PUBLIC KEY:\n"; - common_public_key_from_config.print_coordinates(); - try { - is_verified = this->bls_->Verification( - std::make_shared< std::array< uint8_t, 32 > >( ( *it ).first.asArray() ), - common_signature, common_public_key_from_config ); - } catch ( libBLS::ThresholdUtils::IsNotWellFormed& ex ) { - cerror - << cc::error( - "Exception while verifying common signature from other skaleds: " ) - << cc::warn( ex.what() ) << std::endl; - } - - if ( !is_verified ) { - cerror << cc::error( - "Common BLS signature wasn't verified, snapshot will not be " - "downloaded. Try to backup node manually using skale-node-cli." ) - << std::endl; - return false; - } else { - cnote << cc::info( - "Common BLS signature was verified with common public key " - "from config." ) - << std::endl; - this->common_public_key_ = common_public_key_from_config; - } + cerror << "Common BLS signature wasn't verified, snapshot will not be " + "downloaded. 
Try to backup node manually using skale-node-cli."; + return false; + } else { + cnote << "Common BLS signature was verified with common public key " + "from config."; + this->commonPublicKey_ = commonPublicKey_from_config; } + } - this->voted_hash_.first = ( *it ).first; - this->voted_hash_.second = common_signature; + this->votedHash_.first = ( *it ).first; + this->votedHash_.second = common_signature; - return true; - } + return true; + } + + return true; +} + +std::tuple< dev::h256, libff::alt_bn128_G1, libff::alt_bn128_G2 > SnapshotHashAgent::askNodeForHash( + const std::string& url, unsigned blockNumber ) { + jsonrpc::HttpClient* jsonRpcClient = new jsonrpc::HttpClient( url ); + SkaleClient skaleClient( *jsonRpcClient ); + + Json::Value joSignatureResponse; + try { + joSignatureResponse = skaleClient.skale_getSnapshotSignature( blockNumber ); + } catch ( jsonrpc::JsonRpcException& ex ) { + cerror << "WARNING " + << "Error while trying to get snapshot signature from " << url << " : " << ex.what(); + delete jsonRpcClient; + return {}; + } + + if ( !joSignatureResponse.get( "hash", 0 ) || !joSignatureResponse.get( "X", 0 ) || + !joSignatureResponse.get( "Y", 0 ) ) { + cerror << "WARNING " + << " Signature from " + url + + "-th node was not received during " + "getNodesToDownloadSnapshotFrom "; + delete jsonRpcClient; + + return {}; } else { - size_t nodeIdx = std::distance( this->chain_params_.sChain.nodes.begin(), - std::find_if( this->chain_params_.sChain.nodes.begin(), - this->chain_params_.sChain.nodes.end(), [this]( const dev::eth::sChainNode& node ) { - return node.ip == ipToDownloadSnapshotFrom_; - } ) ); - - dev::h256 requiredHashValue = this->hashes_[nodeIdx]; - if ( requiredHashValue == dev::h256() ) { - throw IsNotVerified( "Hash from the required node is empty" ); + std::string strHash = joSignatureResponse["hash"].asString(); + cnote << "Received snapshot hash from " << url << " : " << strHash << '\n'; + + libff::alt_bn128_G1 signature = + libff::alt_bn128_G1( libff::alt_bn128_Fq( joSignatureResponse["X"].asCString() ), + libff::alt_bn128_Fq( joSignatureResponse["Y"].asCString() ), + libff::alt_bn128_Fq::one() ); + + libff::alt_bn128_G2 publicKey; + if ( urlToDownloadSnapshotFrom_.empty() ) { + Json::Value joPublicKeyResponse = skaleClient.skale_imaInfo(); + + publicKey.X.c0 = + libff::alt_bn128_Fq( joPublicKeyResponse["BLSPublicKey0"].asCString() ); + publicKey.X.c1 = + libff::alt_bn128_Fq( joPublicKeyResponse["BLSPublicKey1"].asCString() ); + publicKey.Y.c0 = + libff::alt_bn128_Fq( joPublicKeyResponse["BLSPublicKey2"].asCString() ); + publicKey.Y.c1 = + libff::alt_bn128_Fq( joPublicKeyResponse["BLSPublicKey3"].asCString() ); + publicKey.Z = libff::alt_bn128_Fq2::one(); + } else { + publicKey = libff::alt_bn128_G2::one(); + publicKey.to_affine_coordinates(); } - it = map_hash.find( requiredHashValue ); - - this->voted_hash_.first = ( *it ).first; - this->voted_hash_.second = this->signatures_[nodeIdx]; + delete jsonRpcClient; - this->nodes_to_download_snapshot_from_.push_back( nodeIdx ); + return { dev::h256( strHash ), signature, publicKey }; } - - return true; } std::vector< std::string > SnapshotHashAgent::getNodesToDownloadSnapshotFrom( - unsigned block_number ) { + unsigned blockNumber ) { libff::init_alt_bn128_params(); std::vector< std::thread > threads; - for ( size_t i = 0; i < this->n_; ++i ) { - if ( this->chain_params_.nodeInfo.id == this->chain_params_.sChain.nodes[i].id ) { - continue; - } - - threads.push_back( std::thread( [this, i, block_number]() { - try 
{ - jsonrpc::HttpClient* jsonRpcClient = new jsonrpc::HttpClient( - "http://" + this->chain_params_.sChain.nodes[i].ip + ':' + - ( this->chain_params_.sChain.nodes[i].port + 3 ).convert_to< std::string >() ); - SkaleClient skaleClient( *jsonRpcClient ); + if ( urlToDownloadSnapshotFrom_.empty() ) { + for ( size_t i = 0; i < this->n_; ++i ) { + if ( this->chainParams_.nodeInfo.id == this->chainParams_.sChain.nodes.at( i ).id ) { + continue; + } - Json::Value joSignatureResponse; + threads.push_back( std::thread( [this, i, blockNumber]() { try { - joSignatureResponse = skaleClient.skale_getSnapshotSignature( block_number ); - } catch ( jsonrpc::JsonRpcException& ex ) { - cerror << "WARNING " - << "Error while trying to get snapshot signature from " - << this->chain_params_.sChain.nodes[i].ip << " : " << ex.what(); - delete jsonRpcClient; - return; - } - - if ( !joSignatureResponse.get( "hash", 0 ) || !joSignatureResponse.get( "X", 0 ) || - !joSignatureResponse.get( "Y", 0 ) ) { - cerror << "WARNING " - << " Signature from " + std::to_string( i ) + - "-th node was not received during " - "getNodesToDownloadSnapshotFrom "; - delete jsonRpcClient; - } else { - const std::lock_guard< std::mutex > lock( this->hashes_mutex ); - - this->is_received_[i] = true; - - std::string str_hash = joSignatureResponse["hash"].asString(); - cnote << "Received snapshot hash from " - << "http://" + this->chain_params_.sChain.nodes[i].ip + ':' + - ( this->chain_params_.sChain.nodes[i].port + 3 ) - .convert_to< std::string >() - << " : " << str_hash << '\n'; - - libff::alt_bn128_G1 signature = libff::alt_bn128_G1( - libff::alt_bn128_Fq( joSignatureResponse["X"].asCString() ), - libff::alt_bn128_Fq( joSignatureResponse["Y"].asCString() ), - libff::alt_bn128_Fq::one() ); - - Json::Value joPublicKeyResponse = skaleClient.skale_imaInfo(); - - libff::alt_bn128_G2 public_key; - public_key.X.c0 = - libff::alt_bn128_Fq( joPublicKeyResponse["BLSPublicKey0"].asCString() ); - public_key.X.c1 = - libff::alt_bn128_Fq( joPublicKeyResponse["BLSPublicKey1"].asCString() ); - public_key.Y.c0 = - libff::alt_bn128_Fq( joPublicKeyResponse["BLSPublicKey2"].asCString() ); - public_key.Y.c1 = - libff::alt_bn128_Fq( joPublicKeyResponse["BLSPublicKey3"].asCString() ); - public_key.Z = libff::alt_bn128_Fq2::one(); - - this->hashes_[i] = dev::h256( str_hash ); - this->signatures_[i] = signature; - this->public_keys_[i] = public_key; - - delete jsonRpcClient; + std::string nodeUrl = "http://" + this->chainParams_.sChain.nodes.at( i ).ip + + ':' + + ( this->chainParams_.sChain.nodes.at( i ).port + 3 ) + .convert_to< std::string >(); + auto snapshotData = askNodeForHash( nodeUrl, blockNumber ); + if ( std::get< 0 >( snapshotData ).size ) { + const std::lock_guard< std::mutex > lock( this->hashesMutex ); + + this->isReceived_.at( i ) = true; + this->hashes_.at( i ) = std::get< 0 >( snapshotData ); + this->signatures_.at( i ) = std::get< 1 >( snapshotData ); + this->public_keys_.at( i ) = std::get< 2 >( snapshotData ); + } + } catch ( std::exception& ex ) { + cerror << "Exception while collecting snapshot signatures from other skaleds: " + << ex.what(); } - } catch ( std::exception& ex ) { - cerror - << cc::error( - "Exception while collecting snapshot signatures from other skaleds: " ) - << cc::warn( ex.what() ) << std::endl; - } - } ) ); - } + } ) ); + } - for ( auto& thr : threads ) { - thr.join(); + for ( auto& thr : threads ) { + thr.join(); + } + } else { + auto snapshotData = askNodeForHash( urlToDownloadSnapshotFrom_, blockNumber ); + 
this->votedHash_ = { std::get< 0 >( snapshotData ), std::get< 1 >( snapshotData ) }; + return { urlToDownloadSnapshotFrom_ }; } bool result = false; - if ( !AmsterdamFixPatch::snapshotHashCheckingEnabled( this->chain_params_ ) ) { + if ( !AmsterdamFixPatch::snapshotHashCheckingEnabled( this->chainParams_ ) ) { // keep only nodes from majorityNodesIds auto majorityNodesIds = AmsterdamFixPatch::majorityNodesIds(); dev::h256 common_hash; // should be same everywhere! for ( size_t pos = 0; pos < this->n_; ++pos ) { - if ( !this->is_received_[pos] ) + if ( !this->isReceived_.at( pos ) ) continue; - u256 id = this->chain_params_.sChain.nodes[pos].id; + u256 id = this->chainParams_.sChain.nodes.at( pos ).id; bool good = majorityNodesIds.end() != std::find( majorityNodesIds.begin(), majorityNodesIds.end(), id ); if ( !good ) continue; if ( common_hash == dev::h256() ) { - common_hash = this->hashes_[pos]; - this->voted_hash_.first = common_hash; + common_hash = this->hashes_.at( pos ); + this->votedHash_.first = common_hash; // .second will ne ignored! - } else if ( this->hashes_[pos] != common_hash ) { + } else if ( this->hashes_.at( pos ) != common_hash ) { result = false; break; } - nodes_to_download_snapshot_from_.push_back( pos ); + nodesToDownloadSnapshotFrom_.push_back( pos ); } // for i - result = this->nodes_to_download_snapshot_from_.size() > 0; + result = this->nodesToDownloadSnapshotFrom_.size() > 0; } else try { result = this->voteForHash(); } catch ( SnapshotHashAgentException& ex ) { - cerror << cc::error( "Exception while voting for snapshot hash from other skaleds: " ) - << cc::warn( ex.what() ) << std::endl; + cerror << "Exception while voting for snapshot hash from other skaleds: " << ex.what(); } catch ( std::exception& ex ) { - cerror << cc::error( "Exception while voting for snapshot hash from other skaleds: " ) - << cc::warn( ex.what() ) << std::endl; + cerror << "Exception while voting for snapshot hash from other skaleds: " << ex.what(); } // catch if ( !result ) { - cnote << "Not enough nodes to choose snapshot hash for block " - << std::to_string( block_number ); + cnote << "Not enough nodes to choose snapshot hash for block " << blockNumber; return {}; } std::vector< std::string > ret; - for ( const size_t idx : this->nodes_to_download_snapshot_from_ ) { + for ( const size_t idx : this->nodesToDownloadSnapshotFrom_ ) { std::string ret_value = - std::string( "http://" ) + std::string( this->chain_params_.sChain.nodes[idx].ip ) + + std::string( "http://" ) + std::string( this->chainParams_.sChain.nodes.at( idx ).ip ) + std::string( ":" ) + - ( this->chain_params_.sChain.nodes[idx].port + 3 ).convert_to< std::string >(); + ( this->chainParams_.sChain.nodes.at( idx ).port + 3 ).convert_to< std::string >(); ret.push_back( ret_value ); } @@ -392,16 +372,16 @@ std::vector< std::string > SnapshotHashAgent::getNodesToDownloadSnapshotFrom( } std::pair< dev::h256, libff::alt_bn128_G1 > SnapshotHashAgent::getVotedHash() const { - if ( this->voted_hash_.first == dev::h256() ) { + if ( this->votedHash_.first == dev::h256() ) { throw std::invalid_argument( "Hash is empty" ); } - if ( AmsterdamFixPatch::snapshotHashCheckingEnabled( this->chain_params_ ) ) { - if ( this->voted_hash_.second == libff::alt_bn128_G1::zero() || - !this->voted_hash_.second.is_well_formed() ) { + if ( AmsterdamFixPatch::snapshotHashCheckingEnabled( this->chainParams_ ) ) { + if ( this->votedHash_.second == libff::alt_bn128_G1::zero() || + !this->votedHash_.second.is_well_formed() ) { throw 
std::invalid_argument( "Signature is not well formed" ); } } - return this->voted_hash_; + return this->votedHash_; } diff --git a/libskale/SnapshotHashAgent.h b/libskale/SnapshotHashAgent.h index 87d71a659..23a8b5861 100644 --- a/libskale/SnapshotHashAgent.h +++ b/libskale/SnapshotHashAgent.h @@ -65,33 +65,35 @@ class IsNotVerified : public SnapshotHashAgentException { class SnapshotHashAgent { public: - SnapshotHashAgent( const dev::eth::ChainParams& chain_params, - const std::array< std::string, 4 >& common_public_key, - const std::string& ipToDownloadSnapshotFrom ); + SnapshotHashAgent( const dev::eth::ChainParams& chainParams, + const std::array< std::string, 4 >& commonPublicKey, + const std::string& urlToDownloadSnapshotFrom = "" ); - std::vector< std::string > getNodesToDownloadSnapshotFrom( unsigned block_number ); + std::vector< std::string > getNodesToDownloadSnapshotFrom( unsigned blockNumber ); std::pair< dev::h256, libff::alt_bn128_G1 > getVotedHash() const; friend class dev::test::SnapshotHashAgentTest; private: - dev::eth::ChainParams chain_params_; + dev::eth::ChainParams chainParams_; unsigned n_; - std::string ipToDownloadSnapshotFrom_; + std::string urlToDownloadSnapshotFrom_; std::shared_ptr< libBLS::Bls > bls_; std::vector< dev::h256 > hashes_; std::vector< libff::alt_bn128_G1 > signatures_; std::vector< libff::alt_bn128_G2 > public_keys_; - std::vector< size_t > nodes_to_download_snapshot_from_; - std::vector< bool > is_received_; - std::mutex hashes_mutex; - libff::alt_bn128_G2 common_public_key_; + std::vector< size_t > nodesToDownloadSnapshotFrom_; + std::vector< bool > isReceived_; + std::mutex hashesMutex; + libff::alt_bn128_G2 commonPublicKey_; bool voteForHash(); void readPublicKeyFromConfig(); - std::pair< dev::h256, libff::alt_bn128_G1 > voted_hash_; + std::tuple< dev::h256, libff::alt_bn128_G1, libff::alt_bn128_G2 > askNodeForHash( + const std::string& url, unsigned blockNumber ); + std::pair< dev::h256, libff::alt_bn128_G1 > votedHash_; size_t verifyAllData() const; }; diff --git a/libskale/SnapshotManager.cpp b/libskale/SnapshotManager.cpp index 8729d2a74..77a25c902 100644 --- a/libskale/SnapshotManager.cpp +++ b/libskale/SnapshotManager.cpp @@ -48,25 +48,37 @@ namespace fs = boost::filesystem; // Can manage snapshots as non-prvivileged user // For send/receive needs root! 
-const std::string SnapshotManager::snapshot_hash_file_name = "snapshot_hash.txt"; +const std::string SnapshotManager::snapshotHashFileName = "snapshot_hash.txt"; // exceptions: // - bad data dir // - not btrfs // - volumes don't exist -SnapshotManager::SnapshotManager( const dev::eth::ChainParams& _chain_params, - const fs::path& _dataDir, const std::vector< std::string >& _volumes, - const std::string& _diffsDir ) - : chain_params( _chain_params ) { - assert( _volumes.size() > 0 ); - - data_dir = _dataDir; - volumes = _volumes; - snapshots_dir = data_dir / "snapshots"; +SnapshotManager::SnapshotManager( const dev::eth::ChainParams& _chainParams, + const fs::path& _dataDir, const std::string& _diffsDir ) + : chainParams( _chainParams ) { + dataDir = _dataDir; + coreVolumes = { dev::eth::BlockChain::getChainDirName( chainParams ), "filestorage", + "prices_" + chainParams.nodeInfo.id.str() + ".db", + "blocks_" + chainParams.nodeInfo.id.str() + ".db" }; + +#ifdef HISTORIC_STATE + archiveVolumes = { "historic_roots", "historic_state" }; +#else + archiveVolumes = {}; +#endif + + allVolumes.reserve( coreVolumes.size() + archiveVolumes.size() ); + allVolumes.insert( allVolumes.end(), coreVolumes.begin(), coreVolumes.end() ); +#ifdef HISTORIC_STATE + allVolumes.insert( allVolumes.end(), archiveVolumes.begin(), archiveVolumes.end() ); +#endif + + snapshotsDir = dataDir / "snapshots"; if ( _diffsDir.empty() ) - diffs_dir = data_dir / "diffs"; + diffsDir = dataDir / "diffs"; else - diffs_dir = _diffsDir; + diffsDir = _diffsDir; if ( !fs::exists( _dataDir ) ) try { @@ -81,16 +93,16 @@ SnapshotManager::SnapshotManager( const dev::eth::ChainParams& _chain_params, } try { - fs::create_directory( snapshots_dir ); + fs::create_directory( snapshotsDir ); if ( _diffsDir.empty() ) { - fs::remove_all( diffs_dir ); - fs::create_directory( diffs_dir ); + fs::remove_all( diffsDir ); + fs::create_directory( diffsDir ); } } catch ( const fs::filesystem_error& ex ) { std::throw_with_nested( CannotWrite( ex.path1() ) ); } // catch - for ( const auto& vol : _volumes ) + for ( const auto& vol : allVolumes ) try { // throw if it is present but is NOT btrfs if ( fs::exists( _dataDir / vol ) && 0 != btrfs.present( ( _dataDir / vol ).c_str() ) ) @@ -109,26 +121,26 @@ SnapshotManager::SnapshotManager( const dev::eth::ChainParams& _chain_params, // - cannot read // - cannot write void SnapshotManager::doSnapshot( unsigned _blockNumber ) { - fs::path snapshot_dir = snapshots_dir / to_string( _blockNumber ); + fs::path snapshotDir = snapshotsDir / to_string( _blockNumber ); UnsafeRegion::lock ur_lock; try { - if ( fs::exists( snapshot_dir ) ) + if ( fs::exists( snapshotDir ) ) throw SnapshotPresent( _blockNumber ); } catch ( const fs::filesystem_error& ) { - std::throw_with_nested( CannotRead( snapshot_dir ) ); + std::throw_with_nested( CannotRead( snapshotDir ) ); } // catch try { - fs::create_directory( snapshot_dir ); + fs::create_directory( snapshotDir ); } catch ( const fs::filesystem_error& ) { - std::throw_with_nested( CannotCreate( snapshot_dir ) ); + std::throw_with_nested( CannotCreate( snapshotDir ) ); } // catch int dummy_counter = 0; - for ( const string& vol : volumes ) { - int res = btrfs.subvolume.snapshot_r( ( data_dir / vol ).c_str(), snapshot_dir.c_str() ); + for ( const string& vol : allVolumes ) { + int res = btrfs.subvolume.snapshot_r( ( dataDir / vol ).c_str(), snapshotDir.c_str() ); if ( res ) throw CannotPerformBtrfsOperation( btrfs.last_cmd(), btrfs.strerror() ); if ( dummy_counter++ == 1 ) @@ 
-140,22 +152,28 @@ void SnapshotManager::doSnapshot( unsigned _blockNumber ) { // - not found/cannot read void SnapshotManager::restoreSnapshot( unsigned _blockNumber ) { try { - if ( !fs::exists( snapshots_dir / to_string( _blockNumber ) ) ) + if ( !fs::exists( snapshotsDir / to_string( _blockNumber ) ) ) throw SnapshotAbsent( _blockNumber ); } catch ( const fs::filesystem_error& ) { - std::throw_with_nested( CannotRead( snapshots_dir / to_string( _blockNumber ) ) ); + std::throw_with_nested( CannotRead( snapshotsDir / to_string( _blockNumber ) ) ); } UnsafeRegion::lock ur_lock; + std::vector< std::string > volumes; + if ( chainParams.nodeInfo.archiveMode && _blockNumber == 0 ) + volumes = coreVolumes; + else + volumes = allVolumes; + int dummy_counter = 0; for ( const string& vol : volumes ) { - if ( fs::exists( data_dir / vol ) ) { - if ( btrfs.subvolume._delete( ( data_dir / vol ).c_str() ) ) + if ( fs::exists( dataDir / vol ) ) { + if ( btrfs.subvolume._delete( ( dataDir / vol ).c_str() ) ) throw CannotPerformBtrfsOperation( btrfs.last_cmd(), btrfs.strerror() ); } if ( btrfs.subvolume.snapshot( - ( snapshots_dir / to_string( _blockNumber ) / vol ).c_str(), data_dir.c_str() ) ) + ( snapshotsDir / to_string( _blockNumber ) / vol ).c_str(), dataDir.c_str() ) ) throw CannotPerformBtrfsOperation( btrfs.last_cmd(), btrfs.strerror() ); if ( dummy_counter++ == 1 ) @@ -175,7 +193,7 @@ boost::filesystem::path SnapshotManager::makeOrGetDiff( unsigned _toBlock ) { if ( fs::is_regular( path ) ) return path; - if ( !fs::exists( snapshots_dir / to_string( _toBlock ) ) ) { + if ( !fs::exists( snapshotsDir / to_string( _toBlock ) ) ) { // TODO wrong error message if this fails fs::remove( path ); throw SnapshotAbsent( _toBlock ); @@ -186,12 +204,13 @@ boost::filesystem::path SnapshotManager::makeOrGetDiff( unsigned _toBlock ) { stringstream volumes_cat; + std::vector< std::string > volumes = _toBlock > 0 ? allVolumes : coreVolumes; for ( auto it = volumes.begin(); it != volumes.end(); ++it ) { const string& vol = *it; if ( it + 1 != volumes.end() ) - volumes_cat << ( snapshots_dir / to_string( _toBlock ) / vol ).string() << " "; + volumes_cat << ( snapshotsDir / to_string( _toBlock ) / vol ).string() << " "; else - volumes_cat << ( snapshots_dir / to_string( _toBlock ) / vol ).string(); + volumes_cat << ( snapshotsDir / to_string( _toBlock ) / vol ).string(); } // for cat UnsafeRegion::lock ur_lock; @@ -214,7 +233,7 @@ boost::filesystem::path SnapshotManager::makeOrGetDiff( unsigned _toBlock ) { // - cannot input as diff (no base state?) 
void SnapshotManager::importDiff( unsigned _toBlock ) { fs::path diffPath = getDiffPath( _toBlock ); - fs::path snapshot_dir = snapshots_dir / to_string( _toBlock ); + fs::path snapshot_dir = snapshotsDir / to_string( _toBlock ); try { if ( !fs::is_regular_file( diffPath ) ) @@ -233,7 +252,7 @@ void SnapshotManager::importDiff( unsigned _toBlock ) { std::throw_with_nested( CannotCreate( snapshot_dir ) ); } // catch - if ( btrfs.receive( diffPath.c_str(), ( snapshots_dir / to_string( _toBlock ) ).c_str() ) ) { + if ( btrfs.receive( diffPath.c_str(), ( snapshotsDir / to_string( _toBlock ) ).c_str() ) ) { auto ex = CannotPerformBtrfsOperation( btrfs.last_cmd(), btrfs.strerror() ); cleanupDirectory( snapshot_dir ); fs::remove_all( snapshot_dir ); @@ -243,12 +262,12 @@ void SnapshotManager::importDiff( unsigned _toBlock ) { boost::filesystem::path SnapshotManager::getDiffPath( unsigned _toBlock ) { // check existance - assert( boost::filesystem::exists( diffs_dir ) ); - return diffs_dir / ( std::to_string( _toBlock ) ); + assert( boost::filesystem::exists( diffsDir ) ); + return diffsDir / ( std::to_string( _toBlock ) ); } void SnapshotManager::removeSnapshot( unsigned _blockNumber ) { - if ( !fs::exists( snapshots_dir / to_string( _blockNumber ) ) ) { + if ( !fs::exists( snapshotsDir / to_string( _blockNumber ) ) ) { throw SnapshotAbsent( _blockNumber ); } @@ -256,9 +275,9 @@ void SnapshotManager::removeSnapshot( unsigned _blockNumber ) { int dummy_counter = 0; - for ( const auto& volume : this->volumes ) { + for ( const auto& volume : allVolumes ) { int res = btrfs.subvolume._delete( - ( this->snapshots_dir / std::to_string( _blockNumber ) / volume ).string().c_str() ); + ( this->snapshotsDir / std::to_string( _blockNumber ) / volume ).string().c_str() ); if ( res != 0 ) { throw CannotPerformBtrfsOperation( btrfs.last_cmd(), btrfs.strerror() ); @@ -268,28 +287,28 @@ void SnapshotManager::removeSnapshot( unsigned _blockNumber ) { batched_io::test_crash_before_commit( "SnapshotManager::doSnapshot" ); } - fs::remove_all( snapshots_dir / to_string( _blockNumber ) ); + fs::remove_all( snapshotsDir / to_string( _blockNumber ) ); } void SnapshotManager::cleanupButKeepSnapshot( unsigned _keepSnapshot ) { - this->cleanupDirectory( snapshots_dir, snapshots_dir / std::to_string( _keepSnapshot ) ); - this->cleanupDirectory( data_dir, snapshots_dir ); - if ( !fs::exists( diffs_dir ) ) + this->cleanupDirectory( snapshotsDir, snapshotsDir / std::to_string( _keepSnapshot ) ); + this->cleanupDirectory( dataDir, snapshotsDir ); + if ( !fs::exists( diffsDir ) ) try { - boost::filesystem::create_directory( diffs_dir ); + boost::filesystem::create_directory( diffsDir ); } catch ( const fs::filesystem_error& ex ) { std::throw_with_nested( CannotWrite( ex.path1() ) ); } } void SnapshotManager::cleanup() { - this->cleanupDirectory( snapshots_dir ); - this->cleanupDirectory( data_dir ); + this->cleanupDirectory( snapshotsDir ); + this->cleanupDirectory( dataDir ); try { - boost::filesystem::create_directory( snapshots_dir ); - if ( !fs::exists( diffs_dir ) ) - boost::filesystem::create_directory( diffs_dir ); + boost::filesystem::create_directory( snapshotsDir ); + if ( !fs::exists( diffsDir ) ) + boost::filesystem::create_directory( diffsDir ); } catch ( const fs::filesystem_error& ex ) { std::throw_with_nested( CannotWrite( ex.path1() ) ); } // catch @@ -323,7 +342,7 @@ void SnapshotManager::cleanupDirectory( // exeptions: filesystem void SnapshotManager::leaveNLastSnapshots( unsigned n ) { map< int, fs::path, 
std::greater< int > > numbers; - for ( auto& f : fs::directory_iterator( snapshots_dir ) ) { + for ( auto& f : fs::directory_iterator( snapshotsDir ) ) { // HACK We exclude 0 snapshot forcefully if ( fs::basename( f ) != "0" ) numbers.insert( make_pair( std::stoi( fs::basename( f ) ), f ) ); @@ -334,11 +353,23 @@ void SnapshotManager::leaveNLastSnapshots( unsigned n ) { for ( const auto& p : numbers ) { if ( i++ > n ) { const fs::path& path = p.second; - for ( const string& v : this->volumes ) { + for ( const string& v : coreVolumes ) { + if ( btrfs.subvolume._delete( ( path / v ).c_str() ) ) { + throw CannotPerformBtrfsOperation( btrfs.last_cmd(), btrfs.strerror() ); + } + } + +#ifdef HISTORIC_STATE + for ( const string& v : archiveVolumes ) { + // ignore as it might indicate that archive volumes weren't snapshotted + if ( !fs::exists( path / v ) ) + continue; if ( btrfs.subvolume._delete( ( path / v ).c_str() ) ) { throw CannotPerformBtrfsOperation( btrfs.last_cmd(), btrfs.strerror() ); } } +#endif + fs::remove_all( path ); } // if } // for @@ -346,7 +377,7 @@ void SnapshotManager::leaveNLastSnapshots( unsigned n ) { std::pair< int, int > SnapshotManager::getLatestSnapshots() const { map< int, fs::path, std::greater< int > > numbers; - for ( auto& f : fs::directory_iterator( snapshots_dir ) ) { + for ( auto& f : fs::directory_iterator( snapshotsDir ) ) { // HACK We exclude 0 snapshot forcefully if ( fs::basename( f ) != "0" ) numbers.insert( make_pair( std::stoi( fs::basename( f ) ), f ) ); @@ -372,7 +403,7 @@ std::pair< int, int > SnapshotManager::getLatestSnapshots() const { // exeptions: filesystem void SnapshotManager::leaveNLastDiffs( unsigned n ) { map< int, fs::path, std::greater< int > > numbers; - for ( auto& f : fs::directory_iterator( diffs_dir ) ) { + for ( auto& f : fs::directory_iterator( diffsDir ) ) { try { numbers.insert( make_pair( std::stoi( fs::basename( f ) ), f ) ); } catch ( ... 
) { /*ignore non-numbers*/ @@ -390,7 +421,7 @@ void SnapshotManager::leaveNLastDiffs( unsigned n ) { } dev::h256 SnapshotManager::getSnapshotHash( unsigned block_number ) const { - fs::path snapshot_dir = snapshots_dir / to_string( block_number ); + fs::path snapshot_dir = snapshotsDir / to_string( block_number ); try { if ( !fs::exists( snapshot_dir ) ) @@ -399,28 +430,28 @@ dev::h256 SnapshotManager::getSnapshotHash( unsigned block_number ) const { std::throw_with_nested( CannotRead( snapshot_dir ) ); } // catch - std::string hash_file = - ( this->snapshots_dir / std::to_string( block_number ) / this->snapshot_hash_file_name ) + std::string hashFile = + ( this->snapshotsDir / std::to_string( block_number ) / this->snapshotHashFileName ) .string(); if ( !isSnapshotHashPresent( block_number ) ) { - BOOST_THROW_EXCEPTION( SnapshotManager::CannotRead( hash_file ) ); + BOOST_THROW_EXCEPTION( SnapshotManager::CannotRead( hashFile ) ); } dev::h256 hash; try { - std::lock_guard< std::mutex > lock( hash_file_mutex ); - std::ifstream in( hash_file ); + std::lock_guard< std::mutex > lock( hashFileMutex ); + std::ifstream in( hashFile ); in >> hash; } catch ( const std::exception& ex ) { - std::throw_with_nested( SnapshotManager::CannotRead( hash_file ) ); + std::throw_with_nested( SnapshotManager::CannotRead( hashFile ) ); } return hash; } bool SnapshotManager::isSnapshotHashPresent( unsigned _blockNumber ) const { - fs::path snapshot_dir = snapshots_dir / to_string( _blockNumber ); + fs::path snapshot_dir = snapshotsDir / to_string( _blockNumber ); try { if ( !fs::exists( snapshot_dir ) ) @@ -429,13 +460,13 @@ bool SnapshotManager::isSnapshotHashPresent( unsigned _blockNumber ) const { std::throw_with_nested( CannotRead( snapshot_dir ) ); } // catch - boost::filesystem::path hash_file = - this->snapshots_dir / std::to_string( _blockNumber ) / this->snapshot_hash_file_name; + boost::filesystem::path hashFile = snapshot_dir / this->snapshotHashFileName; try { - std::lock_guard< std::mutex > lock( hash_file_mutex ); - return boost::filesystem::exists( hash_file ); + std::lock_guard< std::mutex > lock( hashFileMutex ); + + return boost::filesystem::exists( hashFile ); } catch ( const fs::filesystem_error& ) { - std::throw_with_nested( CannotRead( hash_file ) ); + std::throw_with_nested( CannotRead( hashFile ) ); } } @@ -472,7 +503,7 @@ void SnapshotManager::addLastPriceToHash( unsigned _blockNumber, secp256k1_sha25 dev::u256 last_price = 0; // manually open DB boost::filesystem::path prices_path = - this->snapshots_dir / std::to_string( _blockNumber ) / this->volumes[2]; + this->snapshotsDir / std::to_string( _blockNumber ) / coreVolumes[2]; if ( boost::filesystem::exists( prices_path ) ) { boost::filesystem::directory_iterator it( prices_path ), end; std::string last_price_str; @@ -615,17 +646,16 @@ void SnapshotManager::computeFileStorageHash( const boost::filesystem::path& _fi void SnapshotManager::computeAllVolumesHash( unsigned _blockNumber, secp256k1_sha256_t* ctx, bool is_checking ) const { - assert( this->volumes.size() != 0 ); + assert( allVolumes.size() != 0 ); // TODO XXX Remove volumes structure knowledge from here!! 
this->computeDatabaseHash( - this->snapshots_dir / std::to_string( _blockNumber ) / this->volumes[0] / "12041" / "state", + this->snapshotsDir / std::to_string( _blockNumber ) / coreVolumes[0] / "12041" / "state", ctx ); - boost::filesystem::path blocks_extras_path = this->snapshots_dir / - std::to_string( _blockNumber ) / this->volumes[0] / - "blocks_and_extras"; + boost::filesystem::path blocks_extras_path = + this->snapshotsDir / std::to_string( _blockNumber ) / coreVolumes[0] / "blocks_and_extras"; // few dbs boost::filesystem::directory_iterator directory_it( blocks_extras_path ), end; @@ -651,12 +681,57 @@ void SnapshotManager::computeAllVolumesHash( // filestorage this->computeFileStorageHash( - this->snapshots_dir / std::to_string( _blockNumber ) / "filestorage", ctx, is_checking ); + this->snapshotsDir / std::to_string( _blockNumber ) / "filestorage", ctx, is_checking ); // if have prices and blocks - if ( _blockNumber && this->volumes.size() > 3 ) { + if ( _blockNumber && allVolumes.size() > 3 ) { this->addLastPriceToHash( _blockNumber, ctx ); } + + // disable this code until further notice + // we haven't implemented hash computation for archive submodules yet + // if ( chainParams.nodeInfo.archiveMode ) { + // // save partial snapshot hash + // secp256k1_sha256_t partialCtx = *ctx; + + // dev::h256 partialHash; + // secp256k1_sha256_finalize( &partialCtx, partialHash.data() ); + + // string hashFile = ( this->snapshotsDir / std::to_string( _blockNumber ) ).string() + + // '/' + + // this->partialSnapshotHashFileName; + + // try { + // std::lock_guard< std::mutex > lock( hashFileMutex ); + // std::ofstream out( hashFile ); + // out.clear(); + // out << partialHash; + // } catch ( const std::exception& ex ) { + // std::throw_with_nested( SnapshotManager::CannotCreate( hashFile ) ); + // } + + + // if ( _blockNumber > 0 ) { + // // archive blocks + // for ( auto& content : contents ) { + // if ( content.leaf().string().find( "archive" ) == std::string::npos ) + // continue; + // this->computeDatabaseHash( content, ctx ); + // } + + //#ifdef HISTORIC_STATE + // // historic dbs + // this->computeDatabaseHash( + // this->snapshotsDir / std::to_string( _blockNumber ) / archiveVolumes[0] / + // dev::eth::BlockChain::getChainDirName( chainParams ) / "state", + // ctx ); + // this->computeDatabaseHash( + // this->snapshotsDir / std::to_string( _blockNumber ) / archiveVolumes[1] / + // dev::eth::BlockChain::getChainDirName( chainParams ) / "state", + // ctx ); + //#endif + // } + // } } void SnapshotManager::computeSnapshotHash( unsigned _blockNumber, bool is_checking ) { @@ -672,10 +747,16 @@ void SnapshotManager::computeSnapshotHash( unsigned _blockNumber, bool is_checki int dummy_counter = 0; - for ( const auto& volume : this->volumes ) { + std::vector< std::string > volumes; + if ( chainParams.nodeInfo.archiveMode && _blockNumber == 0 ) + volumes = coreVolumes; + else + volumes = allVolumes; + + for ( const auto& volume : volumes ) { int res = btrfs.subvolume.property_set( - ( this->snapshots_dir / std::to_string( _blockNumber ) / volume ).string().c_str(), - "ro", "false" ); + ( this->snapshotsDir / std::to_string( _blockNumber ) / volume ).string().c_str(), "ro", + "false" ); if ( res != 0 ) { throw CannotPerformBtrfsOperation( btrfs.last_cmd(), btrfs.strerror() ); @@ -687,10 +768,10 @@ void SnapshotManager::computeSnapshotHash( unsigned _blockNumber, bool is_checki this->computeAllVolumesHash( _blockNumber, &ctx, is_checking ); - for ( const auto& volume : this->volumes ) { + for ( 
const auto& volume : volumes ) { int res = btrfs.subvolume.property_set( - ( this->snapshots_dir / std::to_string( _blockNumber ) / volume ).string().c_str(), - "ro", "true" ); + ( this->snapshotsDir / std::to_string( _blockNumber ) / volume ).string().c_str(), "ro", + "true" ); if ( res != 0 ) { throw CannotPerformBtrfsOperation( btrfs.last_cmd(), btrfs.strerror() ); @@ -700,11 +781,11 @@ void SnapshotManager::computeSnapshotHash( unsigned _blockNumber, bool is_checki dev::h256 hash; secp256k1_sha256_finalize( &ctx, hash.data() ); - string hash_file = ( this->snapshots_dir / std::to_string( _blockNumber ) ).string() + '/' + - this->snapshot_hash_file_name; + string hash_file = ( this->snapshotsDir / std::to_string( _blockNumber ) ).string() + '/' + + this->snapshotHashFileName; try { - std::lock_guard< std::mutex > lock( hash_file_mutex ); + std::lock_guard< std::mutex > lock( hashFileMutex ); std::ofstream out( hash_file ); out.clear(); out << hash; @@ -714,7 +795,7 @@ void SnapshotManager::computeSnapshotHash( unsigned _blockNumber, bool is_checki } uint64_t SnapshotManager::getBlockTimestamp( unsigned _blockNumber ) const { - fs::path snapshot_dir = snapshots_dir / to_string( _blockNumber ); + fs::path snapshot_dir = snapshotsDir / to_string( _blockNumber ); try { if ( !fs::exists( snapshot_dir ) ) @@ -723,22 +804,21 @@ uint64_t SnapshotManager::getBlockTimestamp( unsigned _blockNumber ) const { std::throw_with_nested( CannotRead( snapshot_dir ) ); } - fs::path db_dir = this->snapshots_dir / std::to_string( _blockNumber ); + fs::path db_dir = this->snapshotsDir / std::to_string( _blockNumber ); int res = btrfs.subvolume.property_set( - ( db_dir / this->volumes[0] ).string().c_str(), "ro", "false" ); + ( db_dir / coreVolumes.at( 0 ) ).string().c_str(), "ro", "false" ); if ( res != 0 ) { throw CannotPerformBtrfsOperation( btrfs.last_cmd(), btrfs.strerror() ); } - dev::eth::BlockChain bc( chain_params, db_dir, false ); + dev::eth::BlockChain bc( chainParams, db_dir, false ); dev::h256 hash = bc.numberHash( _blockNumber ); uint64_t timestamp = dev::eth::BlockHeader( bc.block( hash ) ).timestamp(); - res = btrfs.subvolume.property_set( - ( db_dir / this->volumes[0] ).string().c_str(), "ro", "true" ); + ( db_dir / coreVolumes.at( 0 ) ).string().c_str(), "ro", "true" ); if ( res != 0 ) { throw CannotPerformBtrfsOperation( btrfs.last_cmd(), btrfs.strerror() ); diff --git a/libskale/SnapshotManager.h b/libskale/SnapshotManager.h index 26aa1d411..4a5ca58d8 100644 --- a/libskale/SnapshotManager.h +++ b/libskale/SnapshotManager.h @@ -152,9 +152,8 @@ class SnapshotManager { /////////////// MORE INTERESTING STUFF //////////////// public: - SnapshotManager( const dev::eth::ChainParams& _chain_params, - const boost::filesystem::path& _dataDir, const std::vector< std::string >& _volumes, - const std::string& diffs_dir = std::string() ); + SnapshotManager( const dev::eth::ChainParams& _chainParams, + const boost::filesystem::path& _dataDir, const std::string& diffs_dir = std::string() ); void doSnapshot( unsigned _blockNumber ); void restoreSnapshot( unsigned _blockNumber ); boost::filesystem::path makeOrGetDiff( unsigned _toBlock ); @@ -178,15 +177,17 @@ class SnapshotManager { const boost::filesystem::path& _dirPath ); private: - boost::filesystem::path data_dir; - std::vector< std::string > volumes; - boost::filesystem::path snapshots_dir; - boost::filesystem::path diffs_dir; + boost::filesystem::path dataDir; + std::vector< std::string > coreVolumes; + std::vector< std::string > archiveVolumes; + 
std::vector< std::string > allVolumes; + boost::filesystem::path snapshotsDir; + boost::filesystem::path diffsDir; - static const std::string snapshot_hash_file_name; - mutable std::mutex hash_file_mutex; + static const std::string snapshotHashFileName; + mutable std::mutex hashFileMutex; - dev::eth::ChainParams chain_params; + dev::eth::ChainParams chainParams; void cleanupDirectory( const boost::filesystem::path& p, const boost::filesystem::path& _keepDirectory = "" ); diff --git a/libskale/broadcaster.cpp b/libskale/broadcaster.cpp index 20d15b232..ee625536d 100644 --- a/libskale/broadcaster.cpp +++ b/libskale/broadcaster.cpp @@ -90,17 +90,9 @@ void* ZmqBroadcaster::server_socket() const { if ( !m_zmq_server_socket ) { m_zmq_server_socket = zmq_socket( m_zmq_context, ZMQ_PUB ); - int val = 15000; - zmq_setsockopt( m_zmq_server_socket, ZMQ_HEARTBEAT_IVL, &val, sizeof( val ) ); - val = 3000; - zmq_setsockopt( m_zmq_server_socket, ZMQ_HEARTBEAT_TIMEOUT, &val, sizeof( val ) ); - val = 60000; - zmq_setsockopt( m_zmq_server_socket, ZMQ_HEARTBEAT_TTL, &val, sizeof( val ) ); - - val = 16; + int val = 16; zmq_setsockopt( m_zmq_server_socket, ZMQ_SNDHWM, &val, sizeof( val ) ); - const dev::eth::ChainParams& ch = m_client.chainParams(); // connect server to clients @@ -124,11 +116,11 @@ void* ZmqBroadcaster::client_socket() const { int value = 1; zmq_setsockopt( m_zmq_client_socket, ZMQ_TCP_KEEPALIVE, &value, sizeof( value ) ); - value = 300; + value = 15; zmq_setsockopt( m_zmq_client_socket, ZMQ_TCP_KEEPALIVE_IDLE, &value, sizeof( value ) ); value = 10; zmq_setsockopt( m_zmq_client_socket, ZMQ_TCP_KEEPALIVE_CNT, &value, sizeof( value ) ); - value = 300; + value = 15; zmq_setsockopt( m_zmq_client_socket, ZMQ_TCP_KEEPALIVE_INTVL, &value, sizeof( value ) ); value = 16; diff --git a/libweb3jsonrpc/CMakeLists.txt b/libweb3jsonrpc/CMakeLists.txt index b1b50a770..00a748f89 100644 --- a/libweb3jsonrpc/CMakeLists.txt +++ b/libweb3jsonrpc/CMakeLists.txt @@ -36,6 +36,10 @@ set(sources Web3Face.h WhisperFace.h + Tracing.h + Tracing.cpp + TracingFace.h + SkalePerformanceTracker.h SkalePerformanceTracker.cpp SkalePerformanceTrackerFace.h diff --git a/libweb3jsonrpc/Debug.cpp b/libweb3jsonrpc/Debug.cpp index dc7de9e52..10d01f4a3 100644 --- a/libweb3jsonrpc/Debug.cpp +++ b/libweb3jsonrpc/Debug.cpp @@ -26,246 +26,8 @@ using namespace dev::eth; using namespace skale; -#define THROW_TRACE_JSON_EXCEPTION( __MSG__ ) \ - throw jsonrpc::JsonRpcException( std::string( __FUNCTION__ ) + ":" + \ - std::to_string( __LINE__ ) + ":" + std::string( __MSG__ ) ) - - -void Debug::checkPrivilegedAccess() const { - if ( !m_enablePrivilegedApis ) { - BOOST_THROW_EXCEPTION( jsonrpc::JsonRpcException( "This API call is not enabled" ) ); - } -} - -void Debug::checkHistoricStateEnabled() const { -#ifndef HISTORIC_STATE - BOOST_THROW_EXCEPTION( - jsonrpc::JsonRpcException( "This API call is available on archive nodes only" ) ); -#endif -} - -Debug::Debug( eth::Client& _eth, SkaleDebugInterface* _debugInterface, const string& argv, - bool _enablePrivilegedApis ) - : m_eth( _eth ), - m_debugInterface( _debugInterface ), - m_argvOptions( argv ), - m_blockTraceCache( MAX_BLOCK_TRACES_CACHE_ITEMS, MAX_BLOCK_TRACES_CACHE_SIZE ), - m_enablePrivilegedApis( _enablePrivilegedApis ) {} - - -h256 Debug::blockHash( string const& _blockNumberOrHash ) const { - checkPrivilegedAccess(); - if ( isHash< h256 >( _blockNumberOrHash ) ) - return h256( _blockNumberOrHash.substr( _blockNumberOrHash.size() - 64, 64 ) ); - try { - return 
m_eth.blockChain().numberHash( stoul( _blockNumberOrHash ) ); - } catch ( ... ) { - THROW_TRACE_JSON_EXCEPTION( "Invalid argument" ); - } -} - -Json::Value Debug::debug_traceBlockByNumber( const string& -#ifdef HISTORIC_STATE - _blockNumber -#endif - , - Json::Value const& -#ifdef HISTORIC_STATE - _jsonTraceConfig -#endif -) { - Json::Value ret; - checkHistoricStateEnabled(); -#ifdef HISTORIC_STATE - auto bN = jsToBlockNumber( _blockNumber ); - - if ( bN == LatestBlock || bN == PendingBlock ) { - bN = m_eth.number(); - } - - if ( !m_eth.isKnown( bN ) ) { - THROW_TRACE_JSON_EXCEPTION( "Unknown block number:" + _blockNumber ); - } - - if ( bN == 0 ) { - THROW_TRACE_JSON_EXCEPTION( "Block number must be more than zero" ); - } - - try { - return m_eth.traceBlock( bN, _jsonTraceConfig ); - } catch ( std::exception const& _e ) { - THROW_TRACE_JSON_EXCEPTION( _e.what() ); - } catch ( ... ) { - THROW_TRACE_JSON_EXCEPTION( "Unknown server error" ); - } -#else - THROW_TRACE_JSON_EXCEPTION( "This API call is only supported on archive nodes" ); -#endif -} - -Json::Value Debug::debug_traceBlockByHash( string const& -#ifdef HISTORIC_STATE - _blockHash -#endif - , - Json::Value const& -#ifdef HISTORIC_STATE - _jsonTraceConfig -#endif -) { - checkHistoricStateEnabled(); - -#ifdef HISTORIC_STATE - h256 h = jsToFixed< 32 >( _blockHash ); - - if ( !m_eth.isKnown( h ) ) { - THROW_TRACE_JSON_EXCEPTION( "Unknown block hash" + _blockHash ); - } - - BlockNumber bN = m_eth.numberFromHash( h ); - - if ( bN == 0 ) { - THROW_TRACE_JSON_EXCEPTION( "Block number must be more than zero" ); - } - - try { - return m_eth.traceBlock( bN, _jsonTraceConfig ); - } catch ( std::exception const& _e ) { - THROW_TRACE_JSON_EXCEPTION( _e.what() ); - } catch ( ... ) { - THROW_TRACE_JSON_EXCEPTION( "Unknown server error" ); - } -#else - THROW_TRACE_JSON_EXCEPTION( "This API call is only supported on archive nodes" ); -#endif -} - - -Json::Value Debug::debug_traceTransaction( string const& -#ifdef HISTORIC_STATE - _txHashStr -#endif - , - Json::Value const& -#ifdef HISTORIC_STATE - _jsonTraceConfig -#endif -) { - - checkHistoricStateEnabled(); -#ifdef HISTORIC_STATE - auto txHash = h256( _txHashStr ); - - LocalisedTransaction localisedTransaction = m_eth.localisedTransaction( txHash ); - - if ( localisedTransaction.blockHash() == h256( 0 ) ) { - THROW_TRACE_JSON_EXCEPTION( - "Can't find committed transaction with this hash:" + _txHashStr ); - } - - auto blockNumber = localisedTransaction.blockNumber(); - - - if ( !m_eth.isKnown( blockNumber ) ) { - THROW_TRACE_JSON_EXCEPTION( "Unknown block number:" + to_string( blockNumber ) ); - } - - if ( blockNumber == 0 ) { - THROW_TRACE_JSON_EXCEPTION( "Block number must be more than zero" ); - } - - try { - Json::Value tracedBlock; - - tracedBlock = m_eth.traceBlock( blockNumber, _jsonTraceConfig ); - STATE_CHECK( tracedBlock.isArray() ) - STATE_CHECK( !tracedBlock.empty() ) - - - string lowerCaseTxStr = _txHashStr; - for ( auto& c : lowerCaseTxStr ) { - c = std::tolower( static_cast< unsigned char >( c ) ); - } - - - for ( Json::Value::ArrayIndex i = 0; i < tracedBlock.size(); i++ ) { - Json::Value& transactionTrace = tracedBlock[i]; - STATE_CHECK( transactionTrace.isObject() ); - STATE_CHECK( transactionTrace.isMember( "txHash" ) ); - if ( transactionTrace["txHash"] == lowerCaseTxStr ) { - STATE_CHECK( transactionTrace.isMember( "result" ) ); - return transactionTrace["result"]; - } - } - - THROW_TRACE_JSON_EXCEPTION( "Transaction not found in block" ); - - } catch ( 
jsonrpc::JsonRpcException& ) { - throw; - } catch ( std::exception const& _e ) { - THROW_TRACE_JSON_EXCEPTION( _e.what() ); - } catch ( ... ) { - THROW_TRACE_JSON_EXCEPTION( "Unknown server error" ); - } -#else - BOOST_THROW_EXCEPTION( - jsonrpc::JsonRpcException( "This API call is only supported on archive nodes" ) ); -#endif -} - -Json::Value Debug::debug_traceCall( Json::Value const& -#ifdef HISTORIC_STATE - _call -#endif - , - std::string const& -#ifdef HISTORIC_STATE - _blockNumber -#endif - , - Json::Value const& -#ifdef HISTORIC_STATE - _jsonTraceConfig -#endif -) { - - Json::Value ret; - checkHistoricStateEnabled(); - -#ifdef HISTORIC_STATE - - try { - auto bN = jsToBlockNumber( _blockNumber ); - - if ( bN == LatestBlock || bN == PendingBlock ) { - bN = m_eth.number(); - } - - if ( !m_eth.isKnown( bN ) ) { - THROW_TRACE_JSON_EXCEPTION( "Unknown block number:" + _blockNumber ); - } - - if ( bN == 0 ) { - THROW_TRACE_JSON_EXCEPTION( "Block number must be more than zero" ); - } - - TransactionSkeleton ts = toTransactionSkeleton( _call ); - - return m_eth.traceCall( - ts.from, ts.value, ts.to, ts.data, ts.gas, ts.gasPrice, bN, _jsonTraceConfig ); - } catch ( jsonrpc::JsonRpcException& ) { - throw; - } catch ( std::exception const& _e ) { - THROW_TRACE_JSON_EXCEPTION( _e.what() ); - } catch ( ... ) { - THROW_TRACE_JSON_EXCEPTION( "Unknown server error" ); - } - -#else - BOOST_THROW_EXCEPTION( - jsonrpc::JsonRpcException( "This API call is only supported on archive nodes" ) ); -#endif -} +Debug::Debug( eth::Client& _eth, SkaleDebugInterface* _debugInterface, const string& argv ) + : m_eth( _eth ), m_debugInterface( _debugInterface ), m_argvOptions( argv ) {} Json::Value Debug::debug_accountRangeAt( string const&, int, string const&, int ) { @@ -280,22 +42,17 @@ string Debug::debug_preimage( string const& ) { BOOST_THROW_EXCEPTION( jsonrpc::JsonRpcException( "This API call is not supported" ) ); } - void Debug::debug_pauseBroadcast( bool _pause ) { - checkPrivilegedAccess(); m_eth.skaleHost()->pauseBroadcast( _pause ); } void Debug::debug_pauseConsensus( bool _pause ) { - checkPrivilegedAccess(); m_eth.skaleHost()->pauseConsensus( _pause ); } void Debug::debug_forceBlock() { - checkPrivilegedAccess(); m_eth.skaleHost()->forceEmptyBlock(); } void Debug::debug_forceBroadcast( const string& _transactionHash ) { - checkPrivilegedAccess(); try { h256 h = jsToFixed< 32 >( _transactionHash ); if ( !m_eth.isKnownTransaction( h ) ) @@ -311,32 +68,26 @@ void Debug::debug_forceBroadcast( const string& _transactionHash ) { } string Debug::debug_interfaceCall( const string& _arg ) { - checkPrivilegedAccess(); return m_debugInterface->call( _arg ); } string Debug::debug_getVersion() { - checkPrivilegedAccess(); return Version; } string Debug::debug_getArguments() { - checkPrivilegedAccess(); return m_argvOptions; } string Debug::debug_getConfig() { - checkPrivilegedAccess(); return m_eth.chainParams().getOriginalJson(); } string Debug::debug_getSchainName() { - checkPrivilegedAccess(); return m_eth.chainParams().sChain.name; } uint64_t Debug::debug_getSnapshotCalculationTime() { - checkPrivilegedAccess(); return m_eth.getSnapshotCalculationTime(); } @@ -345,7 +96,6 @@ uint64_t Debug::debug_getSnapshotHashCalculationTime() { } uint64_t Debug::debug_doStateDbCompaction() { - checkPrivilegedAccess(); auto t1 = boost::chrono::high_resolution_clock::now(); m_eth.doStateDbCompaction(); auto t2 = boost::chrono::high_resolution_clock::now(); @@ -354,7 +104,6 @@ uint64_t Debug::debug_doStateDbCompaction() { } 
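Note: debug_doStateDbCompaction and debug_doBlocksDbCompaction keep the same wall-clock measurement around the heavy call; only the privileged-access check is removed. A minimal sketch of that timing pattern, assuming the elapsed time is returned in milliseconds (the tail of these functions is not shown in the hunk), with timedMs as a hypothetical helper:

#include <boost/chrono.hpp>
#include <cstdint>

// Hypothetical wrapper for the pattern above: run an operation and report how
// long it took. Milliseconds are an assumption; the diff does not show the
// return statement of the compaction methods.
template < typename Op >
uint64_t timedMs( Op&& op ) {
    auto t1 = boost::chrono::high_resolution_clock::now();
    op();
    auto t2 = boost::chrono::high_resolution_clock::now();
    return static_cast< uint64_t >(
        boost::chrono::duration_cast< boost::chrono::milliseconds >( t2 - t1 ).count() );
}

A caller could then write, for example, return timedMs( [&]() { m_eth.doBlocksDbCompaction(); } ); instead of repeating the two now() calls.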
uint64_t Debug::debug_doBlocksDbCompaction() { - checkPrivilegedAccess(); auto t1 = boost::chrono::high_resolution_clock::now(); m_eth.doBlocksDbCompaction(); auto t2 = boost::chrono::high_resolution_clock::now(); diff --git a/libweb3jsonrpc/Debug.h b/libweb3jsonrpc/Debug.h index 63c88405a..bc94d38d7 100644 --- a/libweb3jsonrpc/Debug.h +++ b/libweb3jsonrpc/Debug.h @@ -19,13 +19,10 @@ class Client; namespace rpc { class SessionManager; -constexpr size_t MAX_BLOCK_TRACES_CACHE_SIZE = 64 * 1024 * 1024; -constexpr size_t MAX_BLOCK_TRACES_CACHE_ITEMS = 1024 * 1024; - class Debug : public DebugFace { public: explicit Debug( eth::Client& _eth, SkaleDebugInterface* _debugInterface = nullptr, - const std::string& argv = std::string(), bool _enablePrivilegedApis = false ); + const std::string& argv = std::string() ); virtual RPCModules implementedModules() const override { return RPCModules{ RPCModule{ "debug", "1.0" } }; @@ -33,14 +30,6 @@ class Debug : public DebugFace { virtual Json::Value debug_accountRangeAt( std::string const& _blockHashOrNumber, int _txIndex, std::string const& _addressHash, int _maxResults ) override; - virtual Json::Value debug_traceTransaction( - std::string const& _txHash, Json::Value const& _json ) override; - virtual Json::Value debug_traceCall( Json::Value const& _call, std::string const& _blockNumber, - Json::Value const& _options ) override; - virtual Json::Value debug_traceBlockByNumber( - std::string const& _blockNumber, Json::Value const& _json ) override; - virtual Json::Value debug_traceBlockByHash( - std::string const& _blockHash, Json::Value const& _json ) override; virtual Json::Value debug_storageRangeAt( std::string const& _blockHashOrNumber, int _txIndex, std::string const& _address, std::string const& _begin, int _maxResults ) override; virtual std::string debug_preimage( std::string const& _hashedKey ) override; @@ -68,15 +57,6 @@ class Debug : public DebugFace { eth::Client& m_eth; SkaleDebugInterface* m_debugInterface = nullptr; std::string m_argvOptions; - cache::lru_ordered_memory_constrained_cache< std::string, Json::Value > m_blockTraceCache; - bool m_enablePrivilegedApis; - - - h256 blockHash( std::string const& _blockHashOrNumber ) const; - - void checkPrivilegedAccess() const; - - void checkHistoricStateEnabled() const; }; } // namespace rpc diff --git a/libweb3jsonrpc/DebugFace.h b/libweb3jsonrpc/DebugFace.h index cfee3b7cb..6c57c383c 100644 --- a/libweb3jsonrpc/DebugFace.h +++ b/libweb3jsonrpc/DebugFace.h @@ -19,10 +19,6 @@ class DebugFace : public ServerInterface< DebugFace > { jsonrpc::JSON_STRING, "param4", jsonrpc::JSON_INTEGER, NULL ), &dev::rpc::DebugFace::debug_accountRangeAtI ); - - this->bindAndAddMethod( jsonrpc::Procedure( "debug_traceTransaction", - jsonrpc::PARAMS_BY_POSITION, jsonrpc::JSON_OBJECT, NULL ), - &dev::rpc::DebugFace::debug_traceTransactionI ); this->bindAndAddMethod( jsonrpc::Procedure( "debug_storageRangeAt", jsonrpc::PARAMS_BY_POSITION, jsonrpc::JSON_OBJECT, "param1", jsonrpc::JSON_STRING, "param2", @@ -32,16 +28,6 @@ class DebugFace : public ServerInterface< DebugFace > { this->bindAndAddMethod( jsonrpc::Procedure( "debug_preimage", jsonrpc::PARAMS_BY_POSITION, jsonrpc::JSON_OBJECT, "param1", jsonrpc::JSON_STRING, NULL ), &dev::rpc::DebugFace::debug_preimageI ); - this->bindAndAddMethod( jsonrpc::Procedure( "debug_traceBlockByNumber", - jsonrpc::PARAMS_BY_POSITION, jsonrpc::JSON_OBJECT, NULL ), - &dev::rpc::DebugFace::debug_traceBlockByNumberI ); - this->bindAndAddMethod( jsonrpc::Procedure( 
"debug_traceBlockByHash", - jsonrpc::PARAMS_BY_POSITION, jsonrpc::JSON_OBJECT, NULL ), - &dev::rpc::DebugFace::debug_traceBlockByHashI ); - this->bindAndAddMethod( jsonrpc::Procedure( "debug_traceCall", jsonrpc::PARAMS_BY_POSITION, - jsonrpc::JSON_OBJECT, "param1", jsonrpc::JSON_OBJECT, "param2", - jsonrpc::JSON_STRING, "param3", jsonrpc::JSON_OBJECT, NULL ), - &dev::rpc::DebugFace::debug_traceCallI ); this->bindAndAddMethod( jsonrpc::Procedure( "debug_pauseConsensus", jsonrpc::PARAMS_BY_POSITION, @@ -108,24 +94,6 @@ class DebugFace : public ServerInterface< DebugFace > { request[2u].asString(), request[3u].asInt() ); } - inline virtual Json::Value getTracer( const Json::Value& request ) { - if ( !request.isArray() || request.empty() || request.size() > 2 ) { - BOOST_THROW_EXCEPTION( - jsonrpc::JsonRpcException( jsonrpc::Errors::ERROR_RPC_INVALID_PARAMS ) ); - } - if ( request.size() == 2 ) { - if ( !request[1u].isObject() ) { - BOOST_THROW_EXCEPTION( - jsonrpc::JsonRpcException( jsonrpc::Errors::ERROR_RPC_INVALID_PARAMS ) ); - } - return request[1u]; - - } else { - return { Json::objectValue }; - } - } - - inline virtual void debug_storageRangeAtI( const Json::Value& request, Json::Value& response ) { response = this->debug_storageRangeAt( request[0u].asString(), request[1u].asInt(), request[2u].asString(), request[3u].asString(), request[4u].asInt() ); @@ -134,23 +102,6 @@ class DebugFace : public ServerInterface< DebugFace > { response = this->debug_preimage( request[0u].asString() ); } - inline virtual void debug_traceTransactionI( - const Json::Value& request, Json::Value& response ) { - response = this->debug_traceTransaction( request[0u].asString(), getTracer( request ) ); - } - - inline virtual void debug_traceBlockByNumberI( - const Json::Value& request, Json::Value& response ) { - response = this->debug_traceBlockByNumber( request[0u].asString(), getTracer( request ) ); - } - inline virtual void debug_traceBlockByHashI( - const Json::Value& request, Json::Value& response ) { - response = this->debug_traceBlockByHash( request[0u].asString(), getTracer( request ) ); - } - inline virtual void debug_traceCallI( const Json::Value& request, Json::Value& response ) { - response = this->debug_traceCall( request[0u], request[1u].asString(), request[2u] ); - } - virtual void debug_pauseBroadcastI( const Json::Value& request, Json::Value& response ) { this->debug_pauseBroadcast( request[0u].asBool() ); response = true; // TODO make void @@ -210,17 +161,9 @@ class DebugFace : public ServerInterface< DebugFace > { virtual Json::Value debug_accountRangeAt( const std::string& param1, int param2, const std::string& param3, int param4 ) = 0; - virtual Json::Value debug_traceTransaction( - const std::string& param1, const Json::Value& param2 ) = 0; virtual Json::Value debug_storageRangeAt( const std::string& param1, int param2, const std::string& param3, const std::string& param4, int param5 ) = 0; virtual std::string debug_preimage( const std::string& param1 ) = 0; - virtual Json::Value debug_traceBlockByNumber( - const std::string& param1, const Json::Value& param2 ) = 0; - virtual Json::Value debug_traceBlockByHash( - const std::string& param1, const Json::Value& param2 ) = 0; - virtual Json::Value debug_traceCall( Json::Value const& _call, std::string const& _blockNumber, - Json::Value const& _options ) = 0; virtual void debug_pauseBroadcast( bool pause ) = 0; virtual void debug_pauseConsensus( bool pause ) = 0; virtual void debug_forceBlock() = 0; diff --git a/libweb3jsonrpc/Skale.cpp 
b/libweb3jsonrpc/Skale.cpp index 27b71b1cb..8e81dad4e 100644 --- a/libweb3jsonrpc/Skale.cpp +++ b/libweb3jsonrpc/Skale.cpp @@ -33,6 +33,7 @@ #include #include +#include #include #include @@ -40,10 +41,8 @@ #include -//#include #include -//#include #include #include @@ -121,9 +120,9 @@ std::string Skale::skale_shutdownInstance() { std::string s = ex.what(); if ( s.empty() ) s = "no description"; - cerror << "Exception in shutdown event handler: " << s << "\n"; + cerror << "Exception in shutdown event handler: " << s; } catch ( ... ) { - cerror << "Unknown exception in shutdown event handler\n"; + cerror << "Unknown exception in shutdown event handler"; } } // for( auto & fn : g_list_fn_on_shutdown ) g_list_fn_on_shutdown.clear(); @@ -138,7 +137,7 @@ std::string Skale::skale_receiveTransaction( std::string const& _rlp ) { try { return toJS( m_client.skaleHost()->receiveTransaction( _rlp ) ); } catch ( Exception const& ) { - throw jsonrpc::JsonRpcException( exceptionToErrorMessage() ); // TODO test! + throw jsonrpc::JsonRpcException( exceptionToErrorMessage() ); // TODO test } } @@ -150,9 +149,6 @@ size_t g_nMaxChunckSize = 100 * 1024 * 1024; // '{"jsonrpc":"2.0","method":"skale_getSnapshot","params":{ "blockNumber": "latest" },"id":73}' // nlohmann::json Skale::impl_skale_getSnapshot( const nlohmann::json& joRequest, Client& client ) { - // std::cout << cc::attention( "------------ " ) << cc::info( "skale_getSnapshot" ) << - // cc::normal( " call with " ) << cc::j( joRequest ) << "\n"; - std::lock_guard< std::mutex > lock( m_snapshot_mutex ); nlohmann::json joResponse = nlohmann::json::object(); @@ -210,22 +206,23 @@ nlohmann::json Skale::impl_skale_getSnapshot( const nlohmann::json& joRequest, C m_client.chainParams().sChain.snapshotDownloadInactiveTimeout || time( NULL ) - currentSnapshotTime < m_client.chainParams().sChain.snapshotDownloadInactiveTimeout ) && - time( NULL ) - currentSnapshotTime < - m_client.chainParams().sChain.snapshotDownloadTimeout ) { + ( time( NULL ) - currentSnapshotTime < + m_client.chainParams().sChain.snapshotDownloadTimeout || + m_client.chainParams().nodeInfo.archiveMode ) ) { if ( threadExitRequested ) break; sleep( 10 ); } clog( VerbosityInfo, "skale_downloadSnapshotFragmentMonitorThread" ) - << "Unlocking shared space.\n"; + << "Unlocking shared space."; std::lock_guard< std::mutex > lock( m_snapshot_mutex ); if ( currentSnapshotBlockNumber >= 0 ) { try { fs::remove( currentSnapshotPath ); clog( VerbosityInfo, "skale_downloadSnapshotFragmentMonitorThread" ) - << "Deleted snapshot file.\n"; + << "Deleted snapshot file."; } catch ( ... 
) { } currentSnapshotBlockNumber = -1; @@ -235,11 +232,8 @@ nlohmann::json Skale::impl_skale_getSnapshot( const nlohmann::json& joRequest, C } ) ); } - // - // size_t sizeOfFile = fs::file_size( currentSnapshotPath ); - // - // + joResponse["dataSize"] = sizeOfFile; joResponse["maxAllowedChunkSize"] = g_nMaxChunckSize; return joResponse; @@ -268,9 +262,6 @@ Json::Value Skale::skale_getSnapshot( const Json::Value& request ) { // std::vector< uint8_t > Skale::ll_impl_skale_downloadSnapshotFragment( const fs::path& fp, size_t idxFrom, size_t sizeOfChunk ) { - // size_t sizeOfFile = fs::file_size( fp ); - // - // std::ifstream f; f.open( fp.native(), std::ios::in | std::ios::binary ); if ( !f.is_open() ) @@ -292,7 +283,7 @@ std::vector< uint8_t > Skale::impl_skale_downloadSnapshotFragmentBinary( } fs::path fp = currentSnapshotPath; - // + size_t idxFrom = joRequest["from"].get< size_t >(); size_t sizeOfChunk = joRequest["size"].get< size_t >(); size_t sizeOfFile = fs::file_size( fp ); @@ -317,7 +308,7 @@ nlohmann::json Skale::impl_skale_downloadSnapshotFragmentJSON( const nlohmann::j "first"; fs::path fp = currentSnapshotPath; - // + size_t idxFrom = joRequest["from"].get< size_t >(); size_t sizeOfChunk = joRequest["size"].get< size_t >(); size_t sizeOfFile = fs::file_size( fp ); @@ -333,8 +324,7 @@ nlohmann::json Skale::impl_skale_downloadSnapshotFragmentJSON( const nlohmann::j if ( sizeOfChunk + idxFrom == sizeOfFile ) clog( VerbosityInfo, "skale_downloadSnapshotFragment" ) - << cc::success( "Sent all chunks for " ) << cc::p( currentSnapshotPath.string() ) - << "\n"; + << "Sent all chunks for " << currentSnapshotPath.string(); joResponse["size"] = sizeOfChunk; joResponse["data"] = strBase64; @@ -367,7 +357,8 @@ std::string Skale::skale_getLatestSnapshotBlockNumber() { Json::Value Skale::skale_getSnapshotSignature( unsigned blockNumber ) { dev::eth::ChainParams chainParams = this->m_client.chainParams(); - if ( chainParams.nodeInfo.keyShareName.empty() || chainParams.nodeInfo.sgxServerUrl.empty() ) + if ( !chainParams.nodeInfo.syncNode && ( chainParams.nodeInfo.keyShareName.empty() || + chainParams.nodeInfo.sgxServerUrl.empty() ) ) throw jsonrpc::JsonRpcException( "Snapshot signing is not enabled" ); if ( blockNumber != 0 && blockNumber != this->m_client.getLatestSnapshotBlockNumer() ) { @@ -376,118 +367,127 @@ Json::Value Skale::skale_getSnapshotSignature( unsigned blockNumber ) { } try { - dev::h256 snapshot_hash = this->m_client.getSnapshotHash( blockNumber ); - if ( !snapshot_hash ) + dev::h256 snapshotHash = this->m_client.getSnapshotHash( blockNumber ); + if ( !snapshotHash ) throw std::runtime_error( "Requested hash of block " + to_string( blockNumber ) + " is absent" ); - std::string sgxServerURL = chainParams.nodeInfo.sgxServerUrl; - skutils::url u( sgxServerURL ); - - nlohmann::json joCall = nlohmann::json::object(); - joCall["jsonrpc"] = "2.0"; - joCall["method"] = "blsSignMessageHash"; - if ( u.scheme() == "zmq" ) - joCall["type"] = "BLSSignReq"; - nlohmann::json obj = nlohmann::json::object(); - - obj["keyShareName"] = chainParams.nodeInfo.keyShareName; - obj["messageHash"] = snapshot_hash.hex(); - obj["n"] = chainParams.sChain.nodes.size(); - obj["t"] = chainParams.sChain.t; - - auto it = std::find_if( chainParams.sChain.nodes.begin(), chainParams.sChain.nodes.end(), - [chainParams]( const dev::eth::sChainNode& schain_node ) { - return schain_node.id == chainParams.nodeInfo.id; - } ); - assert( it != chainParams.sChain.nodes.end() ); - dev::eth::sChainNode schain_node = *it; - - 
joCall["params"] = obj; - - // TODO deduplicate with SkaleHost! - std::string sgx_cert_path = getenv( "SGX_CERT_FOLDER" ) ? getenv( "SGX_CERT_FOLDER" ) : ""; - if ( sgx_cert_path.empty() ) - sgx_cert_path = "/skale_node_data/sgx_certs/"; - else if ( sgx_cert_path[sgx_cert_path.length() - 1] != '/' ) - sgx_cert_path += '/'; - - const char* sgx_cert_filename = getenv( "SGX_CERT_FILE" ); - if ( sgx_cert_filename == nullptr ) - sgx_cert_filename = "sgx.crt"; - - const char* sgx_key_filename = getenv( "SGX_KEY_FILE" ); - if ( sgx_key_filename == nullptr ) - sgx_key_filename = "sgx.key"; - - skutils::http::SSL_client_options ssl_options; - ssl_options.client_cert = sgx_cert_path + sgx_cert_filename; - ssl_options.client_key = sgx_cert_path + sgx_key_filename; + nlohmann::json joSignature = nlohmann::json::object(); + if ( !chainParams.nodeInfo.syncNode ) { + std::string sgxServerURL = chainParams.nodeInfo.sgxServerUrl; + skutils::url u( sgxServerURL ); + + nlohmann::json joCall = nlohmann::json::object(); + joCall["jsonrpc"] = "2.0"; + joCall["method"] = "blsSignMessageHash"; + if ( u.scheme() == "zmq" ) + joCall["type"] = "BLSSignReq"; + nlohmann::json obj = nlohmann::json::object(); + + obj["keyShareName"] = chainParams.nodeInfo.keyShareName; + obj["messageHash"] = snapshotHash.hex(); + obj["n"] = chainParams.sChain.nodes.size(); + obj["t"] = chainParams.sChain.t; + + auto it = + std::find_if( chainParams.sChain.nodes.begin(), chainParams.sChain.nodes.end(), + [chainParams]( const dev::eth::sChainNode& schain_node ) { + return schain_node.id == chainParams.nodeInfo.id; + } ); + assert( it != chainParams.sChain.nodes.end() ); + dev::eth::sChainNode schain_node = *it; + + joCall["params"] = obj; + + // TODO deduplicate with SkaleHost + std::string sgx_cert_path = + getenv( "SGX_CERT_FOLDER" ) ? 
getenv( "SGX_CERT_FOLDER" ) : ""; + if ( sgx_cert_path.empty() ) + sgx_cert_path = "/skale_node_data/sgx_certs/"; + else if ( sgx_cert_path[sgx_cert_path.length() - 1] != '/' ) + sgx_cert_path += '/'; + + const char* sgx_cert_filename = getenv( "SGX_CERT_FILE" ); + if ( sgx_cert_filename == nullptr ) + sgx_cert_filename = "sgx.crt"; + + const char* sgx_key_filename = getenv( "SGX_KEY_FILE" ); + if ( sgx_key_filename == nullptr ) + sgx_key_filename = "sgx.key"; + + skutils::http::SSL_client_options ssl_options; + ssl_options.client_cert = sgx_cert_path + sgx_cert_filename; + ssl_options.client_key = sgx_cert_path + sgx_key_filename; - skutils::rest::client cli( skutils::rest::g_nClientConnectionTimeoutMS ); - cli.optsSSL_ = ssl_options; - bool fl = cli.open( sgxServerURL ); - if ( !fl ) { - clog( VerbosityError, "skale_getSnapshotSignature" ) - << cc::fatal( "FATAL:" ) - << cc::error( " Exception while trying to connect to sgx server: " ) - << cc::warn( "connection refused" ) << std::endl; - } + skutils::rest::client cli( skutils::rest::g_nClientConnectionTimeoutMS ); + cli.optsSSL_ = ssl_options; + bool fl = cli.open( sgxServerURL ); + if ( !fl ) { + clog( VerbosityError, "skale_getSnapshotSignature" ) + << "FATAL:" + << " Exception while trying to connect to sgx server: " + << "connection refused"; + } - skutils::rest::data_t d; - while ( true ) { - clog( VerbosityInfo, "skale_getSnapshotSignature" ) - << cc::ws_tx( ">>> SGX call >>>" ) << " " << cc::j( joCall ) << std::endl; - d = cli.call( joCall ); - if ( d.ei_.et_ != skutils::http::common_network_exception::error_type::et_no_error ) { - if ( d.ei_.et_ == skutils::http::common_network_exception::error_type::et_unknown || - d.ei_.et_ == skutils::http::common_network_exception::error_type::et_fatal ) { - clog( VerbosityError, "skale_getSnapshotSignature" ) - << cc::error( "ERROR:" ) - << cc::error( " Exception while trying to connect to sgx server: " ) - << cc::error( " error with connection: " ) << cc::info( " retrying... " ) - << std::endl; + skutils::rest::data_t d; + while ( true ) { + clog( VerbosityInfo, "skale_getSnapshotSignature" ) << ">>> SGX call >>>" + << " " << joCall; + d = cli.call( joCall ); + if ( d.ei_.et_ != + skutils::http::common_network_exception::error_type::et_no_error ) { + if ( d.ei_.et_ == + skutils::http::common_network_exception::error_type::et_unknown || + d.ei_.et_ == + skutils::http::common_network_exception::error_type::et_fatal ) { + clog( VerbosityError, "skale_getSnapshotSignature" ) + << "ERROR:" + << " Exception while trying to connect to sgx server: " + << " error with connection: " + << " retrying... "; + } else { + clog( VerbosityError, "skale_getSnapshotSignature" ) + << "ERROR:" + << " Exception while trying to connect to sgx server: " + << " error with ssl certificates " << d.ei_.strError_; + } } else { - clog( VerbosityError, "skale_getSnapshotSignature" ) - << cc::error( "ERROR:" ) - << cc::error( " Exception while trying to connect to sgx server: " ) - << cc::error( " error with ssl certificates " ) - << cc::error( d.ei_.strError_ ) << std::endl; + break; } - } else { - break; } - } - if ( d.empty() ) { - static const char g_strErrMsg[] = "SGX Server call to blsSignMessageHash failed"; - clog( VerbosityError, "skale_getSnapshotSignature" ) - << cc::error( "!!! SGX call error !!!" 
) << " " << cc::error( g_strErrMsg ) - << std::endl; - throw std::runtime_error( g_strErrMsg ); - } + if ( d.empty() ) { + static const char g_strErrMsg[] = "SGX Server call to blsSignMessageHash failed"; + clog( VerbosityError, "skale_getSnapshotSignature" ) << "!!! SGX call error !!!" + << " " << g_strErrMsg; + throw std::runtime_error( g_strErrMsg ); + } - nlohmann::json joAnswer = nlohmann::json::parse( d.s_ ); - nlohmann::json joResponse = - ( joAnswer.count( "result" ) > 0 ) ? joAnswer["result"] : joAnswer; - clog( VerbosityInfo, "skale_getSnapshotSignature" ) - << cc::ws_rx( "<<< SGX call <<<" ) << " " << cc::j( joResponse ) << std::endl; - if ( joResponse["status"] != 0 ) { - throw std::runtime_error( - "SGX Server call to blsSignMessageHash returned non-zero status" ); + nlohmann::json joAnswer = nlohmann::json::parse( d.s_ ); + nlohmann::json joResponse = + ( joAnswer.count( "result" ) > 0 ) ? joAnswer["result"] : joAnswer; + clog( VerbosityInfo, "skale_getSnapshotSignature" ) << "<<< SGX call <<<" + << " " << joResponse; + if ( joResponse["status"] != 0 ) { + throw std::runtime_error( + "SGX Server call to blsSignMessageHash returned non-zero status" ); + } + std::string signature_with_helper = joResponse["signatureShare"].get< std::string >(); + + std::vector< std::string > splidString; + splidString = boost::split( + splidString, signature_with_helper, []( char c ) { return c == ':'; } ); + + joSignature["X"] = splidString.at( 0 ); + joSignature["Y"] = splidString.at( 1 ); + joSignature["helper"] = splidString.at( 3 ); + } else { + joSignature["X"] = "1"; + joSignature["Y"] = "2"; + joSignature["helper"] = "1"; } - std::string signature_with_helper = joResponse["signatureShare"].get< std::string >(); - std::vector< std::string > splited_string; - splited_string = boost::split( - splited_string, signature_with_helper, []( char c ) { return c == ':'; } ); - - nlohmann::json joSignature = nlohmann::json::object(); - - joSignature["X"] = splited_string[0]; - joSignature["Y"] = splited_string[1]; - joSignature["helper"] = splited_string[3]; - joSignature["hash"] = snapshot_hash.hex(); + joSignature["hash"] = snapshotHash.hex(); std::string strSignature = joSignature.dump(); Json::Value response; @@ -582,14 +582,14 @@ std::string Skale::oracle_checkResult( std::string& receipt ) { namespace snapshot { bool download( const std::string& strURLWeb3, unsigned& block_number, const fs::path& saveTo, - fn_progress_t onProgress, bool isBinaryDownload, std::string* pStrErrorDescription ) { + fn_progress_t onProgress, bool isBinaryDownload, std::string* pStrErrorDescription, + bool forArchiveNode ) { if ( pStrErrorDescription ) pStrErrorDescription->clear(); std::ofstream f; try { boost::filesystem::remove( saveTo ); - // - // + if ( block_number == unsigned( -1 ) ) { // this means "latest" skutils::rest::client cli( skutils::rest::g_nClientConnectionTimeoutMS ); @@ -597,8 +597,9 @@ bool download( const std::string& strURLWeb3, unsigned& block_number, const fs:: if ( pStrErrorDescription ) ( *pStrErrorDescription ) = "REST failed to connect to server(1)"; clog( VerbosityError, "download snapshot" ) - << cc::fatal( "FATAL:" ) << " " - << cc::error( "REST failed to connect to server(1)" ) << "\n"; + << "FATAL:" + << " " + << "REST failed to connect to server(1)"; return false; } @@ -610,24 +611,23 @@ bool download( const std::string& strURLWeb3, unsigned& block_number, const fs:: if ( d.empty() ) { if ( pStrErrorDescription ) ( *pStrErrorDescription ) = "Failed to get latest bockNumber"; - 
clog( VerbosityError, "download snapshot" ) - << cc::fatal( "FATAL:" ) << " " - << cc::error( "Failed to get latest bockNumber" ) << "\n"; + clog( VerbosityError, "download snapshot" ) << "FATAL:" + << " " + << "Failed to get latest bockNumber"; return false; } // TODO catch? nlohmann::json joAnswer = nlohmann::json::parse( d.s_ ); block_number = dev::eth::jsToBlockNumber( joAnswer["result"].get< std::string >() ); } - // - // + skutils::rest::client cli( skutils::rest::g_nClientConnectionTimeoutMS ); if ( !cli.open( strURLWeb3 ) ) { if ( pStrErrorDescription ) ( *pStrErrorDescription ) = "REST failed to connect to server(2)"; - clog( VerbosityError, "download snapshot" ) - << cc::fatal( "FATAL:" ) << " " - << cc::error( "REST failed to connect to server(2)" ) << "\n"; + clog( VerbosityError, "download snapshot" ) << "FATAL:" + << " " + << "REST failed to connect to server(2)"; return false; } @@ -636,26 +636,26 @@ bool download( const std::string& strURLWeb3, unsigned& block_number, const fs:: joIn["method"] = "skale_getSnapshot"; nlohmann::json joParams = nlohmann::json::object(); joParams["blockNumber"] = block_number; + joParams["forArchiveNode"] = forArchiveNode; joIn["params"] = joParams; skutils::rest::data_t d = cli.call( joIn ); if ( !d.err_s_.empty() ) { if ( pStrErrorDescription ) ( *pStrErrorDescription ) = "REST call failed: " + d.err_s_; - clog( VerbosityError, "download snapshot" ) - << cc::fatal( "FATAL:" ) << " " << cc::error( "REST call failed: " ) - << cc::warn( d.err_s_ ) << "\n"; + clog( VerbosityError, "download snapshot" ) << "FATAL:" + << " " + << "REST call failed: " << d.err_s_; return false; } if ( d.empty() ) { if ( pStrErrorDescription ) ( *pStrErrorDescription ) = "REST call failed"; - clog( VerbosityError, "download snapshot" ) - << cc::fatal( "FATAL:" ) << " " << cc::error( "REST call failed" ) << "\n"; + clog( VerbosityError, "download snapshot" ) << "FATAL:" + << " " + << "REST call failed"; return false; } - // std::cout << cc::success( "REST call success" ) << "\n" << cc::j( d.s_ ) << "\n"; nlohmann::json joAnswer = nlohmann::json::parse( d.s_ ); - // std::cout << cc::normal( "Got answer(1) " ) << cc::j( joAnswer ) << std::endl; nlohmann::json joSnapshotInfo = joAnswer["result"]; if ( joSnapshotInfo.count( "error" ) > 0 ) { std::string s; @@ -668,16 +668,15 @@ bool download( const std::string& strURLWeb3, unsigned& block_number, const fs:: } if ( pStrErrorDescription ) ( *pStrErrorDescription ) = s; - clog( VerbosityError, "download snapshot" ) - << cc::fatal( "FATAL:" ) << " " << cc::error( s ) << "\n"; + clog( VerbosityError, "download snapshot" ) << "FATAL:" + << " " << s; return false; } size_t sizeOfFile = joSnapshotInfo["dataSize"].get< size_t >(); size_t maxAllowedChunkSize = joSnapshotInfo["maxAllowedChunkSize"].get< size_t >(); size_t idxChunk, cntChunks = sizeOfFile / maxAllowedChunkSize + ( ( ( sizeOfFile % maxAllowedChunkSize ) > 0 ) ? 
1 : 0 ); - // - // + f.open( saveTo.native(), std::ios::out | std::ios::binary ); if ( !f.is_open() ) { std::string s; @@ -704,18 +703,16 @@ bool download( const std::string& strURLWeb3, unsigned& block_number, const fs:: if ( pStrErrorDescription ) ( *pStrErrorDescription ) = "REST call failed(fragment downloader)"; clog( VerbosityError, "download snapshot" ) - << cc::fatal( "FATAL:" ) << " " - << cc::error( "REST call failed(fragment downloader)" ) << "\n"; + << "FATAL:" + << " " + << "REST call failed(fragment downloader)"; return false; } std::vector< uint8_t > buffer; if ( isBinaryDownload ) buffer.insert( buffer.end(), d.s_.begin(), d.s_.end() ); else { - // std::cout << cc::success( "REST call success(fragment downloader)" ) << "\n" << nlohmann::json joAnswer = nlohmann::json::parse( d.s_ ); - // std::cout << cc::normal( "Got answer(2) " ) << cc::j( joAnswer ) << std::endl; - // cc::j( d.s_ ) << "\n"; nlohmann::json joFragment = joAnswer["result"]; if ( joFragment.count( "error" ) > 0 ) { std::string s; @@ -723,8 +720,8 @@ bool download( const std::string& strURLWeb3, unsigned& block_number, const fs:: s += joFragment["error"].get< std::string >(); if ( pStrErrorDescription ) ( *pStrErrorDescription ) = s; - clog( VerbosityError, "download snapshot" ) - << cc::fatal( "FATAL:" ) << " " << cc::error( s ) << "\n"; + clog( VerbosityError, "download snapshot" ) << "FATAL:" + << " " << s; return false; } // size_t sizeArrived = joFragment["size"]; diff --git a/libweb3jsonrpc/Skale.h b/libweb3jsonrpc/Skale.h index 3a39db3e7..93f3ded44 100644 --- a/libweb3jsonrpc/Skale.h +++ b/libweb3jsonrpc/Skale.h @@ -118,7 +118,7 @@ typedef std::function< bool( size_t idxChunck, size_t cntChunks ) > fn_progress_ extern bool download( const std::string& strURLWeb3, unsigned& block_number, const fs::path& saveTo, fn_progress_t onProgress, bool isBinaryDownload = true, - std::string* pStrErrorDescription = nullptr ); + std::string* pStrErrorDescription = nullptr, bool forArchiveNode = false ); }; // namespace snapshot diff --git a/libweb3jsonrpc/Tracing.cpp b/libweb3jsonrpc/Tracing.cpp new file mode 100644 index 000000000..59f924760 --- /dev/null +++ b/libweb3jsonrpc/Tracing.cpp @@ -0,0 +1,246 @@ +#include "Tracing.h" + +#include +#include +#include + +#ifdef HISTORIC_STATE + +#include +#include +#endif + +using namespace std; +using namespace dev; +using namespace dev::rpc; +using namespace dev::eth; + + +#define THROW_TRACE_JSON_EXCEPTION( __MSG__ ) \ + throw jsonrpc::JsonRpcException( std::string( __FUNCTION__ ) + ":" + \ + std::to_string( __LINE__ ) + ":" + std::string( __MSG__ ) ) + +void Tracing::checkHistoricStateEnabled() const { +#ifndef HISTORIC_STATE + BOOST_THROW_EXCEPTION( + jsonrpc::JsonRpcException( "This API call is available on archive nodes only" ) ); +#endif +} + +Tracing::Tracing( eth::Client& _eth, const string& argv ) + : m_eth( _eth ), + m_argvOptions( argv ), + m_blockTraceCache( MAX_BLOCK_TRACES_CACHE_ITEMS, MAX_BLOCK_TRACES_CACHE_SIZE ) {} + +h256 Tracing::blockHash( string const& _blockNumberOrHash ) const { + if ( isHash< h256 >( _blockNumberOrHash ) ) + return h256( _blockNumberOrHash.substr( _blockNumberOrHash.size() - 64, 64 ) ); + try { + return m_eth.blockChain().numberHash( stoul( _blockNumberOrHash ) ); + } catch ( ... 
) { + THROW_TRACE_JSON_EXCEPTION( "Invalid argument" ); + } +} + +Json::Value Tracing::tracing_traceBlockByNumber( const string& +#ifdef HISTORIC_STATE + _blockNumber +#endif + , + Json::Value const& +#ifdef HISTORIC_STATE + _jsonTraceConfig +#endif +) { + Json::Value ret; + checkHistoricStateEnabled(); +#ifdef HISTORIC_STATE + auto bN = jsToBlockNumber( _blockNumber ); + + if ( bN == LatestBlock || bN == PendingBlock ) { + bN = m_eth.number(); + } + + if ( !m_eth.isKnown( bN ) ) { + THROW_TRACE_JSON_EXCEPTION( "Unknown block number:" + _blockNumber ); + } + + if ( bN == 0 ) { + THROW_TRACE_JSON_EXCEPTION( "Block number must be more than zero" ); + } + + try { + return m_eth.traceBlock( bN, _jsonTraceConfig ); + } catch ( std::exception const& _e ) { + THROW_TRACE_JSON_EXCEPTION( _e.what() ); + } catch ( ... ) { + THROW_TRACE_JSON_EXCEPTION( "Unknown server error" ); + } +#else + THROW_TRACE_JSON_EXCEPTION( "This API call is only supported on archive nodes" ); +#endif +} + +Json::Value Tracing::tracing_traceBlockByHash( string const& +#ifdef HISTORIC_STATE + _blockHash +#endif + , + Json::Value const& +#ifdef HISTORIC_STATE + _jsonTraceConfig +#endif +) { + checkHistoricStateEnabled(); + +#ifdef HISTORIC_STATE + h256 h = jsToFixed< 32 >( _blockHash ); + + if ( !m_eth.isKnown( h ) ) { + THROW_TRACE_JSON_EXCEPTION( "Unknown block hash" + _blockHash ); + } + + BlockNumber bN = m_eth.numberFromHash( h ); + + if ( bN == 0 ) { + THROW_TRACE_JSON_EXCEPTION( "Block number must be more than zero" ); + } + + try { + return m_eth.traceBlock( bN, _jsonTraceConfig ); + } catch ( std::exception const& _e ) { + THROW_TRACE_JSON_EXCEPTION( _e.what() ); + } catch ( ... ) { + THROW_TRACE_JSON_EXCEPTION( "Unknown server error" ); + } +#else + THROW_TRACE_JSON_EXCEPTION( "This API call is only supported on archive nodes" ); +#endif +} + + +Json::Value Tracing::tracing_traceTransaction( string const& +#ifdef HISTORIC_STATE + _txHashStr +#endif + , + Json::Value const& +#ifdef HISTORIC_STATE + _jsonTraceConfig +#endif +) { + + checkHistoricStateEnabled(); +#ifdef HISTORIC_STATE + auto txHash = h256( _txHashStr ); + + LocalisedTransaction localisedTransaction = m_eth.localisedTransaction( txHash ); + + if ( localisedTransaction.blockHash() == h256( 0 ) ) { + THROW_TRACE_JSON_EXCEPTION( + "Can't find committed transaction with this hash:" + _txHashStr ); + } + + auto blockNumber = localisedTransaction.blockNumber(); + + + if ( !m_eth.isKnown( blockNumber ) ) { + THROW_TRACE_JSON_EXCEPTION( "Unknown block number:" + to_string( blockNumber ) ); + } + + if ( blockNumber == 0 ) { + THROW_TRACE_JSON_EXCEPTION( "Block number must be more than zero" ); + } + + try { + Json::Value tracedBlock; + + tracedBlock = m_eth.traceBlock( blockNumber, _jsonTraceConfig ); + STATE_CHECK( tracedBlock.isArray() ) + STATE_CHECK( !tracedBlock.empty() ) + + + string lowerCaseTxStr = _txHashStr; + for ( auto& c : lowerCaseTxStr ) { + c = std::tolower( static_cast< unsigned char >( c ) ); + } + + + for ( Json::Value::ArrayIndex i = 0; i < tracedBlock.size(); i++ ) { + Json::Value& transactionTrace = tracedBlock[i]; + STATE_CHECK( transactionTrace.isObject() ); + STATE_CHECK( transactionTrace.isMember( "txHash" ) ); + if ( transactionTrace["txHash"] == lowerCaseTxStr ) { + STATE_CHECK( transactionTrace.isMember( "result" ) ); + return transactionTrace["result"]; + } + } + + THROW_TRACE_JSON_EXCEPTION( "Transaction not found in block" ); + + } catch ( jsonrpc::JsonRpcException& ) { + throw; + } catch ( std::exception const& _e ) { + 
THROW_TRACE_JSON_EXCEPTION( _e.what() ); + } catch ( ... ) { + THROW_TRACE_JSON_EXCEPTION( "Unknown server error" ); + } +#else + BOOST_THROW_EXCEPTION( + jsonrpc::JsonRpcException( "This API call is only supported on archive nodes" ) ); +#endif +} + +Json::Value Tracing::tracing_traceCall( Json::Value const& +#ifdef HISTORIC_STATE + _call +#endif + , + std::string const& +#ifdef HISTORIC_STATE + _blockNumber +#endif + , + Json::Value const& +#ifdef HISTORIC_STATE + _jsonTraceConfig +#endif +) { + + Json::Value ret; + checkHistoricStateEnabled(); + +#ifdef HISTORIC_STATE + + try { + auto bN = jsToBlockNumber( _blockNumber ); + + if ( bN == LatestBlock || bN == PendingBlock ) { + bN = m_eth.number(); + } + + if ( !m_eth.isKnown( bN ) ) { + THROW_TRACE_JSON_EXCEPTION( "Unknown block number:" + _blockNumber ); + } + + if ( bN == 0 ) { + THROW_TRACE_JSON_EXCEPTION( "Block number must be more than zero" ); + } + + TransactionSkeleton ts = toTransactionSkeleton( _call ); + + return m_eth.traceCall( + ts.from, ts.value, ts.to, ts.data, ts.gas, ts.gasPrice, bN, _jsonTraceConfig ); + } catch ( jsonrpc::JsonRpcException& ) { + throw; + } catch ( std::exception const& _e ) { + THROW_TRACE_JSON_EXCEPTION( _e.what() ); + } catch ( ... ) { + THROW_TRACE_JSON_EXCEPTION( "Unknown server error" ); + } + +#else + BOOST_THROW_EXCEPTION( + jsonrpc::JsonRpcException( "This API call is only supported on archive nodes" ) ); +#endif +} diff --git a/libweb3jsonrpc/Tracing.h b/libweb3jsonrpc/Tracing.h new file mode 100644 index 000000000..73b262187 --- /dev/null +++ b/libweb3jsonrpc/Tracing.h @@ -0,0 +1,60 @@ +#ifndef TRACING_H +#define TRACING_H + +#include "TracingFace.h" +#include "test/tools/libtestutils/FixedClient.h" + +#include +#include +#include +#include + +class SkaleHost; +class SkaleDebugInterface; + +namespace dev { +namespace eth { +class Client; + +} // namespace eth +namespace rpc { +class SessionManager; + +constexpr size_t MAX_BLOCK_TRACES_CACHE_SIZE = 64 * 1024 * 1024; +constexpr size_t MAX_BLOCK_TRACES_CACHE_ITEMS = 1024 * 1024; + +class Tracing : public TracingFace { +public: + explicit Tracing( eth::Client& _eth, const std::string& argv = std::string() ); + + virtual RPCModules implementedModules() const override { + return RPCModules{ RPCModule{ "debug", "1.0" } }; + } + + virtual Json::Value tracing_traceTransaction( + std::string const& _txHash, Json::Value const& _json ) override; + virtual Json::Value tracing_traceCall( Json::Value const& _call, + std::string const& _blockNumber, Json::Value const& _options ) override; + virtual Json::Value tracing_traceBlockByNumber( + std::string const& _blockNumber, Json::Value const& _json ) override; + virtual Json::Value tracing_traceBlockByHash( + std::string const& _blockHash, Json::Value const& _json ) override; + +private: + eth::Client& m_eth; + std::string m_argvOptions; + cache::lru_ordered_memory_constrained_cache< std::string, Json::Value > m_blockTraceCache; + bool m_enablePrivilegedApis; + + h256 blockHash( std::string const& _blockHashOrNumber ) const; + + void checkPrivilegedAccess() const; + + void checkHistoricStateEnabled() const; +}; + +} // namespace rpc +} // namespace dev + + +#endif // TRACING_H diff --git a/libweb3jsonrpc/TracingFace.h b/libweb3jsonrpc/TracingFace.h new file mode 100644 index 000000000..a4296ec21 --- /dev/null +++ b/libweb3jsonrpc/TracingFace.h @@ -0,0 +1,75 @@ +#ifndef TRACINGFACE_H +#define TRACINGFACE_H + +#include "ModularServer.h" +#include "boost/throw_exception.hpp" + +namespace dev { +namespace rpc { 
+class TracingFace : public ServerInterface< TracingFace > { +public: + TracingFace() { + this->bindAndAddMethod( jsonrpc::Procedure( "debug_traceTransaction", + jsonrpc::PARAMS_BY_POSITION, jsonrpc::JSON_OBJECT, NULL ), + &dev::rpc::TracingFace::tracing_traceTransactionI ); + this->bindAndAddMethod( jsonrpc::Procedure( "debug_traceBlockByNumber", + jsonrpc::PARAMS_BY_POSITION, jsonrpc::JSON_OBJECT, NULL ), + &dev::rpc::TracingFace::tracing_traceBlockByNumberI ); + this->bindAndAddMethod( jsonrpc::Procedure( "debug_traceBlockByHash", + jsonrpc::PARAMS_BY_POSITION, jsonrpc::JSON_OBJECT, NULL ), + &dev::rpc::TracingFace::tracing_traceBlockByHashI ); + this->bindAndAddMethod( jsonrpc::Procedure( "debug_traceCall", jsonrpc::PARAMS_BY_POSITION, + jsonrpc::JSON_OBJECT, "param1", jsonrpc::JSON_OBJECT, "param2", + jsonrpc::JSON_STRING, "param3", jsonrpc::JSON_OBJECT, NULL ), + &dev::rpc::TracingFace::tracing_traceCallI ); + } + + inline virtual Json::Value getTracer( const Json::Value& request ) { + if ( !request.isArray() || request.empty() || request.size() > 2 ) { + BOOST_THROW_EXCEPTION( + jsonrpc::JsonRpcException( jsonrpc::Errors::ERROR_RPC_INVALID_PARAMS ) ); + } + if ( request.size() == 2 ) { + if ( !request[1u].isObject() ) { + BOOST_THROW_EXCEPTION( + jsonrpc::JsonRpcException( jsonrpc::Errors::ERROR_RPC_INVALID_PARAMS ) ); + } + return request[1u]; + + } else { + return { Json::objectValue }; + } + } + + inline virtual void tracing_traceTransactionI( + const Json::Value& request, Json::Value& response ) { + response = this->tracing_traceTransaction( request[0u].asString(), getTracer( request ) ); + } + + inline virtual void tracing_traceBlockByNumberI( + const Json::Value& request, Json::Value& response ) { + response = this->tracing_traceBlockByNumber( request[0u].asString(), getTracer( request ) ); + } + inline virtual void tracing_traceBlockByHashI( + const Json::Value& request, Json::Value& response ) { + response = this->tracing_traceBlockByHash( request[0u].asString(), getTracer( request ) ); + } + inline virtual void tracing_traceCallI( const Json::Value& request, Json::Value& response ) { + response = this->tracing_traceCall( request[0u], request[1u].asString(), request[2u] ); + } + + virtual Json::Value tracing_traceTransaction( + const std::string& param1, const Json::Value& param2 ) = 0; + virtual Json::Value tracing_traceBlockByNumber( + const std::string& param1, const Json::Value& param2 ) = 0; + virtual Json::Value tracing_traceBlockByHash( + const std::string& param1, const Json::Value& param2 ) = 0; + virtual Json::Value tracing_traceCall( Json::Value const& _call, + std::string const& _blockNumber, Json::Value const& _options ) = 0; +}; + +} // namespace rpc +} // namespace dev + + +#endif // TRACINGFACE_H diff --git a/skale-vm/main.cpp b/skale-vm/main.cpp index 50c4cc55d..117da3cfe 100644 --- a/skale-vm/main.cpp +++ b/skale-vm/main.cpp @@ -286,17 +286,20 @@ int main( int argc, char** argv ) { } // Ignore decoding errors. 
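The TracingFace interface above registers the Geth-style `debug_traceTransaction`, `debug_traceBlockByNumber`, `debug_traceBlockByHash` and `debug_traceCall` methods with positional parameters; `getTracer()` treats the trailing JSON object as an optional tracer configuration. A minimal TypeScript sketch of how a client might exercise these endpoints is shown below; the endpoint URL, addresses and the empty tracer config are illustrative assumptions, not values taken from this patch.

```typescript
// Minimal sketch (assumed endpoint and placeholder values): calling the
// debug_trace* methods bound by TracingFace. Node 18+ is assumed for the
// built-in fetch; any JSON-RPC client would work the same way.
const RPC_URL = "http://127.0.0.1:1234"; // assumed local skaled JSON-RPC endpoint

async function rpc(method: string, params: unknown[]): Promise<any> {
    const res = await fetch(RPC_URL, {
        method: "POST",
        headers: { "Content-Type": "application/json" },
        body: JSON.stringify({ jsonrpc: "2.0", id: 1, method, params }),
    });
    const body = await res.json();
    if (body.error) throw new Error(body.error.message);
    return body.result;
}

async function main() {
    // The trailing object is the optional tracer config consumed by getTracer().
    const blockTraces = await rpc("debug_traceBlockByNumber", ["latest", {}]);
    console.log("traces in latest block:", blockTraces.length);

    // debug_traceCall takes three positional parameters: the call object,
    // a block number string and the tracer config.
    const callTrace = await rpc("debug_traceCall", [
        { to: "0x0000000000000000000000000000000000000005", data: "0x" },
        "latest",
        {},
    ]);
    console.log(JSON.stringify(callTrace, null, 2));
}

main().catch(console.error);
```

On builds without HISTORIC_STATE these calls return the "This API call is only supported on archive nodes" error thrown in Tracing.cpp, so a sketch like this is only meaningful against an archive (historic-state) node.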
} - unique_ptr< SealEngineFace > se( ChainParams( genesisInfo( networkName ) ).createSealEngine() ); + ChainParams chainParams( genesisInfo( networkName ) ); LastBlockHashes lastBlockHashes; - EnvInfo const envInfo( - blockHeader, lastBlockHashes, 0 /* gasUsed */, se->chainParams().chainID ); + EnvInfo const envInfo( blockHeader, lastBlockHashes, 0 /*_committedBlockTimestamp*/, + 0 /* gasUsed */, chainParams.chainID ); + EVMSchedule evmSchedule = chainParams.makeEvmSchedule( 0, envInfo.number() ); + + state = state.createStateModifyCopy(); Transaction t; Address contractDestination( "1122334455667788991011121314151617181920" ); if ( !code.empty() ) { // Deploy the code on some fake account to be called later. Account account( 0, 0 ); - auto const latestVersion = se->evmSchedule( envInfo.number() ).accountVersion; + auto const latestVersion = evmSchedule.accountVersion; account.setCode( bytes{ code }, latestVersion ); std::unordered_map< Address, Account > map; map[contractDestination] = account; @@ -307,10 +310,12 @@ int main( int argc, char** argv ) { // data. t = Transaction( value, gasPrice, gas, data, 0 ); + t.ignoreExternalGas(); // for tests + state.addBalance( sender, value ); - // HACK 0 here is for gasPrice - Executive executive( state, envInfo, *se, 0 ); + // HACK 1st 0 here is for gasPrice + Executive executive( state, envInfo, chainParams, 0, 0 ); ExecutionResult res; executive.setResultRecipient( res ); t.forceSender( sender ); @@ -346,9 +351,8 @@ int main( int argc, char** argv ) { bytes output = std::move( res.output ); if ( mode == Mode::Statistics ) { - cout << "Gas used: " << res.gasUsed << " (+" - << t.baseGasRequired( se->evmSchedule( envInfo.number() ) ) << " for transaction, -" - << res.gasRefunded << " refunded)\n"; + cout << "Gas used: " << res.gasUsed << " (+" << t.baseGasRequired( evmSchedule ) + << " for transaction, -" << res.gasRefunded << " refunded)\n"; cout << "Output: " << toHex( output ) << "\n"; LogEntries logs = executive.logs(); cout << logs.size() << " logs" << ( logs.empty() ? "." : ":" ) << "\n"; @@ -385,5 +389,8 @@ int main( int argc, char** argv ) { << '\n'; cout << "exec time: " << fixed << setprecision( 6 ) << execTime << '\n'; } + + state.releaseWriteLock(); + return 0; } diff --git a/skaled/main.cpp b/skaled/main.cpp index 9382c6dda..03a02ffa5 100644 --- a/skaled/main.cpp +++ b/skaled/main.cpp @@ -75,6 +75,7 @@ #include #include #include +#include #include #include @@ -265,7 +266,7 @@ void downloadSnapshot( unsigned block_number, std::shared_ptr< SnapshotManager > << cc::normal( " of " ) << cc::size10( cntChunks ) << "\r"; return true; // continue download }, - isBinaryDownload, &strErrorDescription ); + isBinaryDownload, &strErrorDescription, chainParams.nodeInfo.archiveMode ); std::cout << " \r"; // clear // progress // line @@ -342,19 +343,14 @@ std::array< std::string, 4 > getBLSPublicKeyToVerifySnapshot( const ChainParams& return arrayCommonPublicKey; } -unsigned getBlockToDownladSnapshot( const dev::eth::sChainNode& nodeInfo ) { - std::string blockNumber_url = std::string( "http://" ) + std::string( nodeInfo.ip ) + - std::string( ":" ) + - ( nodeInfo.port + 3 ).convert_to< std::string >(); - +unsigned getBlockToDownladSnapshot( const std::string& nodeUrl ) { clog( VerbosityInfo, "getBlockToDownladSnapshot" ) - << cc::notice( "Asking node " ) << cc::p( nodeInfo.sChainIndex.str() ) << ' ' - << cc::notice( blockNumber_url ) << cc::notice( " for latest snapshot block number." 
); + << "Asking node " << ' ' << nodeUrl << " for latest snapshot block number."; - unsigned blockNumber = getLatestSnapshotBlockNumber( blockNumber_url ); + unsigned blockNumber = getLatestSnapshotBlockNumber( nodeUrl ); clog( VerbosityInfo, "getBlockToDownladSnapshot" ) - << cc::notice( "Latest Snapshot Block Number" ) + cc::debug( " is: " ) - << cc::p( std::to_string( blockNumber ) ) << " (from " << blockNumber_url << ")"; + << std::string( "Latest Snapshot Block Number is: " ) << blockNumber << " (from " << nodeUrl + << ")"; return blockNumber; } @@ -367,8 +363,7 @@ voteForSnapshotHash( try { listUrlsToDownload = snapshotHashAgent->getNodesToDownloadSnapshotFrom( blockNumber ); clog( VerbosityInfo, "voteForSnapshotHash" ) - << cc::notice( "Got urls to download snapshot from " ) - << cc::p( std::to_string( listUrlsToDownload.size() ) ) << cc::notice( " nodes " ); + << "Got urls to download snapshot from " << listUrlsToDownload.size() << " nodes "; if ( listUrlsToDownload.size() == 0 ) return { listUrlsToDownload, votedHash }; @@ -392,16 +387,17 @@ bool checkLocalSnapshot( std::shared_ptr< SnapshotManager >& snapshotManager, un dev::h256 calculated_hash = snapshotManager->getSnapshotHash( blockNumber ); if ( calculated_hash == votedHash ) { - clog( VerbosityInfo, "checkLocalSnapshot" ) << cc::notice( - "Will delete all snapshots except" + std::to_string( blockNumber ) ); + clog( VerbosityInfo, "checkLocalSnapshot" ) + << std::string( "Will delete all snapshots except " ) + << std::to_string( blockNumber ); snapshotManager->cleanupButKeepSnapshot( blockNumber ); snapshotManager->restoreSnapshot( blockNumber ); - std::cout << cc::success( "Snapshot restore success for block " ) - << cc::u( to_string( blockNumber ) ) << std::endl; + clog( VerbosityInfo, "checkLocalSnapshot" ) + << "Snapshot restore success for block " << std::to_string( blockNumber ); return true; } else { clog( VerbosityWarning, "checkLocalSnapshot" ) - << cc::warn( "Snapshot is present locally but its hash is different" ); + << "Snapshot is present locally but its hash is different"; } } // if present } catch ( const std::exception& ex ) { @@ -417,7 +413,7 @@ bool tryDownloadSnapshot( std::shared_ptr< SnapshotManager >& snapshotManager, const std::pair< dev::h256, libff::alt_bn128_G1 >& votedHash, unsigned blockNumber, bool isRegularSnapshot ) { clog( VerbosityInfo, "tryDownloadSnapshot" ) - << cc::notice( "Will cleanup data dir and snapshots dir if needed" ); + << "Will cleanup data dir and snapshots dir if needed"; if ( isRegularSnapshot ) snapshotManager->cleanup(); @@ -450,8 +446,8 @@ bool tryDownloadSnapshot( std::shared_ptr< SnapshotManager >& snapshotManager, successfullDownload = true; if ( isRegularSnapshot ) { snapshotManager->restoreSnapshot( blockNumber ); - std::cout << "Snapshot restore success for block " << to_string( blockNumber ) - << std::endl; + clog( VerbosityInfo, "tryDownloadSnapshot" ) + << "Snapshot restore success for block " << to_string( blockNumber ); } return successfullDownload; } else { @@ -471,56 +467,75 @@ bool tryDownloadSnapshot( std::shared_ptr< SnapshotManager >& snapshotManager, return false; } -void downloadAndProccessSnapshot( std::shared_ptr< SnapshotManager >& snapshotManager, - const ChainParams& chainParams, bool requireSnapshotMajority, - const std::string& ipToDownloadSnapshotFrom, bool isRegularSnapshot ) { - std::array< std::string, 4 > arrayCommonPublicKey = - getBLSPublicKeyToVerifySnapshot( chainParams ); +bool downloadSnapshotFromUrl( std::shared_ptr< SnapshotManager >& 
snapshotManager, + const ChainParams& chainParams, const std::array< std::string, 4 >& arrayCommonPublicKey, + const std::string& urlToDownloadSnapshotFrom, bool isRegularSnapshot, + bool forceDownload = false ) { + unsigned blockNumber = 0; + if ( isRegularSnapshot ) + blockNumber = getBlockToDownladSnapshot( urlToDownloadSnapshotFrom ); - bool successfullDownload = false; + std::unique_ptr< SnapshotHashAgent > snapshotHashAgent; + if ( forceDownload ) + snapshotHashAgent.reset( + new SnapshotHashAgent( chainParams, arrayCommonPublicKey, urlToDownloadSnapshotFrom ) ); + else + snapshotHashAgent.reset( new SnapshotHashAgent( chainParams, arrayCommonPublicKey ) ); - for ( size_t idx = 0; idx < chainParams.sChain.nodes.size() && !successfullDownload; ++idx ) - try { - if ( !requireSnapshotMajority && - std::string( chainParams.sChain.nodes[idx].ip ) != ipToDownloadSnapshotFrom ) - continue; + libff::init_alt_bn128_params(); + std::pair< dev::h256, libff::alt_bn128_G1 > votedHash; + std::vector< std::string > listUrlsToDownload; + std::tie( listUrlsToDownload, votedHash ) = + voteForSnapshotHash( snapshotHashAgent, blockNumber ); - if ( chainParams.nodeInfo.id == chainParams.sChain.nodes[idx].id ) - continue; + if ( listUrlsToDownload.empty() ) { + if ( !isRegularSnapshot ) + return true; + clog( VerbosityWarning, "downloadAndProccessSnapshot" ) + << "No nodes to download from - will skip " << urlToDownloadSnapshotFrom; + return false; + } - unsigned blockNumber = 0; - if ( isRegularSnapshot ) - blockNumber = getBlockToDownladSnapshot( chainParams.sChain.nodes[idx] ); + bool successfullDownload = checkLocalSnapshot( snapshotManager, blockNumber, votedHash.first ); + if ( successfullDownload ) + return successfullDownload; - std::unique_ptr< SnapshotHashAgent > snapshotHashAgent( new SnapshotHashAgent( - chainParams, arrayCommonPublicKey, ipToDownloadSnapshotFrom ) ); + successfullDownload = tryDownloadSnapshot( snapshotManager, chainParams, listUrlsToDownload, + votedHash, blockNumber, isRegularSnapshot ); - libff::init_alt_bn128_params(); - std::pair< dev::h256, libff::alt_bn128_G1 > votedHash; - std::vector< std::string > listUrlsToDownload; - std::tie( listUrlsToDownload, votedHash ) = - voteForSnapshotHash( snapshotHashAgent, blockNumber ); + return successfullDownload; +} - if ( listUrlsToDownload.empty() ) { - if ( !isRegularSnapshot ) - return; - clog( VerbosityWarning, "downloadAndProccessSnapshot" ) - << cc::warn( "No nodes to download from - will skip " + std::to_string( idx ) ); - continue; - } +void downloadAndProccessSnapshot( std::shared_ptr< SnapshotManager >& snapshotManager, + const ChainParams& chainParams, const std::string& urlToDownloadSnapshotFrom, + bool isRegularSnapshot ) { + std::array< std::string, 4 > arrayCommonPublicKey = + getBLSPublicKeyToVerifySnapshot( chainParams ); - successfullDownload = - checkLocalSnapshot( snapshotManager, blockNumber, votedHash.first ); - if ( successfullDownload ) - break; + bool successfullDownload = false; + + if ( !urlToDownloadSnapshotFrom.empty() ) + successfullDownload = downloadSnapshotFromUrl( snapshotManager, chainParams, + arrayCommonPublicKey, urlToDownloadSnapshotFrom, isRegularSnapshot, true ); + else { + for ( size_t idx = 0; idx < chainParams.sChain.nodes.size() && !successfullDownload; ++idx ) + try { + if ( chainParams.nodeInfo.id == chainParams.sChain.nodes.at( idx ).id ) + continue; - successfullDownload = tryDownloadSnapshot( snapshotManager, chainParams, - listUrlsToDownload, votedHash, blockNumber, 
isRegularSnapshot ); - } catch ( std::exception& ex ) { - clog( VerbosityWarning, "downloadAndProccessSnapshot" ) - << cc::warn( "Exception while trying to set up snapshot: " ) - << cc::warn( dev::nested_exception_what( ex ) ); - } // for blockNumber_url + std::string nodeUrl = + std::string( "http://" ) + + std::string( chainParams.sChain.nodes.at( idx ).ip ) + std::string( ":" ) + + ( chainParams.sChain.nodes.at( idx ).port + 3 ).convert_to< std::string >(); + + successfullDownload = downloadSnapshotFromUrl( snapshotManager, chainParams, + arrayCommonPublicKey, nodeUrl, isRegularSnapshot ); + } catch ( std::exception& ex ) { + clog( VerbosityWarning, "downloadAndProccessSnapshot" ) + << "Exception while trying to set up snapshot: " + << dev::nested_exception_what( ex ); + } // for blockNumber_url + } if ( !successfullDownload ) { throw std::runtime_error( "FATAL: tried to download snapshot from everywhere!" ); @@ -1580,23 +1595,25 @@ int main( int argc, char** argv ) try { downloadSnapshotFlag = true; } - bool requireSnapshotMajority = true; - std::string ipToDownloadSnapshotFrom = ""; + std::string urlToDownloadSnapshotFrom = ""; if ( vm.count( "no-snapshot-majority" ) ) { - requireSnapshotMajority = false; - ipToDownloadSnapshotFrom = vm["no-snapshot-majority"].as< string >(); + urlToDownloadSnapshotFrom = vm["no-snapshot-majority"].as< string >(); + clog( VerbosityInfo, "main" ) + << "Manually set url to download snapshot from: " << urlToDownloadSnapshotFrom; } if ( chainParams.sChain.snapshotIntervalSec > 0 || downloadSnapshotFlag ) { - // auto mostRecentBlocksDBPath = (getDataDir() / ( "blocks_" + chainParams.nodeInfo.id.str() - // + ".db" )) / "1.db"; - - snapshotManager.reset( new SnapshotManager( chainParams, getDataDir(), - { BlockChain::getChainDirName( chainParams ), "filestorage", - "prices_" + chainParams.nodeInfo.id.str() + ".db", - "blocks_" + chainParams.nodeInfo.id.str() + ".db"/*, - mostRecentBlocksDBPath.string()*/ }, - sharedSpace ? sharedSpace->getPath() : "" ) ); + std::vector< std::string > coreVolumes = { BlockChain::getChainDirName( chainParams ), + "filestorage", "prices_" + chainParams.nodeInfo.id.str() + ".db", + "blocks_" + chainParams.nodeInfo.id.str() + ".db" }; + std::vector< std::string > archiveVolumes = {}; + if ( chainParams.nodeInfo.archiveMode ) { +#ifdef HISTORIC_STATE + archiveVolumes.insert( archiveVolumes.end(), { "historic_roots", "historic_state" } ); +#endif + } + snapshotManager.reset( new SnapshotManager( + chainParams, getDataDir(), sharedSpace ? 
sharedSpace->getPath() : "" ) ); } bool downloadGenesisForSyncNode = false; @@ -1615,30 +1632,21 @@ int main( int argc, char** argv ) try { statusAndControl->setExitState( StatusAndControl::StartFromSnapshot, true ); statusAndControl->setSubsystemRunning( StatusAndControl::SnapshotDownloader, true ); - if ( !ipToDownloadSnapshotFrom.empty() && - std::find_if( chainParams.sChain.nodes.begin(), chainParams.sChain.nodes.end(), - [&ipToDownloadSnapshotFrom]( const dev::eth::sChainNode& node ) { - return node.ip == ipToDownloadSnapshotFrom; - } ) == chainParams.sChain.nodes.end() ) - throw std::runtime_error( - "ipToDownloadSnapshotFrom provided is incorrect - no such node in schain" ); - std::unique_ptr< std::lock_guard< SharedSpace > > sharedSpace_lock; if ( sharedSpace ) sharedSpace_lock.reset( new std::lock_guard< SharedSpace >( *sharedSpace ) ); try { if ( !downloadGenesisForSyncNode ) - downloadAndProccessSnapshot( snapshotManager, chainParams, requireSnapshotMajority, - ipToDownloadSnapshotFrom, true ); + downloadAndProccessSnapshot( + snapshotManager, chainParams, urlToDownloadSnapshotFrom, true ); else { try { - downloadAndProccessSnapshot( snapshotManager, chainParams, - requireSnapshotMajority, ipToDownloadSnapshotFrom, false ); + downloadAndProccessSnapshot( + snapshotManager, chainParams, urlToDownloadSnapshotFrom, false ); snapshotManager->restoreSnapshot( 0 ); } catch ( SnapshotManager::SnapshotAbsent& ) { - clog( VerbosityWarning, "main" ) - << cc::warn( "Snapshot for 0 block is not found" ); + clog( VerbosityWarning, "main" ) << "Snapshot for 0 block is not found"; } } @@ -1648,16 +1656,18 @@ int main( int argc, char** argv ) try { } catch ( SnapshotManager::SnapshotAbsent& ex ) { // sleep before send skale_getSnapshot again - will receive error clog( VerbosityInfo, "main" ) - << cc::warn( "Will sleep for 60 seconds before downloading 0 snapshot" ); - sleep( 60 ); + << std::string( "Will sleep for " ) + << chainParams.sChain.snapshotDownloadInactiveTimeout + << std::string( " seconds before downloading 0 snapshot" ); + sleep( chainParams.sChain.snapshotDownloadInactiveTimeout ); - downloadAndProccessSnapshot( snapshotManager, chainParams, requireSnapshotMajority, - ipToDownloadSnapshotFrom, false ); + downloadAndProccessSnapshot( + snapshotManager, chainParams, urlToDownloadSnapshotFrom, false ); } } catch ( std::exception& ) { std::throw_with_nested( std::runtime_error( - cc::error( " Fatal error in downloadAndProccessSnapshot! Will exit " ) ) ); + std::string( " Fatal error in downloadAndProccessSnapshot! Will exit " ) ) ); } } // if --download-snapshot @@ -1943,7 +1953,7 @@ int main( int argc, char** argv ) try { rpc::SkaleStats, /// skaleStats rpc::NetFace, rpc::Web3Face, rpc::PersonalFace, rpc::AdminEthFace, // SKALE rpc::AdminNetFace, - rpc::DebugFace, rpc::SkalePerformanceTracker, rpc::TestFace >; + rpc::DebugFace, rpc::SkalePerformanceTracker, rpc::TracingFace, rpc::TestFace >; sessionManager.reset( new rpc::SessionManager() ); accountHolder.reset( new SimpleAccountHolder( @@ -1980,16 +1990,16 @@ int main( int argc, char** argv ) try { auto pAdminEthFace = bEnabledAPIs_admin ? 
new rpc::AdminEth( *g_client, *gasPricer.get(), keyManager, *sessionManager.get() ) : nullptr; -#ifdef HISTORIC_STATE - // debug interface is always enabled in historic state, but - // non-tracing calls are only available if bEnabledAPIs_debug is true - auto pDebugFace = - new rpc::Debug( *g_client, &debugInterface, argv_string, bEnabledAPIs_debug ); -#else - // debug interface is enabled on core node if bEnabledAPIs_debug is true auto pDebugFace = bEnabledAPIs_debug ? - new rpc::Debug( *g_client, &debugInterface, argv_string, true ) : + new rpc::Debug( *g_client, &debugInterface, argv_string ) : nullptr; + +#ifdef HISTORIC_STATE + // tracing interface is always enabled for the historic state nodes + auto pTracingFace = new rpc::Tracing( *g_client, argv_string ); +#else + // tracing interface is only enabled for the historic state nodes + auto pTracingFace = nullptr; #endif @@ -1997,9 +2007,9 @@ int main( int argc, char** argv ) try { new rpc::SkalePerformanceTracker( configPath.string() ) : nullptr; - g_jsonrpcIpcServer.reset( - new FullServer( pEthFace, pSkaleFace, pSkaleStatsFace, pNetFace, pWeb3Face, - pPersonalFace, pAdminEthFace, pDebugFace, pPerformanceTrackerFace, nullptr ) ); + g_jsonrpcIpcServer.reset( new FullServer( pEthFace, pSkaleFace, pSkaleStatsFace, pNetFace, + pWeb3Face, pPersonalFace, pAdminEthFace, pDebugFace, pPerformanceTrackerFace, + pTracingFace, nullptr ) ); if ( is_ipc ) { try { diff --git a/test/historicstate/configs/basic_config.json b/test/historicstate/configs/basic_config.json index a108a100f..1fd278d37 100644 --- a/test/historicstate/configs/basic_config.json +++ b/test/historicstate/configs/basic_config.json @@ -317,7 +317,8 @@ "collectionQueueSize": 2, "collectionDuration": 10, "transactionQueueSize": 100, - "maxOpenLeveldbFiles": 25 + "maxOpenLeveldbFiles": 25, + "testSignatures": true }, "sChain": { diff --git a/test/historicstate/hardhat/README.md b/test/historicstate/hardhat/README.md index 74fca7e30..f8c70a4a4 100644 --- a/test/historicstate/hardhat/README.md +++ b/test/historicstate/hardhat/README.md @@ -30,12 +30,12 @@ npm install Now run test against skaled ```shell -npx hardhat run scripts/trace.js --network skaled +npx hardhat run scripts/trace.ts --network skaled ``` To run the same test against geth ```shell -npx hardhat run scripts/trace.js --network geth +npx hardhat run scripts/trace.ts --network geth ``` diff --git a/test/historicstate/hardhat/scripts/trace.ts b/test/historicstate/hardhat/scripts/trace.ts index d6280a68b..f6de806c9 100644 --- a/test/historicstate/hardhat/scripts/trace.ts +++ b/test/historicstate/hardhat/scripts/trace.ts @@ -371,10 +371,7 @@ async function callDebugTraceCall(_deployedContract: any, _tracer: string, _trac data: _deployedContract.interface.encodeFunctionData("getBalance", []) }; - const returnData = await ethers.provider.call(transaction, currentBlock - 1); - - const result = _deployedContract.interface.decodeFunctionResult("getBalance", returnData); - + let returnData = await ethers.provider.call(transaction, currentBlock - 1); console.log("Calling debug_traceCall to generate " + _traceFileName); diff --git a/test/tools/jsontests/BlockChainTests.cpp b/test/tools/jsontests/BlockChainTests.cpp index a99211bf0..2959da8eb 100644 --- a/test/tools/jsontests/BlockChainTests.cpp +++ b/test/tools/jsontests/BlockChainTests.cpp @@ -1111,10 +1111,6 @@ BOOST_AUTO_TEST_CASE( stZeroCallsTest, *boost::unit_test::precondition( dev::test::run_not_express ) ) {} BOOST_AUTO_TEST_CASE( stZeroCallsRevert, 
*boost::unit_test::precondition( dev::test::run_not_express ) ) {} -BOOST_AUTO_TEST_CASE( stCodeSizeLimit, - *boost::unit_test::precondition( dev::test::run_not_express ) ) {} -BOOST_AUTO_TEST_CASE( stCreateTest, - *boost::unit_test::precondition( dev::test::run_not_express ) ) {} BOOST_AUTO_TEST_CASE( stRevertTest, *boost::unit_test::precondition( dev::test::run_not_express ) ) {} diff --git a/test/tools/libtesteth/ImportTest.cpp b/test/tools/libtesteth/ImportTest.cpp index a805514e8..0a3bf579b 100644 --- a/test/tools/libtesteth/ImportTest.cpp +++ b/test/tools/libtesteth/ImportTest.cpp @@ -418,6 +418,7 @@ void ImportTest::importTransaction( json_spirit::mObject const& _o, eth::Transac toInt( _o.at( "gasLimit" ) ), Address( _o.at( "to" ).get_str() ), importData( _o ), toInt( _o.at( "nonce" ) ), Secret( _o.at( "secretKey" ).get_str() ) ); + o_tr.ignoreExternalGas(); } else { requireJsonFields( _o, "transaction", {{"data", jsonVType::str_type}, {"gasLimit", jsonVType::str_type}, @@ -429,6 +430,7 @@ void ImportTest::importTransaction( json_spirit::mObject const& _o, eth::Transac RLP transactionRLP( transactionRLPStream.out() ); try { o_tr = Transaction( transactionRLP.data(), CheckTransaction::Everything ); + o_tr.ignoreExternalGas(); } catch ( InvalidSignature const& ) { // create unsigned transaction o_tr = _o.at( "to" ).get_str().empty() ? @@ -438,6 +440,7 @@ void ImportTest::importTransaction( json_spirit::mObject const& _o, eth::Transac Transaction( toInt( _o.at( "value" ) ), toInt( _o.at( "gasPrice" ) ), toInt( _o.at( "gasLimit" ) ), Address( _o.at( "to" ).get_str() ), importData( _o ), toInt( _o.at( "nonce" ) ) ); + o_tr.ignoreExternalGas(); } catch ( Exception& _e ) { cnote << "invalid transaction" << boost::diagnostic_information( _e ); } diff --git a/test/tools/libtestutils/Common.cpp b/test/tools/libtestutils/Common.cpp index 7f3c39c04..64baa6746 100644 --- a/test/tools/libtestutils/Common.cpp +++ b/test/tools/libtestutils/Common.cpp @@ -39,7 +39,7 @@ boost::filesystem::path dev::test::getTestPath() { return Options::get().testpath; string testPath; - const char* ptestPath = getenv( "ETHEREUM_TEST_PATH" ); + static const char* ptestPath = getenv( "ETHEREUM_TEST_PATH" ); if ( ptestPath == nullptr ) { clog( VerbosityDebug, "test" ) diff --git a/test/unittests/libdevcore/CommonJS.cpp b/test/unittests/libdevcore/CommonJS.cpp index bd2223344..4896d9b76 100644 --- a/test/unittests/libdevcore/CommonJS.cpp +++ b/test/unittests/libdevcore/CommonJS.cpp @@ -105,26 +105,26 @@ BOOST_AUTO_TEST_CASE( test_jsToFixed, *boost::unit_test::precondition( dev::test "0xaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" ) ); h256 b( "0x000000000000000000000000000000000000000000000000000000740c54b42f" ); BOOST_CHECK( b == jsToFixed< 32 >( "498423084079" ) ); - BOOST_CHECK( h256() == jsToFixed< 32 >( "NotAHexadecimalOrDecimal" ) ); + BOOST_CHECK_THROW( jsToFixed< 32 >( "NotAHexadecimalOrDecimal" ), std::invalid_argument ); } BOOST_AUTO_TEST_CASE( test_jsToInt, *boost::unit_test::precondition( dev::test::run_not_express ) ) { BOOST_CHECK( 43832124 == jsToInt( "43832124" ) ); BOOST_CHECK( 1342356623 == jsToInt( "0x5002bc8f" ) ); BOOST_CHECK( 3483942 == jsToInt( "015224446" ) ); - BOOST_CHECK( 0 == jsToInt( "NotAHexadecimalOrDecimal" ) ); + BOOST_CHECK_THROW( jsToInt( "NotAHexadecimalOrDecimal" ), std::invalid_argument ); BOOST_CHECK( u256( "983298932490823474234" ) == jsToInt< 32 >( "983298932490823474234" ) ); BOOST_CHECK( u256( "983298932490823474234" ) == jsToInt< 32 >( 
"0x354e03915c00571c3a" ) ); - BOOST_CHECK( u256() == jsToInt< 32 >( "NotAHexadecimalOrDecimal" ) ); + BOOST_CHECK_THROW( jsToInt< 32 >( "NotAHexadecimalOrDecimal" ), std::invalid_argument ); BOOST_CHECK( u128( "228273101986715476958866839113050921216" ) == jsToInt< 16 >( "0xabbbccddeeff11223344556677889900" ) ); - BOOST_CHECK( u128() == jsToInt< 16 >( "NotAHexadecimalOrDecimal" ) ); + BOOST_CHECK_THROW( jsToInt< 16 >( "NotAHexadecimalOrDecimal" ), std::invalid_argument ); } BOOST_AUTO_TEST_CASE( test_jsToU256, *boost::unit_test::precondition( dev::test::run_not_express ) ) { BOOST_CHECK( u256( "983298932490823474234" ) == jsToU256( "983298932490823474234" ) ); - BOOST_CHECK( u256() == jsToU256( "NotAHexadecimalOrDecimal" ) ); + BOOST_CHECK_THROW( jsToU256( "NotAHexadecimalOrDecimal" ), std::invalid_argument ); } BOOST_AUTO_TEST_SUITE_END() diff --git a/test/unittests/libethereum/ClientTest.cpp b/test/unittests/libethereum/ClientTest.cpp index 9cc2e0b93..dba0d1122 100644 --- a/test/unittests/libethereum/ClientTest.cpp +++ b/test/unittests/libethereum/ClientTest.cpp @@ -262,7 +262,7 @@ class TestClientSnapshotsFixture : public TestOutputHelperFixture, public Fixtur // ), dir, // dir, chainParams, WithExisting::Kill, {"eth"}, testingMode ) ); std::shared_ptr< SnapshotManager > mgr; - mgr.reset( new SnapshotManager( chainParams, m_tmpDir.path(), { BlockChain::getChainDirName( chainParams ), "vol2", "filestorage"} ) ); + mgr.reset( new SnapshotManager( chainParams, m_tmpDir.path() ) ); // boost::filesystem::create_directory( // m_tmpDir.path() / "vol1" / "12041" ); // boost::filesystem::create_directory( @@ -1007,8 +1007,8 @@ static std::string const c_skaleConfigString = R"E( "sChain": { "schainName": "TestChain", "schainID": 1, - "snapshotIntervalSec": 10, - "emptyBlockIntervalMs": -1, + "snapshotIntervalSec": 5, + "emptyBlockIntervalMs": 4000, "nodes": [ { "nodeID": 1112, "ip": "127.0.0.1", "basePort": )E"+std::to_string( rand_port ) + R"E(, "ip6": "::1", "basePort6": 1231, "schainIndex" : 1, "publicKey" : "0xfa"} ] @@ -1030,7 +1030,7 @@ static std::string const c_skaleConfigString = R"E( BOOST_AUTO_TEST_SUITE( ClientSnapshotsSuite, *boost::unit_test::precondition( option_all_tests ) ) -BOOST_AUTO_TEST_CASE( ClientSnapshotsTest, *boost::unit_test::precondition( dev::test::run_not_express ) ) { +BOOST_AUTO_TEST_CASE( ClientSnapshotsTest, *boost::unit_test::disabled() ) { TestClientSnapshotsFixture fixture( c_skaleConfigString ); ClientTest* testClient = asClientTest( fixture.ethereum() ); @@ -1038,12 +1038,9 @@ BOOST_AUTO_TEST_CASE( ClientSnapshotsTest, *boost::unit_test::precondition( dev: BOOST_REQUIRE( testClient->getSnapshotHash( 0 ) != dev::h256() ); - BOOST_REQUIRE( testClient->mineBlocks( 1 ) ); - - testClient->importTransactionsAsBlock( - Transactions(), 1000, testClient->latestBlock().info().timestamp() + 86410 ); + std::this_thread::sleep_for( 5000ms ); - BOOST_REQUIRE( fs::exists( fs::path( fixture.getTmpDataDir() ) / "snapshots" / "3" ) ); + BOOST_REQUIRE( fs::exists( fs::path( fixture.getTmpDataDir() ) / "snapshots" / "2" ) ); secp256k1_sha256_t ctx; secp256k1_sha256_initialize( &ctx ); @@ -1055,13 +1052,15 @@ BOOST_AUTO_TEST_CASE( ClientSnapshotsTest, *boost::unit_test::precondition( dev: secp256k1_sha256_finalize( &ctx, empty_state_root_hash.data() ); BOOST_REQUIRE( testClient->latestBlock().info().stateRoot() == empty_state_root_hash ); - std::this_thread::sleep_for( 6000ms ); - BOOST_REQUIRE( fs::exists( fs::path( fixture.getTmpDataDir() ) / "snapshots" / "3" / 
"snapshot_hash.txt" ) ); - dev::h256 hash = testClient->hashFromNumber( 3 ); + std::this_thread::sleep_for( 1000ms ); + + BOOST_REQUIRE( fs::exists( fs::path( fixture.getTmpDataDir() ) / "snapshots" / "2" / "snapshot_hash.txt" ) ); + + dev::h256 hash = testClient->hashFromNumber( 2 ); uint64_t timestampFromBlockchain = testClient->blockInfo( hash ).timestamp(); - BOOST_REQUIRE_EQUAL( timestampFromBlockchain, testClient->getBlockTimestampFromSnapshot( 3 ) ); + BOOST_REQUIRE_EQUAL( timestampFromBlockchain, testClient->getBlockTimestampFromSnapshot( 2 ) ); } BOOST_AUTO_TEST_SUITE_END() diff --git a/test/unittests/libethereum/PrecompiledTest.cpp b/test/unittests/libethereum/PrecompiledTest.cpp index 7be67c511..3483a4699 100644 --- a/test/unittests/libethereum/PrecompiledTest.cpp +++ b/test/unittests/libethereum/PrecompiledTest.cpp @@ -1796,66 +1796,6 @@ BOOST_AUTO_TEST_CASE( getConfigVariable ) { BOOST_REQUIRE( !res.first ); } -// temporary merge tests for getConfigVariable -// because of the specifics in test design -//BOOST_AUTO_TEST_CASE( getConfigVariableAddress ) { -// ChainParams chainParams; -// chainParams = chainParams.loadConfig( genesisInfoSkaleConfigTest ); -// chainParams.sealEngineName = NoProof::name(); -// chainParams.allowFutureBlocks = true; - -// dev::eth::g_configAccesssor.reset( new skutils::json_config_file_accessor( "../../test/unittests/libethereum/PrecompiledConfig.json" ) ); - -// std::unique_ptr client; -// dev::TransientDirectory m_tmpDir; -// auto monitor = make_shared< InstanceMonitor >("test"); -// setenv("DATA_DIR", m_tmpDir.path().c_str(), 1); -// client.reset( new eth::ClientTest( chainParams, ( int ) chainParams.networkID, -// shared_ptr< GasPricer >(), nullptr, monitor, m_tmpDir.path(), dev::WithExisting::Kill ) ); - -// client->injectSkaleHost(); -// client->startWorking(); - -// client->setAuthor( Address("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF") ); - -// ClientTest* testClient = asClientTest( client.get() ); - -// testClient->mineBlocks( 1 ); -// testClient->importTransactionsAsBlock( dev::eth::Transactions(), 1000, 4294967294 ); -// dev::eth::g_skaleHost = testClient->skaleHost(); - -// PrecompiledExecutor exec = PrecompiledRegistrar::executor( "getConfigVariableAddress" ); - -// std::string input = stringToHex( "skaleConfig.sChain.nodes.0.owner" ); -// bytes in = fromHex( numberToHex( 32 ) + input ); -// auto res = exec( bytesConstRef( in.data(), in.size() ) ); - -// BOOST_REQUIRE( res.first ); -// BOOST_REQUIRE( res.second == fromHex("0x23bbe8db4e347b4e8c937c1c8350e4b5ed33adb3db69cbdb7a38e1f40a1b82fe") ); - -// input = stringToHex( "skaleConfig.sChain.nodes.0.id" ); -// input = input.substr(0, 58); // remove 0s in the end - -// in = fromHex( numberToHex( 29 ) + input ); -// res = exec( bytesConstRef( in.data(), in.size() ) ); - -// BOOST_REQUIRE( !res.first ); - -// input = stringToHex( "skaleConfig.sChain.nodes.0.schainIndex" ); -// input = input.substr(0, 76); // remove 0s in the end -// in = fromHex( numberToHex( 38 ) + input ); -// res = exec( bytesConstRef( in.data(), in.size() ) ); - -// BOOST_REQUIRE( !res.first ); - -// input = stringToHex( "skaleConfig.sChain.nodes.0.unknownField" ); -// input = input.substr(0, 78); // remove 0s in the end -// in = fromHex( numberToHex( 39 ) + input ); -// res = exec( bytesConstRef( in.data(), in.size() ) ); - -// BOOST_REQUIRE( !res.first ); -//} - struct FilestorageFixture : public TestOutputHelperFixture { FilestorageFixture() { ownerAddress = Address( "FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF" ); @@ 
-1875,7 +1815,7 @@ struct FilestorageFixture : public TestOutputHelperFixture { file.seekp( static_cast< long >( fileSize ) - 1 ); file.write( "0", 1 ); - dev::eth::g_overlayFS = std::make_shared< skale::OverlayFS >( true ); + m_overlayFS = std::make_shared< skale::OverlayFS >( true ); } ~FilestorageFixture() override { @@ -1889,6 +1829,7 @@ struct FilestorageFixture : public TestOutputHelperFixture { std::string fileName; std::size_t fileSize; boost::filesystem::path pathToFile; + std::shared_ptr< skale::OverlayFS > m_overlayFS; }; BOOST_FIXTURE_TEST_SUITE( FilestoragePrecompiledTests, FilestorageFixture ) @@ -1901,11 +1842,11 @@ BOOST_AUTO_TEST_CASE( createFile ) { bytes in = fromHex( hexAddress + numberToHex( fileName.length() ) + stringToHex( fileName ) + numberToHex( fileSize ) ); - auto res = exec( bytesConstRef( in.data(), in.size() ) ); + auto res = exec( bytesConstRef( in.data(), in.size() ), m_overlayFS.get() ); BOOST_REQUIRE( res.first ); - dev::eth::g_overlayFS->commit(); + m_overlayFS->commit(); BOOST_REQUIRE( boost::filesystem::exists( path ) ); BOOST_REQUIRE( boost::filesystem::file_size( path ) == fileSize ); remove( path.c_str() ); @@ -1919,10 +1860,10 @@ BOOST_AUTO_TEST_CASE( fileWithHashExtension ) { bytes in = fromHex( hexAddress + numberToHex( fileName.length() ) + stringToHex( fileName ) + numberToHex( fileSize ) ); - auto res = exec( bytesConstRef( in.data(), in.size() ) ); + auto res = exec( bytesConstRef( in.data(), in.size() ), m_overlayFS.get() ); BOOST_REQUIRE( res.first == false); - dev::eth::g_overlayFS->commit(); + m_overlayFS->commit(); BOOST_REQUIRE( !boost::filesystem::exists( path ) ); } @@ -1932,10 +1873,10 @@ BOOST_AUTO_TEST_CASE( uploadChunk ) { std::string data = "random_data"; bytes in = fromHex( hexAddress + numberToHex( fileName.length() ) + stringToHex( fileName ) + numberToHex( 0 ) + numberToHex( data.length() ) + stringToHex( data ) ); - auto res = exec( bytesConstRef( in.data(), in.size() ) ); + auto res = exec( bytesConstRef( in.data(), in.size() ), m_overlayFS.get() ); BOOST_REQUIRE( res.first ); - dev::eth::g_overlayFS->commit(); + m_overlayFS->commit(); std::ifstream ifs( pathToFile.string() ); std::string content; std::copy_n( std::istreambuf_iterator< char >( ifs.rdbuf() ), data.length(), @@ -1948,7 +1889,7 @@ BOOST_AUTO_TEST_CASE( readChunk ) { bytes in = fromHex( hexAddress + numberToHex( fileName.length() ) + stringToHex( fileName ) + numberToHex( 0 ) + numberToHex( fileSize ) ); - auto res = exec( bytesConstRef( in.data(), in.size() ) ); + auto res = exec( bytesConstRef( in.data(), in.size() ), m_overlayFS.get() ); BOOST_REQUIRE( res.first ); std::ifstream file( pathToFile.c_str(), std::ios_base::binary ); @@ -1965,7 +1906,7 @@ BOOST_AUTO_TEST_CASE( readMaliciousChunk ) { fileName = "../../test"; bytes in = fromHex( hexAddress + numberToHex( fileName.length() ) + stringToHex( fileName ) + numberToHex( 0 ) + numberToHex( fileSize ) ); - auto res = exec( bytesConstRef( in.data(), in.size() ) ); + auto res = exec( bytesConstRef( in.data(), in.size() ), m_overlayFS.get() ); BOOST_REQUIRE( res.first == false); } @@ -1973,7 +1914,7 @@ BOOST_AUTO_TEST_CASE( getFileSize ) { PrecompiledExecutor exec = PrecompiledRegistrar::executor( "getFileSize" ); bytes in = fromHex( hexAddress + numberToHex( fileName.length() ) + stringToHex( fileName ) ); - auto res = exec( bytesConstRef( in.data(), in.size() ) ); + auto res = exec( bytesConstRef( in.data(), in.size() ), m_overlayFS.get() ); BOOST_REQUIRE( res.first ); BOOST_REQUIRE( res.second == 
toBigEndian( static_cast< u256 >( fileSize ) ) ); } @@ -1984,7 +1925,7 @@ BOOST_AUTO_TEST_CASE( getMaliciousFileSize ) { fileName = "../../test"; bytes in = fromHex( hexAddress + numberToHex( fileName.length() ) + stringToHex( fileName ) ); - auto res = exec( bytesConstRef( in.data(), in.size() ) ); + auto res = exec( bytesConstRef( in.data(), in.size() ), m_overlayFS.get() ); BOOST_REQUIRE( !res.first ); } @@ -1992,23 +1933,23 @@ BOOST_AUTO_TEST_CASE( deleteFile ) { PrecompiledExecutor execCreate = PrecompiledRegistrar::executor( "createFile" ); bytes inCreate = fromHex( hexAddress + numberToHex( fileName.length() ) + stringToHex( fileName ) + numberToHex( fileSize ) ); - execCreate( bytesConstRef( inCreate.data(), inCreate.size() ) ); - dev::eth::g_overlayFS->commit(); + execCreate( bytesConstRef( inCreate.data(), inCreate.size() ), m_overlayFS.get() ); + m_overlayFS->commit(); PrecompiledExecutor execHash = PrecompiledRegistrar::executor( "calculateFileHash" ); bytes inHash = fromHex( hexAddress + numberToHex( fileName.length() ) + stringToHex( fileName ) + numberToHex( fileSize ) ); - execHash( bytesConstRef( inHash.data(), inHash.size() ) ); - dev::eth::g_overlayFS->commit(); + execHash( bytesConstRef( inHash.data(), inHash.size() ), m_overlayFS.get() ); + m_overlayFS->commit(); BOOST_REQUIRE( boost::filesystem::exists( pathToFile.string() + "._hash" ) ); PrecompiledExecutor exec = PrecompiledRegistrar::executor( "deleteFile" ); bytes in = fromHex( hexAddress + numberToHex( fileName.length() ) + stringToHex( fileName ) ); - auto res = exec( bytesConstRef( in.data(), in.size() ) ); + auto res = exec( bytesConstRef( in.data(), in.size() ), m_overlayFS.get() ); BOOST_REQUIRE( res.first ); - dev::eth::g_overlayFS->commit(); + m_overlayFS->commit(); BOOST_REQUIRE( !boost::filesystem::exists( pathToFile ) ); BOOST_REQUIRE( !boost::filesystem::exists( pathToFile.string() + "._hash" ) ); } @@ -2021,10 +1962,10 @@ BOOST_AUTO_TEST_CASE( createDirectory ) { dev::getDataDir() / "filestorage" / ownerAddress.hex() / dirName; bytes in = fromHex( hexAddress + numberToHex( dirName.length() ) + stringToHex( dirName ) ); - auto res = exec( bytesConstRef( in.data(), in.size() ) ); + auto res = exec( bytesConstRef( in.data(), in.size() ), m_overlayFS.get() ); BOOST_REQUIRE( res.first ); - dev::eth::g_overlayFS->commit(); + m_overlayFS->commit(); BOOST_REQUIRE( boost::filesystem::exists( pathToDir ) ); remove( pathToDir.c_str() ); } @@ -2038,11 +1979,11 @@ BOOST_AUTO_TEST_CASE( deleteDirectory ) { boost::filesystem::create_directories( pathToDir ); bytes in = fromHex( hexAddress + numberToHex( dirName.length() ) + stringToHex( dirName ) ); - auto res = exec( bytesConstRef( in.data(), in.size() ) ); + auto res = exec( bytesConstRef( in.data(), in.size() ), m_overlayFS.get() ); BOOST_REQUIRE( res.first ); - dev::eth::g_overlayFS->commit(); + m_overlayFS->commit(); BOOST_REQUIRE( !boost::filesystem::exists( pathToDir ) ); } @@ -2060,11 +2001,11 @@ BOOST_AUTO_TEST_CASE( calculateFileHash ) { bytes in = fromHex( hexAddress + numberToHex( fileName.length() ) + stringToHex( fileName ) + numberToHex( fileSize ) ); - auto res = exec( bytesConstRef( in.data(), in.size() ) ); + auto res = exec( bytesConstRef( in.data(), in.size() ), m_overlayFS.get() ); BOOST_REQUIRE( res.first ); - dev::eth::g_overlayFS->commit(); + m_overlayFS->commit(); BOOST_REQUIRE( boost::filesystem::exists( fileHashName ) ); std::ifstream resultFile( fileHashName ); diff --git a/test/unittests/libskale/HashSnapshot.cpp 
b/test/unittests/libskale/HashSnapshot.cpp index 77ca9f4fe..bc74936ce 100644 --- a/test/unittests/libskale/HashSnapshot.cpp +++ b/test/unittests/libskale/HashSnapshot.cpp @@ -40,7 +40,7 @@ namespace dev { namespace test { class SnapshotHashAgentTest { public: - SnapshotHashAgentTest( ChainParams& _chainParams, const std::string& ipToDownloadSnapshotFrom ) { + SnapshotHashAgentTest( ChainParams& _chainParams, const std::string& urlToDownloadSnapshotFrom ) { std::vector< libff::alt_bn128_Fr > coeffs( _chainParams.sChain.t ); for ( auto& elem : coeffs ) { @@ -84,16 +84,16 @@ class SnapshotHashAgentTest { this->secret_as_is = keys.first; - isSnapshotMajorityRequired = !ipToDownloadSnapshotFrom.empty(); + isSnapshotMajorityRequired = !urlToDownloadSnapshotFrom.empty(); - this->hashAgent_.reset( new SnapshotHashAgent( _chainParams, _chainParams.nodeInfo.commonBLSPublicKeys, ipToDownloadSnapshotFrom ) ); + this->hashAgent_.reset( new SnapshotHashAgent( _chainParams, _chainParams.nodeInfo.commonBLSPublicKeys, urlToDownloadSnapshotFrom ) ); } void fillData( const std::vector< dev::h256 >& snapshot_hashes ) { this->hashAgent_->hashes_ = snapshot_hashes; for ( size_t i = 0; i < this->hashAgent_->n_; ++i ) { - this->hashAgent_->is_received_[i] = true; + this->hashAgent_->isReceived_[i] = true; this->hashAgent_->public_keys_[i] = this->blsPrivateKeys_[i] * libff::alt_bn128_G2::one(); this->hashAgent_->signatures_[i] = libBLS::Bls::Signing( @@ -115,16 +115,16 @@ class SnapshotHashAgentTest { } if ( isSnapshotMajorityRequired ) - return this->hashAgent_->nodes_to_download_snapshot_from_; + return this->hashAgent_->nodesToDownloadSnapshotFrom_; std::vector< size_t > ret; for ( size_t i = 0; i < this->hashAgent_->n_; ++i ) { - if ( this->hashAgent_->chain_params_.nodeInfo.id == - this->hashAgent_->chain_params_.sChain.nodes[i].id ) { + if ( this->hashAgent_->chainParams_.nodeInfo.id == + this->hashAgent_->chainParams_.sChain.nodes[i].id ) { continue; } - if ( this->hashAgent_->hashes_[i] == this->hashAgent_->voted_hash_.first ) { + if ( this->hashAgent_->hashes_[i] == this->hashAgent_->votedHash_.first ) { ret.push_back( i ); } } @@ -283,8 +283,7 @@ struct SnapshotHashingFixture : public TestOutputHelperFixture, public FixtureCo // "eth tests", tempDir.path(), "", chainParams, WithExisting::Kill, {"eth"}, // true ) ); - mgr.reset( new SnapshotManager( chainParams, boost::filesystem::path( BTRFS_DIR_PATH ), - {BlockChain::getChainDirName( chainParams ), "filestorage"} ) ); + mgr.reset( new SnapshotManager( chainParams, boost::filesystem::path( BTRFS_DIR_PATH ) ) ); boost::filesystem::create_directory( boost::filesystem::path( BTRFS_DIR_PATH ) / "filestorage" / "test_dir" ); @@ -510,23 +509,33 @@ BOOST_AUTO_TEST_CASE( noSnapshotMajority ) { } chainParams.nodeInfo.id = 3; + chainParams.sChain.nodes[0].ip = "123.45.68.89"; + chainParams.sChain.nodes[1].ip = "123.45.87.89"; + chainParams.sChain.nodes[2].ip = "123.45.77.89"; + chainParams.sChain.nodes[3].ip = "123.45.67.89"; + std::string url = chainParams.sChain.nodes[3].ip + std::string( ":1234" ); - SnapshotHashAgentTest test_agent( chainParams, chainParams.sChain.nodes[3].ip ); + SnapshotHashAgentTest test_agent( chainParams, url ); dev::h256 hash = dev::h256::random(); std::vector< dev::h256 > snapshot_hashes( chainParams.sChain.nodes.size(), hash ); snapshot_hashes[2] = dev::h256::random(); test_agent.fillData( snapshot_hashes ); - auto nodesToDownloadSnapshotFrom = test_agent.getNodesToDownloadSnapshotFrom(); - BOOST_REQUIRE( 
nodesToDownloadSnapshotFrom.size() == 1 ); - BOOST_REQUIRE( nodesToDownloadSnapshotFrom[0] == 3 ); + BOOST_REQUIRE_THROW( test_agent.getNodesToDownloadSnapshotFrom(), NotEnoughVotesException ); } BOOST_AUTO_TEST_SUITE_END() BOOST_AUTO_TEST_SUITE( HashSnapshotTestSuite, *boost::unit_test::precondition( option_all_test ) ) +#define WAIT_FOR_THE_NEXT_BLOCK() { \ + auto bn = client->number(); \ + while ( client->number() == bn ) { \ + usleep( 100 ); \ + } \ +} + BOOST_FIXTURE_TEST_CASE( SnapshotHashingTest, SnapshotHashingFixture, *boost::unit_test::precondition( dev::test::run_not_express ) ) { auto senderAddress = coinbase.address(); @@ -537,21 +546,26 @@ BOOST_FIXTURE_TEST_CASE( SnapshotHashingTest, SnapshotHashingFixture, t["to"] = toJS( receiver.address() ); t["value"] = jsToDecimal( toJS( 10000 * dev::eth::szabo ) ); + BOOST_REQUIRE( client->getLatestSnapshotBlockNumer() == -1 ); + // Mine to generate a non-zero account balance const int blocksToMine = 1; dev::eth::simulateMining( *( client ), blocksToMine ); mgr->doSnapshot( 1 ); mgr->computeSnapshotHash( 1 ); + BOOST_REQUIRE( mgr->isSnapshotHashPresent( 1 ) ); - dev::eth::simulateMining( *( client ), blocksToMine ); - mgr->doSnapshot( 2 ); + BOOST_REQUIRE( client->number() == 1 ); + WAIT_FOR_THE_NEXT_BLOCK(); + mgr->doSnapshot( 2 ); mgr->computeSnapshotHash( 2 ); - - BOOST_REQUIRE( mgr->isSnapshotHashPresent( 1 ) ); BOOST_REQUIRE( mgr->isSnapshotHashPresent( 2 ) ); + BOOST_REQUIRE( client->number() == 2 ); + WAIT_FOR_THE_NEXT_BLOCK(); + auto hash1 = mgr->getSnapshotHash( 1 ); auto hash2 = mgr->getSnapshotHash( 2 ); @@ -562,25 +576,30 @@ BOOST_FIXTURE_TEST_CASE( SnapshotHashingTest, SnapshotHashingFixture, BOOST_REQUIRE_THROW( mgr->getSnapshotHash( 3 ), SnapshotManager::SnapshotAbsent ); // TODO check hash absence separately -} -BOOST_FIXTURE_TEST_CASE( SnapshotHashingFileStorageTest, SnapshotHashingFixture, - *boost::unit_test::precondition( dev::test::run_not_express ) ) { - mgr->doSnapshot( 4 ); + BOOST_REQUIRE( client->number() == 3 ); + WAIT_FOR_THE_NEXT_BLOCK(); + + mgr->doSnapshot( 3 ); + + mgr->computeSnapshotHash( 3, true ); + + BOOST_REQUIRE( mgr->isSnapshotHashPresent( 3 ) ); - mgr->computeSnapshotHash( 4, true ); + dev::h256 hash3_dbl = mgr->getSnapshotHash( 3 ); - BOOST_REQUIRE( mgr->isSnapshotHashPresent( 4 ) ); + mgr->computeSnapshotHash( 3 ); - dev::h256 hash4_dbl = mgr->getSnapshotHash( 4 ); + BOOST_REQUIRE( mgr->isSnapshotHashPresent( 3 ) ); - mgr->computeSnapshotHash( 4 ); + dev::h256 hash3 = mgr->getSnapshotHash( 3 ); - BOOST_REQUIRE( mgr->isSnapshotHashPresent( 4 ) ); + BOOST_REQUIRE( hash3_dbl == hash3 ); - dev::h256 hash4 = mgr->getSnapshotHash( 4 ); + dev::h256 hash = client->hashFromNumber( 3 ); + uint64_t timestampFromBlockchain = client->blockInfo( hash ).timestamp(); - BOOST_REQUIRE( hash4_dbl == hash4 ); + BOOST_REQUIRE_EQUAL( timestampFromBlockchain, mgr->getBlockTimestamp( 3 ) ); } BOOST_AUTO_TEST_SUITE_END() diff --git a/test/unittests/libskale/SnapshotManager.cpp b/test/unittests/libskale/SnapshotManager.cpp index a50951bf0..318017e97 100644 --- a/test/unittests/libskale/SnapshotManager.cpp +++ b/test/unittests/libskale/SnapshotManager.cpp @@ -160,13 +160,15 @@ BOOST_AUTO_TEST_SUITE( BtrfsTestSuite, BOOST_FIXTURE_TEST_CASE( SimplePositiveTest, BtrfsFixture, *boost::unit_test::precondition( dev::test::run_not_express ) ) { - SnapshotManager mgr( dev::eth::ChainParams(), fs::path( BTRFS_DIR_PATH ), {"vol1", "vol2"} ); + SnapshotManager mgr( dev::eth::ChainParams{}, fs::path( BTRFS_DIR_PATH ) ); + + 
std::string chainDirName = dev::eth::BlockChain::getChainDirName( dev::eth::ChainParams() ); // add files 1 - fs::create_directory( fs::path( BTRFS_DIR_PATH ) / "vol1" / "d11" ); - fs::create_directory( fs::path( BTRFS_DIR_PATH ) / "vol2" / "d21" ); - BOOST_REQUIRE( fs::exists( fs::path( BTRFS_DIR_PATH ) / "vol1" / "d11" ) ); - BOOST_REQUIRE( fs::exists( fs::path( BTRFS_DIR_PATH ) / "vol2" / "d21" ) ); + fs::create_directory( fs::path( BTRFS_DIR_PATH ) / chainDirName / "d11" ); + fs::create_directory( fs::path( BTRFS_DIR_PATH ) / "filestorage" / "d21" ); + BOOST_REQUIRE( fs::exists( fs::path( BTRFS_DIR_PATH ) / chainDirName / "d11" ) ); + BOOST_REQUIRE( fs::exists( fs::path( BTRFS_DIR_PATH ) / "filestorage" / "d21" ) ); auto latest0 = mgr.getLatestSnapshots(); std::pair< int, int > expected0 { 0, 0 }; @@ -174,14 +176,14 @@ BOOST_FIXTURE_TEST_CASE( SimplePositiveTest, BtrfsFixture, // create snapshot 1 and check its presense mgr.doSnapshot( 1 ); - BOOST_REQUIRE( fs::exists( fs::path( BTRFS_DIR_PATH ) / "snapshots" / "1" / "vol1" / "d11" ) ); - BOOST_REQUIRE( fs::exists( fs::path( BTRFS_DIR_PATH ) / "snapshots" / "1" / "vol2" / "d21" ) ); + BOOST_REQUIRE( fs::exists( fs::path( BTRFS_DIR_PATH ) / "snapshots" / "1" / chainDirName / "d11" ) ); + BOOST_REQUIRE( fs::exists( fs::path( BTRFS_DIR_PATH ) / "snapshots" / "1" / "filestorage" / "d21" ) ); // add and remove something - fs::create_directory( fs::path( BTRFS_DIR_PATH ) / "vol1" / "d12" ); - fs::remove( fs::path( BTRFS_DIR_PATH ) / "vol2" / "d21" ); - BOOST_REQUIRE( fs::exists( fs::path( BTRFS_DIR_PATH ) / "vol1" / "d12" ) ); - BOOST_REQUIRE( !fs::exists( fs::path( BTRFS_DIR_PATH ) / "vol2" / "d21" ) ); + fs::create_directory( fs::path( BTRFS_DIR_PATH ) / chainDirName / "d12" ); + fs::remove( fs::path( BTRFS_DIR_PATH ) / "filestorage" / "d21" ); + BOOST_REQUIRE( fs::exists( fs::path( BTRFS_DIR_PATH ) / chainDirName / "d12" ) ); + BOOST_REQUIRE( !fs::exists( fs::path( BTRFS_DIR_PATH ) / "filestorage" / "d21" ) ); auto latest1 = mgr.getLatestSnapshots(); std::pair< int, int > expected1 { 0, 1 }; @@ -189,31 +191,31 @@ BOOST_FIXTURE_TEST_CASE( SimplePositiveTest, BtrfsFixture, // create snapshot 2 and check files 1 and files 2 mgr.doSnapshot( 2 ); - BOOST_REQUIRE( fs::exists( fs::path( BTRFS_DIR_PATH ) / "snapshots" / "2" / "vol1" / "d11" ) ); - BOOST_REQUIRE( fs::exists( fs::path( BTRFS_DIR_PATH ) / "snapshots" / "2" / "vol1" / "d12" ) ); - BOOST_REQUIRE( !fs::exists( fs::path( BTRFS_DIR_PATH ) / "snapshots" / "2" / "vol2" / "d21" ) ); + BOOST_REQUIRE( fs::exists( fs::path( BTRFS_DIR_PATH ) / "snapshots" / "2" / chainDirName / "d11" ) ); + BOOST_REQUIRE( fs::exists( fs::path( BTRFS_DIR_PATH ) / "snapshots" / "2" / chainDirName / "d12" ) ); + BOOST_REQUIRE( !fs::exists( fs::path( BTRFS_DIR_PATH ) / "snapshots" / "2" / "filestorage" / "d21" ) ); // check that files appear/disappear on restore mgr.restoreSnapshot( 1 ); - BOOST_REQUIRE( fs::exists( fs::path( BTRFS_DIR_PATH ) / "vol1" / "d11" ) ); - BOOST_REQUIRE( fs::exists( fs::path( BTRFS_DIR_PATH ) / "vol2" / "d21" ) ); - BOOST_REQUIRE( !fs::exists( fs::path( BTRFS_DIR_PATH ) / "vol1" / "d12" ) ); + BOOST_REQUIRE( fs::exists( fs::path( BTRFS_DIR_PATH ) / chainDirName / "d11" ) ); + BOOST_REQUIRE( fs::exists( fs::path( BTRFS_DIR_PATH ) / "filestorage" / "d21" ) ); + BOOST_REQUIRE( !fs::exists( fs::path( BTRFS_DIR_PATH ) / chainDirName / "d12" ) ); fs::path diff12 = mgr.makeOrGetDiff( 2 ); - btrfs.subvolume._delete( ( BTRFS_DIR_PATH + "/snapshots/2/vol1" ).c_str() ); - btrfs.subvolume._delete( ( 
BTRFS_DIR_PATH + "/snapshots/2/vol2" ).c_str() ); + btrfs.subvolume._delete( ( BTRFS_DIR_PATH + "/snapshots/2/" + chainDirName ).c_str() ); + btrfs.subvolume._delete( ( BTRFS_DIR_PATH + "/snapshots/2/filestorage" ).c_str() ); fs::remove_all( BTRFS_DIR_PATH + "/snapshots/2" ); BOOST_REQUIRE( !fs::exists( fs::path( BTRFS_DIR_PATH ) / "snapshots" / "2" ) ); mgr.importDiff( 2 ); - BOOST_REQUIRE( fs::exists( fs::path( BTRFS_DIR_PATH ) / "snapshots" / "2" / "vol1" / "d11" ) ); - BOOST_REQUIRE( fs::exists( fs::path( BTRFS_DIR_PATH ) / "snapshots" / "2" / "vol1" / "d12" ) ); - BOOST_REQUIRE( !fs::exists( fs::path( BTRFS_DIR_PATH ) / "snapshots" / "2" / "vol2" / "d21" ) ); + BOOST_REQUIRE( fs::exists( fs::path( BTRFS_DIR_PATH ) / "snapshots" / "2" / chainDirName / "d11" ) ); + BOOST_REQUIRE( fs::exists( fs::path( BTRFS_DIR_PATH ) / "snapshots" / "2" / chainDirName / "d12" ) ); + BOOST_REQUIRE( !fs::exists( fs::path( BTRFS_DIR_PATH ) / "snapshots" / "2" / "filestorage" / "d21" ) ); mgr.restoreSnapshot( 2 ); - BOOST_REQUIRE( fs::exists( fs::path( BTRFS_DIR_PATH ) / "vol1" / "d11" ) ); - BOOST_REQUIRE( fs::exists( fs::path( BTRFS_DIR_PATH ) / "vol1" / "d12" ) ); - BOOST_REQUIRE( !fs::exists( fs::path( BTRFS_DIR_PATH ) / "vol2" / "d21" ) ); + BOOST_REQUIRE( fs::exists( fs::path( BTRFS_DIR_PATH ) / chainDirName / "d11" ) ); + BOOST_REQUIRE( fs::exists( fs::path( BTRFS_DIR_PATH ) / chainDirName / "d12" ) ); + BOOST_REQUIRE( !fs::exists( fs::path( BTRFS_DIR_PATH ) / "filestorage" / "d21" ) ); auto latest2 = mgr.getLatestSnapshots(); std::pair< int, int > expected2 { 1, 2 }; @@ -231,14 +233,14 @@ BOOST_FIXTURE_TEST_CASE( SimplePositiveTest, BtrfsFixture, BOOST_FIXTURE_TEST_CASE( NoBtrfsTest, NoBtrfsFixture, *boost::unit_test::precondition( dev::test::run_not_express ) ) { - BOOST_REQUIRE_THROW( SnapshotManager mgr( dev::eth::ChainParams(), fs::path( BTRFS_DIR_PATH ), {"vol1", "vol2"} ), + BOOST_REQUIRE_THROW( SnapshotManager mgr( dev::eth::ChainParams{}, fs::path( BTRFS_DIR_PATH ) ), SnapshotManager::CannotPerformBtrfsOperation ); } BOOST_FIXTURE_TEST_CASE( BadPathTest, BtrfsFixture, *boost::unit_test::precondition( dev::test::run_not_express ) ) { BOOST_REQUIRE_EXCEPTION( - SnapshotManager mgr( dev::eth::ChainParams(), fs::path( BTRFS_DIR_PATH ) / "_invalid", {"vol1", "vol2"} ), + SnapshotManager mgr( dev::eth::ChainParams(), fs::path( BTRFS_DIR_PATH ) / "_invalid" ), SnapshotManager::InvalidPath, [this]( const SnapshotManager::InvalidPath& ex ) -> bool { return ex.path == fs::path( BTRFS_DIR_PATH ) / "_invalid"; } ); @@ -246,14 +248,16 @@ BOOST_FIXTURE_TEST_CASE( BadPathTest, BtrfsFixture, BOOST_FIXTURE_TEST_CASE( InaccessiblePathTest, BtrfsFixture, *boost::unit_test::precondition( []( unsigned long ) -> bool { return false; } ) ) { + std::string chainDirName = dev::eth::BlockChain::getChainDirName( dev::eth::ChainParams() ); + fs::create_directory( fs::path( BTRFS_DIR_PATH ) / "_no_w" ); chmod( ( BTRFS_DIR_PATH + "/_no_w" ).c_str(), 0775 ); - fs::create_directory( fs::path( BTRFS_DIR_PATH ) / "_no_w" / "vol1" ); + fs::create_directory( fs::path( BTRFS_DIR_PATH ) / "_no_w" / chainDirName ); chmod( ( BTRFS_DIR_PATH + "/_no_w/vol1" ).c_str(), 0777 ); fs::create_directory( fs::path( BTRFS_DIR_PATH ) / "_no_x" ); chmod( ( BTRFS_DIR_PATH + "/_no_x" ).c_str(), 0774 ); - fs::create_directory( fs::path( BTRFS_DIR_PATH ) / "_no_x" / "vol1" ); + fs::create_directory( fs::path( BTRFS_DIR_PATH ) / "_no_x" / chainDirName ); chmod( ( BTRFS_DIR_PATH + "/_no_x/vol1" ).c_str(), 0777 ); fs::create_directory( fs::path( 
BTRFS_DIR_PATH ) / "_no_r" ); @@ -267,17 +271,17 @@ BOOST_FIXTURE_TEST_CASE( InaccessiblePathTest, BtrfsFixture, dropRoot(); - BOOST_REQUIRE_EXCEPTION( SnapshotManager mgr( dev::eth::ChainParams(), fs::path( BTRFS_DIR_PATH ) / "_no_w", {"vol1"} ), + BOOST_REQUIRE_EXCEPTION( SnapshotManager mgr( dev::eth::ChainParams(), fs::path( BTRFS_DIR_PATH ) / "_no_w" ), SnapshotManager::CannotCreate, [this]( const SnapshotManager::CannotCreate& ex ) -> bool { return ex.path == fs::path( BTRFS_DIR_PATH ) / "_no_w" / "snapshots"; } ); - BOOST_REQUIRE_EXCEPTION( SnapshotManager mgr( dev::eth::ChainParams(), fs::path( BTRFS_DIR_PATH ) / "_no_x", {"vol1"} ), + BOOST_REQUIRE_EXCEPTION( SnapshotManager mgr( dev::eth::ChainParams(), fs::path( BTRFS_DIR_PATH ) / "_no_x" ), SnapshotManager::CannotCreate, [this]( const SnapshotManager::CannotCreate& ex ) -> bool { return ex.path == fs::path( BTRFS_DIR_PATH ) / "_no_x" / "snapshots"; } ); - BOOST_REQUIRE_EXCEPTION( SnapshotManager mgr( dev::eth::ChainParams(), fs::path( BTRFS_DIR_PATH ) / "_no_r", {"vol1"} ), + BOOST_REQUIRE_EXCEPTION( SnapshotManager mgr( dev::eth::ChainParams(), fs::path( BTRFS_DIR_PATH ) / "_no_r" ), SnapshotManager::CannotCreate, [this]( const SnapshotManager::CannotCreate& ex ) -> bool { return ex.path == fs::path( BTRFS_DIR_PATH ) / "_no_x" / "snapshots"; } ); @@ -285,7 +289,7 @@ BOOST_FIXTURE_TEST_CASE( InaccessiblePathTest, BtrfsFixture, BOOST_FIXTURE_TEST_CASE( SnapshotTest, BtrfsFixture, *boost::unit_test::precondition( dev::test::run_not_express ) ) { - SnapshotManager mgr( dev::eth::ChainParams(), fs::path( BTRFS_DIR_PATH ), {"vol1", "vol2"} ); + SnapshotManager mgr( dev::eth::ChainParams{}, fs::path( BTRFS_DIR_PATH ) ); BOOST_REQUIRE_NO_THROW( mgr.doSnapshot( 2 ) ); BOOST_REQUIRE_THROW( mgr.doSnapshot( 2 ), SnapshotManager::SnapshotPresent ); @@ -314,7 +318,7 @@ BOOST_FIXTURE_TEST_CASE( SnapshotTest, BtrfsFixture, BOOST_FIXTURE_TEST_CASE( RestoreTest, BtrfsFixture, *boost::unit_test::precondition( dev::test::run_not_express ) ) { - SnapshotManager mgr( dev::eth::ChainParams(), fs::path( BTRFS_DIR_PATH ), {"vol1", "vol2"} ); + SnapshotManager mgr( dev::eth::ChainParams{}, fs::path( BTRFS_DIR_PATH ) ); BOOST_REQUIRE_THROW( mgr.restoreSnapshot( 2 ), SnapshotManager::SnapshotAbsent ); @@ -324,15 +328,15 @@ BOOST_FIXTURE_TEST_CASE( RestoreTest, BtrfsFixture, BOOST_REQUIRE_EQUAL( 0, btrfs.subvolume._delete( - ( fs::path( BTRFS_DIR_PATH ) / "snapshots" / "2" / "vol1" ).c_str() ) ); + ( fs::path( BTRFS_DIR_PATH ) / "snapshots" / "2" / "filestorage" ).c_str() ) ); BOOST_REQUIRE_THROW( mgr.restoreSnapshot( 2 ), SnapshotManager::CannotPerformBtrfsOperation ); } BOOST_FIXTURE_TEST_CASE( DiffTest, BtrfsFixture, *boost::unit_test::precondition( dev::test::run_not_express ) ) { - SnapshotManager mgr( dev::eth::ChainParams(), fs::path( BTRFS_DIR_PATH ), {"vol1", "vol2"} ); + SnapshotManager mgr( dev::eth::ChainParams{}, fs::path( BTRFS_DIR_PATH ) ); mgr.doSnapshot( 2 ); - fs::create_directory( fs::path( BTRFS_DIR_PATH ) / "vol1" / "dir" ); + fs::create_directory( fs::path( BTRFS_DIR_PATH ) / "filestorage" / "dir" ); mgr.doSnapshot( 4 ); BOOST_REQUIRE_THROW( mgr.makeOrGetDiff( 3 ), SnapshotManager::SnapshotAbsent ); @@ -353,7 +357,7 @@ BOOST_FIXTURE_TEST_CASE( DiffTest, BtrfsFixture, BOOST_REQUIRE_GT( fs::file_size( tmp ), 0 ); fs::remove( tmp ); - btrfs.subvolume._delete( ( fs::path( BTRFS_DIR_PATH ) / "snapshots" / "4" / "vol1" ).c_str() ); + btrfs.subvolume._delete( ( fs::path( BTRFS_DIR_PATH ) / "snapshots" / "4" / "filestorage" ).c_str() ); 
BOOST_REQUIRE_THROW( tmp = mgr.makeOrGetDiff( 4 ), SnapshotManager::CannotPerformBtrfsOperation ); @@ -363,7 +367,7 @@ BOOST_FIXTURE_TEST_CASE( DiffTest, BtrfsFixture, BOOST_FIXTURE_TEST_CASE( ImportTest, BtrfsFixture, *boost::unit_test::precondition( dev::test::run_not_express ) ) { - SnapshotManager mgr( dev::eth::ChainParams(), fs::path( BTRFS_DIR_PATH ), {"vol1", "vol2"} ); + SnapshotManager mgr( dev::eth::ChainParams{}, fs::path( BTRFS_DIR_PATH ) ); BOOST_REQUIRE_THROW( mgr.importDiff( 8 ), SnapshotManager::InvalidPath ); @@ -375,24 +379,26 @@ BOOST_FIXTURE_TEST_CASE( ImportTest, BtrfsFixture, BOOST_REQUIRE_THROW( mgr.importDiff( 4 ), SnapshotManager::SnapshotPresent ); + std::string chainDirName = dev::eth::BlockChain::getChainDirName( dev::eth::ChainParams() ); + // delete dest - btrfs.subvolume._delete( ( fs::path( BTRFS_DIR_PATH ) / "snapshots" / "4" / "vol1" ).c_str() ); - btrfs.subvolume._delete( ( fs::path( BTRFS_DIR_PATH ) / "snapshots" / "4" / "vol2" ).c_str() ); + btrfs.subvolume._delete( ( fs::path( BTRFS_DIR_PATH ) / "snapshots" / "4" / chainDirName ).c_str() ); + btrfs.subvolume._delete( ( fs::path( BTRFS_DIR_PATH ) / "snapshots" / "4" / "filestorage" ).c_str() ); fs::remove_all( fs::path( BTRFS_DIR_PATH ) / "snapshots" / "4" ); BOOST_REQUIRE_NO_THROW( mgr.importDiff( 4 ) ); // delete dest - btrfs.subvolume._delete( ( fs::path( BTRFS_DIR_PATH ) / "snapshots" / "4" / "vol1" ).c_str() ); - btrfs.subvolume._delete( ( fs::path( BTRFS_DIR_PATH ) / "snapshots" / "4" / "vol2" ).c_str() ); + btrfs.subvolume._delete( ( fs::path( BTRFS_DIR_PATH ) / "snapshots" / "4" / chainDirName ).c_str() ); + btrfs.subvolume._delete( ( fs::path( BTRFS_DIR_PATH ) / "snapshots" / "4" / "filestorage" ).c_str() ); fs::remove_all( fs::path( BTRFS_DIR_PATH ) / "snapshots" / "4" ); // no source - btrfs.subvolume._delete( ( fs::path( BTRFS_DIR_PATH ) / "snapshots" / "2" / "vol1" ).c_str() ); + btrfs.subvolume._delete( ( fs::path( BTRFS_DIR_PATH ) / "snapshots" / "2" / chainDirName ).c_str() ); // BOOST_REQUIRE_THROW( mgr.importDiff( 2, 4 ), SnapshotManager::CannotPerformBtrfsOperation ); - btrfs.subvolume._delete( ( fs::path( BTRFS_DIR_PATH ) / "snapshots" / "2" / "vol2" ).c_str() ); + btrfs.subvolume._delete( ( fs::path( BTRFS_DIR_PATH ) / "snapshots" / "2" / "filestorage" ).c_str() ); fs::remove_all( fs::path( BTRFS_DIR_PATH ) / "snapshots" / "2" ); // BOOST_REQUIRE_THROW( mgr.importDiff( 2, 4 ), SnapshotManager::CannotPerformBtrfsOperation ); } @@ -400,7 +406,7 @@ BOOST_FIXTURE_TEST_CASE( ImportTest, BtrfsFixture, BOOST_FIXTURE_TEST_CASE( SnapshotRotationTest, BtrfsFixture, *boost::unit_test::precondition( dev::test::run_not_express ) ) { - SnapshotManager mgr( dev::eth::ChainParams(), fs::path( BTRFS_DIR_PATH ), {"vol1", "vol2"} ); + SnapshotManager mgr( dev::eth::ChainParams{}, fs::path( BTRFS_DIR_PATH ) ); BOOST_REQUIRE_NO_THROW( mgr.doSnapshot( 1 ) ); sleep( 1 ); @@ -421,7 +427,7 @@ BOOST_FIXTURE_TEST_CASE( SnapshotRotationTest, BtrfsFixture, BOOST_FIXTURE_TEST_CASE( DiffRotationTest, BtrfsFixture, *boost::unit_test::precondition( dev::test::run_not_express ) ) { - SnapshotManager mgr( dev::eth::ChainParams(), fs::path( BTRFS_DIR_PATH ), {"vol1", "vol2"} ); + SnapshotManager mgr( dev::eth::ChainParams{}, fs::path( BTRFS_DIR_PATH ) ); fs::path diff12 = mgr.getDiffPath( 2 ); { @@ -451,7 +457,7 @@ BOOST_FIXTURE_TEST_CASE( DiffRotationTest, BtrfsFixture, BOOST_FIXTURE_TEST_CASE( RemoveSnapshotTest, BtrfsFixture, *boost::unit_test::precondition( dev::test::run_not_express ) ) { - SnapshotManager mgr( 
dev::eth::ChainParams(), fs::path( BTRFS_DIR_PATH ), {"vol1", "vol2"} ); + SnapshotManager mgr( dev::eth::ChainParams{}, fs::path( BTRFS_DIR_PATH ) ); mgr.doSnapshot( 1 ); mgr.doSnapshot( 2 ); @@ -469,7 +475,7 @@ BOOST_FIXTURE_TEST_CASE( RemoveSnapshotTest, BtrfsFixture, BOOST_FIXTURE_TEST_CASE( CleanupTest, BtrfsFixture, *boost::unit_test::precondition( dev::test::run_not_express ) ) { - SnapshotManager mgr( dev::eth::ChainParams(), fs::path( BTRFS_DIR_PATH ), {"vol1", "vol2"} ); + SnapshotManager mgr( dev::eth::ChainParams{}, fs::path( BTRFS_DIR_PATH ) ); mgr.doSnapshot( 1 ); mgr.doSnapshot( 2 ); @@ -482,8 +488,56 @@ BOOST_FIXTURE_TEST_CASE( CleanupTest, BtrfsFixture, BOOST_REQUIRE( fs::exists( fs::path( BTRFS_DIR_PATH ) / "diffs" ) ); - BOOST_REQUIRE( !fs::exists( fs::path( BTRFS_DIR_PATH ) / "vol1" ) ); - BOOST_REQUIRE( !fs::exists( fs::path( BTRFS_DIR_PATH ) / "vol2" ) ); + std::string chainDirName = dev::eth::BlockChain::getChainDirName( dev::eth::ChainParams() ); + + BOOST_REQUIRE( !fs::exists( fs::path( BTRFS_DIR_PATH ) / chainDirName ) ); + BOOST_REQUIRE( !fs::exists( fs::path( BTRFS_DIR_PATH ) / "filestorage" ) ); } +#ifdef HISTORIC_STATE +BOOST_FIXTURE_TEST_CASE( ArchiveNodeTest, BtrfsFixture, + *boost::unit_test::precondition( dev::test::run_not_express ) ) { + auto chainParams = dev::eth::ChainParams(); + chainParams.nodeInfo.archiveMode = true; + SnapshotManager mgr( chainParams, fs::path( BTRFS_DIR_PATH ) ); + + std::string chainDirName = dev::eth::BlockChain::getChainDirName( dev::eth::ChainParams() ); + + // add files to core volumes + fs::create_directory( fs::path( BTRFS_DIR_PATH ) / chainDirName / "d11" ); + fs::create_directory( fs::path( BTRFS_DIR_PATH ) / "filestorage" / "d21" ); + BOOST_REQUIRE( fs::exists( fs::path( BTRFS_DIR_PATH ) / chainDirName / "d11" ) ); + BOOST_REQUIRE( fs::exists( fs::path( BTRFS_DIR_PATH ) / "filestorage" / "d21" ) ); + // archive part + fs::create_directory( fs::path( BTRFS_DIR_PATH ) / "historic_roots" / "d31" ); + fs::create_directory( fs::path( BTRFS_DIR_PATH ) / "historic_state" / "d41" ); + BOOST_REQUIRE( fs::exists( fs::path( BTRFS_DIR_PATH ) / "historic_roots" / "d31" ) ); + BOOST_REQUIRE( fs::exists( fs::path( BTRFS_DIR_PATH ) / "historic_state" / "d41" ) ); + + // create snapshot 1 and check its presense + mgr.doSnapshot( 1 ); + BOOST_REQUIRE( fs::exists( fs::path( BTRFS_DIR_PATH ) / "snapshots" / "1" / chainDirName / "d11" ) ); + BOOST_REQUIRE( fs::exists( fs::path( BTRFS_DIR_PATH ) / "snapshots" / "1" / "filestorage" / "d21" ) ); + BOOST_REQUIRE( fs::exists( fs::path( BTRFS_DIR_PATH ) / "snapshots" / "1" / "historic_roots" / "d31" ) ); + BOOST_REQUIRE( fs::exists( fs::path( BTRFS_DIR_PATH ) / "snapshots" / "1" / "historic_state" / "d41" ) ); + + // make diff for archive node + BOOST_REQUIRE_NO_THROW( mgr.makeOrGetDiff( 1 ) ); + + // delete dest + btrfs.subvolume._delete( ( fs::path( BTRFS_DIR_PATH ) / "snapshots" / "1" / chainDirName ).c_str() ); + btrfs.subvolume._delete( ( fs::path( BTRFS_DIR_PATH ) / "snapshots" / "1" / "filestorage" ).c_str() ); + btrfs.subvolume._delete( ( fs::path( BTRFS_DIR_PATH ) / "snapshots" / "1" / "historic_roots" ).c_str() ); + btrfs.subvolume._delete( ( fs::path( BTRFS_DIR_PATH ) / "snapshots" / "1" / "historic_state" ).c_str() ); + fs::remove_all( fs::path( BTRFS_DIR_PATH ) / "snapshots" / "1" ); + + BOOST_REQUIRE_NO_THROW( mgr.importDiff( 1 ) ); + + BOOST_REQUIRE( fs::exists( fs::path( BTRFS_DIR_PATH ) / "snapshots" / "1" / chainDirName / "d11" ) ); + BOOST_REQUIRE( fs::exists( fs::path( 
BTRFS_DIR_PATH ) / "snapshots" / "1" / "filestorage" / "d21" ) ); + BOOST_REQUIRE( fs::exists( fs::path( BTRFS_DIR_PATH ) / "snapshots" / "1" / "historic_roots" / "d31" ) ); + BOOST_REQUIRE( fs::exists( fs::path( BTRFS_DIR_PATH ) / "snapshots" / "1" / "historic_state" / "d41" ) ); +} +#endif + BOOST_AUTO_TEST_SUITE_END() diff --git a/test/unittests/libweb3jsonrpc/jsonrpc.cpp b/test/unittests/libweb3jsonrpc/jsonrpc.cpp index 1aa4a02a8..948b90ec3 100644 --- a/test/unittests/libweb3jsonrpc/jsonrpc.cpp +++ b/test/unittests/libweb3jsonrpc/jsonrpc.cpp @@ -37,7 +37,6 @@ #include #include #include "genesisGeneration2Config.h" -// SKALE#include #include #include #include @@ -195,15 +194,15 @@ static std::string const c_genesisConfigString = "balance": "0", "code": "0x6080604052348015600f57600080fd5b506004361060325760003560e01c80639b063104146037578063cd16ecbf146062575b600080fd5b606060048036036020811015604b57600080fd5b8101908080359060200190929190505050608d565b005b608b60048036036020811015607657600080fd5b81019080803590602001909291905050506097565b005b8060018190555050565b806000819055505056fea265627a7a7231582029df540a7555533ef4b3f66bc4f9abe138b00117d1496efbfd9d035a48cd595e64736f6c634300050d0032", "storage": { - "0x0": "0x01" - }, + "0x0": "0x01" + }, "nonce": "0" }, "0xD2002000000000000000000000000000000000D2": { "balance": "0", "code": "0x608060405234801561001057600080fd5b50600436106100455760003560e01c806313f44d101461005557806338eada1c146100af5780634ba79dfe146100f357610046565b5b6002801461005357600080fd5b005b6100976004803603602081101561006b57600080fd5b81019080803573ffffffffffffffffffffffffffffffffffffffff169060200190929190505050610137565b60405180821515815260200191505060405180910390f35b6100f1600480360360208110156100c557600080fd5b81019080803573ffffffffffffffffffffffffffffffffffffffff1690602001909291905050506101f4565b005b6101356004803603602081101561010957600080fd5b81019080803573ffffffffffffffffffffffffffffffffffffffff16906020019092919050505061030f565b005b60008060009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff168273ffffffffffffffffffffffffffffffffffffffff16148061019957506101988261042b565b5b806101ed5750600160008373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002060009054906101000a900460ff165b9050919050565b60008054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff16146102b5576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260178152602001807f43616c6c6572206973206e6f7420746865206f776e657200000000000000000081525060200191505060405180910390fd5b60018060008373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002060006101000a81548160ff02191690831515021790555050565b60008054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff16146103d0576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260178152602001807f43616c6c6572206973206e6f7420746865206f776e657200000000000000000081525060200191505060405180910390fd5b6000600160008373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002060006101000a81548160ff02191690831515021790555050565b600080823b90506000811191505091905056fea26469706673582212202aca1f7abb7d020
61b58de9b559eabe1607c880fda3932bbdb2b74fa553e537c64736f6c634300060c0033", "storage": { - }, + }, "nonce": "0" }, "0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b" : { @@ -213,10 +212,10 @@ static std::string const c_genesisConfigString = "storage" : { } }, - "0xD2001300000000000000000000000000000000D4": { - "balance": "0", - "nonce": "0", - "storage": {}, + "0xD2001300000000000000000000000000000000D4": { + "balance": "0", + "nonce": "0", + "storage": {}, "code":"0x608060405234801561001057600080fd5b506004361061004c5760003560e01c80632098776714610051578063b8bd717f1461007f578063d37165fa146100ad578063fdde8d66146100db575b600080fd5b61007d6004803603602081101561006757600080fd5b8101908080359060200190929190505050610109565b005b6100ab6004803603602081101561009557600080fd5b8101908080359060200190929190505050610136565b005b6100d9600480360360208110156100c357600080fd5b8101908080359060200190929190505050610170565b005b610107600480360360208110156100f157600080fd5b8101908080359060200190929190505050610191565b005b60005a90505b815a8203101561011e5761010f565b600080fd5b815a8203101561013257610123565b5050565b60005a90505b815a8203101561014b5761013c565b600060011461015957600080fd5b5a90505b815a8203101561016c5761015d565b5050565b60005a9050600081830390505b805a8303101561018c5761017d565b505050565b60005a90505b815a820310156101a657610197565b60016101b157600080fd5b5a90505b815a820310156101c4576101b5565b505056fea264697066735822122089b72532621e7d1849e444ee6efaad4fb8771258e6f79755083dce434e5ac94c64736f6c63430006000033" } } @@ -316,22 +315,12 @@ JsonRpcFixture( const std::string& _config = "", bool _owner = true, chainParams.sChain.multiTransactionMode = _mtmEnabled; chainParams.nodeInfo.syncNode = _isSyncNode; - // web3.reset( new WebThreeDirect( - // "eth tests", tempDir.path(), "", chainParams, WithExisting::Kill, {"eth"}, - // true ) ); - auto monitor = make_shared< InstanceMonitor >("test"); setenv("DATA_DIR", tempDir.path().c_str(), 1); client.reset( new eth::ClientTest( chainParams, ( int ) chainParams.networkID, shared_ptr< GasPricer >(), NULL, monitor, tempDir.path(), WithExisting::Kill ) ); - // client.reset( - // new eth::Client( chainParams, ( int ) chainParams.networkID, shared_ptr< - // GasPricer >(), - // tempDir.path(), "", WithExisting::Kill, TransactionQueue::Limits{100000, - // 1024} ) ); - client->setAuthor( coinbase.address() ); // wait for 1st block - because it's always empty @@ -369,7 +358,7 @@ JsonRpcFixture( const std::string& _config = "", bool _owner = true, rpcServer.reset( new FullServer( ethFace , new rpc::Net( chainParams ), new rpc::Web3(), // TODO Add version parameter here? 
new rpc::AdminEth( *client, *gasPricer, keyManager, *sessionManager ), - new rpc::Debug( *client, nullptr, "", true), + new rpc::Debug( *client, nullptr, ""), new rpc::Test( *client ) ) ); // @@ -478,18 +467,6 @@ BOOST_AUTO_TEST_CASE( jsonrpc_gasPrice ) { BOOST_CHECK_EQUAL( gasPrice, toJS( 20 * dev::eth::shannon ) ); } -// SKALE disabled -// BOOST_AUTO_TEST_CASE(jsonrpc_isListening) -//{ -// web3->startNetwork(); -// bool listeningOn = rpcClient->net_listening(); -// BOOST_CHECK_EQUAL(listeningOn, web3->isNetworkStarted()); -// -// web3->stopNetwork(); -// bool listeningOff = rpcClient->net_listening(); -// BOOST_CHECK_EQUAL(listeningOff, web3->isNetworkStarted()); -//} - BOOST_AUTO_TEST_CASE( jsonrpc_accounts, *boost::unit_test::precondition( dev::test::run_not_express ) ) { JsonRpcFixture fixture; @@ -517,22 +494,6 @@ BOOST_AUTO_TEST_CASE( jsonrpc_number ) { BOOST_CHECK_EQUAL( numberAfter, fixture.client->number() ); } -// SKALE disabled -// BOOST_AUTO_TEST_CASE(jsonrpc_peerCount) -//{ -// auto peerCount = jsToU256(rpcClient->net_peerCount()); -// BOOST_CHECK_EQUAL(web3->peerCount(), peerCount); -//} - -// BOOST_AUTO_TEST_CASE(jsonrpc_setListening) -//{ -// rpcClient->admin_net_start(adminSession); -// BOOST_CHECK_EQUAL(web3->isNetworkStarted(), true); -// -// rpcClient->admin_net_stop(adminSession); -// BOOST_CHECK_EQUAL(web3->isNetworkStarted(), false); -//} - BOOST_AUTO_TEST_CASE( jsonrpc_netVersion ) { std::string _config = c_genesisConfigString; @@ -540,12 +501,12 @@ BOOST_AUTO_TEST_CASE( jsonrpc_netVersion ) Json::Reader().parse( _config, ret ); // Set chainID = 65535 - ret["params"]["chainID"] = "0xffff"; + ret["params"]["chainID"] = "0xffff"; Json::FastWriter fastWriter; std::string config = fastWriter.write( ret ); JsonRpcFixture fixture( config ); - + auto version = fixture.rpcClient->net_version(); BOOST_CHECK_EQUAL( version, "65535" ); } @@ -588,7 +549,6 @@ BOOST_AUTO_TEST_CASE( eth_sendTransaction ) { BOOST_CHECK_EQUAL( jsToDecimal( balanceString ), "0" ); dev::eth::simulateMining( *( fixture.client ), 1 ); - // BOOST_CHECK_EQUAL(client->blockByNumber(LatestBlock).author(), address); balance = fixture.client->balanceAt( address ); balanceString = fixture.rpcClient->eth_getBalance( toJS( address ), "latest" ); @@ -839,8 +799,8 @@ BOOST_AUTO_TEST_CASE( simple_contract ) { // pragma solidity 0.8.4; // contract test { // uint value; - // function f(uint a) public pure returns(uint d) { - // return a * 7; + // function f(uint a) public pure returns(uint d) { + // return a * 7; // } // function setValue(uint _value) external { // value = _value; @@ -908,191 +868,18 @@ BOOST_AUTO_TEST_CASE( simple_contract ) { transact["to"] = contractAddress; transact["data"] = "0x552410770000000000000000000000000000000000000000000000000000000000000001"; txHash = fixture.rpcClient->eth_sendTransaction( transact ); - dev::eth::mineTransaction( *( fixture.client ), 1 ); + dev::eth::mineTransaction( *( fixture.client ), 1 ); auto res = fixture.rpcClient->eth_getTransactionReceipt( txHash ); - BOOST_REQUIRE_EQUAL( res["status"], string( "0x1" ) ); + BOOST_REQUIRE_EQUAL( res["status"], string( "0x1" ) ); Json::Value inputTx; inputTx["to"] = contractAddress; inputTx["input"] = "0x552410770000000000000000000000000000000000000000000000000000000000000002"; txHash = fixture.rpcClient->eth_sendTransaction( inputTx ); - dev::eth::mineTransaction( *( fixture.client ), 1 ); - res = fixture.rpcClient->eth_getTransactionReceipt( txHash ); - BOOST_REQUIRE_EQUAL( res["status"], string( "0x1" ) ); -} - -/* -// As 
block rotation is not exact now - let's use approximate comparisons -#define REQUIRE_APPROX_EQUAL(a, b) BOOST_REQUIRE(4*(a) > 3*(b) && 4*(a) < 5*(b)) - -BOOST_AUTO_TEST_CASE( logs_range, *boost::unit_test::disabled() ) { - JsonRpcFixture fixture; - dev::eth::simulateMining( *( fixture.client ), 1 ); - - -//pragma solidity >=0.4.10 <0.7.0; - -//contract Logger{ -// fallback() external payable { -// log2(bytes32(block.number+1), bytes32(block.number), "dimalit"); -// } -//} - - string bytecode = - "6080604052348015600f57600080fd5b50607d80601d6000396000f3fe60806040527f64696d616c69740000000000000000000000000000000000000000000000000043600102600143016001026040518082815260200191505060405180910390a200fea2646970667358221220ecafb98cd573366a37976cb7a4489abe5389d1b5989cd7b7136c8eb0c5ba0b5664736f6c63430006000033"; - - Json::Value create; - create["code"] = bytecode; - create["gas"] = "180000"; // TODO or change global default of 90000? - - string deployHash = fixture.rpcClient->eth_sendTransaction( create ); dev::eth::mineTransaction( *( fixture.client ), 1 ); - - // -> blockNumber = 2 (1 for bootstrapAll, 1 for deploy) - - Json::Value deployReceipt = fixture.rpcClient->eth_getTransactionReceipt( deployHash ); - string contractAddress = deployReceipt["contractAddress"].asString(); - - Json::Value filterObj; - filterObj["address"] = contractAddress; - filterObj["fromBlock"] = "0x1"; - string filterId = fixture.rpcClient->eth_newFilter( filterObj ); - - Json::Value res = fixture.rpcClient->eth_getFilterLogs(filterId); - BOOST_REQUIRE(res.isArray()); - BOOST_REQUIRE_EQUAL(res.size(), 0); - res = fixture.rpcClient->eth_getFilterChanges(filterId); - BOOST_REQUIRE(res.isArray()); - BOOST_REQUIRE_EQUAL(res.size(), 0); - - // need blockNumber==2+255 afterwards - for(int i=0; i<255; ++i){ - Json::Value t; - t["from"] = toJS( fixture.coinbase.address() ); - t["value"] = jsToDecimal( "0" ); - t["to"] = contractAddress; - t["gas"] = "99000"; - - std::string txHash = fixture.rpcClient->eth_sendTransaction( t ); - BOOST_REQUIRE( !txHash.empty() ); - - dev::eth::mineTransaction( *( fixture.client ), 1 ); - } - BOOST_REQUIRE_EQUAL(fixture.client->number(), 2 + 255); - - // ask for logs - Json::Value t; - t["fromBlock"] = 0; // really 3 - t["toBlock"] = 251; - t["address"] = contractAddress; - Json::Value logs = fixture.rpcClient->eth_getLogs(t); - BOOST_REQUIRE(logs.isArray()); - BOOST_REQUIRE_EQUAL(logs.size(), 249); - - // check logs - for(size_t i=0; ieth_sendTransaction( t ); - BOOST_REQUIRE( !lastHash.empty() ); - - dev::eth::mineTransaction( *( fixture.client ), 1 ); - } - BOOST_REQUIRE_EQUAL(fixture.client->number(), 512); - - // ask for logs - t["toBlock"] = 512; - logs = fixture.rpcClient->eth_getLogs(t); - BOOST_REQUIRE(logs.isArray()); - REQUIRE_APPROX_EQUAL(logs.size(), 256+64); - - // and filter - res = fixture.rpcClient->eth_getFilterChanges(filterId); - BOOST_REQUIRE_EQUAL(res.size(), 255+255); // NB!! we had pending here, but then they disappeared! - res = fixture.rpcClient->eth_getFilterLogs(filterId); - REQUIRE_APPROX_EQUAL(res.size(), 256+64); - - ///////////////// OTHER CALLS ////////////////// - // HACK this may return DIFFERENT block! because of undeterministic block rotation! 
- string existing = "0x1df"; string existing_hash = logs[256+64-1-1-32]["blockHash"].asString(); - //cerr << logs << endl; - - BOOST_REQUIRE_NO_THROW(res = fixture.rpcClient->eth_getBlockByNumber(existing, true)); - BOOST_REQUIRE_EQUAL(res["number"], existing); - BOOST_REQUIRE(res["transactions"].isArray() && res["transactions"].size() == 1); - BOOST_REQUIRE_THROW(fixture.rpcClient->eth_getBlockByNumber(nonexisting, true), jsonrpc::JsonRpcException); - - BOOST_REQUIRE_NO_THROW(res = fixture.rpcClient->eth_getBlockByHash(existing_hash, false)); - REQUIRE_APPROX_EQUAL(dev::eth::jsToBlockNumber(res["number"].asCString()), dev::eth::jsToBlockNumber(existing)); - BOOST_REQUIRE_THROW(fixture.rpcClient->eth_getBlockByHash(nonexisting_hash, true), jsonrpc::JsonRpcException); - - // - - BOOST_REQUIRE_NO_THROW(res = fixture.rpcClient->eth_getBlockTransactionCountByNumber(existing)); - BOOST_REQUIRE_EQUAL(res.asString(), "0x1"); - BOOST_REQUIRE_THROW(fixture.rpcClient->eth_getBlockTransactionCountByNumber(nonexisting), jsonrpc::JsonRpcException); - - BOOST_REQUIRE_NO_THROW(res = fixture.rpcClient->eth_getBlockTransactionCountByHash(existing_hash)); - BOOST_REQUIRE_EQUAL(res.asString(), "0x1"); - BOOST_REQUIRE_THROW(fixture.rpcClient->eth_getBlockTransactionCountByHash(nonexisting_hash), jsonrpc::JsonRpcException); - - // - - BOOST_REQUIRE_NO_THROW(res = fixture.rpcClient->eth_getUncleCountByBlockNumber(existing)); - BOOST_REQUIRE_EQUAL(res.asString(), "0x0"); - BOOST_REQUIRE_THROW(fixture.rpcClient->eth_getUncleCountByBlockNumber(nonexisting), jsonrpc::JsonRpcException); - - BOOST_REQUIRE_NO_THROW(res = fixture.rpcClient->eth_getUncleCountByBlockHash(existing_hash)); - BOOST_REQUIRE_EQUAL(res.asString(), "0x0"); - BOOST_REQUIRE_THROW(fixture.rpcClient->eth_getUncleCountByBlockHash(nonexisting_hash), jsonrpc::JsonRpcException); - - // - - BOOST_REQUIRE_NO_THROW(res = fixture.rpcClient->eth_getTransactionByBlockNumberAndIndex(existing, "0x0")); - BOOST_REQUIRE_EQUAL(res["blockNumber"], existing); - // HACK disabled for undeterminism BOOST_REQUIRE_EQUAL(res["blockHash"], existing_hash); - BOOST_REQUIRE_EQUAL(res["to"], contractAddress); - BOOST_REQUIRE_THROW(fixture.rpcClient->eth_getTransactionByBlockNumberAndIndex(nonexisting, "0x0"), jsonrpc::JsonRpcException); - - BOOST_REQUIRE_NO_THROW(res = fixture.rpcClient->eth_getTransactionByBlockHashAndIndex(existing_hash, "0x0")); - // HACK disabled for undeterminism BOOST_REQUIRE_EQUAL(res["blockNumber"], existing); - BOOST_REQUIRE_EQUAL(res["blockHash"], existing_hash); - BOOST_REQUIRE_EQUAL(res["to"], contractAddress); - BOOST_REQUIRE_THROW(fixture.rpcClient->eth_getTransactionByBlockHashAndIndex(nonexisting_hash, "0x0"), jsonrpc::JsonRpcException); - - // - - BOOST_REQUIRE_THROW(fixture.rpcClient->eth_getUncleByBlockNumberAndIndex(existing, "0x0"), jsonrpc::JsonRpcException); - BOOST_REQUIRE_THROW(fixture.rpcClient->eth_getUncleByBlockNumberAndIndex(nonexisting, "0x0"), jsonrpc::JsonRpcException); - BOOST_REQUIRE_THROW(fixture.rpcClient->eth_getUncleByBlockHashAndIndex(existing_hash, "0x0"), jsonrpc::JsonRpcException); - BOOST_REQUIRE_THROW(fixture.rpcClient->eth_getUncleByBlockHashAndIndex(nonexisting_hash, "0x0"), jsonrpc::JsonRpcException); - - // - - BOOST_REQUIRE_THROW(res = fixture.rpcClient->eth_getTransactionByHash(deployHash), jsonrpc::JsonRpcException); - BOOST_REQUIRE_NO_THROW(res = fixture.rpcClient->eth_getTransactionByHash(lastHash)); - BOOST_REQUIRE_EQUAL(res["blockNumber"], "0x200"); - - BOOST_REQUIRE_THROW(res = 
fixture.rpcClient->eth_getTransactionReceipt(deployHash), jsonrpc::JsonRpcException); - BOOST_REQUIRE_NO_THROW(res = fixture.rpcClient->eth_getTransactionReceipt(lastHash)); - BOOST_REQUIRE_EQUAL(res["transactionHash"], lastHash); - BOOST_REQUIRE_EQUAL(res["blockNumber"], "0x200"); - BOOST_REQUIRE_EQUAL(res["to"], contractAddress); + res = fixture.rpcClient->eth_getTransactionReceipt( txHash ); + BOOST_REQUIRE_EQUAL( res["status"], string( "0x1" ) ); } -*/ BOOST_AUTO_TEST_CASE( deploy_contract_from_owner ) { JsonRpcFixture fixture( c_genesisConfigString ); @@ -1640,76 +1427,6 @@ BOOST_AUTO_TEST_CASE( web3_sha3, "0xc6888fa159d67f77c2f3d7a402e199802766bd7e8d4d1ecd2274fc920265d56a", result ); } -// SKALE disabled -// BOOST_AUTO_TEST_CASE(debugAccountRangeAtFinalBlockState) -//{ -// // mine to get some balance at coinbase -// dev::eth::mine(*(client), 1); - -// // send transaction to have non-emtpy block -// Address receiver = Address::random(); -// Json::Value tx; -// tx["from"] = toJS(coinbase.address()); -// tx["value"] = toJS(10); -// tx["to"] = toJS(receiver); -// tx["gas"] = toJS(EVMSchedule().txGas); -// tx["gasPrice"] = toJS(10 * dev::eth::szabo); -// string txHash = rpcClient->eth_sendTransaction(tx); -// BOOST_REQUIRE(!txHash.empty()); - -// dev::eth::mineTransaction(*(client), 1); - -// string receiverHash = toString(sha3(receiver)); - -// // receiver doesn't exist in the beginning of the 2nd block -// Json::Value result = rpcClient->debug_accountRangeAt("2", 0, "0", 100); -// BOOST_CHECK(!result["addressMap"].isMember(receiverHash)); - -// // receiver exists in the end of the 2nd block -// result = rpcClient->debug_accountRangeAt("2", 1, "0", 100); -// BOOST_CHECK(result["addressMap"].isMember(receiverHash)); -// BOOST_CHECK_EQUAL(result["addressMap"][receiverHash], toString(receiver)); -//} - -// SKALE disabled -// BOOST_AUTO_TEST_CASE(debugStorageRangeAtFinalBlockState) -//{ -// // mine to get some balance at coinbase -// dev::eth::mine(*(client), 1); - -// // pragma solidity ^0.4.22; -// // contract test -// //{ -// // uint hello = 7; -// //} -// string initCode = -// "608060405260076000553415601357600080fd5b60358060206000396000" -// "f3006080604052600080fd00a165627a7a7230582006db0551577963b544" -// "3e9501b4b10880e186cff876cd360e9ad6e4181731fcdd0029"; - -// Json::Value tx; -// tx["code"] = initCode; -// tx["from"] = toJS(coinbase.address()); -// string txHash = rpcClient->eth_sendTransaction(tx); - -// dev::eth::mineTransaction(*(client), 1); - -// Json::Value receipt = rpcClient->eth_getTransactionReceipt(txHash); -// string contractAddress = receipt["contractAddress"].asString(); - -// // contract doesn't exist in the beginning of the 2nd block -// Json::Value result = rpcClient->debug_storageRangeAt("2", 0, contractAddress, "0", 100); -// BOOST_CHECK(result["storage"].empty()); - -// // contracts exists in the end of the 2nd block -// result = rpcClient->debug_storageRangeAt("2", 1, contractAddress, "0", 100); -// BOOST_CHECK(!result["storage"].empty()); -// string keyHash = toJS(sha3(u256{0})); -// BOOST_CHECK(!result["storage"][keyHash].empty()); -// BOOST_CHECK_EQUAL(result["storage"][keyHash]["key"].asString(), "0x00"); -// BOOST_CHECK_EQUAL(result["storage"][keyHash]["value"].asString(), "0x07"); -//} - BOOST_AUTO_TEST_CASE( test_importRawBlock ) { JsonRpcFixture fixture( c_genesisConfigString ); string blockHash = fixture.rpcClient->test_importRawBlock( @@ -2189,7 +1906,7 @@ BOOST_AUTO_TEST_CASE( transactionWithoutFunds ) { 
"0200191505060405180910390f35b600081600081905550600190509190" "505600a165627a7a72305820d8407d9cdaaf82966f3fa7a3e665b8cf4e6" "5ee8909b83094a3f856b9051274500029"; - + auto senderAddress = fixture.coinbase.address(); Json::Value create; @@ -2298,7 +2015,7 @@ contract Logger{ }// j overflow } } -*/ +*/ string bytecode = "6080604052348015600f57600080fd5b50609b8061001e6000396000f3fe608060405260015460001b60005460001b4360001b4360001b6040518082815260200191505060405180910390a3600160008154809291906001019190505550600a6001541415606357600060018190555060008081548092919060010191905055505b00fea2646970667358221220fdf2f98961b803b6b32dfc9be766990cbdb17559d9a03724d12fc672e33804b164736f6c634300060c0033"; @@ -2617,7 +2334,7 @@ contract TestEstimateGas { BOOST_AUTO_TEST_CASE( storage_limit_contract ) { JsonRpcFixture fixture; dev::eth::simulateMining( *( fixture.client ), 10 ); - + // pragma solidity 0.4.25; // contract TestStorageLimit { @@ -2645,18 +2362,18 @@ BOOST_AUTO_TEST_CASE( storage_limit_contract ) { // function zero(uint256 index) public { // storageArray[index] = 0; // } - + // function strangeFunction(uint256 index) public { // storageArray[index] = 1; // storageArray[index] = 0; // storageArray[index] = 2; // } // } - + std::string bytecode = "0x608060405234801561001057600080fd5b5061034f806100206000396000f300608060405260043610610083576000357c0100000000000000000000000000000000000000000000000000000000900463ffffffff1680630e031ab1146100885780631007f753146100c95780636057361d146100f6578063c298557814610123578063c67cd8841461013a578063d269ad4e14610167578063e0353e5914610194575b600080fd5b34801561009457600080fd5b506100b3600480360381019080803590602001909291905050506101c1565b6040518082815260200191505060405180910390f35b3480156100d557600080fd5b506100f4600480360381019080803590602001909291905050506101e4565b005b34801561010257600080fd5b5061012160048036038101908080359060200190929190505050610204565b005b34801561012f57600080fd5b50610138610233565b005b34801561014657600080fd5b506101656004803603810190808035906020019092919050505061026c565b005b34801561017357600080fd5b50610192600480360381019080803590602001909291905050506102a3565b005b3480156101a057600080fd5b506101bf60048036038101908080359060200190929190505050610302565b005b6000818154811015156101d057fe5b906000526020600020016000915090505481565b6000818154811015156101f357fe5b906000526020600020016000905550565b600081908060018154018082558091505090600182039060005260206000200160009091929091909150555050565b60008080549050905060006001908060018154018082558091505090600182039060005260206000200160009091929091909150555050565b60008190806001815401808255809150509060018203906000526020600020016000909192909190915055506102a0610233565b50565b60016000828154811015156102b457fe5b9060005260206000200181905550600080828154811015156102d257fe5b906000526020600020018190555060026000828154811015156102f157fe5b906000526020600020018190555050565b6000808281548110151561031257fe5b9060005260206000200181905550505600a165627a7a723058201ed095336772c55688864a6b45ca6ab89311c5533f8d38cdf931f1ce38be78080029"; - + auto senderAddress = fixture.coinbase.address(); - + Json::Value create; create["from"] = toJS( senderAddress ); create["data"] = bytecode; @@ -2684,7 +2401,7 @@ BOOST_AUTO_TEST_CASE( storage_limit_contract ) { txHash = fixture.rpcClient->eth_sendTransaction( txPushValueAndCall ); dev::eth::mineTransaction( *( fixture.client ), 1 ); BOOST_REQUIRE( fixture.client->state().storageUsed( contract ) == 96 ); - + Json::Value txPushValue; // call store(2) txPushValue["to"] = contractAddress; txPushValue["data"] = 
"0x6057361d0000000000000000000000000000000000000000000000000000000000000002"; @@ -2693,7 +2410,7 @@ BOOST_AUTO_TEST_CASE( storage_limit_contract ) { txHash = fixture.rpcClient->eth_sendTransaction( txPushValue ); dev::eth::mineTransaction( *( fixture.client ), 1 ); BOOST_REQUIRE( fixture.client->state().storageUsed( contract ) == 128 ); - + Json::Value txThrow; // trying to call store(3) txThrow["to"] = contractAddress; txThrow["data"] = "0x6057361d0000000000000000000000000000000000000000000000000000000000000003"; @@ -2702,7 +2419,7 @@ BOOST_AUTO_TEST_CASE( storage_limit_contract ) { txHash = fixture.rpcClient->eth_sendTransaction( txThrow ); dev::eth::mineTransaction( *( fixture.client ), 1 ); BOOST_REQUIRE( fixture.client->state().storageUsed( contract ) == 128 ); - + Json::Value txEraseValue; // call erase(2) txEraseValue["to"] = contractAddress; txEraseValue["data"] = "0x1007f7530000000000000000000000000000000000000000000000000000000000000002"; @@ -2906,7 +2623,7 @@ BOOST_AUTO_TEST_CASE( storage_limit_predeployed ) { JsonRpcFixture fixture( c_genesisConfigString ); dev::eth::simulateMining( *( fixture.client ), 20 ); BOOST_REQUIRE( fixture.client->state().storageUsedTotal() == 64 ); - + string contractAddress = "0xC2002000000000000000000000000000000000C2"; string senderAddress = toJS(fixture.coinbase.address()); @@ -3134,7 +2851,7 @@ BOOST_AUTO_TEST_CASE( EIP1898Calls ) { Json::Value eip1898BadFormed3; eip1898BadFormed3["blockHash"] = dev::h256::random().hex(); eip1898BadFormed3["requireCanonical"] = 228; - + Json::Value eip1898BadFormed4; eip1898BadFormed4["blockNumber"] = dev::h256::random().hex(); eip1898BadFormed4["requireCanonical"] = true; @@ -3145,7 +2862,7 @@ BOOST_AUTO_TEST_CASE( EIP1898Calls ) { std::array wellFormedCalls = { eip1898WellFormed, eip1898WellFormed1, eip1898WellFormed2, eip1898WellFormed3 }; std::array badFormedCalls = { eip1898BadFormed, eip1898BadFormed1, eip1898BadFormed2, eip1898BadFormed3, eip1898BadFormed4, eip1898BadFormed5 }; - + auto address = fixture.coinbase.address(); std::string response; @@ -3156,7 +2873,7 @@ BOOST_AUTO_TEST_CASE( EIP1898Calls ) { for (const auto& call: badFormedCalls) { BOOST_REQUIRE_THROW(fixture.rpcClient->eth_getBalanceEIP1898( toJS( address ), call ), jsonrpc::JsonRpcException); } - + for (const auto& call: wellFormedCalls) { Json::Value transactionCallObject; transactionCallObject["to"] = "0x0000000000000000000000000000000000000005"; @@ -3753,15 +3470,15 @@ BOOST_AUTO_TEST_CASE( deploy_controller_generation2 ) { BOOST_AUTO_TEST_CASE( deployment_control_v2 ) { // Inserting ConfigController mockup into config and enabling flexibleDeploymentPatch. 
- // ConfigController mockup contract: - + // ConfigController mockup contract: + // pragma solidity ^0.8.9; // contract ConfigController { // bool public freeContractDeployment = false; // function isAddressWhitelisted(address addr) external view returns (bool) { // return false; // } - // function isDeploymentAllowed(address origin, address sender) + // function isDeploymentAllowed(address origin, address sender) // external view returns (bool) { // return freeContractDeployment; // } @@ -3770,7 +3487,7 @@ BOOST_AUTO_TEST_CASE( deployment_control_v2 ) { // } // } - string configControllerV2 = + string configControllerV2 = "0x608060405234801561001057600080fd5b506004361061004c576000" "3560e01c806313f44d1014610051578063a2306c4f14610081578063d0" "f557f41461009f578063f7e2a91b146100cf575b600080fd5b61006b60" @@ -3951,7 +3668,7 @@ BOOST_AUTO_TEST_CASE( PrecompiledPrintFakeEth, *boost::unit_test::precondition( balance = fixture.client->balanceAt( jsToAddress( "0x5C4e11842E8Be09264DC1976943571D7AF6d00f8" ) ); BOOST_REQUIRE_EQUAL( balance, 16 ); - Json::Value printFakeEthCall; + Json::Value printFakeEthCall; printFakeEthCall["data"] = "0x5C4e11842E8Be09264DC1976943571D7AF6d00f80000000000000000000000000000000000000000000000000000000000000010"; printFakeEthCall["from"] = "0x5C4e11842E8be09264dc1976943571d7Af6d00F9"; printFakeEthCall["to"] = "0000000000000000000000000000000000000006"; @@ -3962,7 +3679,7 @@ BOOST_AUTO_TEST_CASE( PrecompiledPrintFakeEth, *boost::unit_test::precondition( BOOST_REQUIRE_EQUAL( balance, 16 ); // pragma solidity ^0.4.25; - + // contract Caller { // function call() public view { // bool status; @@ -4122,52 +3839,6 @@ BOOST_AUTO_TEST_CASE( mtm_import_future_txs ) { // TODO: Enable for multitransaction mode checking -// BOOST_AUTO_TEST_CASE( check_multitransaction_mode ) { -// auto _config = c_genesisConfigString; -// Json::Value ret; -// Json::Reader().parse( _config, ret ); -// /* Test contract -// pragma solidity ^0.8.9; -// contract Test { -// function isMTMEnabled() external pure returns (bool) { -// return true; -// } -// } -// */ -// ret["accounts"]["0xD2002000000000000000000000000000000000D2"]["code"] = "0x6080604052348015600f57600080fd5b506004361060285760003560e01c8063bad0396e14602d575b600080fd5b60336047565b604051603e91906069565b60405180910390f35b60006001905090565b60008115159050919050565b6063816050565b82525050565b6000602082019050607c6000830184605c565b9291505056fea26469706673582212208d89ce57f69b9b53e8f0808cbaa6fa8fd21a495ab92d0b48b6e47d903989835464736f6c63430008090033"; -// Json::FastWriter fastWriter; -// std::string config = fastWriter.write( ret ); -// JsonRpcFixture fixture( config ); -// bool mtm = fixture.client->checkMultitransactionMode(fixture.client->state(), fixture.client->gasBidPrice()); -// BOOST_REQUIRE( mtm ); -// } - -// BOOST_AUTO_TEST_CASE( check_multitransaction_mode_false ) { -// auto _config = c_genesisConfigString; -// Json::Value ret; -// Json::Reader().parse( _config, ret ); -// /* Test contract -// pragma solidity ^0.8.9; -// contract Test { -// function isMTMEnabled() external pure returns (bool) { -// return false; -// } -// } -// */ -// ret["accounts"]["0xD2002000000000000000000000000000000000D2"]["code"] = 
"0x6080604052348015600f57600080fd5b506004361060285760003560e01c8063bad0396e14602d575b600080fd5b60336047565b604051603e91906065565b60405180910390f35b600090565b60008115159050919050565b605f81604c565b82525050565b6000602082019050607860008301846058565b9291505056fea2646970667358221220c88541a65627d63d4b0cc04094bc5b2154a2700c97677dcd5de2ee2a27bed58564736f6c63430008090033"; -// Json::FastWriter fastWriter; -// std::string config = fastWriter.write( ret ); -// JsonRpcFixture fixture( config ); -// bool mtm = fixture.client->checkMultitransactionMode(fixture.client->state(), fixture.client->gasBidPrice()); -// BOOST_REQUIRE( !mtm ); -// } - -// BOOST_AUTO_TEST_CASE( check_multitransaction_mode_empty ) { -// JsonRpcFixture fixture( c_genesisConfigString ); -// bool mtm = fixture.client->checkMultitransactionMode(fixture.client->state(), fixture.client->gasBidPrice()); -// BOOST_REQUIRE( !mtm ); -// } - // historic node shall ignore invalid transactions in block BOOST_AUTO_TEST_CASE( skip_invalid_transactions ) { JsonRpcFixture fixture( c_genesisConfigString, true, true, false, true ); @@ -4463,11 +4134,11 @@ BOOST_AUTO_TEST_SUITE_END() BOOST_AUTO_TEST_SUITE( FilestorageCacheSuite ) BOOST_AUTO_TEST_CASE( cached_filestorage ) { - + auto _config = c_genesisConfigString; Json::Value ret; Json::Reader().parse( _config, ret ); - ret["skaleConfig"]["sChain"]["revertableFSPatchTimestamp"] = 1; + ret["skaleConfig"]["sChain"]["revertableFSPatchTimestamp"] = 1; Json::FastWriter fastWriter; std::string config = fastWriter.write( ret ); RestrictedAddressFixture fixture( config ); @@ -4497,7 +4168,7 @@ BOOST_AUTO_TEST_CASE( uncached_filestorage ) { auto _config = c_genesisConfigString; Json::Value ret; Json::Reader().parse( _config, ret ); - ret["skaleConfig"]["sChain"]["revertableFSPatchTimestamp"] = 9999999999999; + ret["skaleConfig"]["sChain"]["revertableFSPatchTimestamp"] = 9999999999999; Json::FastWriter fastWriter; std::string config = fastWriter.write( ret ); RestrictedAddressFixture fixture( config );