diff --git a/BUILD.md b/BUILD.md index b4201ef0437..31755c36919 100644 --- a/BUILD.md +++ b/BUILD.md @@ -376,11 +376,11 @@ stored inside the build directory, as either of: | Option | Default Value | Description | | --- | ---| ---| | `assert` | OFF | Enable assertions. -| `reporting` | OFF | Build the reporting mode feature. | | `coverage` | OFF | Prepare the coverage report. | -| `tests` | ON | Build tests. | -| `unity` | ON | Configure a unity build. | | `san` | N/A | Enable a sanitizer with Clang. Choices are `thread` and `address`. | +| `tests` | OFF | Build tests. | +| `unity` | ON | Configure a unity build. | +| `xrpld` | OFF | Build the xrpld (`rippled`) application, and not just the libxrpl library. | [Unity builds][5] may be faster for the first build (at the cost of much more memory) since they concatenate sources into fewer diff --git a/CMakeLists.txt b/CMakeLists.txt index a69583f9cbf..0c34f89397d 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -115,15 +115,6 @@ else() endif() target_link_libraries(ripple_libs INTERFACE ${nudb}) -if(reporting) - find_package(cassandra-cpp-driver REQUIRED) - find_package(PostgreSQL REQUIRED) - target_link_libraries(ripple_libs INTERFACE - cassandra-cpp-driver::cassandra-cpp-driver - PostgreSQL::PostgreSQL - ) -endif() - if(coverage) include(RippledCov) endif() diff --git a/README.md b/README.md index 45dc2005ea2..cc002a2dd82 100644 --- a/README.md +++ b/README.md @@ -8,7 +8,7 @@ The [XRP Ledger](https://xrpl.org/) is a decentralized cryptographic ledger powe ## rippled The server software that powers the XRP Ledger is called `rippled` and is available in this repository under the permissive [ISC open-source license](LICENSE.md). The `rippled` server software is written primarily in C++ and runs on a variety of platforms. The `rippled` server software can run in several modes depending on its [configuration](https://xrpl.org/rippled-server-modes.html). -If you are interested in running an **API Server** (including a **Full History Server**) or a **Reporting Mode** server, take a look at [Clio](https://github.com/XRPLF/clio). rippled Reporting Mode is expected to be replaced by Clio. +If you are interested in running an **API Server** (including a **Full History Server**), take a look at [Clio](https://github.com/XRPLF/clio). (rippled Reporting Mode has been replaced by Clio.) ### Build from Source diff --git a/bin/ci/README.md b/bin/ci/README.md deleted file mode 100644 index 32ae24b3a20..00000000000 --- a/bin/ci/README.md +++ /dev/null @@ -1,26 +0,0 @@ -# Continuous Integration (CI) Scripts - -In this directory are two scripts, `build.sh` and `test.sh` used for building -and testing rippled. - -(For now, they assume Bash and Linux. Once I get Windows containers for -testing, I'll try them there, but if Bash is not available, then they will -soon be joined by PowerShell scripts `build.ps` and `test.ps`.) - -We don't want these scripts to require arcane invocations that can only be -pieced together from within a CI configuration. We want something that humans -can easily invoke, read, and understand, for when we eventually have to test -and debug them interactively. That means: - -(1) They should work with no arguments. -(2) They should document their arguments. -(3) They should expand short arguments into long arguments. - -While we want to provide options for common use cases, we don't need to offer -the kitchen sink. We can rightfully expect users with esoteric, complicated -needs to write their own scripts. 
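For illustration, a usage sketch of the two scripts under these conventions; every argument is an environment variable with a default, so each command also works bare (variable names are the ones documented in build.sh and test.sh below; the values shown are hypothetical):

    # Build with the documented defaults, then with explicit settings:
    bin/ci/build.sh
    GENERATOR=Ninja COMPILER=clang BUILD_TYPE=Release bin/ci/build.sh

    # Run the standard suite, then the manual tests across 4 jobs:
    bin/ci/test.sh
    MANUAL_TESTS=true CONCURRENT_TESTS=4 RIPPLED=build/rippled bin/ci/test.sh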
- -To make argument-handling easy for us, the implementers, we can just take all -arguments from environment variables. They have the nice advantage that every -command-line uses named arguments. For the benefit of us and our users, we -document those variables at the top of each script. diff --git a/bin/ci/build.sh b/bin/ci/build.sh deleted file mode 100755 index fa7a0c96829..00000000000 --- a/bin/ci/build.sh +++ /dev/null @@ -1,31 +0,0 @@ -#!/usr/bin/env bash - -set -o xtrace -set -o errexit - -# The build system. Either 'Unix Makefiles' or 'Ninja'. -GENERATOR=${GENERATOR:-Unix Makefiles} -# The compiler. Either 'gcc' or 'clang'. -COMPILER=${COMPILER:-gcc} -# The build type. Either 'Debug' or 'Release'. -BUILD_TYPE=${BUILD_TYPE:-Debug} -# Additional arguments to CMake. -# We use the `-` substitution here instead of `:-` so that callers can erase -# the default by setting `$CMAKE_ARGS` to the empty string. -CMAKE_ARGS=${CMAKE_ARGS-'-Dwerr=ON'} - -# https://gitlab.kitware.com/cmake/cmake/issues/18865 -CMAKE_ARGS="-DBoost_NO_BOOST_CMAKE=ON ${CMAKE_ARGS}" - -if [[ ${COMPILER} == 'gcc' ]]; then - export CC='gcc' - export CXX='g++' -elif [[ ${COMPILER} == 'clang' ]]; then - export CC='clang' - export CXX='clang++' -fi - -mkdir build -cd build -cmake -G "${GENERATOR}" -DCMAKE_BUILD_TYPE=${BUILD_TYPE} ${CMAKE_ARGS} .. -cmake --build . -- -j $(nproc) diff --git a/bin/ci/test.sh b/bin/ci/test.sh deleted file mode 100755 index 11615d732b7..00000000000 --- a/bin/ci/test.sh +++ /dev/null @@ -1,41 +0,0 @@ -#!/usr/bin/env bash - -set -o xtrace -set -o errexit - -# Set to 'true' to run the known "manual" tests in rippled. -MANUAL_TESTS=${MANUAL_TESTS:-false} -# The maximum number of concurrent tests. -CONCURRENT_TESTS=${CONCURRENT_TESTS:-$(nproc)} -# The path to rippled. -RIPPLED=${RIPPLED:-build/rippled} -# Additional arguments to rippled. -RIPPLED_ARGS=${RIPPLED_ARGS:-} - -function join_by { local IFS="$1"; shift; echo "$*"; } - -declare -a manual_tests=( - 'beast.chrono.abstract_clock' - 'beast.unit_test.print' - 'ripple.NodeStore.Timing' - 'ripple.app.Flow_manual' - 'ripple.app.NoRippleCheckLimits' - 'ripple.app.PayStrandAllPairs' - 'ripple.consensus.ByzantineFailureSim' - 'ripple.consensus.DistributedValidators' - 'ripple.consensus.ScaleFreeSim' - 'ripple.tx.CrossingLimits' - 'ripple.tx.FindOversizeCross' - 'ripple.tx.Offer_manual' - 'ripple.tx.OversizeMeta' - 'ripple.tx.PlumpBook' -) - -if [[ ${MANUAL_TESTS} == 'true' ]]; then - RIPPLED_ARGS+=" --unittest=$(join_by , "${manual_tests[@]}")" -else - RIPPLED_ARGS+=" --unittest --quiet --unittest-log" -fi -RIPPLED_ARGS+=" --unittest-jobs ${CONCURRENT_TESTS}" - -${RIPPLED} ${RIPPLED_ARGS} diff --git a/bin/ci/ubuntu/build-and-test.sh b/bin/ci/ubuntu/build-and-test.sh deleted file mode 100755 index 2c1734863fb..00000000000 --- a/bin/ci/ubuntu/build-and-test.sh +++ /dev/null @@ -1,274 +0,0 @@ -#!/usr/bin/env bash -set -ex - -function version_ge() { test "$(echo "$@" | tr " " "\n" | sort -rV | head -n 1)" == "$1"; } - -__dirname=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd ) -echo "using CC: ${CC}" -"${CC}" --version -export CC - -COMPNAME=$(basename $CC) -echo "using CXX: ${CXX:-notset}" -if [[ $CXX ]]; then - "${CXX}" --version - export CXX -fi -: ${BUILD_TYPE:=Debug} -echo "BUILD TYPE: ${BUILD_TYPE}" - -: ${TARGET:=install} -echo "BUILD TARGET: ${TARGET}" - -JOBS=${NUM_PROCESSORS:-2} -if [[ ${TRAVIS:-false} != "true" ]]; then - JOBS=$((JOBS+1)) -fi - -if [[ ! 
-z "${CMAKE_EXE:-}" ]] ; then - export PATH="$(dirname ${CMAKE_EXE}):$PATH" -fi - -if [ -x /usr/bin/time ] ; then - : ${TIME:="Duration: %E"} - export TIME - time=/usr/bin/time -else - time= -fi - -echo "Building rippled" -: ${CMAKE_EXTRA_ARGS:=""} -if [[ ${NINJA_BUILD:-} == true ]]; then - CMAKE_EXTRA_ARGS+=" -G Ninja" -fi - -coverage=false -if [[ "${TARGET}" == "coverage" ]] ; then - echo "coverage option detected." - coverage=true -fi - -cmake --version -CMAKE_VER=$(cmake --version | cut -d " " -f 3 | head -1) - -# -# allow explicit setting of the name of the build -# dir, otherwise default to the compiler.build_type -# -: "${BUILD_DIR:=${COMPNAME}.${BUILD_TYPE}}" -BUILDARGS="--target ${TARGET}" -BUILDTOOLARGS="" -if version_ge $CMAKE_VER "3.12.0" ; then - BUILDARGS+=" --parallel" -fi - -if [[ ${NINJA_BUILD:-} == false ]]; then - if version_ge $CMAKE_VER "3.12.0" ; then - BUILDARGS+=" ${JOBS}" - else - BUILDTOOLARGS+=" -j ${JOBS}" - fi -fi - -if [[ ${VERBOSE_BUILD:-} == true ]]; then - CMAKE_EXTRA_ARGS+=" -DCMAKE_VERBOSE_MAKEFILE=ON" - if version_ge $CMAKE_VER "3.14.0" ; then - BUILDARGS+=" --verbose" - else - if [[ ${NINJA_BUILD:-} == false ]]; then - BUILDTOOLARGS+=" verbose=1" - else - BUILDTOOLARGS+=" -v" - fi - fi -fi - -if [[ ${USE_CCACHE:-} == true ]]; then - echo "using ccache with basedir [${CCACHE_BASEDIR:-}]" - CMAKE_EXTRA_ARGS+=" -DCMAKE_C_COMPILER_LAUNCHER=ccache -DCMAKE_CXX_COMPILER_LAUNCHER=ccache" -fi -if [ -d "build/${BUILD_DIR}" ]; then - rm -rf "build/${BUILD_DIR}" -fi - -mkdir -p "build/${BUILD_DIR}" -pushd "build/${BUILD_DIR}" - -# cleanup possible artifacts -rm -fv CMakeFiles/CMakeOutput.log CMakeFiles/CMakeError.log -# Clean up NIH directories which should be git repos, but aren't -for nih_path in ${NIH_CACHE_ROOT}/*/*/*/src ${NIH_CACHE_ROOT}/*/*/src -do - for dir in lz4 snappy rocksdb - do - if [ -e ${nih_path}/${dir} -a \! -e ${nih_path}/${dir}/.git ] - then - ls -la ${nih_path}/${dir}* - rm -rfv ${nih_path}/${dir}* - fi - done -done - -# generate -${time} cmake ../.. -DCMAKE_BUILD_TYPE=${BUILD_TYPE} ${CMAKE_EXTRA_ARGS} -# Display the cmake output, to help with debugging if something fails -for file in CMakeOutput.log CMakeError.log -do - if [ -f CMakeFiles/${file} ] - then - ls -l CMakeFiles/${file} - cat CMakeFiles/${file} - fi -done -# build -export DESTDIR=$(pwd)/_INSTALLED_ - -${time} eval cmake --build . 
${BUILDARGS} -- ${BUILDTOOLARGS} - -if [[ ${TARGET} == "docs" ]]; then - ## mimic the standard test output for docs build - ## to make controlling processes like jenkins happy - if [ -f docs/html/index.html ]; then - echo "1 case, 1 test total, 0 failures" - else - echo "1 case, 1 test total, 1 failures" - fi - exit -fi -popd - -if [[ "${TARGET}" == "validator-keys" ]] ; then - export APP_PATH="$PWD/build/${BUILD_DIR}/validator-keys/validator-keys" -else - export APP_PATH="$PWD/build/${BUILD_DIR}/rippled" -fi -echo "using APP_PATH: ${APP_PATH}" - -# See what we've actually built -ldd ${APP_PATH} - -: ${APP_ARGS:=} - -if [[ "${TARGET}" == "validator-keys" ]] ; then - APP_ARGS="--unittest" -else - function join_by { local IFS="$1"; shift; echo "$*"; } - - # This is a list of manual tests - # in rippled that we want to run - # ORDER matters here...sorted in approximately - # descending execution time (longest running tests at top) - declare -a manual_tests=( - 'ripple.ripple_data.reduce_relay_simulate' - 'ripple.tx.Offer_manual' - 'ripple.tx.CrossingLimits' - 'ripple.tx.PlumpBook' - 'ripple.app.Flow_manual' - 'ripple.tx.OversizeMeta' - 'ripple.consensus.DistributedValidators' - 'ripple.app.NoRippleCheckLimits' - 'ripple.ripple_data.compression' - 'ripple.NodeStore.Timing' - 'ripple.consensus.ByzantineFailureSim' - 'beast.chrono.abstract_clock' - 'beast.unit_test.print' - ) - if [[ ${TRAVIS:-false} != "true" ]]; then - # these two tests cause travis CI to run out of memory. - # TODO: investigate possible workarounds. - manual_tests=( - 'ripple.consensus.ScaleFreeSim' - 'ripple.tx.FindOversizeCross' - "${manual_tests[@]}" - ) - fi - - if [[ ${MANUAL_TESTS:-} == true ]]; then - APP_ARGS+=" --unittest=$(join_by , "${manual_tests[@]}")" - else - APP_ARGS+=" --unittest --quiet --unittest-log" - fi - if [[ ${coverage} == false && ${PARALLEL_TESTS:-} == true ]]; then - APP_ARGS+=" --unittest-jobs ${JOBS}" - fi - - if [[ ${IPV6_TESTS:-} == true ]]; then - APP_ARGS+=" --unittest-ipv6" - fi -fi - -if [[ ${coverage} == true && $CC =~ ^gcc ]]; then - # Push the results (lcov.info) to codecov - codecov -X gcov # don't even try and look for .gcov files ;) - find . -name "*.gcda" | xargs rm -f -fi - -if [[ ${SKIP_TESTS:-} == true ]]; then - echo "skipping tests." - exit -fi - -ulimit -a -corepat=$(cat /proc/sys/kernel/core_pattern) -if [[ ${corepat} =~ ^[:space:]*\| ]] ; then - echo "WARNING: core pattern is piping - can't search for core files" - look_core=false -else - look_core=true - coredir=$(dirname ${corepat}) -fi -if [[ ${look_core} == true ]]; then - before=$(ls -A1 ${coredir}) -fi - -set +e -echo "Running tests for ${APP_PATH}" -if [[ ${MANUAL_TESTS:-} == true && ${PARALLEL_TESTS:-} != true ]]; then - for t in "${manual_tests[@]}" ; do - ${APP_PATH} --unittest=${t} - TEST_STAT=$? - if [[ $TEST_STAT -ne 0 ]] ; then - break - fi - done -else - ${APP_PATH} ${APP_ARGS} - TEST_STAT=$? 
-fi -set -e - -if [[ ${look_core} == true ]]; then - after=$(ls -A1 ${coredir}) - oIFS="${IFS}" - IFS=$'\n\r' - found_core=false - for l in $(diff -w --suppress-common-lines <(echo "$before") <(echo "$after")) ; do - if [[ "$l" =~ ^[[:space:]]*\>[[:space:]]*(.+)$ ]] ; then - corefile="${BASH_REMATCH[1]}" - echo "FOUND core dump file at '${coredir}/${corefile}'" - gdb_output=$(/bin/mktemp /tmp/gdb_output_XXXXXXXXXX.txt) - found_core=true - gdb \ - -ex "set height 0" \ - -ex "set logging file ${gdb_output}" \ - -ex "set logging on" \ - -ex "print 'ripple::BuildInfo::versionString'" \ - -ex "thread apply all backtrace full" \ - -ex "info inferiors" \ - -ex quit \ - "$APP_PATH" \ - "${coredir}/${corefile}" &> /dev/null - - echo -e "CORE INFO: \n\n $(cat ${gdb_output}) \n\n)" - fi - done - IFS="${oIFS}" -fi - -if [[ ${found_core} == true ]]; then - exit -1 -else - exit $TEST_STAT -fi - diff --git a/bin/ci/ubuntu/build-in-docker.sh b/bin/ci/ubuntu/build-in-docker.sh deleted file mode 100755 index feeabb1189a..00000000000 --- a/bin/ci/ubuntu/build-in-docker.sh +++ /dev/null @@ -1,36 +0,0 @@ -#!/usr/bin/env bash -# run our build script in a docker container -# using travis-ci hosts -set -eux - -function join_by { local IFS="$1"; shift; echo "$*"; } - -set +x -echo "VERBOSE_BUILD=true" > /tmp/co.env -matchers=( - 'TRAVIS.*' 'CI' 'CC' 'CXX' - 'BUILD_TYPE' 'TARGET' 'MAX_TIME' - 'CODECOV.+' 'CMAKE.*' '.+_TESTS' - '.+_OPTIONS' 'NINJA.*' 'NUM_.+' - 'NIH_.+' 'BOOST.*' '.*CCACHE.*') - -matchstring=$(join_by '|' "${matchers[@]}") -echo "MATCHSTRING IS:: $matchstring" -env | grep -E "^(${matchstring})=" >> /tmp/co.env -set -x -# need to eliminate TRAVIS_CMD...don't want to pass it to the container -cat /tmp/co.env | grep -v TRAVIS_CMD > /tmp/co.env.2 -mv /tmp/co.env.2 /tmp/co.env -cat /tmp/co.env -mkdir -p -m 0777 ${TRAVIS_BUILD_DIR}/cores -echo "${TRAVIS_BUILD_DIR}/cores/%e.%p" | sudo tee /proc/sys/kernel/core_pattern -docker run \ - -t --env-file /tmp/co.env \ - -v ${TRAVIS_HOME}:${TRAVIS_HOME} \ - -w ${TRAVIS_BUILD_DIR} \ - --cap-add SYS_PTRACE \ - --ulimit "core=-1" \ - $DOCKER_IMAGE \ - /bin/bash -c 'if [[ $CC =~ ([[:alpha:]]+)-([[:digit:].]+) ]] ; then sudo update-alternatives --set ${BASH_REMATCH[1]} /usr/bin/$CC; fi; bin/ci/ubuntu/build-and-test.sh' - - diff --git a/bin/ci/ubuntu/travis-cache-start.sh b/bin/ci/ubuntu/travis-cache-start.sh deleted file mode 100755 index 6811acb9043..00000000000 --- a/bin/ci/ubuntu/travis-cache-start.sh +++ /dev/null @@ -1,44 +0,0 @@ -#!/usr/bin/env bash -# some cached files create churn, so save them here for -# later restoration before packing the cache -set -eux -clean_cache="travis_clean_cache" -if [[ ! 
( "${TRAVIS_JOB_NAME}" =~ "windows" || \ - "${TRAVIS_JOB_NAME}" =~ "prereq-keep" ) ]] && \ - ( [[ "${TRAVIS_COMMIT_MESSAGE}" =~ "${clean_cache}" ]] || \ - ( [[ -v TRAVIS_PULL_REQUEST_SHA && \ - "${TRAVIS_PULL_REQUEST_SHA}" != "" ]] && \ - git log -1 "${TRAVIS_PULL_REQUEST_SHA}" | grep -cq "${clean_cache}" - - ) - ) -then - find ${TRAVIS_HOME}/_cache -maxdepth 2 -type d - rm -rf ${TRAVIS_HOME}/_cache - mkdir -p ${TRAVIS_HOME}/_cache -fi - -pushd ${TRAVIS_HOME} -if [ -f cache_ignore.tar ] ; then - rm -f cache_ignore.tar -fi - -if [ -d _cache/nih_c ] ; then - find _cache/nih_c -name "build.ninja" | tar rf cache_ignore.tar --files-from - - find _cache/nih_c -name ".ninja_deps" | tar rf cache_ignore.tar --files-from - - find _cache/nih_c -name ".ninja_log" | tar rf cache_ignore.tar --files-from - - find _cache/nih_c -name "*.log" | tar rf cache_ignore.tar --files-from - - find _cache/nih_c -name "*.tlog" | tar rf cache_ignore.tar --files-from - - # show .a files in the cache, for sanity checking - find _cache/nih_c -name "*.a" -ls -fi - -if [ -d _cache/ccache ] ; then - find _cache/ccache -name "stats" | tar rf cache_ignore.tar --files-from - -fi - -if [ -f cache_ignore.tar ] ; then - tar -tf cache_ignore.tar -fi -popd - - diff --git a/cfg/initdb.sh b/cfg/initdb.sh deleted file mode 100755 index 9ca02ed5632..00000000000 --- a/cfg/initdb.sh +++ /dev/null @@ -1,10 +0,0 @@ -#!/bin/sh - -# Execute this script with a running Postgres server on the current host. -# It should work with the most generic installation of Postgres, -# and is necessary for rippled to store data in Postgres. - -# usage: sudo -u postgres ./initdb.sh -psql -c "CREATE USER rippled" -psql -c "CREATE DATABASE rippled WITH OWNER = rippled" - diff --git a/cfg/rippled-example.cfg b/cfg/rippled-example.cfg index b283900d013..673ab3213e8 100644 --- a/cfg/rippled-example.cfg +++ b/cfg/rippled-example.cfg @@ -13,7 +13,7 @@ # # 4. HTTPS Client # -# 5. Reporting Mode +# 5. # # 6. Database # @@ -883,119 +883,6 @@ # #------------------------------------------------------------------------------- # -# 5. Reporting Mode -# -#------------ -# -# rippled has an optional operating mode called Reporting Mode. In Reporting -# Mode, rippled does not connect to the peer to peer network. Instead, rippled -# will continuously extract data from one or more rippled servers that are -# connected to the peer to peer network (referred to as an ETL source). -# Reporting mode servers will forward RPC requests that require access to the -# peer to peer network (submit, fee, etc) to an ETL source. -# -# [reporting] Settings for Reporting Mode. If and only if this section is -# present, rippled will start in reporting mode. This section -# contains a list of ETL source names, and key-value pairs. The -# ETL source names each correspond to a configuration file -# section; the names must match exactly. The key-value pairs are -# optional. -# -# -# [] -# -# A series of key/value pairs that specify an ETL source. -# -# source_ip = -# -# Required. IP address of the ETL source. Can also be a DNS record. -# -# source_ws_port = -# -# Required. Port on which ETL source is accepting unencrypted websocket -# connections. -# -# source_grpc_port = -# -# Required for ETL. Port on which ETL source is accepting gRPC requests. -# If this option is ommitted, this ETL source cannot actually be used for -# ETL; the Reporting Mode server can still forward RPCs to this ETL -# source, but cannot extract data from this ETL source. 
-# -# -# Key-value pairs (all optional): -# -# read_only Valid values: 0, 1. Default is 0. If set to 1, the server -# will start in strict read-only mode, and will not perform -# ETL. The server will still handle RPC requests, and will -# still forward RPC requests that require access to the p2p -# network. -# -# start_sequence -# Sequence of first ledger to extract if the database is empty. -# ETL extracts ledgers in order. If this setting is absent and -# the database is empty, ETL will start with the next ledger -# validated by the network. If this setting is present and the -# database is not empty, an exception is thrown. -# -# num_markers Degree of parallelism used during the initial ledger -# download. Only used if the database is empty. Valid values -# are 1-256. A higher degree of parallelism results in a -# faster download, but puts more load on the ETL source. -# Default is 2. -# -# Example: -# -# [reporting] -# etl_source1 -# etl_source2 -# read_only=0 -# start_sequence=32570 -# num_markers=8 -# -# [etl_source1] -# source_ip=1.2.3.4 -# source_ws_port=6005 -# source_grpc_port=50051 -# -# [etl_source2] -# source_ip=5.6.7.8 -# source_ws_port=6005 -# source_grpc_port=50051 -# -# Minimal Example: -# -# [reporting] -# etl_source1 -# -# [etl_source1] -# source_ip=1.2.3.4 -# source_ws_port=6005 -# source_grpc_port=50051 -# -# -# Notes: -# -# Reporting Mode requires Postgres (instead of SQLite). The Postgres -# connection info is specified under the [ledger_tx_tables] config section; -# see the Database section for further documentation. -# -# Each ETL source specified must have gRPC enabled (by adding a [port_grpc] -# section to the config). It is recommended to add a secure_gateway entry to -# the gRPC section, in order to bypass the server's rate limiting. -# This section needs to be added to the config of the ETL source, not -# the config of the reporting node. In the example below, the -# reporting server is running at 127.0.0.1. Multiple IPs can be -# specified in secure_gateway via a comma separated list. -# -# [port_grpc] -# ip = 0.0.0.0 -# port = 50051 -# secure_gateway = 127.0.0.1 -# -# -#------------------------------------------------------------------------------- -# # 6. Database # #------------ @@ -1003,13 +890,7 @@ # rippled creates 4 SQLite database to hold bookkeeping information # about transactions, local credentials, and various other things. # It also creates the NodeDB, which holds all the objects that -# make up the current and historical ledgers. In Reporting Mode, rippled -# uses a Postgres database instead of SQLite. -# -# The simplest way to work with Postgres is to install it locally. -# When it is running, execute the initdb.sh script in the current -# directory as: sudo -u postgres ./initdb.sh -# This will create the rippled user and an empty database of the same name. +# make up the current and historical ledgers. # # The size of the NodeDB grows in proportion to the amount of new data and the # amount of historical data (a configurable setting) so the performance of the @@ -1051,33 +932,10 @@ # keeping full history is not advised, and using online delete is # recommended. # -# type = Cassandra -# -# Apache Cassandra is an open-source, distributed key-value store - see -# https://cassandra.apache.org/ for more details. -# -# Cassandra is an alternative backend to be used only with Reporting Mode. -# See the Reporting Mode section for more details about Reporting Mode. 
-# # Required keys for NuDB and RocksDB: # # path Location to store the database # -# Required keys for Cassandra: -# -# contact_points IP of a node in the Cassandra cluster -# -# port CQL Native Transport Port -# -# secure_connect_bundle -# Absolute path to a secure connect bundle. When using -# a secure connect bundle, contact_points and port are -# not required. -# -# keyspace Name of Cassandra keyspace to use -# -# table_name Name of table in above keyspace to use -# # Optional keys # # cache_size Size of cache for database records. Default is 16384. @@ -1153,25 +1011,6 @@ # checking until healthy. # Default is 5. # -# Optional keys for Cassandra: -# -# username Username to use if Cassandra cluster requires -# authentication -# -# password Password to use if Cassandra cluster requires -# authentication -# -# max_requests_outstanding -# Limits the maximum number of concurrent database -# writes. Default is 10 million. For slower clusters, -# large numbers of concurrent writes can overload the -# cluster. Setting this option can help eliminate -# write timeouts and other write errors due to the -# cluster being overloaded. -# io_threads -# Set the number of IO threads used by the -# Cassandra driver. Defaults to 4. -# # Notes: # The 'node_db' entry configures the primary, persistent storage. # @@ -1267,42 +1106,6 @@ # This setting may not be combined with the # "safety_level" setting. # -# [ledger_tx_tables] (optional) -# -# conninfo Info for connecting to Postgres. Format is -# postgres://[username]:[password]@[ip]/[database]. -# The database and user must already exist. If this -# section is missing and rippled is running in -# Reporting Mode, rippled will connect as the -# user running rippled to a database with the -# same name. On Linux and Mac OS X, the connection -# will take place using the server's UNIX domain -# socket. On Windows, through the localhost IP -# address. Default is empty. -# -# use_tx_tables Valid values: 1, 0 -# The default is 1 (true). Determines whether to use -# the SQLite transaction database. If set to 0, -# rippled will not write to the transaction database, -# and will reject tx, account_tx and tx_history RPCs. -# In Reporting Mode, this setting is ignored. -# -# max_connections Valid values: any positive integer up to 64 bit -# storage length. This configures the maximum -# number of concurrent connections to postgres. -# Default is the maximum possible value to -# fit in a 64 bit integer. -# -# timeout Number of seconds after which idle postgres -# connections are discconnected. If set to 0, -# connections never timeout. Default is 600. -# -# -# remember_ip Value values: 1, 0 -# Default is 1 (true). Whether to cache host and -# port connection settings. -# -# #------------------------------------------------------------------------------- # # 7. Diagnostics @@ -1566,6 +1369,12 @@ # Admin level API commands over Secure Websockets, when originating # from the same machine (via the loopback adapter at 127.0.0.1). # +# "grpc" +# +# ETL commands for Clio. We recommend setting secure_gateway +# in this section to a comma-separated list of the addresses +# of your Clio servers, in order to bypass rippled's rate limiting. +# # This port is commented out but can be enabled by removing # the '#' from each corresponding line including the entry under [server] # @@ -1648,15 +1457,6 @@ advisory_delete=0 /var/lib/rippled/db -# To use Postgres, uncomment this section and fill in the appropriate connection -# info. Postgres can only be used in Reporting Mode. 
-# To disable writing to the transaction database, uncomment this section, and -# set use_tx_tables=0 -# [ledger_tx_tables] -# conninfo = postgres://[username]:[password]@[ip]/[database] -# use_tx_tables=1 - - # This needs to be an absolute directory reference, not a relative one. # Modify this value as required. [debug_logfile] @@ -1684,15 +1484,3 @@ validators.txt # set to ssl_verify to 0. [ssl_verify] 1 - - -# To run in Reporting Mode, uncomment this section and fill in the appropriate -# connection info for one or more ETL sources. -# [reporting] -# etl_source -# -# -# [etl_source] -# source_grpc_port=50051 -# source_ws_port=6005 -# source_ip=127.0.0.1 diff --git a/cfg/rippled-reporting.cfg b/cfg/rippled-reporting.cfg deleted file mode 100644 index 9776ef5ee45..00000000000 --- a/cfg/rippled-reporting.cfg +++ /dev/null @@ -1,1638 +0,0 @@ -#------------------------------------------------------------------------------- -# -# -#------------------------------------------------------------------------------- -# -# Contents -# -# 1. Server -# -# 2. Peer Protocol -# -# 3. Ripple Protocol -# -# 4. HTTPS Client -# -# 5. Reporting Mode -# -# 6. Database -# -# 7. Diagnostics -# -# 8. Voting -# -# 9. Misc Settings -# -# 10. Example Settings -# -#------------------------------------------------------------------------------- -# -# Purpose -# -# This file documents and provides examples of all rippled server process -# configuration options. When the rippled server instance is launched, it -# looks for a file with the following name: -# -# rippled.cfg -# -# For more information on where the rippled server instance searches for the -# file, visit: -# -# https://xrpl.org/commandline-usage.html#generic-options -# -# This file should be named rippled.cfg. This file is UTF-8 with DOS, UNIX, -# or Mac style end of lines. Blank lines and lines beginning with '#' are -# ignored. Undefined sections are reserved. No escapes are currently defined. -# -# Notation -# -# In this document a simple BNF notation is used. Angle brackets denote -# required elements, square brackets denote optional elements, and single -# quotes indicate string literals. A vertical bar separating 1 or more -# elements is a logical "or"; any one of the elements may be chosen. -# Parentheses are notational only, and used to group elements; they are not -# part of the syntax unless they appear in quotes. White space may always -# appear between elements, it has no effect on values. -# -# A required identifier -# '=' The equals sign character -# | Logical "or" -# ( ) Used for grouping -# -# -# An identifier is a string of upper or lower case letters, digits, or -# underscores subject to the requirement that the first character of an -# identifier must be a letter. Identifiers are not case sensitive (but -# values may be). -# -# Some configuration sections contain key/value pairs. A line containing -# a key/value pair has this syntax: -# -# '=' -# -# Depending on the section and key, different value types are possible: -# -# A signed integer -# An unsigned integer -# A boolean. 1 = true/yes/on, 0 = false/no/off. -# -# Consult the documentation on the key in question to determine the possible -# value types. -# -# -# -#------------------------------------------------------------------------------- -# -# 1. Server -# -#---------- -# -# -# -# rippled offers various server protocols to clients making inbound -# connections. 
The listening ports rippled uses are "universal" ports
-# which may be configured to handshake in one or more of the available
-# supported protocols. These universal ports simplify administration:
-# A single open port can be used for multiple protocols.
-#
-# NOTE    At least one server port must be defined in order
-#         to accept incoming network connections.
-#
-#
-# [server]
-#
-#    A list of port names and key/value pairs. A port name must start with a
-#    letter and contain only letters and numbers. The name is not case-sensitive.
-#    For each name in this list, rippled will look for a configuration file
-#    section with the same name and use it to create a listening port. The
-#    name is informational only; the choice of name does not affect the function
-#    of the listening port.
-#
-#    Key/value pairs specified in this section are optional, and apply to all
-#    listening ports unless the port overrides the value in its section. They
-#    may be considered default values.
-#
-#    Suggestion:
-#
-#    To avoid a conflict with port names and future configuration sections,
-#    we recommend prepending "port_" to the port name. This prefix is not
-#    required, but suggested.
-#
-#    This example defines two ports with different port numbers and settings:
-#
-#    [server]
-#    port_public
-#    port_private
-#    port = 80
-#
-#    [port_public]
-#    ip = 0.0.0.0
-#    port = 443
-#    protocol = peer,https
-#
-#    [port_private]
-#    ip = 127.0.0.1
-#    protocol = http
-#
-#    When rippled is used as a command line client (for example, issuing a
-#    server stop command), the first port advertising the http or https
-#    protocol will be used to make the connection.
-#
-#
-#
-# [<name>]
-#
-#    A series of key/value pairs that define the settings for the port with
-#    the corresponding name. These keys are possible:
-#
-#    ip = <IP-address>
-#
-#       Required. Determines the IP address of the network interface to bind
-#       to. To bind to all available IPv4 interfaces, use 0.0.0.0
-#       To bind to all IPv4 and IPv6 interfaces, use ::
-#
-#       NOTE    if the ip value is ::, then any incoming IPv4 connections will
-#               be made as mapped IPv4 addresses.
-#
-#    port = <number>
-#
-#       Required. Sets the port number to use for this port.
-#
-#    protocol = [ http, https, peer ]
-#
-#       Required. A comma-separated list of protocols to support:
-#
-#       http        JSON-RPC over HTTP
-#       https       JSON-RPC over HTTPS
-#       ws          Websockets
-#       wss         Secure Websockets
-#       peer        Peer Protocol
-#
-#       Restrictions:
-#
-#       Only one port may be configured to support the peer protocol.
-#       A port cannot have websocket and non-websocket protocols at the
-#       same time. It is possible to have both Websockets and Secure Websockets
-#       together in one port.
-#
-#       NOTE    If no ports support the peer protocol, rippled cannot
-#               receive incoming peer connections or become a superpeer.
-#
-#    limit = <number>
-#
-#       Optional. An integer value that will limit the number of connected
-#       clients that the port will accept. Once the limit is reached, new
-#       connections will be refused until other clients disconnect.
-#       Omit or set to 0 to allow unlimited numbers of clients.
-#
-#    user = <text>
-#    password = <text>
-#
-#       When set, these credentials will be required on HTTP/S requests.
-#       The credentials must be provided using HTTP's Basic Authentication
-#       headers. If either or both fields are empty, then no credentials are
-#       required. IP address restrictions, if any, will be checked in addition
-#       to the credentials specified here.
-#
-#       When acting in the client role, rippled will supply these credentials
-#       using HTTP's Basic Authentication headers when making outbound HTTP/S
-#       requests.
-#
-#    admin = [ IP, IP, IP, ... ]
-#
-#       A comma-separated list of IP addresses.
-#
-#       When set, grants administrative command access to the specified IP
-#       addresses. These commands may be issued over http, https, ws, or wss
-#       if configured on the port. If not provided, the default is to not allow
-#       administrative commands.
-#
-#       NOTE    A common configuration value for the admin field is "localhost".
-#               If you are listening on all IPv4/IPv6 addresses by specifying
-#               ip = :: then you can use admin = ::ffff:127.0.0.1,::1 to allow
-#               administrative access from both IPv4 and IPv6 localhost
-#               connections.
-#
-#       *SECURITY WARNING*
-#       0.0.0.0 or :: may be used to allow access from any IP address. It must
-#       be the only address specified and cannot be combined with other IPs.
-#       Use of this address can compromise server security, please consider its
-#       use carefully.
-#
-#    admin_user = <text>
-#    admin_password = <text>
-#
-#       When set, clients must provide these credentials in the submitted
-#       JSON for any administrative command requests submitted to the HTTP/S,
-#       WS, or WSS protocol interfaces. If administrative commands are
-#       disabled for a port, these credentials have no effect.
-#
-#       When acting in the client role, rippled will supply these credentials
-#       in the submitted JSON for any administrative command requests when
-#       invoking JSON-RPC commands on remote servers.
-#
-#    secure_gateway = [ IP, IP, IP, ... ]
-#
-#       A comma-separated list of IP addresses.
-#
-#       When set, allows the specified IP addresses to pass HTTP headers
-#       containing username and remote IP address for each session. If a
-#       non-empty username is passed in this way, then resource controls,
-#       such as those that often result in "tooBusy" errors, will be lifted.
-#       However, administrative RPC commands such as "stop" will not be allowed.
-#       The HTTP headers that secure_gateway hosts can set are X-User and
-#       X-Forwarded-For. Only the X-User header affects resource controls.
-#       However, both header values are logged to help identify user activity.
-#       If no X-User header is passed, or if its value is empty, then
-#       resource controls will default to those for non-administrative users.
-#
-#       The secure_gateway IP addresses are intended to represent
-#       proxies. Since rippled trusts these hosts, they must be
-#       responsible for properly authenticating the remote user.
-#
-#       The same IP address cannot be used in both "admin" and "secure_gateway"
-#       lists for the same port. In this case, rippled will abort with an error
-#       message to the console shortly after startup.
-#
-#    ssl_key = <filename>
-#    ssl_cert = <filename>
-#    ssl_chain = <filename>
-#
-#       Use the specified files when configuring SSL on the port.
-#
-#       NOTE    If no files are specified and secure protocols are selected,
-#               rippled will generate an internal self-signed certificate.
-#
-#       The files have these meanings:
-#
-#       ssl_key
-#
-#          Specifies the filename holding the SSL key in PEM format.
-#
-#       ssl_cert
-#
-#          Specifies the path to the SSL certificate file in PEM format.
-#          This is not needed if the chain includes it.
-#
-#       ssl_chain
-#
-#          If you need a certificate chain, specify the path to the
-#          certificate chain here. The chain may include the end certificate.
-#
-#    ssl_ciphers = <cipherlist>
-#
-#       Control the ciphers which the server will support over SSL on the port,
-#       specified using the OpenSSL "cipher list format".
-# -# NOTE If unspecified, rippled will automatically configure a modern -# cipher suite. This default suite should be widely supported. -# -# You should not modify this string unless you have a specific -# reason and cryptographic expertise. Incorrect modification may -# keep rippled from connecting to other instances of rippled or -# prevent RPC and WebSocket clients from connecting. -# -# send_queue_limit = [1..65535] -# -# A Websocket will disconnect when its send queue exceeds this limit. -# The default is 100. A larger value may help with erratic disconnects but -# may adversely affect server performance. -# -# WebSocket permessage-deflate extension options -# -# These settings configure the optional permessage-deflate extension -# options and may appear on any port configuration entry. They are meaningful -# only to ports which have enabled a WebSocket protocol. -# -# permessage_deflate = -# -# Determines if permessage_deflate extension negotiations are enabled. -# When enabled, clients may request the extension and the server will -# offer the enabled extension in response. -# -# client_max_window_bits = [9..15] -# server_max_window_bits = [9..15] -# client_no_context_takeover = -# server_no_context_takeover = -# -# These optional settings control options related to the permessage-deflate -# extension negotiation. For precise definitions of these fields please see -# the RFC 7692, "Compression Extensions for WebSocket": -# https://tools.ietf.org/html/rfc7692 -# -# compress_level = [0..9] -# -# When set, determines the amount of compression attempted, where 0 is -# the least amount and 9 is the most amount. Higher levels require more -# CPU resources. Levels 1 through 3 use a fast compression algorithm, -# while levels 4 through 9 use a more compact algorithm which uses more -# CPU resources. If unspecified, a default of 3 is used. -# -# memory_level = [1..9] -# -# When set, determines the relative amount of memory used to hold -# intermediate compression data. Higher numbers can give better compression -# ratios at the cost of higher memory and CPU resources. -# -# [rpc_startup] -# -# Specify a list of RPC commands to run at startup. -# -# Examples: -# { "command" : "server_info" } -# { "command" : "log_level", "partition" : "ripplecalc", "severity" : "trace" } -# -# -# -# [websocket_ping_frequency] -# -# -# -# The amount of time to wait in seconds, before sending a websocket 'ping' -# message. Ping messages are used to determine if the remote end of the -# connection is no longer available. -# -# -# [server_domain] -# -# domain name -# -# The domain under which a TOML file applicable to this server can be -# found. A server may lie about its domain so the TOML should contain -# a reference to this server by pubkey in the [nodes] array. -# -# -#------------------------------------------------------------------------------- -# -# 2. Peer Protocol -# -#----------------- -# -# These settings control security and access attributes of the Peer to Peer -# server section of the rippled process. Peer Protocol implements the -# Ripple Payment protocol. It is over peer connections that transactions -# and validations are passed from to machine to machine, to determine the -# contents of validated ledgers. -# -# -# -# [ips] -# -# List of hostnames or ips where the Ripple protocol is served. A default -# starter list is included in the code and used if no other hostnames are -# available. -# -# One address or domain name per line is allowed. 
A port may be
-# specified after adding a space to the address. The ordering of entries
-# does not generally matter.
-#
-#    The default list of entries is:
-#     - r.ripple.com 51235
-#     - sahyadri.isrdc.in 51235
-#
-#    Examples:
-#
-#    [ips]
-#    192.168.0.1
-#    192.168.0.1 2459
-#    r.ripple.com 51235
-#
-#
-# [ips_fixed]
-#
-#    List of IP addresses or hostnames to which rippled should always attempt to
-#    maintain peer connections. This is useful for manually forming private
-#    networks, for example to configure a validation server that connects to the
-#    Ripple network through a public-facing server, or for building a set
-#    of cluster peers.
-#
-#    One address or domain name per line is allowed. A port must be specified
-#    after adding a space to the address.
-#
-#
-#
-# [peer_private]
-#
-#    0 or 1.
-#
-#    0: Request peers to broadcast your address. Normal outbound peer connections [default]
-#    1: Request peers to not broadcast your address. Only connect to configured peers.
-#
-#
-#
-# [peers_max]
-#
-#    The largest number of desired peer connections (incoming or outgoing).
-#    Cluster and fixed peers do not count towards this total. There are
-#    implementation-defined lower limits imposed on this value for security
-#    purposes.
-#
-#
-#
-# [node_seed]
-#
-#    This is used for clustering. To force a particular node seed or key, the
-#    key can be set here. The format is the same as the validation_seed field.
-#    To obtain a validation seed, use the validation_create command.
-#
-#    Examples:  RASH BUSH MILK LOOK BAD BRIM AVID GAFF BAIT ROT POD LOVE
-#               shfArahZT9Q9ckTf3s1psJ7C7qzVN
-#
-#
-#
-# [cluster_nodes]
-#
-#    To extend full trust to other nodes, place their node public keys here.
-#    Generally, you should only do this for nodes under common administration.
-#    Node public keys start with an 'n'. To give a node a name for identification
-#    place a space after the public key and then the name.
-#
-#
-#
-# [max_transactions]
-#
-#    Configure the maximum number of transactions to have in the job queue.
-#
-#    Must be a number between 100 and 1000, defaults to 250.
-#
-#
-# [overlay]
-#
-#    Controls settings related to the peer to peer overlay.
-#
-#    A set of key/value pair parameters to configure the overlay.
-#
-#    public_ip = <IP-address>
-#
-#       If the server has a known, fixed public IPv4 address,
-#       specify that IP address here in dotted decimal notation.
-#       Peers will use this information to reject attempts to proxy
-#       connections to or from this server.
-#
-#    ip_limit = <number>
-#
-#       The maximum number of incoming peer connections allowed by a single
-#       IP that isn't classified as "private" in RFC1918. The implementation
-#       imposes some hard and soft upper limits on this value to prevent a
-#       single host from consuming all inbound slots. If the value is not
-#       present the server will autoconfigure an appropriate limit.
-#
-#    max_unknown_time = <number>
-#
-#       The maximum amount of time, in seconds, that an outbound connection
-#       is allowed to stay in the "unknown" tracking state. This option can
-#       take any value between 300 and 1800 seconds, inclusive. If the option
-#       is not present the server will autoconfigure an appropriate limit.
-# -# The current default (which is subject to change) is 300 seconds. -# -# -# [transaction_queue] EXPERIMENTAL -# -# This section is EXPERIMENTAL, and should not be -# present for production configuration settings. -# -# A set of key/value pair parameters to tune the performance of the -# transaction queue. -# -# ledgers_in_queue = -# -# The queue will be limited to this of average ledgers' -# worth of transactions. If the queue fills up, the transactions -# with the lowest fee levels will be dropped from the queue any -# time a transaction with a higher fee level is added. -# Default: 20. -# -# minimum_queue_size = -# -# The queue will always be able to hold at least this of -# transactions, regardless of recent ledger sizes or the value of -# ledgers_in_queue. Default: 2000. -# -# retry_sequence_percent = -# -# If a client replaces a transaction in the queue (same sequence -# number as a transaction already in the queue), the new -# transaction's fee must be more than percent higher -# than the original transaction's fee, or meet the current open -# ledger fee to be considered. Default: 25. -# -# minimum_escalation_multiplier = -# -# At ledger close time, the median fee level of the transactions -# in that ledger is used as a multiplier in escalation -# calculations of the next ledger. This minimum value ensures that -# the escalation is significant. Default: 500. -# -# minimum_txn_in_ledger = -# -# Minimum number of transactions that must be allowed into the -# ledger at the minimum required fee before the required fee -# escalates. Default: 5. -# -# minimum_txn_in_ledger_standalone = -# -# Like minimum_txn_in_ledger when rippled is running in standalone -# mode. Default: 1000. -# -# target_txn_in_ledger = -# -# Number of transactions allowed into the ledger at the minimum -# required fee that the queue will "work toward" as long as -# consensus stays healthy. The limit will grow quickly until it -# reaches or exceeds this number. After that the limit may still -# change, but will stay above the target. If consensus is not -# healthy, the limit will be clamped to this value or lower. -# Default: 50. -# -# maximum_txn_in_ledger = -# -# (Optional) Maximum number of transactions that will be allowed -# into the ledger at the minimum required fee before the required -# fee escalates. Default: no maximum. -# -# normal_consensus_increase_percent = -# -# (Optional) When the ledger has more transactions than "expected", -# and performance is humming along nicely, the expected ledger size -# is updated to the previous ledger size plus this percentage. -# Default: 20 -# -# slow_consensus_decrease_percent = -# -# (Optional) When consensus takes longer than appropriate, the -# expected ledger size is updated to the minimum of the previous -# ledger size or the "expected" ledger size minus this percentage. -# Default: 50 -# -# maximum_txn_per_account = -# -# Maximum number of transactions that one account can have in the -# queue at any given time. Default: 10. -# -# minimum_last_ledger_buffer = -# -# If a transaction has a LastLedgerSequence, it must be at least -# this much larger than the current open ledger sequence number. -# Default: 2. -# -# zero_basefee_transaction_feelevel = -# -# So we don't deal with infinite fee levels, treat any transaction -# with a 0 base fee (ie. SetRegularKey password recovery) as -# having this fee level. -# Default: 256000. -# -# -#------------------------------------------------------------------------------- -# -# 3. 
Protocol -# -#------------------- -# -# These settings affect the behavior of the server instance with respect -# to protocol level activities such as validating and closing ledgers -# adjusting fees in response to server overloads. -# -# -# -# -# [relay_proposals] -# -# Controls the relaying behavior for proposals received by this server that -# are issued by validators that are not on the server's UNL. -# -# Legal values are: "trusted" and "all". The default is "trusted". -# -# -# [relay_validations] -# -# Controls the relaying behavior for validations received by this server that -# are issued by validators that are not on the server's UNL. -# -# Legal values are: "trusted" and "all". The default is "all". -# -# -# -# -# -# [ledger_history] -# -# The number of past ledgers to acquire on server startup and the minimum to -# maintain while running. -# -# To serve clients, servers need historical ledger data. Servers that don't -# need to serve clients can set this to "none". Servers that want complete -# history can set this to "full". -# -# This must be less than or equal to online_delete (if online_delete is used) -# -# The default is: 256 -# -# -# -# [fetch_depth] -# -# The number of past ledgers to serve to other peers that request historical -# ledger data (or "full" for no limit). -# -# Servers that require low latency and high local performance may wish to -# restrict the historical ledgers they are willing to serve. Setting this -# below 32 can harm network stability as servers require easy access to -# recent history to stay in sync. Values below 128 are not recommended. -# -# The default is: full -# -# -# -# [validation_seed] -# -# To perform validation, this section should contain either a validation seed -# or key. The validation seed is used to generate the validation -# public/private key pair. To obtain a validation seed, use the -# validation_create command. -# -# Examples: RASH BUSH MILK LOOK BAD BRIM AVID GAFF BAIT ROT POD LOVE -# shfArahZT9Q9ckTf3s1psJ7C7qzVN -# -# -# -# [validator_token] -# -# This is an alternative to [validation_seed] that allows rippled to perform -# validation without having to store the validator keys on the network -# connected server. The field should contain a single token in the form of a -# base64-encoded blob. -# An external tool is available for generating validator keys and tokens. -# -# -# -# [validator_key_revocation] -# -# If a validator's secret key has been compromised, a revocation must be -# generated and added to this field. The revocation notifies peers that it is -# no longer safe to trust the revoked key. The field should contain a single -# revocation in the form of a base64-encoded blob. -# An external tool is available for generating and revoking validator keys. -# -# -# -# [validators_file] -# -# Path or name of a file that determines the nodes to always accept as validators. -# -# The contents of the file should include a [validators] and/or -# [validator_list_sites] and [validator_list_keys] entries. -# [validators] should be followed by a list of validation public keys of -# nodes, one per line. -# [validator_list_sites] should be followed by a list of URIs each serving a -# list of recommended validators. -# [validator_list_keys] should be followed by a list of keys belonging to -# trusted validator list publishers. Validator lists fetched from configured -# sites will only be considered if the list is accompanied by a valid -# signature from a trusted publisher key. -# -# Specify the file by its name or path. 
-# Unless an absolute path is specified, it will be considered relative to -# the folder in which the rippled.cfg file is located. -# -# Examples: -# /home/ripple/validators.txt -# C:/home/ripple/validators.txt -# -# Example content: -# [validators] -# n949f75evCHwgyP4fPVgaHqNHxUVN15PsJEZ3B3HnXPcPjcZAoy7 -# n9MD5h24qrQqiyBC8aeqqCWvpiBiYQ3jxSr91uiDvmrkyHRdYLUj -# n9L81uNCaPgtUJfaHh89gmdvXKAmSt5Gdsw2g1iPWaPkAHW5Nm4C -# n9KiYM9CgngLvtRCQHZwgC2gjpdaZcCcbt3VboxiNFcKuwFVujzS -# n9LdgEtkmGB9E2h3K4Vp7iGUaKuq23Zr32ehxiU8FWY7xoxbWTSA -# -# -# -# [path_search] -# When searching for paths, the default search aggressiveness. This can take -# exponentially more resources as the size is increased. -# -# The default is: 7 -# -# [path_search_fast] -# [path_search_max] -# When searching for paths, the minimum and maximum search aggressiveness. -# -# If you do not need pathfinding, you can set path_search_max to zero to -# disable it and avoid some expensive bookkeeping. -# -# The default for 'path_search_fast' is 2. The default for 'path_search_max' is 10. -# -# [path_search_old] -# -# For clients that use the legacy path finding interfaces, the search -# aggressiveness to use. The default is 7. -# -# -# -# [fee_default] -# -# Sets the base cost of a transaction in drops. Used when the server has -# no other source of fee information, such as signing transactions offline. -# -# -# -# [workers] -# -# Configures the number of threads for processing work submitted by peers -# and clients. If not specified, then the value is automatically set to the -# number of processor threads plus 2 for networked nodes. Nodes running in -# stand alone mode default to 1 worker. -# -# -# -# [network_id] -# -# Specify the network which this server is configured to connect to and -# track. If set, the server will not establish connections with servers -# that are explicitly configured to track another network. -# -# Network identifiers are usually unsigned integers in the range 0 to -# 4294967295 inclusive. The server also maps the following well-known -# names to the corresponding numerical identifier: -# -# main -> 0 -# testnet -> 1 -# devnet -> 2 -# -# If this value is not specified the server is not explicitly configured -# to track a particular network. -# -# -# [ledger_replay] -# -# 0 or 1. -# -# 0: Disable the ledger replay feature [default] -# 1: Enable the ledger replay feature. With this feature enabled, when -# acquiring a ledger from the network, a rippled node only downloads -# the ledger header and the transactions instead of the whole ledger. -# And the ledger is built by applying the transactions to the parent -# ledger. -# -#------------------------------------------------------------------------------- -# -# 4. HTTPS Client -# -#---------------- -# -# The rippled server instance uses HTTPS GET requests in a variety of -# circumstances, including but not limited to contacting trusted domains to -# fetch information such as mapping an email address to a Ripple Payment -# Network address. -# -# [ssl_verify] -# -# 0 or 1. -# -# 0. HTTPS client connections will not verify certificates. -# 1. Certificates will be checked for HTTPS client connections. -# -# If not specified, this parameter defaults to 1. -# -# -# -# [ssl_verify_file] -# -# -# -# A file system path leading to the certificate verification file for -# HTTPS client requests. 
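For illustration, a minimal sketch of the two verification settings described here, in the file's own notation (the CA bundle path is hypothetical and varies by platform):

    [ssl_verify]
    1

    [ssl_verify_file]
    /etc/ssl/certs/ca-certificates.crt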
-# -# -# -# [ssl_verify_dir] -# -# -# -# -# A file system path leading to a file or directory containing the root -# certificates that the server will accept for verifying HTTP servers. -# Used only for outbound HTTPS client connections. -# -#------------------------------------------------------------------------------- -# -# 5. Reporting Mode -# -#------------ -# -# rippled has an optional operating mode called Reporting Mode. In Reporting -# Mode, rippled does not connect to the peer to peer network. Instead, rippled -# will continuously extract data from one or more rippled servers that are -# connected to the peer to peer network (referred to as an ETL source). -# Reporting mode servers will forward RPC requests that require access to the -# peer to peer network (submit, fee, etc) to an ETL source. -# -# [reporting] Settings for Reporting Mode. If and only if this section is -# present, rippled will start in reporting mode. This section -# contains a list of ETL source names, and key-value pairs. The -# ETL source names each correspond to a configuration file -# section; the names must match exactly. The key-value pairs are -# optional. -# -# -# [] -# -# A series of key/value pairs that specify an ETL source. -# -# source_ip = -# -# Required. IP address of the ETL source. Can also be a DNS record. -# -# source_ws_port = -# -# Required. Port on which ETL source is accepting unencrypted websocket -# connections. -# -# source_grpc_port = -# -# Required for ETL. Port on which ETL source is accepting gRPC requests. -# If this option is ommitted, this ETL source cannot actually be used for -# ETL; the Reporting Mode server can still forward RPCs to this ETL -# source, but cannot extract data from this ETL source. -# -# -# Key-value pairs (all optional): -# -# read_only Valid values: 0, 1. Default is 0. If set to 1, the server -# will start in strict read-only mode, and will not perform -# ETL. The server will still handle RPC requests, and will -# still forward RPC requests that require access to the p2p -# network. -# -# start_sequence -# Sequence of first ledger to extract if the database is empty. -# ETL extracts ledgers in order. If this setting is absent and -# the database is empty, ETL will start with the next ledger -# validated by the network. If this setting is present and the -# database is not empty, an exception is thrown. -# -# num_markers Degree of parallelism used during the initial ledger -# download. Only used if the database is empty. Valid values -# are 1-256. A higher degree of parallelism results in a -# faster download, but puts more load on the ETL source. -# Default is 2. -# -# Example: -# -# [reporting] -# etl_source1 -# etl_source2 -# read_only=0 -# start_sequence=32570 -# num_markers=8 -# -# [etl_source1] -# source_ip=1.2.3.4 -# source_ws_port=6005 -# source_grpc_port=50051 -# -# [etl_source2] -# source_ip=5.6.7.8 -# source_ws_port=6005 -# source_grpc_port=50051 -# -# Minimal Example: -# -# [reporting] -# etl_source1 -# -# [etl_source1] -# source_ip=1.2.3.4 -# source_ws_port=6005 -# source_grpc_port=50051 -# -# -# Notes: -# -# Reporting Mode requires Postgres (instead of SQLite). The Postgres -# connection info is specified under the [ledger_tx_tables] config section; -# see the Database section for further documentation. -# -# Each ETL source specified must have gRPC enabled (by adding a [port_grpc] -# section to the config). It is recommended to add a secure_gateway entry to -# the gRPC section, in order to bypass the server's rate limiting. 
-# This section needs to be added to the config of the ETL source, not -# the config of the reporting node. In the example below, the -# reporting server is running at 127.0.0.1. Multiple IPs can be -# specified in secure_gateway via a comma separated list. -# -# [port_grpc] -# ip = 0.0.0.0 -# port = 50051 -# secure_gateway = 127.0.0.1 -# -# -#------------------------------------------------------------------------------- -# -# 6. Database -# -#------------ -# -# rippled creates 4 SQLite database to hold bookkeeping information -# about transactions, local credentials, and various other things. -# It also creates the NodeDB, which holds all the objects that -# make up the current and historical ledgers. In Reporting Mode, rippled -# uses a Postgres database instead of SQLite. -# -# The simplest way to work with Postgres is to install it locally. -# When it is running, execute the initdb.sh script in the current -# directory as: sudo -u postgres ./initdb.sh -# This will create the rippled user and an empty database of the same name. -# -# The size of the NodeDB grows in proportion to the amount of new data and the -# amount of historical data (a configurable setting) so the performance of the -# underlying storage media where the NodeDB is placed can significantly affect -# the performance of the server. -# -# Partial pathnames will be considered relative to the location of -# the rippled.cfg file. -# -# [node_db] Settings for the Node Database (required) -# -# Format (without spaces): -# One or more lines of case-insensitive key / value pairs: -# '=' -# ... -# -# Example: -# type=nudb -# path=db/nudb -# -# The "type" field must be present and controls the choice of backend: -# -# type = NuDB -# -# NuDB is a high-performance database written by Ripple Labs and optimized -# for rippled and solid-state drives. -# -# NuDB maintains its high speed regardless of the amount of history -# stored. Online delete may be selected, but is not required. NuDB is -# available on all platforms that rippled runs on. -# -# type = RocksDB -# -# RocksDB is an open-source, general-purpose key/value store - see -# http://rocksdb.org/ for more details. -# -# RocksDB is an alternative backend for systems that don't use solid-state -# drives. Because RocksDB's performance degrades as it stores more data, -# keeping full history is not advised, and using online delete is -# recommended. -# -# type = Cassandra -# -# Apache Cassandra is an open-source, distributed key-value store - see -# https://cassandra.apache.org/ for more details. -# -# Cassandra is an alternative backend to be used only with Reporting Mode. -# See the Reporting Mode section for more details about Reporting Mode. -# -# Required keys for NuDB and RocksDB: -# -# path Location to store the database -# -# Required keys for Cassandra: -# -# contact_points IP of a node in the Cassandra cluster -# -# port CQL Native Transport Port -# -# secure_connect_bundle -# Absolute path to a secure connect bundle. When using -# a secure connect bundle, contact_points and port are -# not required. -# -# keyspace Name of Cassandra keyspace to use -# -# table_name Name of table in above keyspace to use -# -# Optional keys -# -# cache_size Size of cache for database records. Default is 16384. -# Setting this value to 0 will use the default value. -# -# cache_age Length of time in minutes to keep database records -# cached. Default is 5 minutes. Setting this value to -# 0 will use the default value. 
#   Required keys for Cassandra:
#
#       contact_points      IP of a node in the Cassandra cluster
#
#       port                CQL Native Transport Port
#
#       secure_connect_bundle
#                           Absolute path to a secure connect bundle. When
#                           using a secure connect bundle, contact_points and
#                           port are not required.
#
#       keyspace            Name of Cassandra keyspace to use
#
#       table_name          Name of table in above keyspace to use
#
#   Optional keys:
#
#       cache_size          Size of cache for database records. Default is
#                           16384. Setting this value to 0 will use the
#                           default value.
#
#       cache_age           Length of time in minutes to keep database records
#                           cached. Default is 5 minutes. Setting this value
#                           to 0 will use the default value.
#
#                           Note: if neither cache_size nor cache_age is
#                           specified, the cache for database records will not
#                           be created. If only one of cache_size or cache_age
#                           is specified, the cache will be created using the
#                           default value for the unspecified parameter.
#
#                           Note: the cache will not be created if
#                           online_delete is specified.
#
#   Optional keys for NuDB or RocksDB:
#
#       earliest_seq        The default is 32570 to match the XRP ledger
#                           network's earliest allowed sequence. Alternate
#                           networks may set this value. Minimum value of 1.
#
#       online_delete       Minimum value of 256. Enable automatic purging
#                           of older ledger information. Maintain at least
#                           this number of ledger records online. Must be
#                           greater than or equal to ledger_history.
#
#   These keys modify the behavior of online_delete, and thus are only
#   relevant if online_delete is defined and non-zero:
#
#       advisory_delete     0 for disabled, 1 for enabled. If set, the
#                           administrative RPC call "can_delete" is required
#                           to enable online deletion of ledger records.
#                           Online deletion does not run automatically if
#                           non-zero and the last deletion was on a ledger
#                           greater than the current "can_delete" setting.
#                           Default is 0.
#
#       delete_batch        When automatically purging, SQLite database
#                           records are deleted in batches. This value
#                           controls the maximum size of each batch. Larger
#                           batches keep the databases locked for more time,
#                           which may cause other functions to fall behind,
#                           and thus cause the node to lose sync.
#                           Default is 100.
#
#       back_off_milliseconds
#                           Number of milliseconds to wait between
#                           online_delete batches, to allow other functions
#                           to catch up.
#                           Default is 100.
#
#       age_threshold_seconds
#                           The online delete process will only run if the
#                           latest validated ledger is younger than this
#                           number of seconds.
#                           Default is 60.
#
#       recovery_wait_seconds
#                           The online delete process checks periodically
#                           that rippled is still in sync with the network,
#                           and that the validated ledger is less than
#                           'age_threshold_seconds' old. By default, if it
#                           is not, the online delete process aborts and
#                           tries again later. If 'recovery_wait_seconds'
#                           is set and rippled is out of sync, but likely to
#                           recover quickly, then online delete will wait
#                           this number of seconds for rippled to get back
#                           into sync before it aborts.
#                           Set this value if the node is otherwise staying
#                           in sync, or recovering quickly, but the online
#                           delete process is unable to finish.
#                           Default is unset.
#
#   Optional keys for Cassandra:
#
#       username            Username to use if the Cassandra cluster requires
#                           authentication
#
#       password            Password to use if the Cassandra cluster requires
#                           authentication
#
#       max_requests_outstanding
#                           Limits the maximum number of concurrent database
#                           writes. Default is 10 million. For slower
#                           clusters, large numbers of concurrent writes can
#                           overload the cluster. Setting this option can help
#                           eliminate write timeouts and other write errors
#                           due to the cluster being overloaded.
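#
#   A hypothetical Cassandra backend using the required keys plus
#   authentication (all values below are placeholders; 9042 is the standard
#   CQL native transport port):
#
#       type=Cassandra
#       contact_points=10.0.0.5
#       port=9042
#       keyspace=rippled
#       table_name=objects
#       username=rippled
#       password=changeme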
#   Notes:
#       The 'node_db' entry configures the primary, persistent storage.
#
#       The 'import_db' is used with the '--import' command line option to
#       migrate the specified database into the current database given
#       in the [node_db] section.
#
#   [import_db]         Settings for performing a one-time import (optional)
#   [database_path]     Path to the book-keeping databases.
#
#   The server creates and maintains 4 to 5 bookkeeping SQLite databases in
#   the 'database_path' location. If you omit this configuration setting,
#   the server creates a directory called "db" located in the same place as
#   your rippled.cfg file.
#   Partial pathnames are relative to the location of the rippled executable.
#
#   [sqlite]            Tuning settings for the SQLite databases (optional)
#
#   Format (without spaces):
#       One or more lines of case-insensitive key / value pairs:
#       <key> '=' <value>
#       ...
#
#   Example 1:
#       safety_level=low
#
#   Example 2:
#       journal_mode=off
#       synchronous=off
#
#   WARNING: These settings can have significant effects on data integrity,
#   particularly in systemic failure scenarios. It is strongly recommended
#   that they be left at their defaults unless the server is having
#   performance issues during normal operation or during automatic purging
#   (online_delete) operations. A warning will be logged on startup if
#   'ledger_history' is configured to store more than 10,000,000 ledgers and
#   any of these settings are less safe than the default. This is due to the
#   inordinate amount of time and bandwidth it will take to safely rebuild a
#   corrupted database of that size from other peers.
#
#   Optional keys:
#
#       safety_level        Valid values: high, low
#                           The default is "high", which tunes the SQLite
#                           databases in the most reliable mode, and is
#                           equivalent to:
#                               journal_mode=wal
#                               synchronous=normal
#                               temp_store=file
#                           "low" is equivalent to:
#                               journal_mode=memory
#                               synchronous=off
#                               temp_store=memory
#                           These "low" settings trade speed and reduced I/O
#                           for a higher risk of data loss. See the
#                           individual settings below for more information.
#                           This setting may not be combined with any of the
#                           other tuning settings: "journal_mode",
#                           "synchronous", or "temp_store".
#
#       journal_mode        Valid values: delete, truncate, persist, memory,
#                           wal, off
#                           The default is "wal", which uses a write-ahead
#                           log to implement database transactions.
#                           Alternately, "memory" saves disk I/O, but if
#                           rippled crashes during a transaction, the
#                           database is likely to be corrupted.
#                           See https://www.sqlite.org/pragma.html#pragma_journal_mode
#                           for more details about the available options.
#                           This setting may not be combined with the
#                           "safety_level" setting.
#
#       synchronous         Valid values: off, normal, full, extra
#                           The default is "normal", which works well with
#                           the "wal" journal mode. Alternatively, "off"
#                           allows rippled to continue as soon as data is
#                           passed to the OS, which can significantly
#                           increase speed, but risks data corruption if
#                           the host computer crashes before writing that
#                           data to disk.
#                           See https://www.sqlite.org/pragma.html#pragma_synchronous
#                           for more details about the available options.
#                           This setting may not be combined with the
#                           "safety_level" setting.
#
#       temp_store          Valid values: default, file, memory
#                           The default is "file", which will use files
#                           for temporary database tables and indices.
#                           Alternatively, "memory" may save I/O, but
#                           rippled does not currently use many, if any,
#                           of these temporary objects.
#                           See https://www.sqlite.org/pragma.html#pragma_temp_store
#                           for more details about the available options.
#                           This setting may not be combined with the
#                           "safety_level" setting.
#
#   [ledger_tx_tables]  (optional)
#
#       conninfo            Info for connecting to Postgres. Format is
#                           postgres://[username]:[password]@[ip]/[database].
#                           The database and user must already exist. If this
#                           section is missing and rippled is running in
#                           Reporting Mode, rippled will connect as the
#                           user running rippled to a database with the
#                           same name. On Linux and Mac OS X, the connection
#                           will take place using the server's UNIX domain
#                           socket. On Windows, it uses the localhost IP
#                           address. Default is empty.
#
#       use_tx_tables       Valid values: 1, 0
#                           The default is 1 (true). Determines whether to use
#                           the SQLite transaction database. If set to 0,
#                           rippled will not write to the transaction
#                           database, and will reject tx, account_tx and
#                           tx_history RPCs. In Reporting Mode, this setting
#                           is ignored.
#
#       max_connections     Valid values: any positive integer up to 64 bit
#                           storage length. This configures the maximum
#                           number of concurrent connections to Postgres.
#                           Default is the maximum possible value that fits
#                           in a 64 bit integer.
#
#       timeout             Number of seconds after which idle Postgres
#                           connections are disconnected. If set to 0,
#                           connections never time out. Default is 600.
#
#       remember_ip         Valid values: 1, 0
#                           Default is 1 (true). Whether to cache host and
#                           port connection settings.
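#
#   Example (all connection values below are placeholders):
#
#       [ledger_tx_tables]
#       conninfo = postgres://rippled:changeme@127.0.0.1/rippled
#       use_tx_tables = 1
#       max_connections = 16
#       timeout = 600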
#
#-------------------------------------------------------------------------------
#
# 7. Diagnostics
#
#---------------
#
#   These settings are designed to help server administrators diagnose
#   problems, and obtain detailed information about the activities being
#   performed by the rippled process.
#
#   [debug_logfile]
#
#   Specifies where a debug logfile is kept. By default, no debug log is kept.
#   Unless absolute, the path is relative to the directory containing this
#   file.
#
#   Example: debug.log
#
#   [insight]
#
#   Configuration parameters for the Beast.Insight stats collection module.
#
#   Insight is a module that collects information from the areas of rippled
#   that have instrumentation. The configuration parameters control where the
#   collection metrics are sent. The parameters are expressed as key = value
#   pairs with no white space. The main parameter is the choice of server:
#
#     "server"
#
#       Choice of server to send metrics to. Currently the only choice is
#       "statsd", which sends UDP packets to a StatsD daemon that must be
#       running while rippled is running. More information on StatsD is
#       available here:
#           https://github.com/b/statsd_spec
#
#     When server=statsd, these additional keys are used:
#
#       "address"   The UDP address and port of the listening StatsD server,
#                   in the format n.n.n.n:port.
#
#       "prefix"    A string prepended to each collected metric. This is used
#                   to distinguish between different running instances of
#                   rippled.
#
#     If this section is missing, or the server type is unspecified or
#     unknown, statistics are not collected or reported.
#
#     Example:
#
#       [insight]
#       server=statsd
#       address=192.168.0.95:4201
#       prefix=my_validator
#
#   [perf]
#
#   Configuration of performance logging. If enabled, writes JSON-formatted
#   performance-oriented data periodically to a distinct log file.
#
#     "perf_log"      A string specifying the pathname of the performance log
#                     file. A relative pathname will log relative to the
#                     configuration directory. Required to enable
#                     performance logging.
#
#     "log_interval"  Integer value for the number of seconds between writes
#                     to the performance log. Default 1.
#
#     Example:
#       [perf]
#       perf_log=/var/log/rippled/perf.log
#       log_interval=2
#
#-------------------------------------------------------------------------------
#
# 8. Voting
#
#----------
#
#   The vote settings configure settings for the entire Ripple network.
#   While a single instance of rippled cannot unilaterally enforce
#   network-wide settings, these choices become part of the instance's vote
#   during the consensus process for each voting ledger.
#
#   [voting]
#
#   A set of key/value pair parameters used during voting ledgers.
#
#   reference_fee = <drops>
#
#       The cost of the reference transaction fee, specified in drops.
#       The reference transaction is the simplest form of transaction.
#       It represents an XRP payment between two parties.
#
#       If this parameter is unspecified, rippled will use an internal
#       default. Don't change this without understanding the consequences.
#
#       Example:
#           reference_fee = 10           # 10 drops
#
#   account_reserve = <drops>
#
#       The account reserve requirement is specified in drops. The portion of
#       an account's XRP balance that is at or below the reserve may only be
#       spent on transaction fees, and not transferred out of the account.
#
#       If this parameter is unspecified, rippled will use an internal
#       default. Don't change this without understanding the consequences.
#
#       Example:
#           account_reserve = 10000000   # 10 XRP
#
#   owner_reserve = <drops>
#
#       The owner reserve is the amount of XRP reserved in the account for
#       each ledger item owned by the account. Ledger items an account may
#       own include trust lines, open orders, and tickets.
#
#       If this parameter is unspecified, rippled will use an internal
#       default. Don't change this without understanding the consequences.
#
#       Example:
#           owner_reserve = 2000000      # 2 XRP
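#
#   Putting the individual examples above together, a complete [voting]
#   stanza would look like:
#
#       [voting]
#       reference_fee = 10           # 10 drops
#       account_reserve = 10000000   # 10 XRP
#       owner_reserve = 2000000      # 2 XRP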
#
#-------------------------------------------------------------------------------
#
# 9. Misc Settings
#
#-----------------
#
#   [node_size]
#
#   Tunes the server based on the expected load and available memory. Legal
#   sizes are "tiny", "small", "medium", "large", and "huge". We recommend
#   you start at the default and raise the setting if you have extra memory.
#
#   The code attempts to automatically determine the appropriate size for
#   this parameter based on the amount of RAM and the number of execution
#   cores available to the server. The current decision matrix is:
#
#       |         |         Cores          |
#       |---------|------------------------|
#       | RAM     | 1    | 2 or 3 | ≥ 4    |
#       |---------|------|--------|--------|
#       | < ~8GB  | tiny | tiny   | tiny   |
#       | < ~12GB | tiny | small  | small  |
#       | < ~16GB | tiny | small  | medium |
#       | < ~24GB | tiny | small  | large  |
#       | < ~32GB | tiny | small  | huge   |
#
#   [signing_support]
#
#   Specifies whether the server will accept "sign" and "sign_for" commands
#   from remote users. Even if the commands are sent over a secure protocol
#   like secure websocket, this should generally be discouraged, because it
#   requires sending the secret to use for signing to the server. In order
#   to sign transactions, users should prefer to use a standalone signing
#   tool instead.
#
#   This flag has no effect on the "sign" and "sign_for" command line options
#   that rippled makes available.
#
#   The default value of this field is "false".
#
#   Example:
#
#   [signing_support]
#   true
#
#   [crawl]
#
#   List of options to control what data is reported through the /crawl
#   endpoint. See https://xrpl.org/peer-crawler.html
#
#   <enable>
#
#       Enable or disable access to /crawl requests. Default is '1', which
#       enables access.
#
#   overlay = <0|1>
#
#       Report information about peers this server is connected to, similar
#       to the "peers" RPC API. Default is '1', which means to report peer
#       overlay info.
#
#   server = <0|1>
#
#       Report information about the local server, similar to the
#       "server_state" RPC API. Default is '1', which means to report local
#       server info.
#
#   counts = <0|1>
#
#       Report information about the local server health counters, similar to
#       the "get_counts" RPC API. Default is '0', which means not to report
#       server counts.
#
#   unl = <0|1>
#
#       Report information about the local server's validator lists, similar
#       to the "validators" and "validator_list_sites" RPC APIs. Default is
#       '1', which means to report server validator lists.
#
#   Examples:
#
#       [crawl]
#       0
#
#       [crawl]
#       overlay = 1
#       server = 1
#       counts = 0
#       unl = 1
#
#   [vl]
#
#   Options to control what data is reported through the /vl endpoint.
#   See [...]
#
#   enable = <0|1>
#
#       Enable or disable access to /vl requests. Default is '1', which
#       enables access.
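#
#   For example, to turn off /vl requests entirely:
#
#       [vl]
#       enable = 0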
#
#   [beta_rpc_api]
#
#   0 or 1.
#
#   0: Disable the beta API version for JSON-RPC and WebSocket [default]
#   1: Enable the beta API version for testing. The beta API version
#      contains breaking changes that require a new API version number.
#      They are not ready for public consumption.
#
#-------------------------------------------------------------------------------
#
# 10. Example Settings
#
#--------------------
#
#   Administrators can use these values as a starting point for configuring
#   their instance of rippled, but each value should be checked to make sure
#   it meets the business requirements for the organization.
#
#   Server
#
#     These example configuration settings create these ports:
#
#     "peer"
#
#       Peer protocol open to everyone. This is required to accept
#       incoming rippled connections. This does not affect automatic
#       or manual outgoing Peer protocol connections.
#
#     "rpc"
#
#       Administrative RPC commands over HTTPS, when originating from
#       the same machine (via the loopback adapter at 127.0.0.1).
#
#     "wss_admin"
#
#       Admin level API commands over Secure Websockets, when originating
#       from the same machine (via the loopback adapter at 127.0.0.1).
#
#       This port is commented out but can be enabled by removing
#       the '#' from each corresponding line, including the entry under
#       [server].
#
#     "wss_public"
#
#       Guest level API commands over Secure Websockets, open to everyone.
#
#     For HTTPS and Secure Websockets ports, if no certificate and key file
#     are specified then a self-signed certificate will be generated on
#     startup. If you have a certificate and key file, uncomment the
#     corresponding lines and ensure the paths to the files are correct.
#
#     NOTE
#
#       To accept connections on well known ports such as 80 (HTTP) or
#       443 (HTTPS), most operating systems will require rippled to
#       run with administrator privileges, or else rippled will not start.

[server]
port_rpc_admin_local
port_peer
port_ws_admin_local
port_ws_public
#port_grpc
#ssl_key = /etc/ssl/private/server.key
#ssl_cert = /etc/ssl/certs/server.crt

[port_rpc_admin_local]
port = 5006
ip = 127.0.0.1
admin = 127.0.0.1
protocol = http

[port_peer]
port = 51235
ip = 0.0.0.0
# alternatively, to accept connections on IPv4 + IPv6, use:
#ip = ::
protocol = peer

[port_ws_admin_local]
port = 6007
ip = 127.0.0.1
admin = 127.0.0.1
protocol = ws

#[port_grpc]
#port = 50051
#ip = 0.0.0.0
#secure_gateway = 127.0.0.1

[port_ws_public]
port = 6008
ip = 127.0.0.1
protocol = ws

#-------------------------------------------------------------------------------

# This is the primary persistent datastore for rippled. This includes
# transaction metadata, account states, and ledger headers. Helpful
# information can be found at
# https://xrpl.org/capacity-planning.html#node-db-type
# type=NuDB is recommended for non-validators with fast SSDs. Validators, or
# servers with slow / spinning disks, should use RocksDB. Caution: Spinning
# disks are not recommended. They do not perform well enough to consistently
# remain synced to the network.
# online_delete=512 is recommended to delete old ledgers while maintaining at
# least 512.
# advisory_delete=0 allows the online delete process to run automatically
# when the node has approximately two times the "online_delete" value of
# ledgers. No external administrative command is required to initiate
# deletion.
[node_db]
type=NuDB
path=/var/lib/rippled-reporting/db/nudb
# online_delete=512
advisory_delete=0

[database_path]
/var/lib/rippled-reporting/db

# To use Postgres, uncomment this section and fill in the appropriate
# connection info. Postgres can only be used in Reporting Mode.
# To disable writing to the transaction database, uncomment this section and
# set use_tx_tables=0.
# [ledger_tx_tables]
# conninfo = postgres://<username>:<password>@localhost/<database>
# use_tx_tables=1

# This needs to be an absolute directory reference, not a relative one.
# Modify this value as required.
[debug_logfile]
/var/log/rippled-reporting/debug.log

# To use the XRP test network
# (see https://xrpl.org/connect-your-rippled-to-the-xrp-test-net.html),
# use the following [ips] section:
# [ips]
# r.altnet.rippletest.net 51235

# File containing trusted validator keys or validator list publishers.
# Unless an absolute path is specified, it will be considered relative to the
# folder in which the rippled.cfg file is located.
[validators_file]
/opt/rippled-reporting/etc/validators.txt

# Turn down default logging to save disk space in the long run.
# Valid values here are trace, debug, info, warning, error, and fatal.
[rpc_startup]
{ "command": "log_level", "severity": "info" }

# If ssl_verify is 1, certificates will be validated.
# To allow the use of self-signed certificates for development or internal
# use, set ssl_verify to 0.
[ssl_verify]
1


# To run in Reporting Mode, uncomment this section and fill in the appropriate
# connection info for one or more ETL sources.
-[reporting] -etl_source - -[etl_source] -source_grpc_port=50051 -source_ws_port=6005 -source_ip=127.0.0.1 diff --git a/cmake/RippledCore.cmake b/cmake/RippledCore.cmake index 18a424c484b..3b850354ed4 100644 --- a/cmake/RippledCore.cmake +++ b/cmake/RippledCore.cmake @@ -129,14 +129,6 @@ if(xrpld) target_compile_definitions(rippled PRIVATE RIPPLED_RUNNING_IN_CI) endif () - if(reporting) - set(suffix -reporting) - set_target_properties(rippled PROPERTIES OUTPUT_NAME rippled-reporting) - get_target_property(BIN_NAME rippled OUTPUT_NAME) - message(STATUS "Reporting mode build: rippled renamed ${BIN_NAME}") - target_compile_definitions(rippled PRIVATE RIPPLED_REPORTING) - endif() - # any files that don't play well with unity should be added here if(tests) set_source_files_properties( diff --git a/cmake/RippledSettings.cmake b/cmake/RippledSettings.cmake index a431bb61389..b81843cd5b5 100644 --- a/cmake/RippledSettings.cmake +++ b/cmake/RippledSettings.cmake @@ -10,8 +10,6 @@ option(assert "Enables asserts, even in release builds" OFF) option(xrpld "Build xrpld" ON) -option(reporting "Build rippled with reporting mode enabled" OFF) - option(tests "Build tests" ON) option(unity "Creates a build using UNITY support in cmake. This is the default" ON) diff --git a/conanfile.py b/conanfile.py index 425fee8b682..14fc49a1946 100644 --- a/conanfile.py +++ b/conanfile.py @@ -15,7 +15,6 @@ class Xrpl(ConanFile): 'coverage': [True, False], 'fPIC': [True, False], 'jemalloc': [True, False], - 'reporting': [True, False], 'rocksdb': [True, False], 'shared': [True, False], 'static': [True, False], @@ -44,7 +43,6 @@ class Xrpl(ConanFile): 'coverage': False, 'fPIC': True, 'jemalloc': False, - 'reporting': False, 'rocksdb': True, 'shared': False, 'static': True, @@ -52,8 +50,6 @@ class Xrpl(ConanFile): 'unity': False, 'xrpld': False, - 'cassandra-cpp-driver/*:shared': False, - 'cassandra-cpp-driver/*:use_atomic': None, 'date/*:header_only': True, 'grpc/*:shared': False, 'grpc/*:secure': True, @@ -72,7 +68,6 @@ class Xrpl(ConanFile): 'libarchive/*:with_pcreposix': False, 'libarchive/*:with_xattr': False, 'libarchive/*:with_zlib': False, - 'libpq/*:shared': False, 'lz4/*:shared': False, 'openssl/*:shared': False, 'protobuf/*:shared': False, @@ -110,9 +105,6 @@ def requirements(self): self.requires('sqlite3/3.42.0', force=True) if self.options.jemalloc: self.requires('jemalloc/5.3.0') - if self.options.reporting: - self.requires('cassandra-cpp-driver/2.15.3') - self.requires('libpq/14.7') if self.options.rocksdb: self.requires('rocksdb/6.29.5') @@ -139,7 +131,6 @@ def generate(self): tc.variables['assert'] = self.options.assertions tc.variables['coverage'] = self.options.coverage tc.variables['jemalloc'] = self.options.jemalloc - tc.variables['reporting'] = self.options.reporting tc.variables['rocksdb'] = self.options.rocksdb tc.variables['BUILD_SHARED_LIBS'] = self.options.shared tc.variables['static'] = self.options.static diff --git a/include/xrpl/beast/test/fail_counter.h b/include/xrpl/beast/test/fail_counter.h deleted file mode 100644 index d0cae77ddad..00000000000 --- a/include/xrpl/beast/test/fail_counter.h +++ /dev/null @@ -1,160 +0,0 @@ -// -// Copyright (c) 2013-2017 Vinnie Falco (vinnie dot falco at gmail dot com) -// -// Distributed under the Boost Software License, Version 1.0. 
(See accompanying -// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) -// - -#ifndef BEAST_TEST_FAIL_COUNTER_HPP -#define BEAST_TEST_FAIL_COUNTER_HPP - -#include -#include - -namespace beast { -namespace test { - -enum class error { fail_error = 1 }; - -namespace detail { - -class fail_error_category : public boost::system::error_category -{ -public: - const char* - name() const noexcept override - { - return "test"; - } - - std::string - message(int ev) const override - { - switch (static_cast(ev)) - { - default: - case error::fail_error: - return "test error"; - } - } - - boost::system::error_condition - default_error_condition(int ev) const noexcept override - { - return boost::system::error_condition{ev, *this}; - } - - bool - equivalent(int ev, boost::system::error_condition const& condition) - const noexcept override - { - return condition.value() == ev && &condition.category() == this; - } - - bool - equivalent(error_code const& error, int ev) const noexcept override - { - return error.value() == ev && &error.category() == this; - } -}; - -inline boost::system::error_category const& -get_error_category() -{ - static fail_error_category const cat{}; - return cat; -} - -} // namespace detail - -inline error_code -make_error_code(error ev) -{ - return error_code{ - static_cast::type>(ev), - detail::get_error_category()}; -} - -/** An error code with an error set on default construction - - Default constructed versions of this object will have - an error code set right away. This helps tests find code - which forgets to clear the error code on success. -*/ -struct fail_error_code : error_code -{ - fail_error_code() : error_code(make_error_code(error::fail_error)) - { - } - - template - fail_error_code(Arg0&& arg0, ArgN&&... argn) - : error_code(arg0, std::forward(argn)...) - { - } -}; - -/** A countdown to simulated failure. - - On the Nth operation, the class will fail with the specified - error code, or the default error code of @ref error::fail_error. -*/ -class fail_counter -{ - std::size_t n_; - error_code ec_; - -public: - fail_counter(fail_counter&&) = default; - - /** Construct a counter. - - @param The 0-based index of the operation to fail on or after. - */ - explicit fail_counter( - std::size_t n, - error_code ev = make_error_code(error::fail_error)) - : n_(n), ec_(ev) - { - } - - /// Throw an exception on the Nth failure - void - fail() - { - if (n_ > 0) - --n_; - if (!n_) - BOOST_THROW_EXCEPTION(system_error{ec_}); - } - - /// Set an error code on the Nth failure - bool - fail(error_code& ec) - { - if (n_ > 0) - --n_; - if (!n_) - { - ec = ec_; - return true; - } - ec.assign(0, ec.category()); - return false; - } -}; - -} // namespace test -} // namespace beast - -namespace boost { -namespace system { -template <> -struct is_error_code_enum -{ - static bool const value = true; -}; -} // namespace system -} // namespace boost - -#endif diff --git a/include/xrpl/beast/test/fail_stream.h b/include/xrpl/beast/test/fail_stream.h deleted file mode 100644 index 161e73ef091..00000000000 --- a/include/xrpl/beast/test/fail_stream.h +++ /dev/null @@ -1,182 +0,0 @@ -// -// Copyright (c) 2013-2017 Vinnie Falco (vinnie dot falco at gmail dot com) -// -// Distributed under the Boost Software License, Version 1.0. 
(See accompanying -// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) -// - -#ifndef BEAST_TEST_FAIL_STREAM_HPP -#define BEAST_TEST_FAIL_STREAM_HPP - -#include -#include -#include -#include -#include -#include -#include - -namespace beast { -namespace test { - -/** A stream wrapper that fails. - - On the Nth operation, the stream will fail with the specified - error code, or the default error code of invalid_argument. -*/ -template -class fail_stream -{ - boost::optional fc_; - fail_counter* pfc_; - NextLayer next_layer_; - -public: - using next_layer_type = typename std::remove_reference::type; - - using lowest_layer_type = typename get_lowest_layer::type; - - fail_stream(fail_stream&&) = delete; - fail_stream(fail_stream const&) = delete; - fail_stream& - operator=(fail_stream&&) = delete; - fail_stream& - operator=(fail_stream const&) = delete; - - template - explicit fail_stream(std::size_t n, Args&&... args) - : fc_(n), pfc_(&*fc_), next_layer_(std::forward(args)...) - { - } - - template - explicit fail_stream(fail_counter& fc, Args&&... args) - : pfc_(&fc), next_layer_(std::forward(args)...) - { - } - - next_layer_type& - next_layer() - { - return next_layer_; - } - - lowest_layer_type& - lowest_layer() - { - return next_layer_.lowest_layer(); - } - - lowest_layer_type const& - lowest_layer() const - { - return next_layer_.lowest_layer(); - } - - boost::asio::io_service& - get_io_service() - { - return next_layer_.get_io_service(); - } - - template - std::size_t - read_some(MutableBufferSequence const& buffers) - { - pfc_->fail(); - return next_layer_.read_some(buffers); - } - - template - std::size_t - read_some(MutableBufferSequence const& buffers, error_code& ec) - { - if (pfc_->fail(ec)) - return 0; - return next_layer_.read_some(buffers, ec); - } - - template - async_return_type - async_read_some(MutableBufferSequence const& buffers, ReadHandler&& handler) - { - error_code ec; - if (pfc_->fail(ec)) - { - async_completion init{ - handler}; - next_layer_.get_io_service().post( - bind_handler(init.completion_handler, ec, 0)); - return init.result.get(); - } - return next_layer_.async_read_some( - buffers, std::forward(handler)); - } - - template - std::size_t - write_some(ConstBufferSequence const& buffers) - { - pfc_->fail(); - return next_layer_.write_some(buffers); - } - - template - std::size_t - write_some(ConstBufferSequence const& buffers, error_code& ec) - { - if (pfc_->fail(ec)) - return 0; - return next_layer_.write_some(buffers, ec); - } - - template - async_return_type - async_write_some(ConstBufferSequence const& buffers, WriteHandler&& handler) - { - error_code ec; - if (pfc_->fail(ec)) - { - async_completion init{ - handler}; - next_layer_.get_io_service().post( - bind_handler(init.completion_handler, ec, 0)); - return init.result.get(); - } - return next_layer_.async_write_some( - buffers, std::forward(handler)); - } - - friend void - teardown( - websocket::teardown_tag, - fail_stream& stream, - boost::system::error_code& ec) - { - if (stream.pfc_->fail(ec)) - return; - beast::websocket_helpers::call_teardown(stream.next_layer(), ec); - } - - template - friend void - async_teardown( - websocket::teardown_tag, - fail_stream& stream, - TeardownHandler&& handler) - { - error_code ec; - if (stream.pfc_->fail(ec)) - { - stream.get_io_service().post(bind_handler(std::move(handler), ec)); - return; - } - beast::websocket_helpers::call_async_teardown( - stream.next_layer(), std::forward(handler)); - } -}; - -} // namespace test -} // namespace beast - 
-#endif diff --git a/include/xrpl/beast/test/pipe_stream.h b/include/xrpl/beast/test/pipe_stream.h deleted file mode 100644 index 762419a539a..00000000000 --- a/include/xrpl/beast/test/pipe_stream.h +++ /dev/null @@ -1,482 +0,0 @@ -// -// Copyright (c) 2013-2017 Vinnie Falco (vinnie dot falco at gmail dot com) -// -// Distributed under the Boost Software License, Version 1.0. (See accompanying -// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) -// - -#ifndef BEAST_TEST_PIPE_STREAM_HPP -#define BEAST_TEST_PIPE_STREAM_HPP - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -namespace beast { -namespace test { - -/** A bidirectional in-memory communication channel - - An instance of this class provides a client and server - endpoint that are automatically connected to each other - similarly to a connected socket. - - Test pipes are used to facilitate writing unit tests - where the behavior of the transport is tightly controlled - to help illuminate all code paths (for code coverage) -*/ -class pipe -{ -public: - using buffer_type = flat_buffer; - -private: - struct read_op - { - virtual ~read_op() = default; - virtual void - operator()() = 0; - }; - - struct state - { - std::mutex m; - buffer_type b; - std::condition_variable cv; - std::unique_ptr op; - bool eof = false; - }; - - state s_[2]; - -public: - /** Represents an endpoint. - - Each pipe has a client stream and a server stream. - */ - class stream - { - friend class pipe; - - template - class read_op_impl; - - state& in_; - state& out_; - boost::asio::io_service& ios_; - fail_counter* fc_ = nullptr; - std::size_t read_max_ = (std::numeric_limits::max)(); - std::size_t write_max_ = (std::numeric_limits::max)(); - - stream(state& in, state& out, boost::asio::io_service& ios) - : in_(in), out_(out), ios_(ios), buffer(in_.b) - { - } - - public: - using buffer_type = pipe::buffer_type; - - /// Direct access to the underlying buffer - buffer_type& buffer; - - /// Counts the number of read calls - std::size_t nread = 0; - - /// Counts the number of write calls - std::size_t nwrite = 0; - - ~stream() = default; - stream(stream&&) = default; - - /// Set the fail counter on the object - void - fail(fail_counter& fc) - { - fc_ = &fc; - } - - /// Return the `io_service` associated with the object - boost::asio::io_service& - get_io_service() - { - return ios_; - } - - /// Set the maximum number of bytes returned by read_some - void - read_size(std::size_t n) - { - read_max_ = n; - } - - /// Set the maximum number of bytes returned by write_some - void - write_size(std::size_t n) - { - write_max_ = n; - } - - /// Returns a string representing the pending input data - string_view - str() const - { - using boost::asio::buffer_cast; - using boost::asio::buffer_size; - return { - buffer_cast(*in_.b.data().begin()), - buffer_size(*in_.b.data().begin())}; - } - - /// Clear the buffer holding the input data - void - clear() - { - in_.b.consume((std::numeric_limits::max)()); - } - - /** Close the stream. - - The other end of the pipe will see - `boost::asio::error::eof` on read. 
- */ - template - void - close(); - - template - std::size_t - read_some(MutableBufferSequence const& buffers); - - template - std::size_t - read_some(MutableBufferSequence const& buffers, error_code& ec); - - template - async_return_type - async_read_some( - MutableBufferSequence const& buffers, - ReadHandler&& handler); - - template - std::size_t - write_some(ConstBufferSequence const& buffers); - - template - std::size_t - write_some(ConstBufferSequence const& buffers, error_code&); - - template - async_return_type - async_write_some( - ConstBufferSequence const& buffers, - WriteHandler&& handler); - - friend void - teardown( - websocket::teardown_tag, - stream&, - boost::system::error_code& ec) - { - ec.assign(0, ec.category()); - } - - template - friend void - async_teardown( - websocket::teardown_tag, - stream& s, - TeardownHandler&& handler) - { - s.get_io_service().post( - bind_handler(std::move(handler), error_code{})); - } - }; - - /** Constructor. - - The client and server endpoints will use the same `io_service`. - */ - explicit pipe(boost::asio::io_service& ios) - : client(s_[0], s_[1], ios), server(s_[1], s_[0], ios) - { - } - - /** Constructor. - - The client and server endpoints will different `io_service` objects. - */ - explicit pipe(boost::asio::io_service& ios1, boost::asio::io_service& ios2) - : client(s_[0], s_[1], ios1), server(s_[1], s_[0], ios2) - { - } - - /// Represents the client endpoint - stream client; - - /// Represents the server endpoint - stream server; -}; - -//------------------------------------------------------------------------------ - -template -class pipe::stream::read_op_impl : public pipe::read_op -{ - stream& s_; - Buffers b_; - Handler h_; - -public: - read_op_impl(stream& s, Buffers const& b, Handler&& h) - : s_(s), b_(b), h_(std::move(h)) - { - } - - read_op_impl(stream& s, Buffers const& b, Handler const& h) - : s_(s), b_(b), h_(h) - { - } - - void - operator()() override; -}; - -//------------------------------------------------------------------------------ - -template -void -pipe::stream::read_op_impl::operator()() -{ - using boost::asio::buffer_copy; - using boost::asio::buffer_size; - s_.ios_.post([&]() { - BOOST_ASSERT(s_.in_.op); - std::unique_lock lock{s_.in_.m}; - if (s_.in_.b.size() > 0) - { - auto const bytes_transferred = - buffer_copy(b_, s_.in_.b.data(), s_.read_max_); - s_.in_.b.consume(bytes_transferred); - auto& s = s_; - Handler h{std::move(h_)}; - lock.unlock(); - s.in_.op.reset(nullptr); - ++s.nread; - s.ios_.post( - bind_handler(std::move(h), error_code{}, bytes_transferred)); - } - else - { - BOOST_ASSERT(s_.in_.eof); - auto& s = s_; - Handler h{std::move(h_)}; - lock.unlock(); - s.in_.op.reset(nullptr); - ++s.nread; - s.ios_.post(bind_handler(std::move(h), boost::asio::error::eof, 0)); - } - }); -} - -//------------------------------------------------------------------------------ - -template -void -pipe::stream::close() -{ - std::lock_guard lock{out_.m}; - out_.eof = true; - if (out_.op) - out_.op.get()->operator()(); - else - out_.cv.notify_all(); -} - -template -std::size_t -pipe::stream::read_some(MutableBufferSequence const& buffers) -{ - static_assert( - is_mutable_buffer_sequence::value, - "MutableBufferSequence requirements not met"); - error_code ec; - auto const n = read_some(buffers, ec); - if (ec) - BOOST_THROW_EXCEPTION(system_error{ec}); - return n; -} - -template -std::size_t -pipe::stream::read_some(MutableBufferSequence const& buffers, error_code& ec) -{ - static_assert( - 
is_mutable_buffer_sequence::value, - "MutableBufferSequence requirements not met"); - using boost::asio::buffer_copy; - using boost::asio::buffer_size; - BOOST_ASSERT(!in_.op); - BOOST_ASSERT(buffer_size(buffers) > 0); - if (fc_ && fc_->fail(ec)) - return 0; - std::unique_lock lock{in_.m}; - in_.cv.wait(lock, [&]() { return in_.b.size() > 0 || in_.eof; }); - std::size_t bytes_transferred; - if (in_.b.size() > 0) - { - ec.assign(0, ec.category()); - bytes_transferred = buffer_copy(buffers, in_.b.data(), read_max_); - in_.b.consume(bytes_transferred); - } - else - { - BOOST_ASSERT(in_.eof); - bytes_transferred = 0; - ec = boost::asio::error::eof; - } - ++nread; - return bytes_transferred; -} - -template -async_return_type -pipe::stream::async_read_some( - MutableBufferSequence const& buffers, - ReadHandler&& handler) -{ - static_assert( - is_mutable_buffer_sequence::value, - "MutableBufferSequence requirements not met"); - using boost::asio::buffer_copy; - using boost::asio::buffer_size; - BOOST_ASSERT(!in_.op); - BOOST_ASSERT(buffer_size(buffers) > 0); - async_completion init{handler}; - if (fc_) - { - error_code ec; - if (fc_->fail(ec)) - return ios_.post(bind_handler(init.completion_handler, ec, 0)); - } - { - std::unique_lock lock{in_.m}; - if (in_.eof) - { - lock.unlock(); - ++nread; - ios_.post(bind_handler( - init.completion_handler, boost::asio::error::eof, 0)); - } - else if (buffer_size(buffers) == 0 || buffer_size(in_.b.data()) > 0) - { - auto const bytes_transferred = - buffer_copy(buffers, in_.b.data(), read_max_); - in_.b.consume(bytes_transferred); - lock.unlock(); - ++nread; - ios_.post(bind_handler( - init.completion_handler, error_code{}, bytes_transferred)); - } - else - { - in_.op.reset( - new read_op_impl< - handler_type, - MutableBufferSequence>{ - *this, buffers, init.completion_handler}); - } - } - return init.result.get(); -} - -template -std::size_t -pipe::stream::write_some(ConstBufferSequence const& buffers) -{ - static_assert( - is_const_buffer_sequence::value, - "ConstBufferSequence requirements not met"); - BOOST_ASSERT(!out_.eof); - error_code ec; - auto const bytes_transferred = write_some(buffers, ec); - if (ec) - BOOST_THROW_EXCEPTION(system_error{ec}); - return bytes_transferred; -} - -template -std::size_t -pipe::stream::write_some(ConstBufferSequence const& buffers, error_code& ec) -{ - static_assert( - is_const_buffer_sequence::value, - "ConstBufferSequence requirements not met"); - using boost::asio::buffer_copy; - using boost::asio::buffer_size; - BOOST_ASSERT(!out_.eof); - if (fc_ && fc_->fail(ec)) - return 0; - auto const n = (std::min)(buffer_size(buffers), write_max_); - std::unique_lock lock{out_.m}; - auto const bytes_transferred = buffer_copy(out_.b.prepare(n), buffers); - out_.b.commit(bytes_transferred); - lock.unlock(); - if (out_.op) - out_.op.get()->operator()(); - else - out_.cv.notify_all(); - ++nwrite; - ec.assign(0, ec.category()); - return bytes_transferred; -} - -template -async_return_type -pipe::stream::async_write_some( - ConstBufferSequence const& buffers, - WriteHandler&& handler) -{ - static_assert( - is_const_buffer_sequence::value, - "ConstBufferSequence requirements not met"); - using boost::asio::buffer_copy; - using boost::asio::buffer_size; - BOOST_ASSERT(!out_.eof); - async_completion init{handler}; - if (fc_) - { - error_code ec; - if (fc_->fail(ec)) - return ios_.post(bind_handler(init.completion_handler, ec, 0)); - } - auto const n = (std::min)(buffer_size(buffers), write_max_); - std::unique_lock lock{out_.m}; - auto 
const bytes_transferred = buffer_copy(out_.b.prepare(n), buffers); - out_.b.commit(bytes_transferred); - lock.unlock(); - if (out_.op) - out_.op.get()->operator()(); - else - out_.cv.notify_all(); - ++nwrite; - ios_.post( - bind_handler(init.completion_handler, error_code{}, bytes_transferred)); - return init.result.get(); -} - -} // namespace test -} // namespace beast - -#endif diff --git a/include/xrpl/beast/test/sig_wait.h b/include/xrpl/beast/test/sig_wait.h deleted file mode 100644 index 92720561fa6..00000000000 --- a/include/xrpl/beast/test/sig_wait.h +++ /dev/null @@ -1,29 +0,0 @@ -// -// Copyright (c) 2013-2017 Vinnie Falco (vinnie dot falco at gmail dot com) -// -// Distributed under the Boost Software License, Version 1.0. (See accompanying -// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) -// - -#ifndef BEAST_TEST_SIG_WAIT_HPP -#define BEAST_TEST_SIG_WAIT_HPP - -#include - -namespace beast { -namespace test { - -/// Block until SIGINT or SIGTERM is received. -inline void -sig_wait() -{ - boost::asio::io_service ios; - boost::asio::signal_set signals(ios, SIGINT, SIGTERM); - signals.async_wait([&](boost::system::error_code const&, int) {}); - ios.run(); -} - -} // namespace test -} // namespace beast - -#endif diff --git a/include/xrpl/beast/test/string_iostream.h b/include/xrpl/beast/test/string_iostream.h deleted file mode 100644 index bed6299a2bc..00000000000 --- a/include/xrpl/beast/test/string_iostream.h +++ /dev/null @@ -1,166 +0,0 @@ -// -// Copyright (c) 2013-2017 Vinnie Falco (vinnie dot falco at gmail dot com) -// -// Distributed under the Boost Software License, Version 1.0. (See accompanying -// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) -// - -#ifndef BEAST_TEST_STRING_IOSTREAM_HPP -#define BEAST_TEST_STRING_IOSTREAM_HPP - -#include -#include -#include -#include -#include -#include -#include -#include -#include - -namespace beast { -namespace test { - -/** A SyncStream and AsyncStream that reads from a string and writes to another - string. - - This class behaves like a socket, except that written data is - appended to a string exposed as a public data member, and when - data is read it comes from a string provided at construction. 
-*/ -class string_iostream -{ - std::string s_; - boost::asio::const_buffer cb_; - boost::asio::io_service& ios_; - std::size_t read_max_; - -public: - std::string str; - - string_iostream( - boost::asio::io_service& ios, - std::string s, - std::size_t read_max = (std::numeric_limits::max)()) - : s_(std::move(s)) - , cb_(boost::asio::buffer(s_)) - , ios_(ios) - , read_max_(read_max) - { - } - - boost::asio::io_service& - get_io_service() - { - return ios_; - } - - template - std::size_t - read_some(MutableBufferSequence const& buffers) - { - error_code ec; - auto const n = read_some(buffers, ec); - if (ec) - BOOST_THROW_EXCEPTION(system_error{ec}); - return n; - } - - template - std::size_t - read_some(MutableBufferSequence const& buffers, error_code& ec) - { - auto const n = - boost::asio::buffer_copy(buffers, buffer_prefix(read_max_, cb_)); - if (n > 0) - { - ec.assign(0, ec.category()); - cb_ = cb_ + n; - } - else - { - ec = boost::asio::error::eof; - } - return n; - } - - template - async_return_type - async_read_some(MutableBufferSequence const& buffers, ReadHandler&& handler) - { - auto const n = - boost::asio::buffer_copy(buffers, boost::asio::buffer(s_)); - error_code ec; - if (n > 0) - s_.erase(0, n); - else - ec = boost::asio::error::eof; - async_completion init{ - handler}; - ios_.post(bind_handler(init.completion_handler, ec, n)); - return init.result.get(); - } - - template - std::size_t - write_some(ConstBufferSequence const& buffers) - { - error_code ec; - auto const n = write_some(buffers, ec); - if (ec) - BOOST_THROW_EXCEPTION(system_error{ec}); - return n; - } - - template - std::size_t - write_some(ConstBufferSequence const& buffers, error_code& ec) - { - ec.assign(0, ec.category()); - using boost::asio::buffer_cast; - using boost::asio::buffer_size; - auto const n = buffer_size(buffers); - str.reserve(str.size() + n); - for (boost::asio::const_buffer buffer : buffers) - str.append(buffer_cast(buffer), buffer_size(buffer)); - return n; - } - - template - async_return_type - async_write_some(ConstBufferSequence const& buffers, WriteHandler&& handler) - { - error_code ec; - auto const bytes_transferred = write_some(buffers, ec); - async_completion init{ - handler}; - get_io_service().post( - bind_handler(init.completion_handler, ec, bytes_transferred)); - return init.result.get(); - } - - friend void - teardown( - websocket::teardown_tag, - string_iostream&, - boost::system::error_code& ec) - { - ec.assign(0, ec.category()); - } - - template - friend void - async_teardown( - websocket::teardown_tag, - string_iostream& stream, - TeardownHandler&& handler) - { - stream.get_io_service().post( - bind_handler(std::move(handler), error_code{})); - } -}; - -} // namespace test -} // namespace beast - -#endif diff --git a/include/xrpl/beast/test/string_istream.h b/include/xrpl/beast/test/string_istream.h deleted file mode 100644 index 83cb3cfef5d..00000000000 --- a/include/xrpl/beast/test/string_istream.h +++ /dev/null @@ -1,155 +0,0 @@ -// -// Copyright (c) 2013-2017 Vinnie Falco (vinnie dot falco at gmail dot com) -// -// Distributed under the Boost Software License, Version 1.0. (See accompanying -// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) -// - -#ifndef BEAST_TEST_STRING_ISTREAM_HPP -#define BEAST_TEST_STRING_ISTREAM_HPP - -#include -#include -#include -#include -#include -#include -#include -#include - -namespace beast { -namespace test { - -/** A SyncStream and AsyncStream that reads from a string. 
- - This class behaves like a socket, except that written data is simply - discarded, and when data is read it comes from a string provided - at construction. -*/ -class string_istream -{ - std::string s_; - boost::asio::const_buffer cb_; - boost::asio::io_service& ios_; - std::size_t read_max_; - -public: - string_istream( - boost::asio::io_service& ios, - std::string s, - std::size_t read_max = (std::numeric_limits::max)()) - : s_(std::move(s)) - , cb_(boost::asio::buffer(s_)) - , ios_(ios) - , read_max_(read_max) - { - } - - boost::asio::io_service& - get_io_service() - { - return ios_; - } - - template - std::size_t - read_some(MutableBufferSequence const& buffers) - { - error_code ec; - auto const n = read_some(buffers, ec); - if (ec) - BOOST_THROW_EXCEPTION(system_error{ec}); - return n; - } - - template - std::size_t - read_some(MutableBufferSequence const& buffers, error_code& ec) - { - auto const n = boost::asio::buffer_copy(buffers, cb_, read_max_); - if (n > 0) - { - ec.assign(0, ec.category()); - cb_ = cb_ + n; - } - else - { - ec = boost::asio::error::eof; - } - return n; - } - - template - async_return_type - async_read_some(MutableBufferSequence const& buffers, ReadHandler&& handler) - { - auto const n = - boost::asio::buffer_copy(buffers, boost::asio::buffer(s_)); - error_code ec; - if (n > 0) - s_.erase(0, n); - else - ec = boost::asio::error::eof; - async_completion init{ - handler}; - ios_.post(bind_handler(init.completion_handler, ec, n)); - return init.result.get(); - } - - template - std::size_t - write_some(ConstBufferSequence const& buffers) - { - error_code ec; - auto const n = write_some(buffers, ec); - if (ec) - BOOST_THROW_EXCEPTION(system_error{ec}); - return n; - } - - template - std::size_t - write_some(ConstBufferSequence const& buffers, error_code& ec) - { - ec.assign(0, ec.category()); - return boost::asio::buffer_size(buffers); - } - - template - async_return_type - async_write_some(ConstBuffeSequence const& buffers, WriteHandler&& handler) - { - async_completion init{ - handler}; - ios_.post(bind_handler( - init.completion_handler, - error_code{}, - boost::asio::buffer_size(buffers))); - return init.result.get(); - } - - friend void - teardown( - websocket::teardown_tag, - string_istream&, - boost::system::error_code& ec) - { - ec.assign(0, ec.category()); - } - - template - friend void - async_teardown( - websocket::teardown_tag, - string_istream& stream, - TeardownHandler&& handler) - { - stream.get_io_service().post( - bind_handler(std::move(handler), error_code{})); - } -}; - -} // namespace test -} // namespace beast - -#endif diff --git a/include/xrpl/beast/test/string_ostream.h b/include/xrpl/beast/test/string_ostream.h deleted file mode 100644 index 9edf69be88f..00000000000 --- a/include/xrpl/beast/test/string_ostream.h +++ /dev/null @@ -1,137 +0,0 @@ -// -// Copyright (c) 2013-2017 Vinnie Falco (vinnie dot falco at gmail dot com) -// -// Distributed under the Boost Software License, Version 1.0. 
(See accompanying -// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) -// - -#ifndef BEAST_TEST_STRING_OSTREAM_HPP -#define BEAST_TEST_STRING_OSTREAM_HPP - -#include -#include -#include -#include -#include -#include -#include -#include -#include - -namespace beast { -namespace test { - -class string_ostream -{ - boost::asio::io_service& ios_; - std::size_t write_max_; - -public: - std::string str; - - explicit string_ostream( - boost::asio::io_service& ios, - std::size_t write_max = (std::numeric_limits::max)()) - : ios_(ios), write_max_(write_max) - { - } - - boost::asio::io_service& - get_io_service() - { - return ios_; - } - - template - std::size_t - read_some(MutableBufferSequence const& buffers) - { - error_code ec; - auto const n = read_some(buffers, ec); - if (ec) - BOOST_THROW_EXCEPTION(system_error{ec}); - return n; - } - - template - std::size_t - read_some(MutableBufferSequence const&, error_code& ec) - { - ec = boost::asio::error::eof; - return 0; - } - - template - async_return_type - async_read_some(MutableBufferSequence const&, ReadHandler&& handler) - { - async_completion init{ - handler}; - ios_.post( - bind_handler(init.completion_handler, boost::asio::error::eof, 0)); - return init.result.get(); - } - - template - std::size_t - write_some(ConstBufferSequence const& buffers) - { - error_code ec; - auto const n = write_some(buffers, ec); - if (ec) - BOOST_THROW_EXCEPTION(system_error{ec}); - return n; - } - - template - std::size_t - write_some(ConstBufferSequence const& buffers, error_code& ec) - { - ec.assign(0, ec.category()); - using boost::asio::buffer_cast; - using boost::asio::buffer_size; - auto const n = (std::min)(buffer_size(buffers), write_max_); - str.reserve(str.size() + n); - for (boost::asio::const_buffer buffer : buffer_prefix(n, buffers)) - str.append(buffer_cast(buffer), buffer_size(buffer)); - return n; - } - - template - async_return_type - async_write_some(ConstBufferSequence const& buffers, WriteHandler&& handler) - { - error_code ec; - auto const bytes_transferred = write_some(buffers, ec); - async_completion init{ - handler}; - get_io_service().post( - bind_handler(init.completion_handler, ec, bytes_transferred)); - return init.result.get(); - } - - friend void - teardown( - websocket::teardown_tag, - string_ostream&, - boost::system::error_code& ec) - { - ec.assign(0, ec.category()); - } - - template - friend void - async_teardown( - websocket::teardown_tag, - string_ostream& stream, - TeardownHandler&& handler) - { - stream.get_io_service().post( - bind_handler(std::move(handler), error_code{})); - } -}; - -} // namespace test -} // namespace beast - -#endif diff --git a/include/xrpl/beast/test/test_allocator.h b/include/xrpl/beast/test/test_allocator.h deleted file mode 100644 index 598e22c2766..00000000000 --- a/include/xrpl/beast/test/test_allocator.h +++ /dev/null @@ -1,159 +0,0 @@ -// -// Copyright (c) 2013-2017 Vinnie Falco (vinnie dot falco at gmail dot com) -// -// Distributed under the Boost Software License, Version 1.0. 
(See accompanying -// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) -// - -#ifndef BEAST_TEST_TEST_ALLOCATOR_HPP -#define BEAST_TEST_TEST_ALLOCATOR_HPP - -#include -#include -#include - -namespace beast { -namespace test { - -struct test_allocator_info -{ - std::size_t id; - std::size_t ncopy = 0; - std::size_t nmove = 0; - std::size_t nmassign = 0; - std::size_t ncpassign = 0; - std::size_t nselect = 0; - - test_allocator_info() - : id([] { - static std::atomic sid(0); - return ++sid; - }()) - { - } -}; - -template -class test_allocator; - -template -struct test_allocator_base -{ -}; - -template -struct test_allocator_base -{ - static test_allocator - select_on_container_copy_construction( - test_allocator const& a) - { - return test_allocator{}; - } -}; - -template -class test_allocator - : public test_allocator_base -{ - std::shared_ptr info_; - - template - friend class test_allocator; - -public: - using value_type = T; - - using propagate_on_container_copy_assignment = - std::integral_constant; - - using propagate_on_container_move_assignment = - std::integral_constant; - - using propagate_on_container_swap = std::integral_constant; - - template - struct rebind - { - using other = test_allocator; - }; - - test_allocator() : info_(std::make_shared()) - { - } - - test_allocator(test_allocator const& u) noexcept : info_(u.info_) - { - ++info_->ncopy; - } - - template - test_allocator( - test_allocator const& u) noexcept - : info_(u.info_) - { - ++info_->ncopy; - } - - test_allocator(test_allocator&& t) : info_(t.info_) - { - ++info_->nmove; - } - - test_allocator& - operator=(test_allocator const& u) noexcept - { - info_ = u.info_; - ++info_->ncpassign; - return *this; - } - - test_allocator& - operator=(test_allocator&& u) noexcept - { - info_ = u.info_; - ++info_->nmassign; - return *this; - } - - value_type* - allocate(std::size_t n) - { - return static_cast(::operator new(n * sizeof(value_type))); - } - - void - deallocate(value_type* p, std::size_t) noexcept - { - ::operator delete(p); - } - - bool - operator==(test_allocator const& other) const - { - return id() == other.id() || Equal; - } - - bool - operator!=(test_allocator const& other) const - { - return !this->operator==(other); - } - - std::size_t - id() const - { - return info_->id; - } - - test_allocator_info const* - operator->() const - { - return info_.get(); - } -}; - -} // namespace test -} // namespace beast - -#endif diff --git a/include/xrpl/beast/unit_test/dstream.h b/include/xrpl/beast/unit_test/dstream.h deleted file mode 100644 index ff2036a12cb..00000000000 --- a/include/xrpl/beast/unit_test/dstream.h +++ /dev/null @@ -1,121 +0,0 @@ -// -// Copyright (c) 2013-2017 Vinnie Falco (vinnie dot falco at gmail dot com) -// -// Distributed under the Boost Software License, Version 1.0. 
(See accompanying -// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) -// - -#ifndef BEAST_UNIT_TEST_DSTREAM_HPP -#define BEAST_UNIT_TEST_DSTREAM_HPP - -#include -#include -#include -#include -#include -#include - -#ifdef BOOST_WINDOWS -#include -//#include -#endif - -namespace beast { -namespace unit_test { - -#ifdef BOOST_WINDOWS - -namespace detail { - -template -class dstream_buf : public std::basic_stringbuf -{ - using ostream = std::basic_ostream; - - bool dbg_; - ostream& os_; - - template - void - write(T const*) = delete; - - void - write(char const* s) - { - if (dbg_) - /*boost::detail::winapi*/ ::OutputDebugStringA(s); - os_ << s; - } - - void - write(wchar_t const* s) - { - if (dbg_) - /*boost::detail::winapi*/ ::OutputDebugStringW(s); - os_ << s; - } - -public: - explicit dstream_buf(ostream& os) - : os_(os), dbg_(/*boost::detail::winapi*/ ::IsDebuggerPresent() != 0) - { - } - - ~dstream_buf() - { - sync(); - } - - int - sync() override - { - write(this->str().c_str()); - this->str(""); - return 0; - } -}; - -} // namespace detail - -/** std::ostream with Visual Studio IDE redirection. - - Instances of this stream wrap a specified `std::ostream` - (such as `std::cout` or `std::cerr`). If the IDE debugger - is attached when the stream is created, output will be - additionally copied to the Visual Studio Output window. -*/ -template < - class CharT, - class Traits = std::char_traits, - class Allocator = std::allocator> -class basic_dstream : public std::basic_ostream -{ - detail::dstream_buf buf_; - -public: - /** Construct a stream. - - @param os The output stream to wrap. - */ - explicit basic_dstream(std::ostream& os) - : std::basic_ostream(&buf_), buf_(os) - { - if (os.flags() & std::ios::unitbuf) - std::unitbuf(*this); - } -}; - -using dstream = basic_dstream; -using dwstream = basic_dstream; - -#else - -using dstream = std::ostream&; -using dwstream = std::wostream&; - -#endif - -} // namespace unit_test -} // namespace beast - -#endif diff --git a/include/xrpl/beast/utility/hash_pair.h b/include/xrpl/beast/utility/hash_pair.h deleted file mode 100644 index 08042b34778..00000000000 --- a/include/xrpl/beast/utility/hash_pair.h +++ /dev/null @@ -1,72 +0,0 @@ -//------------------------------------------------------------------------------ -/* - This file is part of Beast: https://github.com/vinniefalco/Beast - Copyright 2013, Vinnie Falco - - Permission to use, copy, modify, and/or distribute this software for any - purpose with or without fee is hereby granted, provided that the above - copyright notice and this permission notice appear in all copies. - - THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. -*/ -//============================================================================== - -#ifndef BEAST_UTILITY_HASH_PAIR_H_INCLUDED -#define BEAST_UTILITY_HASH_PAIR_H_INCLUDED - -#include -#include - -#include -#include - -namespace std { - -/** Specialization of std::hash for any std::pair type. 
-template <class First, class Second>
-struct hash<std::pair<First, Second>>
-    : private boost::base_from_member<std::hash<First>, 0>,
-      private boost::base_from_member<std::hash<Second>, 1>
-{
-private:
-    using first_hash = boost::base_from_member<std::hash<First>, 0>;
-    using second_hash = boost::base_from_member<std::hash<Second>, 1>;
-
-public:
-    hash()
-    {
-    }
-
-    hash(
-        std::hash<First> const& first_hash_,
-        std::hash<Second> const& second_hash_)
-        : first_hash(first_hash_), second_hash(second_hash_)
-    {
-    }
-
-    std::size_t
-    operator()(std::pair<First, Second> const& value)
-    {
-        std::size_t result(first_hash::member(value.first));
-        boost::hash_combine(result, second_hash::member(value.second));
-        return result;
-    }
-
-    std::size_t
-    operator()(std::pair<First, Second> const& value) const
-    {
-        std::size_t result(first_hash::member(value.first));
-        boost::hash_combine(result, second_hash::member(value.second));
-        return result;
-    }
-};
-
-} // namespace std
-
-#endif
diff --git a/include/xrpl/proto/org/xrpl/rpc/v1/xrp_ledger.proto b/include/xrpl/proto/org/xrpl/rpc/v1/xrp_ledger.proto
index 995edba48a1..01a23fbe375 100644
--- a/include/xrpl/proto/org/xrpl/rpc/v1/xrp_ledger.proto
+++ b/include/xrpl/proto/org/xrpl/rpc/v1/xrp_ledger.proto
@@ -11,7 +11,7 @@ import "org/xrpl/rpc/v1/get_ledger_diff.proto";
 
 
 // These methods are binary only methods for retrieiving arbitrary ledger state
-// via gRPC. These methods are used by clio and reporting mode, but can also be
+// via gRPC. These methods are used by clio, but can also be
 // used by any client that wants to extract ledger state in an efficient manner.
 // They do not directly mimic the JSON equivalent methods.
 service XRPLedgerAPIService {
diff --git a/include/xrpl/protocol/ErrorCodes.h b/include/xrpl/protocol/ErrorCodes.h
index 6d5590ec605..d8ec3052b7b 100644
--- a/include/xrpl/protocol/ErrorCodes.h
+++ b/include/xrpl/protocol/ErrorCodes.h
@@ -136,8 +136,8 @@ enum error_code_i {
     rpcINVALID_LGR_RANGE = 79,
     rpcEXPIRED_VALIDATOR_LIST = 80,
 
-    // Reporting
-    rpcFAILED_TO_FORWARD = 90,
+    // unused = 90,
+    // DEPRECATED. New code must not use this value.
     rpcREPORTING_UNSUPPORTED = 91,
 
     rpcOBJECT_NOT_FOUND = 92,
@@ -148,8 +148,7 @@ enum error_code_i {
     // Oracle
     rpcORACLE_MALFORMED = 94,
 
-    rpcLAST =
-        rpcORACLE_MALFORMED  // rpcLAST should always equal the last code.=
+    rpcLAST = rpcORACLE_MALFORMED  // rpcLAST should always equal the last code.
 };
 
 /** Codes returned in the `warnings` array of certain RPC commands.
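The std::hash specialization deleted above hashes each member of the pair and mixes the results with boost::hash_combine. A minimal sketch of the same combine step, using a hypothetical pair_hash functor and the usual hash_combine mixing constant instead of boost:

#include <cstddef>
#include <functional>
#include <string>
#include <unordered_map>
#include <utility>

// Hypothetical stand-in for the deleted std::hash<std::pair> specialization:
// hash each member, then mix the two results.
struct pair_hash
{
    template <class First, class Second>
    std::size_t
    operator()(std::pair<First, Second> const& value) const
    {
        std::size_t result = std::hash<First>{}(value.first);
        std::size_t const second = std::hash<Second>{}(value.second);
        // Same mixing recipe boost::hash_combine is commonly built on.
        result ^= second + 0x9e3779b9 + (result << 6) + (result >> 2);
        return result;
    }
};

int main()
{
    // Usable wherever a pair key needs hashing, e.g. an unordered_map.
    std::unordered_map<std::pair<int, std::string>, int, pair_hash> m;
    m[{1, "a"}] = 42;
}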
@@ -160,7 +159,7 @@ enum warning_code_i {
     warnRPC_UNSUPPORTED_MAJORITY = 1001,
     warnRPC_AMENDMENT_BLOCKED = 1002,
     warnRPC_EXPIRED_VALIDATOR_LIST = 1003,
-    warnRPC_REPORTING = 1004
+    // unused = 1004
 };
 
 //------------------------------------------------------------------------------
diff --git a/src/libxrpl/protocol/ErrorCodes.cpp b/src/libxrpl/protocol/ErrorCodes.cpp
index 28024fab093..4c934f4fd53 100644
--- a/src/libxrpl/protocol/ErrorCodes.cpp
+++ b/src/libxrpl/protocol/ErrorCodes.cpp
@@ -71,7 +71,6 @@ constexpr static ErrorInfo unorderedErrorInfos[]{
     {rpcDST_ISR_MALFORMED, "dstIsrMalformed", "Destination issuer is malformed.", 400},
     {rpcEXCESSIVE_LGR_RANGE, "excessiveLgrRange", "Ledger range exceeds 1000.", 400},
     {rpcFORBIDDEN, "forbidden", "Bad credentials.", 403},
-    {rpcFAILED_TO_FORWARD, "failedToForward", "Failed to forward request to p2p node", 503},
     {rpcHIGH_FEE, "highFee", "Current transaction fee exceeds your limit.", 402},
     {rpcINTERNAL, "internal", "Internal error.", 500},
     {rpcINVALID_LGR_RANGE, "invalidLgrRange", "Ledger range is invalid.", 400},
@@ -97,7 +96,6 @@ constexpr static ErrorInfo unorderedErrorInfos[]{
     {rpcNO_PF_REQUEST, "noPathRequest", "No pathfinding request in progress.", 404},
     {rpcOBJECT_NOT_FOUND, "objectNotFound", "The requested object was not found.", 404},
     {rpcPUBLIC_MALFORMED, "publicMalformed", "Public key is malformed.", 400},
-    {rpcREPORTING_UNSUPPORTED, "reportingUnsupported", "Requested operation not supported by reporting mode server", 405},
     {rpcSENDMAX_MALFORMED, "sendMaxMalformed", "SendMax amount malformed.", 400},
     {rpcSIGNING_MALFORMED, "signingMalformed", "Signing of transaction is malformed.", 400},
     {rpcSLOW_DOWN, "slowDown", "You are placing too much load on the server.", 429},
diff --git a/src/test/rpc/ReportingETL_test.cpp b/src/test/rpc/ReportingETL_test.cpp
deleted file mode 100644
index 8a030938832..00000000000
--- a/src/test/rpc/ReportingETL_test.cpp
+++ /dev/null
@@ -1,1144 +0,0 @@
-
-//------------------------------------------------------------------------------
-/*
-    This file is part of rippled: https://github.com/ripple/rippled
-    Copyright (c) 2020 Ripple Labs Inc.
-
-    Permission to use, copy, modify, and/or distribute this software for any
-    purpose with or without fee is hereby granted, provided that the above
-    copyright notice and this permission notice appear in all copies.
-
-    THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
-    WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
-    MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
-    ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
-    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
-    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
-    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-*/ -//============================================================================== - -#include -#include -#include - -#include -#include -#include -#include -#include - -namespace ripple { -namespace test { - -class ReportingETL_test : public beast::unit_test::suite -{ - // gRPC stuff - class GrpcLedgerClient : public GRPCTestClientBase - { - public: - org::xrpl::rpc::v1::GetLedgerRequest request; - org::xrpl::rpc::v1::GetLedgerResponse reply; - - explicit GrpcLedgerClient(std::string const& port) - : GRPCTestClientBase(port) - { - } - - void - GetLedger() - { - status = stub_->GetLedger(&context, request, &reply); - } - }; - void - testGetLedger() - { - testcase("GetLedger"); - using namespace test::jtx; - std::unique_ptr config = envconfig(addGrpcConfig); - std::string grpcPort = - *(*config)[SECTION_PORT_GRPC].get("port"); - Env env(*this, std::move(config)); - - env.close(); - - auto ledger = env.app().getLedgerMaster().getLedgerBySeq(3); - - BEAST_EXPECT(env.current()->info().seq == 4); - - auto grpcLedger = [&grpcPort]( - auto sequence, - bool transactions, - bool expand, - bool get_objects, - bool get_object_neighbors) { - GrpcLedgerClient grpcClient{grpcPort}; - - grpcClient.request.mutable_ledger()->set_sequence(sequence); - grpcClient.request.set_transactions(transactions); - grpcClient.request.set_expand(expand); - grpcClient.request.set_get_objects(get_objects); - grpcClient.request.set_get_object_neighbors(get_object_neighbors); - - grpcClient.GetLedger(); - return std::make_pair(grpcClient.status, grpcClient.reply); - }; - - { - auto [status, reply] = grpcLedger(3, false, false, false, false); - - BEAST_EXPECT(status.ok()); - BEAST_EXPECT(reply.validated()); - BEAST_EXPECT(!reply.has_hashes_list()); - BEAST_EXPECT(!reply.has_transactions_list()); - BEAST_EXPECT(!reply.skiplist_included()); - BEAST_EXPECT(reply.ledger_objects().objects_size() == 0); - - Serializer s; - addRaw(ledger->info(), s, true); - BEAST_EXPECT(s.slice() == makeSlice(reply.ledger_header())); - } - - Account const alice{"alice"}; - Account const bob{"bob"}; - env.fund(XRP(10000), alice); - env.fund(XRP(10000), bob); - env.close(); - - ledger = env.app().getLedgerMaster().getLedgerBySeq(4); - - std::vector hashes; - std::vector> transactions; - std::vector> metas; - for (auto& [sttx, meta] : ledger->txs) - { - hashes.push_back(sttx->getTransactionID()); - transactions.push_back(sttx); - metas.push_back(meta); - } - - Serializer s; - addRaw(ledger->info(), s, true); - - { - auto [status, reply] = grpcLedger(4, true, false, false, false); - BEAST_EXPECT(status.ok()); - BEAST_EXPECT(reply.validated()); - BEAST_EXPECT(reply.has_hashes_list()); - BEAST_EXPECT(reply.hashes_list().hashes_size() == hashes.size()); - BEAST_EXPECT( - uint256::fromVoid(reply.hashes_list().hashes(0).data()) == - hashes[0]); - BEAST_EXPECT( - uint256::fromVoid(reply.hashes_list().hashes(1).data()) == - hashes[1]); - BEAST_EXPECT( - uint256::fromVoid(reply.hashes_list().hashes(2).data()) == - hashes[2]); - BEAST_EXPECT( - uint256::fromVoid(reply.hashes_list().hashes(3).data()) == - hashes[3]); - - BEAST_EXPECT(!reply.has_transactions_list()); - BEAST_EXPECT(!reply.skiplist_included()); - BEAST_EXPECT(reply.ledger_objects().objects_size() == 0); - - BEAST_EXPECT(s.slice() == makeSlice(reply.ledger_header())); - } - - { - auto [status, reply] = grpcLedger(4, true, true, false, false); - - BEAST_EXPECT(status.ok()); - BEAST_EXPECT(reply.validated()); - BEAST_EXPECT(!reply.has_hashes_list()); - - 
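// The flag combinations exercised by these GetLedger cases: transactions=true
// alone returns only transaction hashes; adding expand=true returns the full
// transaction and metadata blobs instead; get_objects=true additionally
// returns the changed state objects and marks the skiplist as included;
// get_object_neighbors=true also fills each changed object's successor and
// predecessor keys.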
BEAST_EXPECT(reply.has_transactions_list()); - BEAST_EXPECT(reply.transactions_list().transactions_size() == 4); - - BEAST_EXPECT( - makeSlice(reply.transactions_list() - .transactions(0) - .transaction_blob()) == - transactions[0]->getSerializer().slice()); - - BEAST_EXPECT( - makeSlice(reply.transactions_list() - .transactions(0) - .metadata_blob()) == - metas[0]->getSerializer().slice()); - - BEAST_EXPECT( - makeSlice(reply.transactions_list() - .transactions(1) - .transaction_blob()) == - transactions[1]->getSerializer().slice()); - - BEAST_EXPECT( - makeSlice(reply.transactions_list() - .transactions(1) - .metadata_blob()) == - metas[1]->getSerializer().slice()); - - BEAST_EXPECT( - makeSlice(reply.transactions_list() - .transactions(2) - .transaction_blob()) == - transactions[2]->getSerializer().slice()); - - BEAST_EXPECT( - makeSlice(reply.transactions_list() - .transactions(2) - .metadata_blob()) == - metas[2]->getSerializer().slice()); - - BEAST_EXPECT( - makeSlice(reply.transactions_list() - .transactions(3) - .transaction_blob()) == - transactions[3]->getSerializer().slice()); - - BEAST_EXPECT( - makeSlice(reply.transactions_list() - .transactions(3) - .metadata_blob()) == - metas[3]->getSerializer().slice()); - - BEAST_EXPECT(!reply.skiplist_included()); - BEAST_EXPECT(reply.ledger_objects().objects_size() == 0); - - BEAST_EXPECT(s.slice() == makeSlice(reply.ledger_header())); - } - - { - auto [status, reply] = grpcLedger(4, true, true, true, false); - - BEAST_EXPECT(status.ok()); - BEAST_EXPECT(reply.validated()); - BEAST_EXPECT(!reply.has_hashes_list()); - - BEAST_EXPECT(reply.has_transactions_list()); - BEAST_EXPECT(reply.transactions_list().transactions_size() == 4); - - BEAST_EXPECT( - makeSlice(reply.transactions_list() - .transactions(0) - .transaction_blob()) == - transactions[0]->getSerializer().slice()); - - BEAST_EXPECT( - makeSlice(reply.transactions_list() - .transactions(0) - .metadata_blob()) == - metas[0]->getSerializer().slice()); - - BEAST_EXPECT( - makeSlice(reply.transactions_list() - .transactions(1) - .transaction_blob()) == - transactions[1]->getSerializer().slice()); - - BEAST_EXPECT( - makeSlice(reply.transactions_list() - .transactions(1) - .metadata_blob()) == - metas[1]->getSerializer().slice()); - - BEAST_EXPECT( - makeSlice(reply.transactions_list() - .transactions(2) - .transaction_blob()) == - transactions[2]->getSerializer().slice()); - - BEAST_EXPECT( - makeSlice(reply.transactions_list() - .transactions(2) - .metadata_blob()) == - metas[2]->getSerializer().slice()); - - BEAST_EXPECT( - makeSlice(reply.transactions_list() - .transactions(3) - .transaction_blob()) == - transactions[3]->getSerializer().slice()); - - BEAST_EXPECT( - makeSlice(reply.transactions_list() - .transactions(3) - .metadata_blob()) == - metas[3]->getSerializer().slice()); - BEAST_EXPECT(reply.skiplist_included()); - - BEAST_EXPECT(s.slice() == makeSlice(reply.ledger_header())); - - auto parent = env.app().getLedgerMaster().getLedgerBySeq(3); - - SHAMap::Delta differences; - - int maxDifferences = std::numeric_limits::max(); - - bool res = parent->stateMap().compare( - ledger->stateMap(), differences, maxDifferences); - BEAST_EXPECT(res); - - size_t idx = 0; - for (auto& [k, v] : differences) - { - BEAST_EXPECT( - k == - uint256::fromVoid( - reply.ledger_objects().objects(idx).key().data())); - if (v.second) - { - BEAST_EXPECT( - v.second->slice() == - makeSlice(reply.ledger_objects().objects(idx).data())); - } - ++idx; - } - } - { - auto [status, reply] = grpcLedger(4, 
true, true, true, true); - - BEAST_EXPECT(status.ok()); - BEAST_EXPECT(reply.validated()); - BEAST_EXPECT(!reply.has_hashes_list()); - BEAST_EXPECT(reply.object_neighbors_included()); - - BEAST_EXPECT(reply.has_transactions_list()); - BEAST_EXPECT(reply.transactions_list().transactions_size() == 4); - - BEAST_EXPECT( - makeSlice(reply.transactions_list() - .transactions(0) - .transaction_blob()) == - transactions[0]->getSerializer().slice()); - - BEAST_EXPECT( - makeSlice(reply.transactions_list() - .transactions(0) - .metadata_blob()) == - metas[0]->getSerializer().slice()); - - BEAST_EXPECT( - makeSlice(reply.transactions_list() - .transactions(1) - .transaction_blob()) == - transactions[1]->getSerializer().slice()); - - BEAST_EXPECT( - makeSlice(reply.transactions_list() - .transactions(1) - .metadata_blob()) == - metas[1]->getSerializer().slice()); - - BEAST_EXPECT( - makeSlice(reply.transactions_list() - .transactions(2) - .transaction_blob()) == - transactions[2]->getSerializer().slice()); - - BEAST_EXPECT( - makeSlice(reply.transactions_list() - .transactions(2) - .metadata_blob()) == - metas[2]->getSerializer().slice()); - - BEAST_EXPECT( - makeSlice(reply.transactions_list() - .transactions(3) - .transaction_blob()) == - transactions[3]->getSerializer().slice()); - - BEAST_EXPECT( - makeSlice(reply.transactions_list() - .transactions(3) - .metadata_blob()) == - metas[3]->getSerializer().slice()); - BEAST_EXPECT(reply.skiplist_included()); - - BEAST_EXPECT(s.slice() == makeSlice(reply.ledger_header())); - - auto parent = env.app().getLedgerMaster().getLedgerBySeq(3); - - SHAMap::Delta differences; - - int maxDifferences = std::numeric_limits::max(); - - bool res = parent->stateMap().compare( - ledger->stateMap(), differences, maxDifferences); - BEAST_EXPECT(res); - - size_t idx = 0; - - for (auto& [k, v] : differences) - { - auto obj = reply.ledger_objects().objects(idx); - BEAST_EXPECT(k == uint256::fromVoid(obj.key().data())); - if (v.second) - { - BEAST_EXPECT(v.second->slice() == makeSlice(obj.data())); - } - else - BEAST_EXPECT(obj.data().size() == 0); - - if (!(v.first && v.second)) - { - auto succ = ledger->stateMap().upper_bound(k); - auto pred = ledger->stateMap().lower_bound(k); - - if (succ != ledger->stateMap().end()) - BEAST_EXPECT( - succ->key() == - uint256::fromVoid(obj.successor().data())); - else - BEAST_EXPECT(obj.successor().size() == 0); - if (pred != ledger->stateMap().end()) - BEAST_EXPECT( - pred->key() == - uint256::fromVoid(obj.predecessor().data())); - else - BEAST_EXPECT(obj.predecessor().size() == 0); - } - ++idx; - } - } - - // Delete an account - - env(noop(alice)); - - std::uint32_t const ledgerCount{ - env.current()->seq() + 257 - env.seq(alice)}; - - for (std::uint32_t i = 0; i < ledgerCount; ++i) - env.close(); - - auto const acctDelFee{drops(env.current()->fees().increment)}; - env(acctdelete(alice, bob), fee(acctDelFee)); - env.close(); - - { - auto [status, reply] = - grpcLedger(env.closed()->seq(), true, true, true, true); - - BEAST_EXPECT(status.ok()); - BEAST_EXPECT(reply.validated()); - auto base = - env.app().getLedgerMaster().getLedgerBySeq(env.closed()->seq()); - - auto parent = env.app().getLedgerMaster().getLedgerBySeq( - env.closed()->seq() - 1); - - SHAMap::Delta differences; - - int maxDifferences = std::numeric_limits::max(); - - bool res = parent->stateMap().compare( - base->stateMap(), differences, maxDifferences); - BEAST_EXPECT(res); - - size_t idx = 0; - for (auto& [k, v] : differences) - { - auto obj = 
reply.ledger_objects().objects(idx); - BEAST_EXPECT(k == uint256::fromVoid(obj.key().data())); - if (v.second) - { - BEAST_EXPECT( - v.second->slice() == - makeSlice(reply.ledger_objects().objects(idx).data())); - } - else - BEAST_EXPECT(obj.data().size() == 0); - if (!(v.first && v.second)) - { - auto succ = base->stateMap().upper_bound(k); - auto pred = base->stateMap().lower_bound(k); - - if (succ != base->stateMap().end()) - BEAST_EXPECT( - succ->key() == - uint256::fromVoid(obj.successor().data())); - else - BEAST_EXPECT(obj.successor().size() == 0); - if (pred != base->stateMap().end()) - BEAST_EXPECT( - pred->key() == - uint256::fromVoid(obj.predecessor().data())); - else - BEAST_EXPECT(obj.predecessor().size() == 0); - } - - ++idx; - } - } - } - - // gRPC stuff - class GrpcLedgerDataClient : public GRPCTestClientBase - { - public: - org::xrpl::rpc::v1::GetLedgerDataRequest request; - org::xrpl::rpc::v1::GetLedgerDataResponse reply; - - explicit GrpcLedgerDataClient(std::string const& port) - : GRPCTestClientBase(port) - { - } - - void - GetLedgerData() - { - status = stub_->GetLedgerData(&context, request, &reply); - } - }; - void - testGetLedgerData() - { - testcase("GetLedgerData"); - using namespace test::jtx; - std::unique_ptr config = envconfig(addGrpcConfig); - std::string grpcPort = - *(*config)[SECTION_PORT_GRPC].get("port"); - Env env(*this, std::move(config)); - auto grpcLedgerData = [&grpcPort]( - auto sequence, std::string marker = "") { - GrpcLedgerDataClient grpcClient{grpcPort}; - - grpcClient.request.mutable_ledger()->set_sequence(sequence); - if (marker.size()) - { - grpcClient.request.set_marker(marker); - } - - grpcClient.GetLedgerData(); - return std::make_pair(grpcClient.status, grpcClient.reply); - }; - - Account const alice{"alice"}; - env.fund(XRP(100000), alice); - - int num_accounts = 10; - - for (auto i = 0; i < num_accounts; i++) - { - Account const bob{std::string("bob") + std::to_string(i)}; - env.fund(XRP(1000), bob); - } - env.close(); - - { - auto [status, reply] = grpcLedgerData(env.closed()->seq()); - BEAST_EXPECT(status.ok()); - - BEAST_EXPECT( - reply.ledger_objects().objects_size() == num_accounts + 4); - BEAST_EXPECT(reply.marker().size() == 0); - auto ledger = env.closed(); - size_t idx = 0; - for (auto& sle : ledger->sles) - { - BEAST_EXPECT( - sle->getSerializer().slice() == - makeSlice(reply.ledger_objects().objects(idx).data())); - ++idx; - } - } - - { - auto [status, reply] = - grpcLedgerData(env.closed()->seq(), "bad marker"); - BEAST_EXPECT(!status.ok()); - BEAST_EXPECT( - status.error_code() == grpc::StatusCode::INVALID_ARGUMENT); - } - - num_accounts = 3000; - - for (auto i = 0; i < num_accounts; i++) - { - Account const cat{std::string("cat") + std::to_string(i)}; - env.fund(XRP(1000), cat); - if (i % 100 == 0) - env.close(); - } - env.close(); - - { - auto [status, reply] = grpcLedgerData(env.closed()->seq()); - BEAST_EXPECT(status.ok()); - - int maxLimit = RPC::Tuning::pageLength(true); - BEAST_EXPECT(reply.ledger_objects().objects_size() == maxLimit); - BEAST_EXPECT(reply.marker().size() != 0); - - auto [status2, reply2] = - grpcLedgerData(env.closed()->seq(), reply.marker()); - BEAST_EXPECT(status2.ok()); - BEAST_EXPECT(reply2.marker().size() == 0); - - auto ledger = env.closed(); - size_t idx = 0; - for (auto& sle : ledger->sles) - { - auto& obj = idx < maxLimit - ? 
reply.ledger_objects().objects(idx) - : reply2.ledger_objects().objects(idx - maxLimit); - - BEAST_EXPECT( - sle->getSerializer().slice() == makeSlice(obj.data())); - ++idx; - } - BEAST_EXPECT( - idx == - reply.ledger_objects().objects_size() + - reply2.ledger_objects().objects_size()); - } - } - - // gRPC stuff - class GrpcLedgerDiffClient : public GRPCTestClientBase - { - public: - org::xrpl::rpc::v1::GetLedgerDiffRequest request; - org::xrpl::rpc::v1::GetLedgerDiffResponse reply; - - explicit GrpcLedgerDiffClient(std::string const& port) - : GRPCTestClientBase(port) - { - } - - void - GetLedgerDiff() - { - status = stub_->GetLedgerDiff(&context, request, &reply); - } - }; - - void - testGetLedgerDiff() - { - testcase("GetLedgerDiff"); - using namespace test::jtx; - std::unique_ptr config = envconfig(addGrpcConfig); - std::string grpcPort = - *(*config)[SECTION_PORT_GRPC].get("port"); - Env env(*this, std::move(config)); - - auto grpcLedgerDiff = [&grpcPort]( - auto baseSequence, auto desiredSequence) { - GrpcLedgerDiffClient grpcClient{grpcPort}; - - grpcClient.request.mutable_base_ledger()->set_sequence( - baseSequence); - grpcClient.request.mutable_desired_ledger()->set_sequence( - desiredSequence); - grpcClient.request.set_include_blobs(true); - - grpcClient.GetLedgerDiff(); - return std::make_pair(grpcClient.status, grpcClient.reply); - }; - - int num_accounts = 20; - for (auto i = 0; i < num_accounts; i++) - { - Account const cat{std::string("cat") + std::to_string(i)}; - env.fund(XRP(1000), cat); - if (i % 2 == 0) - env.close(); - } - env.close(); - - auto compareDiffs = [&](auto baseSequence, auto desiredSequence) { - auto [status, reply] = - grpcLedgerDiff(baseSequence, desiredSequence); - - BEAST_EXPECT(status.ok()); - auto desired = - env.app().getLedgerMaster().getLedgerBySeq(desiredSequence); - - auto base = - env.app().getLedgerMaster().getLedgerBySeq(baseSequence); - - SHAMap::Delta differences; - - int maxDifferences = std::numeric_limits::max(); - - bool res = base->stateMap().compare( - desired->stateMap(), differences, maxDifferences); - if (!BEAST_EXPECT(res)) - return false; - - size_t idx = 0; - for (auto& [k, v] : differences) - { - if (!BEAST_EXPECT( - k == - uint256::fromVoid( - reply.ledger_objects().objects(idx).key().data()))) - return false; - if (v.second) - { - if (!BEAST_EXPECT( - v.second->slice() == - makeSlice( - reply.ledger_objects().objects(idx).data()))) - return false; - } - - ++idx; - } - return true; - }; - - // Adjacent ledgers - BEAST_EXPECT( - compareDiffs(env.closed()->seq() - 1, env.closed()->seq())); - - // Adjacent ledgers further in the past - BEAST_EXPECT( - compareDiffs(env.closed()->seq() - 3, env.closed()->seq() - 2)); - - // Non-adjacent ledgers - BEAST_EXPECT( - compareDiffs(env.closed()->seq() - 5, env.closed()->seq() - 1)); - - // Adjacent ledgers but in reverse order - BEAST_EXPECT( - compareDiffs(env.closed()->seq(), env.closed()->seq() - 1)); - - // Non-adjacent ledgers in reverse order - BEAST_EXPECT( - compareDiffs(env.closed()->seq() - 1, env.closed()->seq() - 5)); - } - - // gRPC stuff - class GrpcLedgerEntryClient : public GRPCTestClientBase - { - public: - org::xrpl::rpc::v1::GetLedgerEntryRequest request; - org::xrpl::rpc::v1::GetLedgerEntryResponse reply; - - explicit GrpcLedgerEntryClient(std::string const& port) - : GRPCTestClientBase(port) - { - } - - void - GetLedgerEntry() - { - status = stub_->GetLedgerEntry(&context, request, &reply); - } - }; - - void - testGetLedgerEntry() - { - testcase("GetLedgerDiff"); - 
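// GetLedgerData above pages through the state map with an opaque marker: a
// response carrying a non-empty marker means more objects remain, and
// resubmitting that marker returns the next page, up to
// RPC::Tuning::pageLength(true) objects per call, until a response comes back
// with an empty marker. An unparseable marker is rejected with
// INVALID_ARGUMENT.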
using namespace test::jtx; - std::unique_ptr config = envconfig(addGrpcConfig); - std::string grpcPort = - *(*config)[SECTION_PORT_GRPC].get("port"); - Env env(*this, std::move(config)); - - auto grpcLedgerEntry = [&grpcPort](auto sequence, auto key) { - GrpcLedgerEntryClient grpcClient{grpcPort}; - - grpcClient.request.mutable_ledger()->set_sequence(sequence); - grpcClient.request.set_key(key.data(), key.size()); - - grpcClient.GetLedgerEntry(); - return std::make_pair(grpcClient.status, grpcClient.reply); - }; - - Account const alice{"alice"}; - env.fund(XRP(1000), alice); - env.close(); - - for (auto& sle : env.closed()->sles) - { - auto [status, reply] = - grpcLedgerEntry(env.closed()->seq(), sle->key()); - - BEAST_EXPECT(status.ok()); - - BEAST_EXPECT( - uint256::fromVoid(reply.ledger_object().key().data()) == - sle->key()); - BEAST_EXPECT( - makeSlice(reply.ledger_object().data()) == - sle->getSerializer().slice()); - } - } - - void - testNeedCurrentOrClosed() - { - testcase("NeedCurrentOrClosed"); - - { - org::xrpl::rpc::v1::GetLedgerRequest request; - request.mutable_ledger()->set_sequence(1); - BEAST_EXPECT(!needCurrentOrClosed(request)); - request.mutable_ledger()->set_hash(""); - BEAST_EXPECT(!needCurrentOrClosed(request)); - request.mutable_ledger()->set_shortcut( - org::xrpl::rpc::v1::LedgerSpecifier::SHORTCUT_VALIDATED); - BEAST_EXPECT(!needCurrentOrClosed(request)); - request.mutable_ledger()->set_shortcut( - org::xrpl::rpc::v1::LedgerSpecifier::SHORTCUT_UNSPECIFIED); - BEAST_EXPECT(!needCurrentOrClosed(request)); - request.mutable_ledger()->set_shortcut( - org::xrpl::rpc::v1::LedgerSpecifier::SHORTCUT_CURRENT); - BEAST_EXPECT(needCurrentOrClosed(request)); - request.mutable_ledger()->set_shortcut( - org::xrpl::rpc::v1::LedgerSpecifier::SHORTCUT_CLOSED); - BEAST_EXPECT(needCurrentOrClosed(request)); - } - - { - org::xrpl::rpc::v1::GetLedgerDataRequest request; - request.mutable_ledger()->set_sequence(1); - BEAST_EXPECT(!needCurrentOrClosed(request)); - request.mutable_ledger()->set_hash(""); - BEAST_EXPECT(!needCurrentOrClosed(request)); - request.mutable_ledger()->set_shortcut( - org::xrpl::rpc::v1::LedgerSpecifier::SHORTCUT_VALIDATED); - BEAST_EXPECT(!needCurrentOrClosed(request)); - request.mutable_ledger()->set_shortcut( - org::xrpl::rpc::v1::LedgerSpecifier::SHORTCUT_UNSPECIFIED); - BEAST_EXPECT(!needCurrentOrClosed(request)); - request.mutable_ledger()->set_shortcut( - org::xrpl::rpc::v1::LedgerSpecifier::SHORTCUT_CURRENT); - BEAST_EXPECT(needCurrentOrClosed(request)); - request.mutable_ledger()->set_shortcut( - org::xrpl::rpc::v1::LedgerSpecifier::SHORTCUT_CLOSED); - BEAST_EXPECT(needCurrentOrClosed(request)); - } - - { - org::xrpl::rpc::v1::GetLedgerEntryRequest request; - request.mutable_ledger()->set_sequence(1); - BEAST_EXPECT(!needCurrentOrClosed(request)); - request.mutable_ledger()->set_hash(""); - BEAST_EXPECT(!needCurrentOrClosed(request)); - request.mutable_ledger()->set_shortcut( - org::xrpl::rpc::v1::LedgerSpecifier::SHORTCUT_VALIDATED); - BEAST_EXPECT(!needCurrentOrClosed(request)); - request.mutable_ledger()->set_shortcut( - org::xrpl::rpc::v1::LedgerSpecifier::SHORTCUT_UNSPECIFIED); - BEAST_EXPECT(!needCurrentOrClosed(request)); - request.mutable_ledger()->set_shortcut( - org::xrpl::rpc::v1::LedgerSpecifier::SHORTCUT_CURRENT); - BEAST_EXPECT(needCurrentOrClosed(request)); - request.mutable_ledger()->set_shortcut( - org::xrpl::rpc::v1::LedgerSpecifier::SHORTCUT_CLOSED); - BEAST_EXPECT(needCurrentOrClosed(request)); - } - - { - 
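// As the three request-type blocks above show, needCurrentOrClosed is true
// only when a ledger is addressed via the SHORTCUT_CURRENT or SHORTCUT_CLOSED
// shortcut; addressing by sequence, by hash, or via the validated or
// unspecified shortcuts never requires the open or closed ledger. The block
// below repeats the check for the two-ledger diff request, where either the
// base or the desired ledger can trigger the requirement.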
org::xrpl::rpc::v1::GetLedgerDiffRequest request; - - // set desired ledger, so desired ledger does not need current or - // closed - request.mutable_base_ledger()->set_shortcut( - org::xrpl::rpc::v1::LedgerSpecifier::SHORTCUT_VALIDATED); - - request.mutable_base_ledger()->set_sequence(1); - BEAST_EXPECT(!needCurrentOrClosed(request)); - request.mutable_base_ledger()->set_hash(""); - BEAST_EXPECT(!needCurrentOrClosed(request)); - request.mutable_base_ledger()->set_shortcut( - org::xrpl::rpc::v1::LedgerSpecifier::SHORTCUT_VALIDATED); - BEAST_EXPECT(!needCurrentOrClosed(request)); - request.mutable_base_ledger()->set_shortcut( - org::xrpl::rpc::v1::LedgerSpecifier::SHORTCUT_UNSPECIFIED); - BEAST_EXPECT(!needCurrentOrClosed(request)); - request.mutable_base_ledger()->set_shortcut( - org::xrpl::rpc::v1::LedgerSpecifier::SHORTCUT_CURRENT); - BEAST_EXPECT(needCurrentOrClosed(request)); - request.mutable_base_ledger()->set_shortcut( - org::xrpl::rpc::v1::LedgerSpecifier::SHORTCUT_CLOSED); - BEAST_EXPECT(needCurrentOrClosed(request)); - - // reset base ledger, so base ledger doesn't need current or closed - request.mutable_base_ledger()->set_shortcut( - org::xrpl::rpc::v1::LedgerSpecifier::SHORTCUT_VALIDATED); - - request.mutable_desired_ledger()->set_sequence(1); - BEAST_EXPECT(!needCurrentOrClosed(request)); - request.mutable_desired_ledger()->set_hash(""); - BEAST_EXPECT(!needCurrentOrClosed(request)); - request.mutable_desired_ledger()->set_shortcut( - org::xrpl::rpc::v1::LedgerSpecifier::SHORTCUT_VALIDATED); - BEAST_EXPECT(!needCurrentOrClosed(request)); - request.mutable_desired_ledger()->set_shortcut( - org::xrpl::rpc::v1::LedgerSpecifier::SHORTCUT_UNSPECIFIED); - BEAST_EXPECT(!needCurrentOrClosed(request)); - request.mutable_desired_ledger()->set_shortcut( - org::xrpl::rpc::v1::LedgerSpecifier::SHORTCUT_CURRENT); - BEAST_EXPECT(needCurrentOrClosed(request)); - request.mutable_desired_ledger()->set_shortcut( - org::xrpl::rpc::v1::LedgerSpecifier::SHORTCUT_CLOSED); - BEAST_EXPECT(needCurrentOrClosed(request)); - - // both base and desired need current or closed - request.mutable_base_ledger()->set_shortcut( - org::xrpl::rpc::v1::LedgerSpecifier::SHORTCUT_CURRENT); - BEAST_EXPECT(needCurrentOrClosed(request)); - } - } - - void - testSecureGateway() - { - testcase("SecureGateway"); - using namespace test::jtx; - { - std::unique_ptr config = envconfig( - addGrpcConfigWithSecureGateway, getEnvLocalhostAddr()); - std::string grpcPort = - *(*config)[SECTION_PORT_GRPC].get("port"); - Env env(*this, std::move(config)); - - env.close(); - - auto ledger = env.app().getLedgerMaster().getLedgerBySeq(3); - - BEAST_EXPECT(env.current()->info().seq == 4); - - auto grpcLedger = [&grpcPort]( - auto sequence, - std::string const& clientIp, - std::string const& user) { - GrpcLedgerClient grpcClient{grpcPort}; - - grpcClient.request.mutable_ledger()->set_sequence(sequence); - grpcClient.request.set_client_ip(clientIp); - grpcClient.request.set_user(user); - - grpcClient.GetLedger(); - return std::make_pair(grpcClient.status, grpcClient.reply); - }; - - { - auto [status, reply] = - grpcLedger(env.current()->info().seq, "", ""); - BEAST_EXPECT(!reply.is_unlimited()); - BEAST_EXPECT(status.ok()); - } - { - auto [status, reply] = - grpcLedger(env.current()->info().seq, "", "ETL"); - BEAST_EXPECT(reply.is_unlimited()); - BEAST_EXPECT(status.ok()); - } - { - auto [status, reply] = - grpcLedger(env.current()->info().seq, "", "Reporting"); - BEAST_EXPECT(reply.is_unlimited()); - BEAST_EXPECT(status.ok()); - } - { - 
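// The cases below pin down the secure_gateway semantics after this change:
// is_unlimited is reported only when the request names a user, leaves
// client_ip empty, and arrives from an address configured as a secure
// gateway. Supplying client_ip, or connecting from a non-gateway address,
// keeps the caller subject to normal resource limits.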
auto [status, reply] = - grpcLedger(env.current()->info().seq, "127.0.0.1", "ETL"); - BEAST_EXPECT(!reply.is_unlimited()); - BEAST_EXPECT(status.ok()); - } - { - auto [status, reply] = - grpcLedger(env.current()->info().seq, "127.0.0.1", ""); - BEAST_EXPECT(!reply.is_unlimited()); - BEAST_EXPECT(status.ok()); - } - } - - { - std::string secureGatewayIp = "44.124.234.79"; - std::unique_ptr config = - envconfig(addGrpcConfigWithSecureGateway, secureGatewayIp); - std::string grpcPort = - *(*config)[SECTION_PORT_GRPC].get("port"); - Env env(*this, std::move(config)); - - env.close(); - - auto ledger = env.app().getLedgerMaster().getLedgerBySeq(3); - - BEAST_EXPECT(env.current()->info().seq == 4); - - auto grpcLedger = [&grpcPort]( - auto sequence, - std::string const& clientIp, - std::string const& user) { - GrpcLedgerClient grpcClient{grpcPort}; - - grpcClient.request.mutable_ledger()->set_sequence(sequence); - grpcClient.request.set_client_ip(clientIp); - grpcClient.request.set_user(user); - - grpcClient.GetLedger(); - return std::make_pair(grpcClient.status, grpcClient.reply); - }; - - { - auto [status, reply] = - grpcLedger(env.current()->info().seq, "", ""); - BEAST_EXPECT(!reply.is_unlimited()); - BEAST_EXPECT(status.ok()); - } - { - auto [status, reply] = - grpcLedger(env.current()->info().seq, "", "ETL"); - BEAST_EXPECT(!reply.is_unlimited()); - BEAST_EXPECT(status.ok()); - } - { - auto [status, reply] = grpcLedger( - env.current()->info().seq, secureGatewayIp, "ETL"); - BEAST_EXPECT(!reply.is_unlimited()); - BEAST_EXPECT(status.ok()); - } - { - auto [status, reply] = - grpcLedger(env.current()->info().seq, secureGatewayIp, ""); - BEAST_EXPECT(!reply.is_unlimited()); - BEAST_EXPECT(status.ok()); - } - } - - { - std::unique_ptr config = envconfig( - addGrpcConfigWithSecureGateway, getEnvLocalhostAddr()); - std::string grpcPort = - *(*config)[SECTION_PORT_GRPC].get("port"); - Env env(*this, std::move(config)); - - env.close(); - - auto ledger = env.app().getLedgerMaster().getLedgerBySeq(3); - - BEAST_EXPECT(env.current()->info().seq == 4); - auto grpcLedgerData = [&grpcPort]( - auto sequence, - std::string const& clientIp, - std::string const& user) { - GrpcLedgerDataClient grpcClient{grpcPort}; - - grpcClient.request.mutable_ledger()->set_sequence(sequence); - grpcClient.request.set_client_ip(clientIp); - grpcClient.request.set_user(user); - - grpcClient.GetLedgerData(); - return std::make_pair(grpcClient.status, grpcClient.reply); - }; - { - auto [status, reply] = - grpcLedgerData(env.current()->info().seq, "", ""); - BEAST_EXPECT(!reply.is_unlimited()); - BEAST_EXPECT(status.ok()); - } - { - auto [status, reply] = - grpcLedgerData(env.current()->info().seq, "", "ETL"); - BEAST_EXPECT(reply.is_unlimited()); - BEAST_EXPECT(status.ok()); - } - { - auto [status, reply] = - grpcLedgerData(env.current()->info().seq, "", "Reporting"); - BEAST_EXPECT(reply.is_unlimited()); - BEAST_EXPECT(status.ok()); - } - { - auto [status, reply] = grpcLedgerData( - env.current()->info().seq, "127.0.0.1", "ETL"); - BEAST_EXPECT(!reply.is_unlimited()); - BEAST_EXPECT(status.ok()); - } - { - auto [status, reply] = - grpcLedgerData(env.current()->info().seq, "127.0.0.1", ""); - BEAST_EXPECT(!reply.is_unlimited()); - BEAST_EXPECT(status.ok()); - } - } - { - std::string secureGatewayIp = "44.124.234.79"; - std::unique_ptr config = - envconfig(addGrpcConfigWithSecureGateway, secureGatewayIp); - std::string grpcPort = - *(*config)[SECTION_PORT_GRPC].get("port"); - Env env(*this, std::move(config)); - - 
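// In this scenario the configured secure gateway is a remote address, so a
// connection from localhost is never treated as privileged: every case below
// expects is_unlimited to be false regardless of the user field.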
env.close(); - - auto ledger = env.app().getLedgerMaster().getLedgerBySeq(3); - - BEAST_EXPECT(env.current()->info().seq == 4); - - auto grpcLedgerData = [&grpcPort]( - auto sequence, - std::string const& clientIp, - std::string const& user) { - GrpcLedgerDataClient grpcClient{grpcPort}; - - grpcClient.request.mutable_ledger()->set_sequence(sequence); - grpcClient.request.set_client_ip(clientIp); - grpcClient.request.set_user(user); - - grpcClient.GetLedgerData(); - return std::make_pair(grpcClient.status, grpcClient.reply); - }; - - { - auto [status, reply] = - grpcLedgerData(env.current()->info().seq, "", ""); - BEAST_EXPECT(!reply.is_unlimited()); - BEAST_EXPECT(status.ok()); - } - { - auto [status, reply] = - grpcLedgerData(env.current()->info().seq, "", "ETL"); - BEAST_EXPECT(!reply.is_unlimited()); - BEAST_EXPECT(status.ok()); - } - { - auto [status, reply] = grpcLedgerData( - env.current()->info().seq, secureGatewayIp, "ETL"); - BEAST_EXPECT(!reply.is_unlimited()); - BEAST_EXPECT(status.ok()); - } - { - auto [status, reply] = grpcLedgerData( - env.current()->info().seq, secureGatewayIp, ""); - BEAST_EXPECT(!reply.is_unlimited()); - BEAST_EXPECT(status.ok()); - } - } - } - -public: - void - run() override - { - testGetLedger(); - - testGetLedgerData(); - - testGetLedgerDiff(); - - testGetLedgerEntry(); - - testNeedCurrentOrClosed(); - - testSecureGateway(); - } -}; - -BEAST_DEFINE_TESTSUITE_PRIO(ReportingETL, app, ripple, 2); - -} // namespace test -} // namespace ripple diff --git a/src/xrpld/app/ledger/AcceptedLedger.cpp b/src/xrpld/app/ledger/AcceptedLedger.cpp index 37c943679da..a82323f6286 100644 --- a/src/xrpld/app/ledger/AcceptedLedger.cpp +++ b/src/xrpld/app/ledger/AcceptedLedger.cpp @@ -36,17 +36,8 @@ AcceptedLedger::AcceptedLedger( ledger, item.first, item.second)); }; - if (app.config().reporting()) - { - auto const txs = flatFetchTransactions(*ledger, app); - transactions_.reserve(txs.size()); - insertAll(txs); - } - else - { - transactions_.reserve(256); - insertAll(ledger->txs); - } + transactions_.reserve(256); + insertAll(ledger->txs); std::sort( transactions_.begin(), diff --git a/src/xrpld/app/ledger/Ledger.cpp b/src/xrpld/app/ledger/Ledger.cpp index bcd3b6d4ba7..4991b551cd1 100644 --- a/src/xrpld/app/ledger/Ledger.cpp +++ b/src/xrpld/app/ledger/Ledger.cpp @@ -29,12 +29,10 @@ #include #include #include -#include #include #include #include #include -#include #include #include #include @@ -258,11 +256,6 @@ Ledger::Ledger( if (info_.txHash.isNonZero() && !txMap_.fetchRoot(SHAMapHash{info_.txHash}, nullptr)) { - if (config.reporting()) - { - // Reporting should never have incomplete data - Throw("Missing tx map root for ledger"); - } loaded = false; JLOG(j.warn()) << "Don't have transaction root for ledger" << info_.seq; } @@ -270,11 +263,6 @@ Ledger::Ledger( if (info_.accountHash.isNonZero() && !stateMap_.fetchRoot(SHAMapHash{info_.accountHash}, nullptr)) { - if (config.reporting()) - { - // Reporting should never have incomplete data - Throw("Missing state map root for ledger"); - } loaded = false; JLOG(j.warn()) << "Don't have state data root for ledger" << info_.seq; } @@ -289,7 +277,7 @@ Ledger::Ledger( if (!loaded) { info_.hash = calculateLedgerHash(info_); - if (acquire && !config.reporting()) + if (acquire) family.missingNodeAcquireByHash(info_.hash, info_.seq); } } @@ -1146,92 +1134,4 @@ loadByHash(uint256 const& ledgerHash, Application& app, bool acquire) return {}; } -std::vector< - std::pair, std::shared_ptr>> -flatFetchTransactions(Application& app, 
std::vector& nodestoreHashes) -{ - if (!app.config().reporting()) - { - assert(false); - Throw( - "flatFetchTransactions: not running in reporting mode"); - } - - std::vector< - std::pair, std::shared_ptr>> - txns; - auto start = std::chrono::system_clock::now(); - auto nodeDb = - dynamic_cast(&(app.getNodeStore())); - if (!nodeDb) - { - assert(false); - Throw( - "Called flatFetchTransactions but database is not DatabaseNodeImp"); - } - auto objs = nodeDb->fetchBatch(nodestoreHashes); - - auto end = std::chrono::system_clock::now(); - JLOG(app.journal("Ledger").debug()) - << " Flat fetch time : " << ((end - start).count() / 1000000000.0) - << " number of transactions " << nodestoreHashes.size(); - assert(objs.size() == nodestoreHashes.size()); - for (size_t i = 0; i < objs.size(); ++i) - { - uint256& nodestoreHash = nodestoreHashes[i]; - auto& obj = objs[i]; - if (obj) - { - auto node = SHAMapTreeNode::makeFromPrefix( - makeSlice(obj->getData()), SHAMapHash{nodestoreHash}); - if (!node) - { - assert(false); - Throw( - "flatFetchTransactions : Error making SHAMap node"); - } - auto item = (static_cast(node.get()))->peekItem(); - if (!item) - { - assert(false); - Throw( - "flatFetchTransactions : Error reading SHAMap node"); - } - auto txnPlusMeta = deserializeTxPlusMeta(*item); - if (!txnPlusMeta.first || !txnPlusMeta.second) - { - assert(false); - Throw( - "flatFetchTransactions : Error deserializing SHAMap node"); - } - txns.push_back(std::move(txnPlusMeta)); - } - else - { - assert(false); - Throw( - "flatFetchTransactions : Containing SHAMap node not found"); - } - } - return txns; -} -std::vector< - std::pair, std::shared_ptr>> -flatFetchTransactions(ReadView const& ledger, Application& app) -{ - if (!app.config().reporting()) - { - assert(false); - return {}; - } - - auto const db = - dynamic_cast(&app.getRelationalDatabase()); - if (!db) - Throw("Failed to get relational database"); - - auto nodestoreHashes = db->getTxHashes(ledger.info().seq); - - return flatFetchTransactions(app, nodestoreHashes); -} } // namespace ripple diff --git a/src/xrpld/app/ledger/Ledger.h b/src/xrpld/app/ledger/Ledger.h index 1591fae1472..0eb102eb518 100644 --- a/src/xrpld/app/ledger/Ledger.h +++ b/src/xrpld/app/ledger/Ledger.h @@ -454,32 +454,6 @@ loadByHash(uint256 const& ledgerHash, Application& app, bool acquire = true); extern std::tuple, std::uint32_t, uint256> getLatestLedger(Application& app); -// *** Reporting Mode Only *** -// Fetch all of the transactions contained in ledger from the nodestore. -// The transactions are fetched directly as a batch, instead of traversing the -// transaction SHAMap. Fetching directly is significantly faster than -// traversing, as there are less database reads, and all of the reads can -// executed concurrently. This function only works in reporting mode. -// @param ledger the ledger for which to fetch the contained transactions -// @param app reference to the Application -// @return vector of (transaction, metadata) pairs -extern std::vector< - std::pair, std::shared_ptr>> -flatFetchTransactions(ReadView const& ledger, Application& app); - -// *** Reporting Mode Only *** -// For each nodestore hash, fetch the transaction. -// The transactions are fetched directly as a batch, instead of traversing the -// transaction SHAMap. Fetching directly is significantly faster than -// traversing, as there are less database reads, and all of the reads can -// executed concurrently. This function only works in reporting mode. 
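// With reporting mode gone, the batch-fetch declarations being deleted here
// have no callers: transactions are read by walking the ledger's transaction
// map, as the AcceptedLedger change above now does unconditionally. A minimal
// sketch of that surviving pattern (hypothetical helper; assumes ReadView::txs
// yields (transaction, metadata) shared_ptr pairs, as the deleted test code
// iterates it):

std::vector<
    std::pair<std::shared_ptr<STTx const>, std::shared_ptr<STObject const>>>
collectTransactions(ReadView const& ledger)
{
    std::vector<
        std::pair<std::shared_ptr<STTx const>, std::shared_ptr<STObject const>>>
        txns;
    for (auto const& [tx, meta] : ledger.txs)
        txns.emplace_back(tx, meta);
    return txns;
}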
-// @param nodestoreHashes hashes of the transactions to fetch -// @param app reference to the Application -// @return vector of (transaction, metadata) pairs -extern std::vector< - std::pair, std::shared_ptr>> -flatFetchTransactions(Application& app, std::vector& nodestoreHashes); - /** Deserialize a SHAMapItem containing a single STTx Throw: diff --git a/src/xrpld/app/ledger/LedgerMaster.h b/src/xrpld/app/ledger/LedgerMaster.h index 5149424e285..dd7f0b6a614 100644 --- a/src/xrpld/app/ledger/LedgerMaster.h +++ b/src/xrpld/app/ledger/LedgerMaster.h @@ -46,24 +46,6 @@ namespace ripple { class Peer; class Transaction; -// This error is thrown when a codepath tries to access the open or closed -// ledger while the server is running in reporting mode. Any RPCs that request -// the open or closed ledger should be forwarded to a p2p node. Usually, the -// decision to forward is made based on the required condition of the handler, -// or which ledger is specified. However, there are some codepaths which are not -// covered by the aforementioned logic (though they probably should), so this -// error is thrown in case a codepath falls through the cracks. -class ReportingShouldProxy : public std::runtime_error -{ -public: - ReportingShouldProxy() - : std::runtime_error( - "Reporting mode has no open or closed ledger. Proxy this " - "request") - { - } -}; - // Tracks the current ledger and any ledgers in the process of closing // Tracks ledger history // Tracks held transactions @@ -97,10 +79,6 @@ class LedgerMaster : public AbstractFetchPackContainer std::shared_ptr getClosedLedger() { - if (app_.config().reporting()) - { - Throw(); - } return mClosedLedger.get(); } diff --git a/src/xrpld/app/ledger/detail/LedgerMaster.cpp b/src/xrpld/app/ledger/detail/LedgerMaster.cpp index dab8f838249..d1eeabeb619 100644 --- a/src/xrpld/app/ledger/detail/LedgerMaster.cpp +++ b/src/xrpld/app/ledger/detail/LedgerMaster.cpp @@ -34,10 +34,9 @@ #include #include #include -#include +#include #include #include -#include #include #include #include @@ -274,12 +273,6 @@ LedgerMaster::getValidatedLedgerAge() { using namespace std::chrono_literals; -#ifdef RIPPLED_REPORTING - if (app_.config().reporting()) - return static_cast(&app_.getRelationalDatabase()) - ->getValidatedLedgerAge(); -#endif - std::chrono::seconds valClose{mValidLedgerSign.load()}; if (valClose == 0s) { @@ -305,12 +298,6 @@ LedgerMaster::isCaughtUp(std::string& reason) { using namespace std::chrono_literals; -#ifdef RIPPLED_REPORTING - if (app_.config().reporting()) - return static_cast(&app_.getRelationalDatabase()) - ->isCaughtUp(reason); -#endif - if (getPublishedLedgerAge() > 3min) { reason = "No recently-published ledger"; @@ -600,9 +587,6 @@ LedgerMaster::clearLedger(std::uint32_t seq) bool LedgerMaster::isValidated(ReadView const& ledger) { - if (app_.config().reporting()) - return true; // Reporting mode only supports validated ledger - if (ledger.open()) return false; @@ -676,32 +660,6 @@ LedgerMaster::getFullValidatedRange( bool LedgerMaster::getValidatedRange(std::uint32_t& minVal, std::uint32_t& maxVal) { - if (app_.config().reporting()) - { - std::string res = getCompleteLedgers(); - try - { - if (res == "empty" || res == "error" || res.empty()) - return false; - else if (size_t delim = res.find('-'); delim != std::string::npos) - { - minVal = std::stol(res.substr(0, delim)); - maxVal = std::stol(res.substr(delim + 1)); - } - else - { - minVal = maxVal = std::stol(res); - } - return true; - } - catch (std::exception const& e) - { - 
JLOG(m_journal.error()) << "LedgerMaster::getValidatedRange: " - "exception parsing complete ledgers: " - << e.what(); - return false; - } - } if (!getFullValidatedRange(minVal, maxVal)) return false; @@ -1679,25 +1637,12 @@ LedgerMaster::peekMutex() std::shared_ptr LedgerMaster::getCurrentLedger() { - if (app_.config().reporting()) - { - Throw(); - } return app_.openLedger().current(); } std::shared_ptr LedgerMaster::getValidatedLedger() { -#ifdef RIPPLED_REPORTING - if (app_.config().reporting()) - { - auto seq = app_.getRelationalDatabase().getMaxLedgerSeq(); - if (!seq) - return {}; - return getLedgerBySeq(*seq); - } -#endif return mValidLedger.get(); } @@ -1726,11 +1671,6 @@ LedgerMaster::getPublishedLedger() std::string LedgerMaster::getCompleteLedgers() { -#ifdef RIPPLED_REPORTING - if (app_.config().reporting()) - return static_cast(&app_.getRelationalDatabase()) - ->getCompleteLedgers(); -#endif std::lock_guard sl(mCompleteLock); return to_string(mCompleteLedgers); } diff --git a/src/xrpld/app/ledger/detail/LedgerToJson.cpp b/src/xrpld/app/ledger/detail/LedgerToJson.cpp index 11f42cd66ec..3f6869df1d8 100644 --- a/src/xrpld/app/ledger/detail/LedgerToJson.cpp +++ b/src/xrpld/app/ledger/detail/LedgerToJson.cpp @@ -22,7 +22,6 @@ #include #include #include -#include #include #include #include @@ -245,14 +244,7 @@ fillJsonTx(Object& json, LedgerFill const& fill) } }; - if (fill.context && fill.context->app.config().reporting()) - { - appendAll(flatFetchTransactions(fill.ledger, fill.context->app)); - } - else - { - appendAll(fill.ledger.txs); - } + appendAll(fill.ledger.txs); } catch (std::exception const& ex) { diff --git a/src/xrpld/app/main/Application.cpp b/src/xrpld/app/main/Application.cpp index f3308a091dc..d234f539909 100644 --- a/src/xrpld/app/main/Application.cpp +++ b/src/xrpld/app/main/Application.cpp @@ -45,9 +45,8 @@ #include #include #include +#include #include -#include -#include #include #include #include @@ -236,7 +235,6 @@ class ApplicationImp : public Application, public BasicApp io_latency_sampler m_io_latency_sampler; std::unique_ptr grpcServer_; - std::unique_ptr reportingETL_; //-------------------------------------------------------------------------- @@ -296,8 +294,7 @@ class ApplicationImp : public Application, public BasicApp , m_jobQueue(std::make_unique( [](std::unique_ptr const& config) { - if (config->standalone() && !config->reporting() && - !config->FORCE_MULTI_THREAD) + if (config->standalone() && !config->FORCE_MULTI_THREAD) return 1; if (config->WORKERS) @@ -475,9 +472,6 @@ class ApplicationImp : public Application, public BasicApp std::chrono::milliseconds(100), get_io_service()) , grpcServer_(std::make_unique(*this)) - , reportingETL_( - config_->reporting() ? 
std::make_unique(*this) - : nullptr) { initAccountIdCache(config_->getValueFor(SizedItem::accountIdCacheSize)); @@ -786,16 +780,12 @@ class ApplicationImp : public Application, public BasicApp OpenLedger& openLedger() override { - if (config_->reporting()) - Throw(); return *openLedger_; } OpenLedger const& openLedger() const override { - if (config_->reporting()) - Throw(); return *openLedger_; } @@ -827,13 +817,6 @@ class ApplicationImp : public Application, public BasicApp return *mWalletDB; } - ReportingETL& - getReportingETL() override - { - assert(reportingETL_.get() != nullptr); - return *reportingETL_; - } - bool serverOkay(std::string& reason) override; @@ -1129,11 +1112,6 @@ class ApplicationImp : public Application, public BasicApp << "; size after: " << cachedSLEs_.size(); } -#ifdef RIPPLED_REPORTING - if (auto pg = dynamic_cast(&*mRelationalDatabase)) - pg->sweep(); -#endif - // Set timer to do another sweep later. setSweepTimer(); } @@ -1275,53 +1253,50 @@ ApplicationImp::setup(boost::program_options::variables_map const& cmdline) auto const startUp = config_->START_UP; JLOG(m_journal.debug()) << "startUp: " << startUp; - if (!config_->reporting()) + if (startUp == Config::FRESH) { - if (startUp == Config::FRESH) - { - JLOG(m_journal.info()) << "Starting new Ledger"; + JLOG(m_journal.info()) << "Starting new Ledger"; - startGenesisLedger(); - } - else if ( - startUp == Config::LOAD || startUp == Config::LOAD_FILE || - startUp == Config::REPLAY) - { - JLOG(m_journal.info()) << "Loading specified Ledger"; + startGenesisLedger(); + } + else if ( + startUp == Config::LOAD || startUp == Config::LOAD_FILE || + startUp == Config::REPLAY) + { + JLOG(m_journal.info()) << "Loading specified Ledger"; - if (!loadOldLedger( - config_->START_LEDGER, - startUp == Config::REPLAY, - startUp == Config::LOAD_FILE, - config_->TRAP_TX_HASH)) + if (!loadOldLedger( + config_->START_LEDGER, + startUp == Config::REPLAY, + startUp == Config::LOAD_FILE, + config_->TRAP_TX_HASH)) + { + JLOG(m_journal.error()) + << "The specified ledger could not be loaded."; + if (config_->FAST_LOAD) { - JLOG(m_journal.error()) - << "The specified ledger could not be loaded."; - if (config_->FAST_LOAD) - { - // Fall back to syncing from the network, such as - // when there's no existing data. - startGenesisLedger(); - } - else - { - return false; - } + // Fall back to syncing from the network, such as + // when there's no existing data. + startGenesisLedger(); + } + else + { + return false; } } - else if (startUp == Config::NETWORK) - { - // This should probably become the default once we have a stable - // network. - if (!config_->standalone()) - m_networkOPs->setNeedNetworkLedger(); + } + else if (startUp == Config::NETWORK) + { + // This should probably become the default once we have a stable + // network. 
+ if (!config_->standalone()) + m_networkOPs->setNeedNetworkLedger(); - startGenesisLedger(); - } - else - { - startGenesisLedger(); - } + startGenesisLedger(); + } + else + { + startGenesisLedger(); } if (auto const& forcedRange = config().FORCED_LEDGER_RANGE_PRESENT) @@ -1330,8 +1305,7 @@ ApplicationImp::setup(boost::program_options::variables_map const& cmdline) forcedRange->first, forcedRange->second); } - if (!config().reporting()) - m_orderBookDB.setup(getLedgerMaster().getCurrentLedger()); + m_orderBookDB.setup(getLedgerMaster().getCurrentLedger()); nodeIdentity_ = getNodeIdentity(*this, cmdline); @@ -1341,60 +1315,55 @@ ApplicationImp::setup(boost::program_options::variables_map const& cmdline) return false; } - if (!config().reporting()) { - { - if (validatorKeys_.configInvalid()) - return false; - - if (!validatorManifests_->load( - getWalletDB(), - "ValidatorManifests", - validatorKeys_.manifest, - config() - .section(SECTION_VALIDATOR_KEY_REVOCATION) - .values())) - { - JLOG(m_journal.fatal()) - << "Invalid configured validator manifest."; - return false; - } + if (validatorKeys_.configInvalid()) + return false; - publisherManifests_->load(getWalletDB(), "PublisherManifests"); + if (!validatorManifests_->load( + getWalletDB(), + "ValidatorManifests", + validatorKeys_.manifest, + config().section(SECTION_VALIDATOR_KEY_REVOCATION).values())) + { + JLOG(m_journal.fatal()) << "Invalid configured validator manifest."; + return false; + } - // It is possible to have a valid ValidatorKeys object without - // setting the signingKey or masterKey. This occurs if the - // configuration file does not have either - // SECTION_VALIDATOR_TOKEN or SECTION_VALIDATION_SEED section. + publisherManifests_->load(getWalletDB(), "PublisherManifests"); - // masterKey for the configuration-file specified validator keys - std::optional localSigningKey; - if (validatorKeys_.keys) - localSigningKey = validatorKeys_.keys->publicKey; + // It is possible to have a valid ValidatorKeys object without + // setting the signingKey or masterKey. This occurs if the + // configuration file does not have either + // SECTION_VALIDATOR_TOKEN or SECTION_VALIDATION_SEED section. - // Setup trusted validators - if (!validators_->load( - localSigningKey, - config().section(SECTION_VALIDATORS).values(), - config().section(SECTION_VALIDATOR_LIST_KEYS).values())) - { - JLOG(m_journal.fatal()) - << "Invalid entry in validator configuration."; - return false; - } - } + // masterKey for the configuration-file specified validator keys + std::optional localSigningKey; + if (validatorKeys_.keys) + localSigningKey = validatorKeys_.keys->publicKey; - if (!validatorSites_->load( - config().section(SECTION_VALIDATOR_LIST_SITES).values())) + // Setup trusted validators + if (!validators_->load( + localSigningKey, + config().section(SECTION_VALIDATORS).values(), + config().section(SECTION_VALIDATOR_LIST_KEYS).values())) { JLOG(m_journal.fatal()) - << "Invalid entry in [" << SECTION_VALIDATOR_LIST_SITES << "]"; + << "Invalid entry in validator configuration."; return false; } + } - // Tell the AmendmentTable who the trusted validators are. - m_amendmentTable->trustChanged(validators_->getQuorumKeys().second); + if (!validatorSites_->load( + config().section(SECTION_VALIDATOR_LIST_SITES).values())) + { + JLOG(m_journal.fatal()) + << "Invalid entry in [" << SECTION_VALIDATOR_LIST_SITES << "]"; + return false; } + + // Tell the AmendmentTable who the trusted validators are. 
+ m_amendmentTable->trustChanged(validators_->getQuorumKeys().second); + //---------------------------------------------------------------------- // // Server @@ -1406,23 +1375,19 @@ ApplicationImp::setup(boost::program_options::variables_map const& cmdline) // move the instantiation inside a conditional: // // if (!config_.standalone()) - if (!config_->reporting()) - { - overlay_ = make_Overlay( - *this, - setup_Overlay(*config_), - *serverHandler_, - *m_resourceManager, - *m_resolver, - get_io_service(), - *config_, - m_collectorManager->collector()); - add(*overlay_); // add to PropertyStream - } + overlay_ = make_Overlay( + *this, + setup_Overlay(*config_), + *serverHandler_, + *m_resourceManager, + *m_resolver, + get_io_service(), + *config_, + m_collectorManager->collector()); + add(*overlay_); // add to PropertyStream // start first consensus round - if (!config_->reporting() && - !m_networkOPs->beginConsensus( + if (!m_networkOPs->beginConsensus( m_ledgerMaster->getClosedLedger()->info().hash)) { JLOG(m_journal.fatal()) << "Unable to start consensus"; @@ -1536,9 +1501,6 @@ ApplicationImp::setup(boost::program_options::variables_map const& cmdline) validatorSites_->start(); - if (reportingETL_) - reportingETL_->start(); - return true; } @@ -1654,10 +1616,6 @@ ApplicationImp::run() m_inboundTransactions->stop(); m_inboundLedgers->stop(); ledgerCleaner_->stop(); - if (reportingETL_) - reportingETL_->stop(); - if (auto pg = dynamic_cast(&*mRelationalDatabase)) - pg->stop(); m_nodeStore->stop(); perfLog_->stop(); diff --git a/src/xrpld/app/main/Application.h b/src/xrpld/app/main/Application.h index d4871317e73..8f2dd606ded 100644 --- a/src/xrpld/app/main/Application.h +++ b/src/xrpld/app/main/Application.h @@ -100,8 +100,6 @@ class RelationalDatabase; class DatabaseCon; class SHAMapStore; -class ReportingETL; - using NodeCache = TaggedCache; template @@ -253,9 +251,6 @@ class Application : public beast::PropertyStream::Source virtual std::chrono::milliseconds getIOLatency() = 0; - virtual ReportingETL& - getReportingETL() = 0; - virtual bool serverOkay(std::string& reason) = 0; diff --git a/src/xrpld/app/main/GRPCServer.cpp b/src/xrpld/app/main/GRPCServer.cpp index 5d5a79db393..89c3d813caa 100644 --- a/src/xrpld/app/main/GRPCServer.cpp +++ b/src/xrpld/app/main/GRPCServer.cpp @@ -18,7 +18,6 @@ //============================================================================== #include -#include #include #include @@ -187,11 +186,6 @@ GRPCServerImpl::CallData::process( InfoSub::pointer(), apiVersion}, request_}; - if (shouldForwardToP2p(context, requiredCondition_)) - { - forwardToP2p(context); - return; - } // Make sure we can currently handle the rpc error_code_i conditionMetRes = @@ -207,18 +201,9 @@ GRPCServerImpl::CallData::process( } else { - try - { - std::pair result = - handler_(context); - setIsUnlimited(result.first, isUnlimited); - responder_.Finish(result.first, result.second, this); - } - catch (ReportingShouldProxy&) - { - forwardToP2p(context); - return; - } + std::pair result = handler_(context); + setIsUnlimited(result.first, isUnlimited); + responder_.Finish(result.first, result.second, this); } } } @@ -229,46 +214,6 @@ GRPCServerImpl::CallData::process( } } -template -void -GRPCServerImpl::CallData::forwardToP2p( - RPC::GRPCContext& context) -{ - if (auto descriptor = - Request::GetDescriptor()->FindFieldByName("client_ip")) - { - Request::GetReflection()->SetString(&request_, descriptor, ctx_.peer()); - JLOG(app_.journal("gRPCServer").debug()) - << "Set client_ip to " << 
ctx_.peer(); - } - else - { - assert(false); - Throw( - "Attempting to forward but no client_ip field in " - "protobuf message"); - } - auto stub = getP2pForwardingStub(context); - if (stub) - { - grpc::ClientContext clientContext; - Response response; - auto status = forward_(stub.get(), &clientContext, request_, &response); - responder_.Finish(response, status, this); - JLOG(app_.journal("gRPCServer").debug()) << "Forwarded request to tx"; - } - else - { - JLOG(app_.journal("gRPCServer").error()) - << "Failed to forward request to tx"; - grpc::Status status{ - grpc::StatusCode::INTERNAL, - "Attempted to act as proxy but failed " - "to create forwarding stub"}; - responder_.FinishWithError(status, this); - } -} - template bool GRPCServerImpl::CallData::isFinished() @@ -289,29 +234,10 @@ GRPCServerImpl::CallData::getRole(bool isUnlimited) { if (isUnlimited) return Role::IDENTIFIED; - else if (wasForwarded()) - return Role::PROXY; else return Role::USER; } -template -bool -GRPCServerImpl::CallData::wasForwarded() -{ - if (auto descriptor = - Request::GetDescriptor()->FindFieldByName("client_ip")) - { - std::string clientIp = - Request::GetReflection()->GetString(request_, descriptor); - if (!clientIp.empty()) - { - return true; - } - } - return false; -} - template std::optional GRPCServerImpl::CallData::getUser() @@ -338,35 +264,6 @@ GRPCServerImpl::CallData::getClientIpAddress() return {}; } -template -std::optional -GRPCServerImpl::CallData::getProxiedClientIpAddress() -{ - auto endpoint = getProxiedClientEndpoint(); - if (endpoint) - return endpoint->address(); - return {}; -} - -template -std::optional -GRPCServerImpl::CallData::getProxiedClientEndpoint() -{ - auto descriptor = Request::GetDescriptor()->FindFieldByName("client_ip"); - if (descriptor) - { - std::string clientIp = - Request::GetReflection()->GetString(request_, descriptor); - if (!clientIp.empty()) - { - JLOG(app_.journal("gRPCServer").debug()) - << "Got client_ip from request : " << clientIp; - return getEndpoint(clientIp); - } - } - return {}; -} - template std::optional GRPCServerImpl::CallData::getClientEndpoint() @@ -381,8 +278,7 @@ GRPCServerImpl::CallData::clientIsUnlimited() if (!getUser()) return false; auto clientIp = getClientIpAddress(); - auto proxiedIp = getProxiedClientIpAddress(); - if (clientIp && !proxiedIp) + if (clientIp) { for (auto& ip : secureGatewayIPs_) { @@ -414,11 +310,7 @@ Resource::Consumer GRPCServerImpl::CallData::getUsage() { auto endpoint = getClientEndpoint(); - auto proxiedEndpoint = getProxiedClientEndpoint(); - if (proxiedEndpoint) - return app_.getResourceManager().newInboundEndpoint( - beast::IP::from_asio(proxiedEndpoint.value())); - else if (endpoint) + if (endpoint) return app_.getResourceManager().newInboundEndpoint( beast::IP::from_asio(endpoint.value())); Throw("Failed to get client endpoint"); diff --git a/src/xrpld/app/main/Main.cpp b/src/xrpld/app/main/Main.cpp index 799911f63dd..154ac48763b 100644 --- a/src/xrpld/app/main/Main.cpp +++ b/src/xrpld/app/main/Main.cpp @@ -376,7 +376,6 @@ run(int argc, char** argv) "quorum", po::value(), "Override the minimum validation quorum.")( - "reportingReadOnly", "Run in read-only reporting mode")( "silent", "No output to the console after startup.")( "standalone,a", "Run with no peers.")("verbose,v", "Verbose logging.") @@ -401,9 +400,6 @@ run(int argc, char** argv) po::value(), "Trap a specific transaction during replay.")( "start", "Start from a fresh Ledger.")( - "startReporting", - po::value(), - "Start reporting from a fresh 
Ledger.")( "vacuum", "VACUUM the transaction db.")( "valid", "Consider the initial ledger a valid network ledger."); @@ -659,17 +655,6 @@ run(int argc, char** argv) config->START_UP = Config::FRESH; } - if (vm.count("startReporting")) - { - config->START_UP = Config::FRESH; - config->START_LEDGER = vm["startReporting"].as(); - } - - if (vm.count("reportingReadOnly")) - { - config->setReportingReadOnly(true); - } - if (vm.count("import")) config->doImport = true; diff --git a/src/xrpld/app/misc/NetworkOPs.cpp b/src/xrpld/app/misc/NetworkOPs.cpp index 141de7e6b1b..7868807c52a 100644 --- a/src/xrpld/app/misc/NetworkOPs.cpp +++ b/src/xrpld/app/misc/NetworkOPs.cpp @@ -38,9 +38,7 @@ #include #include #include -#include #include -#include #include #include #include @@ -456,15 +454,6 @@ class NetworkOPsImp final : public NetworkOPs void pubValidation(std::shared_ptr const& val) override; - void - forwardValidation(Json::Value const& jvObj) override; - void - forwardManifest(Json::Value const& jvObj) override; - void - forwardProposedTransaction(Json::Value const& jvObj) override; - void - forwardProposedAccountTransaction(Json::Value const& jvObj) override; - //-------------------------------------------------------------------------- // // InfoSub::Source. @@ -2490,8 +2479,7 @@ NetworkOPsImp::getServerInfo(bool human, bool admin, bool counters) if (fp != 0) info[jss::fetch_pack] = Json::UInt(fp); - if (!app_.config().reporting()) - info[jss::peers] = Json::UInt(app_.overlay().size()); + info[jss::peers] = Json::UInt(app_.overlay().size()); Json::Value lastClose = Json::objectValue; lastClose[jss::proposers] = Json::UInt(mConsensus.prevProposers()); @@ -2514,85 +2502,80 @@ NetworkOPsImp::getServerInfo(bool human, bool admin, bool counters) if (admin) info[jss::load] = m_job_queue.getJson(); - if (!app_.config().reporting()) + if (auto const netid = app_.overlay().networkID()) + info[jss::network_id] = static_cast(*netid); + + auto const escalationMetrics = + app_.getTxQ().getMetrics(*app_.openLedger().current()); + + auto const loadFactorServer = app_.getFeeTrack().getLoadFactor(); + auto const loadBaseServer = app_.getFeeTrack().getLoadBase(); + /* Scale the escalated fee level to unitless "load factor". + In practice, this just strips the units, but it will continue + to work correctly if either base value ever changes. */ + auto const loadFactorFeeEscalation = + mulDiv( + escalationMetrics.openLedgerFeeLevel, + loadBaseServer, + escalationMetrics.referenceFeeLevel) + .value_or(ripple::muldiv_max); + + auto const loadFactor = std::max( + safe_cast(loadFactorServer), loadFactorFeeEscalation); + + if (!human) + { + info[jss::load_base] = loadBaseServer; + info[jss::load_factor] = trunc32(loadFactor); + info[jss::load_factor_server] = loadFactorServer; + + /* Json::Value doesn't support uint64, so clamp to max + uint32 value. This is mostly theoretical, since there + probably isn't enough extant XRP to drive the factor + that high. 
+ */ + info[jss::load_factor_fee_escalation] = + escalationMetrics.openLedgerFeeLevel.jsonClipped(); + info[jss::load_factor_fee_queue] = + escalationMetrics.minProcessingFeeLevel.jsonClipped(); + info[jss::load_factor_fee_reference] = + escalationMetrics.referenceFeeLevel.jsonClipped(); + } + else { - if (auto const netid = app_.overlay().networkID()) - info[jss::network_id] = static_cast(*netid); - - auto const escalationMetrics = - app_.getTxQ().getMetrics(*app_.openLedger().current()); - - auto const loadFactorServer = app_.getFeeTrack().getLoadFactor(); - auto const loadBaseServer = app_.getFeeTrack().getLoadBase(); - /* Scale the escalated fee level to unitless "load factor". - In practice, this just strips the units, but it will continue - to work correctly if either base value ever changes. */ - auto const loadFactorFeeEscalation = - mulDiv( - escalationMetrics.openLedgerFeeLevel, - loadBaseServer, - escalationMetrics.referenceFeeLevel) - .value_or(ripple::muldiv_max); - - auto const loadFactor = std::max( - safe_cast(loadFactorServer), - loadFactorFeeEscalation); + info[jss::load_factor] = + static_cast(loadFactor) / loadBaseServer; - if (!human) + if (loadFactorServer != loadFactor) + info[jss::load_factor_server] = + static_cast(loadFactorServer) / loadBaseServer; + + if (admin) { - info[jss::load_base] = loadBaseServer; - info[jss::load_factor] = trunc32(loadFactor); - info[jss::load_factor_server] = loadFactorServer; - - /* Json::Value doesn't support uint64, so clamp to max - uint32 value. This is mostly theoretical, since there - probably isn't enough extant XRP to drive the factor - that high. - */ + std::uint32_t fee = app_.getFeeTrack().getLocalFee(); + if (fee != loadBaseServer) + info[jss::load_factor_local] = + static_cast(fee) / loadBaseServer; + fee = app_.getFeeTrack().getRemoteFee(); + if (fee != loadBaseServer) + info[jss::load_factor_net] = + static_cast(fee) / loadBaseServer; + fee = app_.getFeeTrack().getClusterFee(); + if (fee != loadBaseServer) + info[jss::load_factor_cluster] = + static_cast(fee) / loadBaseServer; + } + if (escalationMetrics.openLedgerFeeLevel != + escalationMetrics.referenceFeeLevel && + (admin || loadFactorFeeEscalation != loadFactor)) info[jss::load_factor_fee_escalation] = - escalationMetrics.openLedgerFeeLevel.jsonClipped(); + escalationMetrics.openLedgerFeeLevel.decimalFromReference( + escalationMetrics.referenceFeeLevel); + if (escalationMetrics.minProcessingFeeLevel != + escalationMetrics.referenceFeeLevel) info[jss::load_factor_fee_queue] = - escalationMetrics.minProcessingFeeLevel.jsonClipped(); - info[jss::load_factor_fee_reference] = - escalationMetrics.referenceFeeLevel.jsonClipped(); - } - else - { - info[jss::load_factor] = - static_cast(loadFactor) / loadBaseServer; - - if (loadFactorServer != loadFactor) - info[jss::load_factor_server] = - static_cast(loadFactorServer) / loadBaseServer; - - if (admin) - { - std::uint32_t fee = app_.getFeeTrack().getLocalFee(); - if (fee != loadBaseServer) - info[jss::load_factor_local] = - static_cast(fee) / loadBaseServer; - fee = app_.getFeeTrack().getRemoteFee(); - if (fee != loadBaseServer) - info[jss::load_factor_net] = - static_cast(fee) / loadBaseServer; - fee = app_.getFeeTrack().getClusterFee(); - if (fee != loadBaseServer) - info[jss::load_factor_cluster] = - static_cast(fee) / loadBaseServer; - } - if (escalationMetrics.openLedgerFeeLevel != - escalationMetrics.referenceFeeLevel && - (admin || loadFactorFeeEscalation != loadFactor)) - info[jss::load_factor_fee_escalation] = - 
escalationMetrics.openLedgerFeeLevel.decimalFromReference( - escalationMetrics.referenceFeeLevel); - if (escalationMetrics.minProcessingFeeLevel != - escalationMetrics.referenceFeeLevel) - info[jss::load_factor_fee_queue] = - escalationMetrics.minProcessingFeeLevel - .decimalFromReference( - escalationMetrics.referenceFeeLevel); - } + escalationMetrics.minProcessingFeeLevel.decimalFromReference( + escalationMetrics.referenceFeeLevel); } bool valid = false; @@ -2600,7 +2583,7 @@ NetworkOPsImp::getServerInfo(bool human, bool admin, bool counters) if (lpClosed) valid = true; - else if (!app_.config().reporting()) + else lpClosed = m_ledgerMaster.getClosedLedger(); if (lpClosed) @@ -2631,11 +2614,6 @@ NetworkOPsImp::getServerInfo(bool human, bool admin, bool counters) l[jss::close_time_offset] = static_cast(closeOffset.count()); -#if RIPPLED_REPORTING - std::int64_t const dbAge = - std::max(m_ledgerMaster.getValidatedLedgerAge().count(), 0L); - l[jss::age] = Json::UInt(dbAge); -#else constexpr std::chrono::seconds highAgeThreshold{1000000}; if (m_ledgerMaster.haveValidated()) { @@ -2655,7 +2633,6 @@ NetworkOPsImp::getServerInfo(bool human, bool admin, bool counters) Json::UInt(age < highAgeThreshold ? age.count() : 0); } } -#endif } if (valid) @@ -2672,19 +2649,12 @@ NetworkOPsImp::getServerInfo(bool human, bool admin, bool counters) accounting_.json(info); info[jss::uptime] = UptimeClock::now().time_since_epoch().count(); - if (!app_.config().reporting()) - { - info[jss::jq_trans_overflow] = - std::to_string(app_.overlay().getJqTransOverflow()); - info[jss::peer_disconnects] = - std::to_string(app_.overlay().getPeerDisconnect()); - info[jss::peer_disconnects_resources] = - std::to_string(app_.overlay().getPeerDisconnectCharges()); - } - else - { - info["reporting"] = app_.getReportingETL().getInfo(); - } + info[jss::jq_trans_overflow] = + std::to_string(app_.overlay().getJqTransOverflow()); + info[jss::peer_disconnects] = + std::to_string(app_.overlay().getPeerDisconnect()); + info[jss::peer_disconnects_resources] = + std::to_string(app_.overlay().getPeerDisconnectCharges()); // This array must be sorted in increasing order. 
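    // (Presumably so the std::set_intersection against each configured
    // port's protocol list can rely on sorted input ranges; an unsorted
    // array would silently misbehave there.)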
static constexpr std::array protocols{ @@ -2780,77 +2750,6 @@ NetworkOPsImp::pubProposedTransaction( pubProposedAccountTransaction(ledger, transaction, result); } -void -NetworkOPsImp::forwardProposedTransaction(Json::Value const& jvObj) -{ - // reporting does not forward validated transactions - // validated transactions will be published to the proper streams when the - // etl process writes a validated ledger - if (jvObj[jss::validated].asBool()) - return; - { - std::lock_guard sl(mSubLock); - - auto it = mStreamMaps[sRTTransactions].begin(); - while (it != mStreamMaps[sRTTransactions].end()) - { - InfoSub::pointer p = it->second.lock(); - - if (p) - { - p->send(jvObj, true); - ++it; - } - else - { - it = mStreamMaps[sRTTransactions].erase(it); - } - } - } - - forwardProposedAccountTransaction(jvObj); -} - -void -NetworkOPsImp::forwardValidation(Json::Value const& jvObj) -{ - std::lock_guard sl(mSubLock); - - for (auto i = mStreamMaps[sValidations].begin(); - i != mStreamMaps[sValidations].end();) - { - if (auto p = i->second.lock()) - { - p->send(jvObj, true); - ++i; - } - else - { - i = mStreamMaps[sValidations].erase(i); - } - } -} - -void -NetworkOPsImp::forwardManifest(Json::Value const& jvObj) -{ - std::lock_guard sl(mSubLock); - - for (auto i = mStreamMaps[sManifests].begin(); - i != mStreamMaps[sManifests].end();) - { - if (auto p = i->second.lock()) - { - p->send(jvObj, true); - ++i; - } - else - { - i = mStreamMaps[sManifests].erase(i); - } - } -} - static void getAccounts(Json::Value const& jvObj, std::vector& accounts) { @@ -2869,74 +2768,6 @@ getAccounts(Json::Value const& jvObj, std::vector& accounts) } } -void -NetworkOPsImp::forwardProposedAccountTransaction(Json::Value const& jvObj) -{ - hash_set notify; - int iProposed = 0; - // check if there are any subscribers before attempting to parse the JSON - { - std::lock_guard sl(mSubLock); - - if (mSubRTAccount.empty()) - return; - } - - // parse the JSON outside of the lock - std::vector accounts; - if (jvObj.isMember(jss::transaction)) - { - try - { - getAccounts(jvObj[jss::transaction], accounts); - } - catch (...) 
- { - JLOG(m_journal.debug()) - << __func__ << " : " - << "error parsing json for accounts affected"; - return; - } - } - { - std::lock_guard sl(mSubLock); - - if (!mSubRTAccount.empty()) - { - for (auto const& affectedAccount : accounts) - { - auto simiIt = mSubRTAccount.find(affectedAccount); - if (simiIt != mSubRTAccount.end()) - { - auto it = simiIt->second.begin(); - - while (it != simiIt->second.end()) - { - InfoSub::pointer p = it->second.lock(); - - if (p) - { - notify.insert(p); - ++it; - ++iProposed; - } - else - it = simiIt->second.erase(it); - } - } - } - } - } - JLOG(m_journal.trace()) << "forwardProposedAccountTransaction:" - << " iProposed=" << iProposed; - - if (!notify.empty()) - { - for (InfoSub::ref isrListener : notify) - isrListener->send(jvObj, true); - } -} - void NetworkOPsImp::pubLedger(std::shared_ptr const& lpAccepted) { @@ -3053,8 +2884,6 @@ NetworkOPsImp::pubLedger(std::shared_ptr const& lpAccepted) void NetworkOPsImp::reportFeeChange() { - if (app_.config().reporting()) - return; ServerFeeSummary f{ app_.openLedger().current()->fees().base, app_.getTxQ().getMetrics(*app_.openLedger().current()), @@ -3536,30 +3365,8 @@ NetworkOPsImp::unsubAccountInternal( void NetworkOPsImp::addAccountHistoryJob(SubAccountHistoryInfoWeak subInfo) { - enum DatabaseType { Postgres, Sqlite, None }; + enum DatabaseType { Sqlite, None }; static const auto databaseType = [&]() -> DatabaseType { -#ifdef RIPPLED_REPORTING - if (app_.config().reporting()) - { - // Use a dynamic_cast to return DatabaseType::None - // on failure. - if (dynamic_cast(&app_.getRelationalDatabase())) - { - return DatabaseType::Postgres; - } - return DatabaseType::None; - } - else - { - // Use a dynamic_cast to return DatabaseType::None - // on failure. - if (dynamic_cast(&app_.getRelationalDatabase())) - { - return DatabaseType::Sqlite; - } - return DatabaseType::None; - } -#else // Use a dynamic_cast to return DatabaseType::None // on failure. 
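        // With reporting mode removed, SQLiteDatabase is the only concrete
        // RelationalDatabase implementation left in the tree.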
if (dynamic_cast(&app_.getRelationalDatabase())) @@ -3567,7 +3374,6 @@ NetworkOPsImp::addAccountHistoryJob(SubAccountHistoryInfoWeak subInfo) return DatabaseType::Sqlite; } return DatabaseType::None; -#endif }(); if (databaseType == DatabaseType::None) @@ -3670,40 +3476,6 @@ NetworkOPsImp::addAccountHistoryJob(SubAccountHistoryInfoWeak subInfo) std::optional>> { switch (dbType) { - case Postgres: { - auto db = static_cast( - &app_.getRelationalDatabase()); - RelationalDatabase::AccountTxArgs args; - args.account = accountId; - LedgerRange range{minLedger, maxLedger}; - args.ledger = range; - args.marker = marker; - auto [txResult, status] = db->getAccountTx(args); - if (status != rpcSUCCESS) - { - JLOG(m_journal.debug()) - << "AccountHistory job for account " - << toBase58(accountId) - << " getAccountTx failed"; - return {}; - } - - if (auto txns = - std::get_if( - &txResult.transactions); - txns) - { - return std::make_pair(*txns, txResult.marker); - } - else - { - JLOG(m_journal.debug()) - << "AccountHistory job for account " - << toBase58(accountId) - << " getAccountTx wrong data"; - return {}; - } - } case Sqlite: { auto db = static_cast( &app_.getRelationalDatabase()); diff --git a/src/xrpld/app/misc/NetworkOPs.h b/src/xrpld/app/misc/NetworkOPs.h index d5f43a42972..166b9e9e11f 100644 --- a/src/xrpld/app/misc/NetworkOPs.h +++ b/src/xrpld/app/misc/NetworkOPs.h @@ -261,15 +261,6 @@ class NetworkOPs : public InfoSub::Source virtual void pubValidation(std::shared_ptr const& val) = 0; - virtual void - forwardValidation(Json::Value const& jvObj) = 0; - virtual void - forwardManifest(Json::Value const& jvObj) = 0; - virtual void - forwardProposedTransaction(Json::Value const& jvObj) = 0; - virtual void - forwardProposedAccountTransaction(Json::Value const& jvObj) = 0; - virtual void stateAccounting(Json::Value& obj) = 0; }; diff --git a/src/xrpld/app/misc/SHAMapStoreImp.cpp b/src/xrpld/app/misc/SHAMapStoreImp.cpp index 9344463295b..1ce862b095f 100644 --- a/src/xrpld/app/misc/SHAMapStoreImp.cpp +++ b/src/xrpld/app/misc/SHAMapStoreImp.cpp @@ -24,7 +24,6 @@ #include #include #include -#include #include #include #include @@ -120,13 +119,6 @@ SHAMapStoreImp::SHAMapStoreImp( if (deleteInterval_) { - if (app_.config().reporting()) - { - Throw( - "Reporting does not support online_delete. Remove " - "online_delete info from config"); - } - // Configuration that affects the behavior of online delete get_if_exists(section, "delete_batch", deleteBatch_); std::uint32_t temp; @@ -188,12 +180,6 @@ SHAMapStoreImp::makeNodeStore(int readThreads) if (deleteInterval_) { - if (app_.config().reporting()) - { - Throw( - "Reporting does not support online_delete. Remove " - "online_delete info from config"); - } SavedState state = state_db_.getState(); auto writableBackend = makeBackendRotating(state.writableDb); auto archiveBackend = makeBackendRotating(state.archiveDb); @@ -279,13 +265,6 @@ SHAMapStoreImp::copyNode(std::uint64_t& nodeCount, SHAMapTreeNode const& node) void SHAMapStoreImp::run() { - if (app_.config().reporting()) - { - assert(false); - Throw( - "Reporting does not support online_delete. Remove " - "online_delete info from config"); - } beast::setCurrentThreadName("SHAMapStore"); LedgerIndex lastRotated = state_db_.getState().lastRotated; netOPs_ = &app_.getOPs(); @@ -597,13 +576,6 @@ SHAMapStoreImp::freshenCaches() void SHAMapStoreImp::clearPrior(LedgerIndex lastRotated) { - if (app_.config().reporting()) - { - assert(false); - Throw( - "Reporting does not support online_delete. 
Remove " - "online_delete info from config"); - } // Do not allow ledgers to be acquired from the network // that are about to be deleted. minimumOnline_ = lastRotated + 1; diff --git a/src/xrpld/app/misc/detail/Transaction.cpp b/src/xrpld/app/misc/detail/Transaction.cpp index e0c3f260fe5..c8f9df232e0 100644 --- a/src/xrpld/app/misc/detail/Transaction.cpp +++ b/src/xrpld/app/misc/detail/Transaction.cpp @@ -21,11 +21,9 @@ #include #include #include -#include #include #include #include -#include #include #include #include @@ -130,20 +128,6 @@ Transaction::load( return load(id, app, op{range}, ec); } -Transaction::Locator -Transaction::locate(uint256 const& id, Application& app) -{ - auto const db = - dynamic_cast(&app.getRelationalDatabase()); - - if (!db) - { - Throw("Failed to get relational database"); - } - - return db->locateTransaction(id); -} - std::variant< std::pair, std::shared_ptr>, TxSearched> diff --git a/src/xrpld/app/rdb/README.md b/src/xrpld/app/rdb/README.md index f4cb5f203a4..81aaa32f2cf 100644 --- a/src/xrpld/app/rdb/README.md +++ b/src/xrpld/app/rdb/README.md @@ -28,9 +28,7 @@ src/xrpld/app/rdb/ │   ├── detail │   │   ├── Node.cpp │   │   ├── Node.h -│   │   ├── PostgresDatabase.cpp │   │   └── SQLiteDatabase.cpp -│   ├── PostgresDatabase.h │   └── SQLiteDatabase.h ├── detail │   ├── PeerFinder.cpp @@ -50,7 +48,6 @@ src/xrpld/app/rdb/ | File | Contents | | ----------- | ----------- | | `Node.[h\|cpp]` | Defines/Implements methods used by `SQLiteDatabase` for interacting with SQLite node databases| -| `PostgresDatabase.[h\|cpp]` | Defines/Implements the class `PostgresDatabase`/`PostgresDatabaseImp` which inherits from `RelationalDatabase` and is used to operate on the main stores | |`SQLiteDatabase.[h\|cpp]`| Defines/Implements the class `SQLiteDatabase`/`SQLiteDatabaseImp` which inherits from `RelationalDatabase` and is used to operate on the main stores | | `PeerFinder.[h\|cpp]` | Defines/Implements methods for interacting with the PeerFinder SQLite database | |`RelationalDatabase.cpp`| Implements the static method `RelationalDatabase::init` which is used to initialize an instance of `RelationalDatabase` | diff --git a/src/xrpld/app/rdb/RelationalDatabase.h b/src/xrpld/app/rdb/RelationalDatabase.h index b30c94153f7..00e236f20db 100644 --- a/src/xrpld/app/rdb/RelationalDatabase.h +++ b/src/xrpld/app/rdb/RelationalDatabase.h @@ -111,29 +111,6 @@ class RelationalDatabase std::optional marker; }; - /// Struct used to keep track of what to write to transactions and - /// account_transactions tables in Postgres - struct AccountTransactionsData - { - boost::container::flat_set accounts; - uint32_t ledgerSequence; - uint32_t transactionIndex; - uint256 txHash; - uint256 nodestoreHash; - - AccountTransactionsData( - TxMeta const& meta, - uint256 const& nodestoreHash, - beast::Journal j) - : accounts(meta.getAffectedAccounts()) - , ledgerSequence(meta.getLgrSeq()) - , transactionIndex(meta.getIndex()) - , txHash(meta.getTxID()) - , nodestoreHash(nodestoreHash) - { - } - }; - /** * @brief init Creates and returns an appropriate RelationalDatabase * instance based on configuration. 
diff --git a/src/xrpld/app/rdb/backend/PostgresDatabase.h b/src/xrpld/app/rdb/backend/PostgresDatabase.h deleted file mode 100644 index c2841cefd8c..00000000000 --- a/src/xrpld/app/rdb/backend/PostgresDatabase.h +++ /dev/null @@ -1,113 +0,0 @@ -//------------------------------------------------------------------------------ -/* - This file is part of rippled: https://github.com/ripple/rippled - Copyright (c) 2020 Ripple Labs Inc. - - Permission to use, copy, modify, and/or distribute this software for any - purpose with or without fee is hereby granted, provided that the above - copyright notice and this permission notice appear in all copies. - - THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. -*/ -//============================================================================== - -#ifndef RIPPLE_APP_RDB_BACKEND_POSTGRESDATABASE_H_INCLUDED -#define RIPPLE_APP_RDB_BACKEND_POSTGRESDATABASE_H_INCLUDED - -#include - -namespace ripple { - -class PostgresDatabase : public RelationalDatabase -{ -public: - virtual void - stop() = 0; - - /** - * @brief sweep Sweeps the database. - */ - virtual void - sweep() = 0; - - /** - * @brief getCompleteLedgers Returns a string which contains a list of - * completed ledgers. - * @return String with completed ledger sequences - */ - virtual std::string - getCompleteLedgers() = 0; - - /** - * @brief getValidatedLedgerAge Returns the age of the last validated - * ledger. - * @return Age of the last validated ledger in seconds - */ - virtual std::chrono::seconds - getValidatedLedgerAge() = 0; - - /** - * @brief writeLedgerAndTransactions Writes new ledger and transaction data - * into the database. - * @param info Ledger info to write. - * @param accountTxData Transaction data to write - * @return True on success, false on failure. - */ - virtual bool - writeLedgerAndTransactions( - LedgerInfo const& info, - std::vector const& accountTxData) = 0; - - /** - * @brief getTxHashes Returns a vector of the hashes of transactions - * belonging to the ledger with the provided sequence. - * @param seq Ledger sequence - * @return Vector of transaction hashes - */ - virtual std::vector - getTxHashes(LedgerIndex seq) = 0; - - /** - * @brief getAccountTx Get the last account transactions specified by the - * AccountTxArgs struct. - * @param args Arguments which specify the account and which transactions to - * return. - * @return Vector of account transactions and the RPC status response. - */ - virtual std::pair - getAccountTx(AccountTxArgs const& args) = 0; - - /** - * @brief locateTransaction Returns information used to locate - * a transaction. - * @param id Hash of the transaction. - * @return Information used to locate a transaction. Contains a nodestore - * hash and a ledger sequence pair if the transaction was found. - * Otherwise, contains the range of ledgers present in the database - * at the time of search. 
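- *         (Concretely, either a (nodestore hash, ledger sequence) pair or
- *         a ClosedInterval of ledger sequences; see locateTransaction in
- *         PostgresDatabase.cpp below.)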
- */ - virtual Transaction::Locator - locateTransaction(uint256 const& id) = 0; - - /** - * @brief isCaughtUp returns whether the database is caught up with the - * network - * @param[out] reason if the database is not caught up, reason contains a - * helpful message describing why - * @return false if the most recently written ledger has a close time - * over 3 minutes ago, or if there are no ledgers in the - * database. true otherwise - */ - virtual bool - isCaughtUp(std::string& reason) = 0; -}; - -} // namespace ripple - -#endif diff --git a/src/xrpld/app/rdb/backend/detail/PostgresDatabase.cpp b/src/xrpld/app/rdb/backend/detail/PostgresDatabase.cpp deleted file mode 100644 index ac1a9813c2b..00000000000 --- a/src/xrpld/app/rdb/backend/detail/PostgresDatabase.cpp +++ /dev/null @@ -1,1072 +0,0 @@ -//------------------------------------------------------------------------------ -/* - This file is part of rippled: https://github.com/ripple/rippled - Copyright (c) 2020 Ripple Labs Inc. - - Permission to use, copy, modify, and/or distribute this software for any - purpose with or without fee is hereby granted, provided that the above - copyright notice and this permission notice appear in all copies. - - THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. -*/ -//============================================================================== - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -namespace ripple { - -class PgPool; - -using AccountTxResult = RelationalDatabase::AccountTxResult; -using TxnsData = RelationalDatabase::AccountTxs; -using TxnsDataBinary = RelationalDatabase::MetaTxsList; - -class PostgresDatabaseImp final : public PostgresDatabase -{ -public: - PostgresDatabaseImp( - Application& app, - Config const& config, - JobQueue& jobQueue) - : app_(app) - , j_(app_.journal("PgPool")) - , pgPool_( -#ifdef RIPPLED_REPORTING - make_PgPool(config.section("ledger_tx_tables"), j_) -#endif - ) - { - assert(config.reporting()); -#ifdef RIPPLED_REPORTING - if (config.reporting() && !config.reportingReadOnly()) // use pg - { - initSchema(pgPool_); - } -#endif - } - - void - stop() override - { -#ifdef RIPPLED_REPORTING - pgPool_->stop(); -#endif - } - - void - sweep() override; - - std::optional - getMinLedgerSeq() override; - - std::optional - getMaxLedgerSeq() override; - - std::string - getCompleteLedgers() override; - - std::chrono::seconds - getValidatedLedgerAge() override; - - bool - writeLedgerAndTransactions( - LedgerInfo const& info, - std::vector const& accountTxData) override; - - std::optional - getLedgerInfoByIndex(LedgerIndex ledgerSeq) override; - - std::optional - getNewestLedgerInfo() override; - - std::optional - getLedgerInfoByHash(uint256 const& ledgerHash) override; - - uint256 - getHashByIndex(LedgerIndex ledgerIndex) override; - - std::optional - getHashesByIndex(LedgerIndex ledgerIndex) override; - - std::map - getHashesByIndex(LedgerIndex minSeq, LedgerIndex maxSeq) override; - - 
std::vector - getTxHashes(LedgerIndex seq) override; - - std::vector> - getTxHistory(LedgerIndex startIndex) override; - - std::pair - getAccountTx(AccountTxArgs const& args) override; - - Transaction::Locator - locateTransaction(uint256 const& id) override; - - bool - ledgerDbHasSpace(Config const& config) override; - - bool - transactionDbHasSpace(Config const& config) override; - - bool - isCaughtUp(std::string& reason) override; - -private: - Application& app_; - beast::Journal j_; - std::shared_ptr pgPool_; - - bool - dbHasSpace(Config const& config); -}; - -/** - * @brief loadLedgerInfos Loads the ledger info for the specified - * ledger/s from the database - * @param pgPool Link to postgres database - * @param whichLedger Specifies the ledger to load via ledger sequence, - * ledger hash, a range of ledgers, or std::monostate - * (which loads the most recent) - * @param app Application - * @return Vector of LedgerInfos - */ -static std::vector -loadLedgerInfos( - std::shared_ptr const& pgPool, - std::variant< - std::monostate, - uint256, - uint32_t, - std::pair> const& whichLedger, - Application& app) -{ - std::vector infos; -#ifdef RIPPLED_REPORTING - auto log = app.journal("Ledger"); - assert(app.config().reporting()); - std::stringstream sql; - sql << "SELECT ledger_hash, prev_hash, account_set_hash, trans_set_hash, " - "total_coins, closing_time, prev_closing_time, close_time_res, " - "close_flags, ledger_seq FROM ledgers "; - - if (auto ledgerSeq = std::get_if(&whichLedger)) - { - sql << "WHERE ledger_seq = " + std::to_string(*ledgerSeq); - } - else if (auto ledgerHash = std::get_if(&whichLedger)) - { - sql << ("WHERE ledger_hash = \'\\x" + strHex(*ledgerHash) + "\'"); - } - else if ( - auto minAndMax = - std::get_if>(&whichLedger)) - { - sql - << ("WHERE ledger_seq >= " + std::to_string(minAndMax->first) + - " AND ledger_seq <= " + std::to_string(minAndMax->second)); - } - else - { - sql << ("ORDER BY ledger_seq desc LIMIT 1"); - } - sql << ";"; - - JLOG(log.trace()) << __func__ << " : sql = " << sql.str(); - - auto res = PgQuery(pgPool)(sql.str().data()); - if (!res) - { - JLOG(log.error()) << __func__ << " : Postgres response is null - sql = " - << sql.str(); - assert(false); - return {}; - } - else if (res.status() != PGRES_TUPLES_OK) - { - JLOG(log.error()) << __func__ - << " : Postgres response should have been " - "PGRES_TUPLES_OK but instead was " - << res.status() << " - msg = " << res.msg() - << " - sql = " << sql.str(); - assert(false); - return {}; - } - - JLOG(log.trace()) << __func__ << " Postgres result msg : " << res.msg(); - - if (res.isNull() || res.ntuples() == 0) - { - JLOG(log.debug()) << __func__ - << " : Ledger not found. sql = " << sql.str(); - return {}; - } - else if (res.ntuples() > 0) - { - if (res.nfields() != 10) - { - JLOG(log.error()) << __func__ - << " : Wrong number of fields in Postgres " - "response. Expected 10, but got " - << res.nfields() << " . 
sql = " << sql.str(); - assert(false); - return {}; - } - } - - for (size_t i = 0; i < res.ntuples(); ++i) - { - char const* hash = res.c_str(i, 0); - char const* prevHash = res.c_str(i, 1); - char const* accountHash = res.c_str(i, 2); - char const* txHash = res.c_str(i, 3); - std::int64_t totalCoins = res.asBigInt(i, 4); - std::int64_t closeTime = res.asBigInt(i, 5); - std::int64_t parentCloseTime = res.asBigInt(i, 6); - std::int64_t closeTimeRes = res.asBigInt(i, 7); - std::int64_t closeFlags = res.asBigInt(i, 8); - std::int64_t ledgerSeq = res.asBigInt(i, 9); - - JLOG(log.trace()) << __func__ << " - Postgres response = " << hash - << " , " << prevHash << " , " << accountHash << " , " - << txHash << " , " << totalCoins << ", " << closeTime - << ", " << parentCloseTime << ", " << closeTimeRes - << ", " << closeFlags << ", " << ledgerSeq - << " - sql = " << sql.str(); - JLOG(log.debug()) << __func__ - << " - Successfully fetched ledger with sequence = " - << ledgerSeq << " from Postgres"; - - using time_point = NetClock::time_point; - using duration = NetClock::duration; - - LedgerInfo info; - if (!info.parentHash.parseHex(prevHash + 2)) - assert(false); - if (!info.txHash.parseHex(txHash + 2)) - assert(false); - if (!info.accountHash.parseHex(accountHash + 2)) - assert(false); - info.drops = totalCoins; - info.closeTime = time_point{duration{closeTime}}; - info.parentCloseTime = time_point{duration{parentCloseTime}}; - info.closeFlags = closeFlags; - info.closeTimeResolution = duration{closeTimeRes}; - info.seq = ledgerSeq; - if (!info.hash.parseHex(hash + 2)) - assert(false); - info.validated = true; - infos.push_back(info); - } - -#endif - return infos; -} - -/** - * @brief loadLedgerHelper Load a ledger info from Postgres - * @param pgPool Link to postgres database - * @param whichLedger Specifies sequence or hash of ledger. 
Passing - * std::monostate loads the most recent ledger - * @param app The Application - * @return Ledger info - */ -static std::optional -loadLedgerHelper( - std::shared_ptr const& pgPool, - std::variant const& whichLedger, - Application& app) -{ - std::vector infos; - std::visit( - [&infos, &app, &pgPool](auto&& arg) { - infos = loadLedgerInfos(pgPool, arg, app); - }, - whichLedger); - assert(infos.size() <= 1); - if (!infos.size()) - return {}; - return infos[0]; -} - -#ifdef RIPPLED_REPORTING -static bool -writeToLedgersDB(LedgerInfo const& info, PgQuery& pgQuery, beast::Journal& j) -{ - JLOG(j.debug()) << __func__; - auto cmd = boost::format( - R"(INSERT INTO ledgers - VALUES (%u,'\x%s', '\x%s',%u,%u,%u,%u,%u,'\x%s','\x%s'))"); - - auto ledgerInsert = boost::str( - cmd % info.seq % strHex(info.hash) % strHex(info.parentHash) % - info.drops.drops() % info.closeTime.time_since_epoch().count() % - info.parentCloseTime.time_since_epoch().count() % - info.closeTimeResolution.count() % info.closeFlags % - strHex(info.accountHash) % strHex(info.txHash)); - JLOG(j.trace()) << __func__ << " : " - << " : " - << "query string = " << ledgerInsert; - - auto res = pgQuery(ledgerInsert.data()); - - return res; -} - -enum class DataFormat { binary, expanded }; -static std::variant -flatFetchTransactions( - Application& app, - std::vector& nodestoreHashes, - std::vector& ledgerSequences, - DataFormat format) -{ - std::variant ret; - if (format == DataFormat::binary) - ret = TxnsDataBinary(); - else - ret = TxnsData(); - - std::vector< - std::pair, std::shared_ptr>> - txns = flatFetchTransactions(app, nodestoreHashes); - for (size_t i = 0; i < txns.size(); ++i) - { - auto& [txn, meta] = txns[i]; - if (format == DataFormat::binary) - { - auto& transactions = std::get(ret); - Serializer txnSer = txn->getSerializer(); - Serializer metaSer = meta->getSerializer(); - // SerialIter it(item->slice()); - Blob txnBlob = txnSer.getData(); - Blob metaBlob = metaSer.getData(); - transactions.push_back( - std::make_tuple(txnBlob, metaBlob, ledgerSequences[i])); - } - else - { - auto& transactions = std::get(ret); - std::string reason; - auto txnRet = std::make_shared(txn, reason, app); - txnRet->setLedger(ledgerSequences[i]); - txnRet->setStatus(COMMITTED); - auto txMeta = std::make_shared( - txnRet->getID(), ledgerSequences[i], *meta); - transactions.push_back(std::make_pair(txnRet, txMeta)); - } - } - return ret; -} - -static std::pair -processAccountTxStoredProcedureResult( - RelationalDatabase::AccountTxArgs const& args, - Json::Value& result, - Application& app, - beast::Journal j) -{ - AccountTxResult ret; - ret.limit = args.limit; - - try - { - if (result.isMember("transactions")) - { - std::vector nodestoreHashes; - std::vector ledgerSequences; - for (auto& t : result["transactions"]) - { - if (t.isMember("ledger_seq") && t.isMember("nodestore_hash")) - { - uint32_t ledgerSequence = t["ledger_seq"].asUInt(); - std::string nodestoreHashHex = - t["nodestore_hash"].asString(); - nodestoreHashHex.erase(0, 2); - uint256 nodestoreHash; - if (!nodestoreHash.parseHex(nodestoreHashHex)) - assert(false); - - if (nodestoreHash.isNonZero()) - { - ledgerSequences.push_back(ledgerSequence); - nodestoreHashes.push_back(nodestoreHash); - } - else - { - assert(false); - return {ret, {rpcINTERNAL, "nodestoreHash is zero"}}; - } - } - else - { - assert(false); - return {ret, {rpcINTERNAL, "missing postgres fields"}}; - } - } - - assert(nodestoreHashes.size() == ledgerSequences.size()); - ret.transactions = 
flatFetchTransactions( - app, - nodestoreHashes, - ledgerSequences, - args.binary ? DataFormat::binary : DataFormat::expanded); - - JLOG(j.trace()) << __func__ << " : processed db results"; - - if (result.isMember("marker")) - { - auto& marker = result["marker"]; - assert(marker.isMember("ledger")); - assert(marker.isMember("seq")); - ret.marker = { - marker["ledger"].asUInt(), marker["seq"].asUInt()}; - } - assert(result.isMember("ledger_index_min")); - assert(result.isMember("ledger_index_max")); - ret.ledgerRange = { - result["ledger_index_min"].asUInt(), - result["ledger_index_max"].asUInt()}; - return {ret, rpcSUCCESS}; - } - else if (result.isMember("error")) - { - JLOG(j.debug()) - << __func__ << " : error = " << result["error"].asString(); - return { - ret, - RPC::Status{rpcINVALID_PARAMS, result["error"].asString()}}; - } - else - { - return {ret, {rpcINTERNAL, "unexpected Postgres response"}}; - } - } - catch (std::exception& e) - { - JLOG(j.debug()) << __func__ << " : " - << "Caught exception : " << e.what(); - return {ret, {rpcINTERNAL, e.what()}}; - } -} -#endif - -void -PostgresDatabaseImp::sweep() -{ -#ifdef RIPPLED_REPORTING - pgPool_->idleSweeper(); -#endif -} - -std::optional -PostgresDatabaseImp::getMinLedgerSeq() -{ -#ifdef RIPPLED_REPORTING - auto seq = PgQuery(pgPool_)("SELECT min_ledger()"); - if (!seq) - { - JLOG(j_.error()) << "Error querying minimum ledger sequence."; - } - else if (!seq.isNull()) - return seq.asInt(); -#endif - return {}; -} - -std::optional -PostgresDatabaseImp::getMaxLedgerSeq() -{ -#ifdef RIPPLED_REPORTING - auto seq = PgQuery(pgPool_)("SELECT max_ledger()"); - if (seq && !seq.isNull()) - return seq.asBigInt(); -#endif - return {}; -} - -std::string -PostgresDatabaseImp::getCompleteLedgers() -{ -#ifdef RIPPLED_REPORTING - auto range = PgQuery(pgPool_)("SELECT complete_ledgers()"); - if (range) - return range.c_str(); -#endif - return "error"; -} - -std::chrono::seconds -PostgresDatabaseImp::getValidatedLedgerAge() -{ - using namespace std::chrono_literals; -#ifdef RIPPLED_REPORTING - auto age = PgQuery(pgPool_)("SELECT age()"); - if (!age || age.isNull()) - JLOG(j_.debug()) << "No ledgers in database"; - else - return std::chrono::seconds{age.asInt()}; -#endif - return weeks{2}; -} - -bool -PostgresDatabaseImp::writeLedgerAndTransactions( - LedgerInfo const& info, - std::vector const& accountTxData) -{ -#ifdef RIPPLED_REPORTING - JLOG(j_.debug()) << __func__ << " : " - << "Beginning write to Postgres"; - - try - { - // Create a PgQuery object to run multiple commands over the same - // connection in a single transaction block. - PgQuery pg(pgPool_); - auto res = pg("BEGIN"); - if (!res || res.status() != PGRES_COMMAND_OK) - { - std::stringstream msg; - msg << "bulkWriteToTable : Postgres insert error: " << res.msg(); - Throw(msg.str()); - } - - // Writing to the ledgers db fails if the ledger already exists in the - // db. 
In this situation, the ETL process has detected there is another - // writer, and falls back to only publishing - if (!writeToLedgersDB(info, pg, j_)) - { - JLOG(j_.warn()) << __func__ << " : " - << "Failed to write to ledgers database."; - return false; - } - - std::stringstream transactionsCopyBuffer; - std::stringstream accountTransactionsCopyBuffer; - for (auto const& data : accountTxData) - { - std::string txHash = strHex(data.txHash); - std::string nodestoreHash = strHex(data.nodestoreHash); - auto idx = data.transactionIndex; - auto ledgerSeq = data.ledgerSequence; - - transactionsCopyBuffer << std::to_string(ledgerSeq) << '\t' - << std::to_string(idx) << '\t' << "\\\\x" - << txHash << '\t' << "\\\\x" << nodestoreHash - << '\n'; - - for (auto const& a : data.accounts) - { - std::string acct = strHex(a); - accountTransactionsCopyBuffer - << "\\\\x" << acct << '\t' << std::to_string(ledgerSeq) - << '\t' << std::to_string(idx) << '\n'; - } - } - - pg.bulkInsert("transactions", transactionsCopyBuffer.str()); - pg.bulkInsert( - "account_transactions", accountTransactionsCopyBuffer.str()); - - res = pg("COMMIT"); - if (!res || res.status() != PGRES_COMMAND_OK) - { - std::stringstream msg; - msg << "bulkWriteToTable : Postgres insert error: " << res.msg(); - assert(false); - Throw(msg.str()); - } - - JLOG(j_.info()) << __func__ << " : " - << "Successfully wrote to Postgres"; - return true; - } - catch (std::exception& e) - { - JLOG(j_.error()) << __func__ - << "Caught exception writing to Postgres : " - << e.what(); - assert(false); - return false; - } -#else - return false; -#endif -} - -std::optional -PostgresDatabaseImp::getLedgerInfoByIndex(LedgerIndex ledgerSeq) -{ - return loadLedgerHelper(pgPool_, ledgerSeq, app_); -} - -std::optional -PostgresDatabaseImp::getNewestLedgerInfo() -{ - return loadLedgerHelper(pgPool_, {}, app_); -} - -std::optional -PostgresDatabaseImp::getLedgerInfoByHash(uint256 const& ledgerHash) -{ - return loadLedgerHelper(pgPool_, ledgerHash, app_); -} - -uint256 -PostgresDatabaseImp::getHashByIndex(LedgerIndex ledgerIndex) -{ - auto infos = loadLedgerInfos(pgPool_, ledgerIndex, app_); - assert(infos.size() <= 1); - if (infos.size()) - return infos[0].hash; - return {}; -} - -std::optional -PostgresDatabaseImp::getHashesByIndex(LedgerIndex ledgerIndex) -{ - LedgerHashPair p; - auto infos = loadLedgerInfos(pgPool_, ledgerIndex, app_); - assert(infos.size() <= 1); - if (infos.size()) - { - p.ledgerHash = infos[0].hash; - p.parentHash = infos[0].parentHash; - return p; - } - return {}; -} - -std::map -PostgresDatabaseImp::getHashesByIndex(LedgerIndex minSeq, LedgerIndex maxSeq) -{ - std::map ret; - auto infos = loadLedgerInfos(pgPool_, std::make_pair(minSeq, maxSeq), app_); - for (auto& info : infos) - { - ret[info.seq] = {info.hash, info.parentHash}; - } - return ret; -} - -std::vector -PostgresDatabaseImp::getTxHashes(LedgerIndex seq) -{ - std::vector nodestoreHashes; - -#ifdef RIPPLED_REPORTING - auto log = app_.journal("Ledger"); - - std::string query = - "SELECT nodestore_hash" - " FROM transactions " - " WHERE ledger_seq = " + - std::to_string(seq); - auto res = PgQuery(pgPool_)(query.c_str()); - - if (!res) - { - JLOG(log.error()) << __func__ - << " : Postgres response is null - query = " << query; - assert(false); - return {}; - } - else if (res.status() != PGRES_TUPLES_OK) - { - JLOG(log.error()) << __func__ - << " : Postgres response should have been " - "PGRES_TUPLES_OK but instead was " - << res.status() << " - msg = " << res.msg() - << " - query = " << 
query; - assert(false); - return {}; - } - - JLOG(log.trace()) << __func__ << " Postgres result msg : " << res.msg(); - - if (res.isNull() || res.ntuples() == 0) - { - JLOG(log.debug()) << __func__ - << " : Ledger not found. query = " << query; - return {}; - } - else if (res.ntuples() > 0) - { - if (res.nfields() != 1) - { - JLOG(log.error()) << __func__ - << " : Wrong number of fields in Postgres " - "response. Expected 1, but got " - << res.nfields() << " . query = " << query; - assert(false); - return {}; - } - } - - JLOG(log.trace()) << __func__ << " : result = " << res.c_str() - << " : query = " << query; - for (size_t i = 0; i < res.ntuples(); ++i) - { - char const* nodestoreHash = res.c_str(i, 0); - uint256 hash; - if (!hash.parseHex(nodestoreHash + 2)) - assert(false); - - nodestoreHashes.push_back(hash); - } -#endif - - return nodestoreHashes; -} - -std::vector> -PostgresDatabaseImp::getTxHistory(LedgerIndex startIndex) -{ - std::vector> ret; - -#ifdef RIPPLED_REPORTING - if (!app_.config().reporting()) - { - assert(false); - Throw( - "called getTxHistory but not in reporting mode"); - } - - std::string sql = boost::str( - boost::format("SELECT nodestore_hash, ledger_seq " - " FROM transactions" - " ORDER BY ledger_seq DESC LIMIT 20 " - "OFFSET %u;") % - startIndex); - - auto res = PgQuery(pgPool_)(sql.data()); - - if (!res) - { - JLOG(j_.error()) << __func__ - << " : Postgres response is null - sql = " << sql; - assert(false); - return {}; - } - else if (res.status() != PGRES_TUPLES_OK) - { - JLOG(j_.error()) << __func__ - << " : Postgres response should have been " - "PGRES_TUPLES_OK but instead was " - << res.status() << " - msg = " << res.msg() - << " - sql = " << sql; - assert(false); - return {}; - } - - JLOG(j_.trace()) << __func__ << " Postgres result msg : " << res.msg(); - - if (res.isNull() || res.ntuples() == 0) - { - JLOG(j_.debug()) << __func__ << " : Empty postgres response"; - assert(false); - return {}; - } - else if (res.ntuples() > 0) - { - if (res.nfields() != 2) - { - JLOG(j_.error()) << __func__ - << " : Wrong number of fields in Postgres " - "response. Expected 1, but got " - << res.nfields() << " . sql = " << sql; - assert(false); - return {}; - } - } - - JLOG(j_.trace()) << __func__ << " : Postgres result = " << res.c_str(); - - std::vector nodestoreHashes; - std::vector ledgerSequences; - for (size_t i = 0; i < res.ntuples(); ++i) - { - uint256 hash; - if (!hash.parseHex(res.c_str(i, 0) + 2)) - assert(false); - nodestoreHashes.push_back(hash); - ledgerSequences.push_back(res.asBigInt(i, 1)); - } - - auto txns = flatFetchTransactions(app_, nodestoreHashes); - for (size_t i = 0; i < txns.size(); ++i) - { - auto const& [sttx, meta] = txns[i]; - assert(sttx); - - std::string reason; - auto txn = std::make_shared(sttx, reason, app_); - txn->setLedger(ledgerSequences[i]); - txn->setStatus(COMMITTED); - ret.push_back(txn); - } - -#endif - return ret; -} - -std::pair -PostgresDatabaseImp::getAccountTx(AccountTxArgs const& args) -{ -#ifdef RIPPLED_REPORTING - pg_params dbParams; - - char const*& command = dbParams.first; - std::vector>& values = dbParams.second; - command = - "SELECT account_tx($1::bytea, $2::bool, " - "$3::bigint, $4::bigint, $5::bigint, $6::bytea, " - "$7::bigint, $8::bool, $9::bigint, $10::bigint)"; - values.resize(10); - values[0] = "\\x" + strHex(args.account); - values[1] = args.forward ? 
"true" : "false"; - - static std::uint32_t const page_length(200); - if (args.limit == 0 || args.limit > page_length) - values[2] = std::to_string(page_length); - else - values[2] = std::to_string(args.limit); - - if (args.ledger) - { - if (auto range = std::get_if(&args.ledger.value())) - { - values[3] = std::to_string(range->min); - values[4] = std::to_string(range->max); - } - else if (auto hash = std::get_if(&args.ledger.value())) - { - values[5] = ("\\x" + strHex(*hash)); - } - else if ( - auto sequence = std::get_if(&args.ledger.value())) - { - values[6] = std::to_string(*sequence); - } - else if (std::get_if(&args.ledger.value())) - { - // current, closed and validated are all treated as validated - values[7] = "true"; - } - else - { - JLOG(j_.error()) << "doAccountTxStoredProcedure - " - << "Error parsing ledger args"; - return {}; - } - } - - if (args.marker) - { - values[8] = std::to_string(args.marker->ledgerSeq); - values[9] = std::to_string(args.marker->txnSeq); - } - for (size_t i = 0; i < values.size(); ++i) - { - JLOG(j_.trace()) << "value " << std::to_string(i) << " = " - << (values[i] ? values[i].value() : "null"); - } - - auto res = PgQuery(pgPool_)(dbParams); - if (!res) - { - JLOG(j_.error()) << __func__ - << " : Postgres response is null - account = " - << strHex(args.account); - assert(false); - return {{}, {rpcINTERNAL, "Postgres error"}}; - } - else if (res.status() != PGRES_TUPLES_OK) - { - JLOG(j_.error()) << __func__ - << " : Postgres response should have been " - "PGRES_TUPLES_OK but instead was " - << res.status() << " - msg = " << res.msg() - << " - account = " << strHex(args.account); - assert(false); - return {{}, {rpcINTERNAL, "Postgres error"}}; - } - - JLOG(j_.trace()) << __func__ << " Postgres result msg : " << res.msg(); - if (res.isNull() || res.ntuples() == 0) - { - JLOG(j_.debug()) << __func__ - << " : No data returned from Postgres : account = " - << strHex(args.account); - - assert(false); - return {{}, {rpcINTERNAL, "Postgres error"}}; - } - - char const* resultStr = res.c_str(); - JLOG(j_.trace()) << __func__ << " : " - << "postgres result = " << resultStr - << " : account = " << strHex(args.account); - - Json::Value v; - Json::Reader reader; - bool success = reader.parse(resultStr, resultStr + strlen(resultStr), v); - if (success) - { - return processAccountTxStoredProcedureResult(args, v, app_, j_); - } -#endif - // This shouldn't happen. 
Postgres should return a parseable error - assert(false); - return {{}, {rpcINTERNAL, "Failed to deserialize Postgres result"}}; -} - -Transaction::Locator -PostgresDatabaseImp::locateTransaction(uint256 const& id) -{ -#ifdef RIPPLED_REPORTING - auto baseCmd = boost::format(R"(SELECT tx('%s');)"); - - std::string txHash = "\\x" + strHex(id); - std::string sql = boost::str(baseCmd % txHash); - - auto res = PgQuery(pgPool_)(sql.data()); - - if (!res) - { - JLOG(app_.journal("Transaction").error()) - << __func__ - << " : Postgres response is null - tx ID = " << strHex(id); - assert(false); - return {}; - } - else if (res.status() != PGRES_TUPLES_OK) - { - JLOG(app_.journal("Transaction").error()) - << __func__ - << " : Postgres response should have been " - "PGRES_TUPLES_OK but instead was " - << res.status() << " - msg = " << res.msg() - << " - tx ID = " << strHex(id); - assert(false); - return {}; - } - - JLOG(app_.journal("Transaction").trace()) - << __func__ << " Postgres result msg : " << res.msg(); - if (res.isNull() || res.ntuples() == 0) - { - JLOG(app_.journal("Transaction").debug()) - << __func__ - << " : No data returned from Postgres : tx ID = " << strHex(id); - // This shouldn't happen - assert(false); - return {}; - } - - char const* resultStr = res.c_str(); - JLOG(app_.journal("Transaction").debug()) - << "postgres result = " << resultStr; - - Json::Value v; - Json::Reader reader; - bool success = reader.parse(resultStr, resultStr + strlen(resultStr), v); - if (success) - { - if (v.isMember("nodestore_hash") && v.isMember("ledger_seq")) - { - uint256 nodestoreHash; - if (!nodestoreHash.parseHex( - v["nodestore_hash"].asString().substr(2))) - assert(false); - uint32_t ledgerSeq = v["ledger_seq"].asUInt(); - if (nodestoreHash.isNonZero()) - return {std::make_pair(nodestoreHash, ledgerSeq)}; - } - if (v.isMember("min_seq") && v.isMember("max_seq")) - { - return {ClosedInterval( - v["min_seq"].asUInt(), v["max_seq"].asUInt())}; - } - } -#endif - // Shouldn' happen. Postgres should return the ledger range searched if - // the transaction was not found - assert(false); - Throw( - "Transaction::Locate - Invalid Postgres response"); - return {}; -} - -bool -PostgresDatabaseImp::dbHasSpace(Config const& config) -{ - /* Postgres server could be running on a different machine. 
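-       A local free-disk-space check would therefore be meaningless, so
-       this implementation always reports that space is available.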
*/ - - return true; -} - -bool -PostgresDatabaseImp::ledgerDbHasSpace(Config const& config) -{ - return dbHasSpace(config); -} - -bool -PostgresDatabaseImp::transactionDbHasSpace(Config const& config) -{ - return dbHasSpace(config); -} - -std::unique_ptr -getPostgresDatabase(Application& app, Config const& config, JobQueue& jobQueue) -{ - return std::make_unique(app, config, jobQueue); -} - -bool -PostgresDatabaseImp::isCaughtUp(std::string& reason) -{ -#ifdef RIPPLED_REPORTING - using namespace std::chrono_literals; - auto age = PgQuery(pgPool_)("SELECT age()"); - if (!age || age.isNull()) - { - reason = "No ledgers in database"; - return false; - } - if (std::chrono::seconds{age.asInt()} > 3min) - { - reason = "No recently-published ledger"; - return false; - } -#endif - return true; -} - -} // namespace ripple diff --git a/src/xrpld/app/rdb/detail/RelationalDatabase.cpp b/src/xrpld/app/rdb/detail/RelationalDatabase.cpp index 07dc27fd1d3..4a95134d705 100644 --- a/src/xrpld/app/rdb/detail/RelationalDatabase.cpp +++ b/src/xrpld/app/rdb/detail/RelationalDatabase.cpp @@ -26,9 +26,6 @@ namespace ripple { extern std::unique_ptr getSQLiteDatabase(Application& app, Config const& config, JobQueue& jobQueue); -extern std::unique_ptr -getPostgresDatabase(Application& app, Config const& config, JobQueue& jobQueue); - std::unique_ptr RelationalDatabase::init( Application& app, @@ -36,42 +33,30 @@ RelationalDatabase::init( JobQueue& jobQueue) { bool use_sqlite = false; - bool use_postgres = false; - if (config.reporting()) - { - use_postgres = true; - } - else + const Section& rdb_section{config.section(SECTION_RELATIONAL_DB)}; + if (!rdb_section.empty()) { - const Section& rdb_section{config.section(SECTION_RELATIONAL_DB)}; - if (!rdb_section.empty()) + if (boost::iequals(get(rdb_section, "backend"), "sqlite")) { - if (boost::iequals(get(rdb_section, "backend"), "sqlite")) - { - use_sqlite = true; - } - else - { - Throw( - "Invalid rdb_section backend value: " + - get(rdb_section, "backend")); - } + use_sqlite = true; } else { - use_sqlite = true; + Throw( + "Invalid rdb_section backend value: " + + get(rdb_section, "backend")); } } + else + { + use_sqlite = true; + } if (use_sqlite) { return getSQLiteDatabase(app, config, jobQueue); } - else if (use_postgres) - { - return getPostgresDatabase(app, config, jobQueue); - } return std::unique_ptr(); } diff --git a/src/xrpld/app/reporting/ETLHelpers.h b/src/xrpld/app/reporting/ETLHelpers.h deleted file mode 100644 index b11d2c4aa18..00000000000 --- a/src/xrpld/app/reporting/ETLHelpers.h +++ /dev/null @@ -1,195 +0,0 @@ -//------------------------------------------------------------------------------ -/* - This file is part of rippled: https://github.com/ripple/rippled - Copyright (c) 2020 Ripple Labs Inc. - - Permission to use, copy, modify, and/or distribute this software for any - purpose with or without fee is hereby granted, provided that the above - copyright notice and this permission notice appear in all copies. - - THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
-*/ -//============================================================================== - -#ifndef RIPPLE_APP_REPORTING_ETLHELPERS_H_INCLUDED -#define RIPPLE_APP_REPORTING_ETLHELPERS_H_INCLUDED -#include -#include -#include -#include -#include -#include -#include - -namespace ripple { - -/// This datastructure is used to keep track of the sequence of the most recent -/// ledger validated by the network. There are two methods that will wait until -/// certain conditions are met. This datastructure is able to be "stopped". When -/// the datastructure is stopped, any threads currently waiting are unblocked. -/// Any later calls to methods of this datastructure will not wait. Once the -/// datastructure is stopped, the datastructure remains stopped for the rest of -/// its lifetime. -class NetworkValidatedLedgers -{ - // max sequence validated by network - std::optional max_; - - mutable std::mutex m_; - - mutable std::condition_variable cv_; - - bool stopping_ = false; - -public: - /// Notify the datastructure that idx has been validated by the network - /// @param idx sequence validated by network - void - push(uint32_t idx) - { - std::lock_guard lck(m_); - if (!max_ || idx > *max_) - max_ = idx; - cv_.notify_all(); - } - - /// Get most recently validated sequence. If no ledgers are known to have - /// been validated, this function waits until the next ledger is validated - /// @return sequence of most recently validated ledger. empty optional if - /// the datastructure has been stopped - std::optional - getMostRecent() const - { - std::unique_lock lck(m_); - cv_.wait(lck, [this]() { return max_ || stopping_; }); - return max_; - } - - /// Get most recently validated sequence. - /// @return sequence of most recently validated ledger, or empty optional - /// if no ledgers are known to have been validated. - std::optional - tryGetMostRecent() const - { - std::unique_lock lk(m_); - return max_; - } - - /// Waits for the sequence to be validated by the network - /// @param sequence to wait for - /// @return true if sequence was validated, false otherwise - /// a return value of false means the datastructure has been stopped - bool - waitUntilValidatedByNetwork(uint32_t sequence) - { - std::unique_lock lck(m_); - cv_.wait(lck, [sequence, this]() { - return (max_ && sequence <= *max_) || stopping_; - }); - return !stopping_; - } - - /// Puts the datastructure in the stopped state - /// Future calls to this datastructure will not block - /// This operation cannot be reversed - void - stop() - { - std::lock_guard lck(m_); - stopping_ = true; - cv_.notify_all(); - } -}; - -/// Generic thread-safe queue with an optional maximum size -/// Note, we can't use a lockfree queue here, since we need the ability to wait -/// for an element to be added or removed from the queue. These waits are -/// blocking calls. -template -class ThreadSafeQueue -{ - std::queue queue_; - - mutable std::mutex m_; - std::condition_variable cv_; - std::optional maxSize_; - -public: - /// @param maxSize maximum size of the queue. 
Calls that would cause the - /// queue to exceed this size will block until free space is available - explicit ThreadSafeQueue(uint32_t maxSize) : maxSize_(maxSize) - { - } - - /// Create a queue with no maximum size - ThreadSafeQueue() = default; - - /// @param elt element to push onto queue - /// if maxSize is set, this method will block until free space is available - void - push(T const& elt) - { - std::unique_lock lck(m_); - // if queue has a max size, wait until not full - if (maxSize_) - cv_.wait(lck, [this]() { return queue_.size() <= *maxSize_; }); - queue_.push(elt); - cv_.notify_all(); - } - - /// @param elt element to push onto queue. elt is moved from - /// if maxSize is set, this method will block until free space is available - void - push(T&& elt) - { - std::unique_lock lck(m_); - // if queue has a max size, wait until not full - if (maxSize_) - cv_.wait(lck, [this]() { return queue_.size() <= *maxSize_; }); - queue_.push(std::move(elt)); - cv_.notify_all(); - } - - /// @return element popped from queue. Will block until queue is non-empty - T - pop() - { - std::unique_lock lck(m_); - cv_.wait(lck, [this]() { return !queue_.empty(); }); - T ret = std::move(queue_.front()); - queue_.pop(); - // if queue has a max size, unblock any possible pushers - if (maxSize_) - cv_.notify_all(); - return ret; - } -}; - -/// Parititions the uint256 keyspace into numMarkers partitions, each of equal -/// size. -inline std::vector -getMarkers(size_t numMarkers) -{ - assert(numMarkers <= 256); - - unsigned char incr = 256 / numMarkers; - - std::vector markers; - markers.reserve(numMarkers); - uint256 base{0}; - for (size_t i = 0; i < numMarkers; ++i) - { - markers.push_back(base); - base.data()[0] += incr; - } - return markers; -} - -} // namespace ripple -#endif diff --git a/src/xrpld/app/reporting/ETLSource.cpp b/src/xrpld/app/reporting/ETLSource.cpp deleted file mode 100644 index 8a181c62b1d..00000000000 --- a/src/xrpld/app/reporting/ETLSource.cpp +++ /dev/null @@ -1,982 +0,0 @@ -//------------------------------------------------------------------------------ -/* - This file is part of rippled: https://github.com/ripple/rippled - Copyright (c) 2020 Ripple Labs Inc. - - Permission to use, copy, modify, and/or distribute this software for any - purpose with or without fee is hereby granted, provided that the above - copyright notice and this permission notice appear in all copies. - - THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
-*/ -//============================================================================== - -#include -#include -#include -#include -#include - -namespace ripple { - -// Create ETL source without grpc endpoint -// Fetch ledger and load initial ledger will fail for this source -// Primarily used in read-only mode, to monitor when ledgers are validated -ETLSource::ETLSource(std::string ip, std::string wsPort, ReportingETL& etl) - : ip_(ip) - , wsPort_(wsPort) - , etl_(etl) - , ioc_(etl.getApplication().getIOService()) - , ws_(std::make_unique< - boost::beast::websocket::stream>( - boost::asio::make_strand(ioc_))) - , resolver_(boost::asio::make_strand(ioc_)) - , networkValidatedLedgers_(etl_.getNetworkValidatedLedgers()) - , journal_(etl_.getApplication().journal("ReportingETL::ETLSource")) - , app_(etl_.getApplication()) - , timer_(ioc_) -{ -} - -ETLSource::ETLSource( - std::string ip, - std::string wsPort, - std::string grpcPort, - ReportingETL& etl) - : ip_(ip) - , wsPort_(wsPort) - , grpcPort_(grpcPort) - , etl_(etl) - , ioc_(etl.getApplication().getIOService()) - , ws_(std::make_unique< - boost::beast::websocket::stream>( - boost::asio::make_strand(ioc_))) - , resolver_(boost::asio::make_strand(ioc_)) - , networkValidatedLedgers_(etl_.getNetworkValidatedLedgers()) - , journal_(etl_.getApplication().journal("ReportingETL::ETLSource")) - , app_(etl_.getApplication()) - , timer_(ioc_) -{ - std::string connectionString; - try - { - connectionString = - beast::IP::Endpoint( - boost::asio::ip::make_address(ip_), std::stoi(grpcPort_)) - .to_string(); - - JLOG(journal_.info()) - << "Using IP to connect to ETL source: " << connectionString; - } - catch (std::exception const&) - { - connectionString = "dns:" + ip_ + ":" + grpcPort_; - JLOG(journal_.info()) - << "Using DNS to connect to ETL source: " << connectionString; - } - try - { - stub_ = org::xrpl::rpc::v1::XRPLedgerAPIService::NewStub( - grpc::CreateChannel( - connectionString, grpc::InsecureChannelCredentials())); - JLOG(journal_.info()) << "Made stub for remote = " << toString(); - } - catch (std::exception const& e) - { - JLOG(journal_.error()) << "Exception while creating stub = " << e.what() - << " . Remote = " << toString(); - } -} - -void -ETLSource::reconnect(boost::beast::error_code ec) -{ - connected_ = false; - // These are somewhat normal errors. operation_aborted occurs on shutdown, - // when the timer is cancelled. connection_refused will occur repeatedly - // if we cannot connect to the transaction processing process - if (ec != boost::asio::error::operation_aborted && - ec != boost::asio::error::connection_refused) - { - JLOG(journal_.error()) << __func__ << " : " - << "error code = " << ec << " - " << toString(); - } - else - { - JLOG(journal_.warn()) << __func__ << " : " - << "error code = " << ec << " - " << toString(); - } - - if (etl_.isStopping()) - { - JLOG(journal_.debug()) << __func__ << " : " << toString() - << " - etl is stopping. 
aborting reconnect"; - return; - } - - // exponentially increasing timeouts, with a max of 30 seconds - size_t waitTime = std::min(pow(2, numFailures_), 30.0); - numFailures_++; - timer_.expires_after(boost::asio::chrono::seconds(waitTime)); - timer_.async_wait([this, fname = __func__](auto ec) { - bool startAgain = (ec != boost::asio::error::operation_aborted); - JLOG(journal_.trace()) << fname << " async_wait : ec = " << ec; - close(startAgain); - }); -} - -void -ETLSource::close(bool startAgain) -{ - timer_.cancel(); - ioc_.post([this, startAgain]() { - if (closing_) - return; - - if (ws_->is_open()) - { - // onStop() also calls close(). If the async_close is called twice, - // an assertion fails. Using closing_ makes sure async_close is only - // called once - closing_ = true; - ws_->async_close( - boost::beast::websocket::close_code::normal, - [this, startAgain, fname = __func__](auto ec) { - if (ec) - { - JLOG(journal_.error()) - << fname << " async_close : " - << "error code = " << ec << " - " << toString(); - } - closing_ = false; - if (startAgain) - start(); - }); - } - else if (startAgain) - { - start(); - } - }); -} - -void -ETLSource::start() -{ - JLOG(journal_.trace()) << __func__ << " : " << toString(); - - auto const host = ip_; - auto const port = wsPort_; - - resolver_.async_resolve( - host, port, [this](auto ec, auto results) { onResolve(ec, results); }); -} - -void -ETLSource::onResolve( - boost::beast::error_code ec, - boost::asio::ip::tcp::resolver::results_type results) -{ - JLOG(journal_.trace()) << __func__ << " : ec = " << ec << " - " - << toString(); - if (ec) - { - // try again - reconnect(ec); - } - else - { - boost::beast::get_lowest_layer(*ws_).expires_after( - std::chrono::seconds(30)); - boost::beast::get_lowest_layer(*ws_).async_connect( - results, [this](auto ec, auto ep) { onConnect(ec, ep); }); - } -} - -void -ETLSource::onConnect( - boost::beast::error_code ec, - boost::asio::ip::tcp::resolver::results_type::endpoint_type endpoint) -{ - JLOG(journal_.trace()) << __func__ << " : ec = " << ec << " - " - << toString(); - if (ec) - { - // start over - reconnect(ec); - } - else - { - numFailures_ = 0; - // Turn off timeout on the tcp stream, because websocket stream has it's - // own timeout system - boost::beast::get_lowest_layer(*ws_).expires_never(); - - // Set suggested timeout settings for the websocket - ws_->set_option( - boost::beast::websocket::stream_base::timeout::suggested( - boost::beast::role_type::client)); - - // Set a decorator to change the User-Agent of the handshake - ws_->set_option(boost::beast::websocket::stream_base::decorator( - [](boost::beast::websocket::request_type& req) { - req.set( - boost::beast::http::field::user_agent, - std::string(BOOST_BEAST_VERSION_STRING) + - " websocket-client-async"); - })); - - // Update the host_ string. This will provide the value of the - // Host HTTP header during the WebSocket handshake. 
- // See https://tools.ietf.org/html/rfc7230#section-5.4 - auto host = ip_ + ':' + std::to_string(endpoint.port()); - // Perform the websocket handshake - ws_->async_handshake(host, "/", [this](auto ec) { onHandshake(ec); }); - } -} - -void -ETLSource::onHandshake(boost::beast::error_code ec) -{ - JLOG(journal_.trace()) << __func__ << " : ec = " << ec << " - " - << toString(); - if (ec) - { - // start over - reconnect(ec); - } - else - { - Json::Value jv; - jv["command"] = "subscribe"; - - jv["streams"] = Json::arrayValue; - Json::Value ledgerStream("ledger"); - jv["streams"].append(ledgerStream); - Json::Value txnStream("transactions_proposed"); - jv["streams"].append(txnStream); - Json::Value validationStream("validations"); - jv["streams"].append(validationStream); - Json::Value manifestStream("manifests"); - jv["streams"].append(manifestStream); - Json::FastWriter fastWriter; - - JLOG(journal_.trace()) << "Sending subscribe stream message"; - // Send the message - ws_->async_write( - boost::asio::buffer(fastWriter.write(jv)), - [this](auto ec, size_t size) { onWrite(ec, size); }); - } -} - -void -ETLSource::onWrite(boost::beast::error_code ec, size_t bytesWritten) -{ - JLOG(journal_.trace()) << __func__ << " : ec = " << ec << " - " - << toString(); - if (ec) - { - // start over - reconnect(ec); - } - else - { - ws_->async_read( - readBuffer_, [this](auto ec, size_t size) { onRead(ec, size); }); - } -} - -void -ETLSource::onRead(boost::beast::error_code ec, size_t size) -{ - JLOG(journal_.trace()) << __func__ << " : ec = " << ec << " - " - << toString(); - // if error or error reading message, start over - if (ec) - { - reconnect(ec); - } - else - { - handleMessage(); - boost::beast::flat_buffer buffer; - swap(readBuffer_, buffer); - - JLOG(journal_.trace()) - << __func__ << " : calling async_read - " << toString(); - ws_->async_read( - readBuffer_, [this](auto ec, size_t size) { onRead(ec, size); }); - } -} - -bool -ETLSource::handleMessage() -{ - JLOG(journal_.trace()) << __func__ << " : " << toString(); - - setLastMsgTime(); - connected_ = true; - try - { - Json::Value response; - Json::Reader reader; - if (!reader.parse( - static_cast(readBuffer_.data().data()), response)) - { - JLOG(journal_.error()) - << __func__ << " : " - << "Error parsing stream message." - << " Message = " << readBuffer_.data().data(); - return false; - } - - uint32_t ledgerIndex = 0; - if (response.isMember("result")) - { - if (response["result"].isMember(jss::ledger_index)) - { - ledgerIndex = response["result"][jss::ledger_index].asUInt(); - } - if (response[jss::result].isMember(jss::validated_ledgers)) - { - setValidatedRange( - response[jss::result][jss::validated_ledgers].asString()); - } - JLOG(journal_.debug()) - << __func__ << " : " - << "Received a message on ledger " - << " subscription stream. 
Message : " - << response.toStyledString() << " - " << toString(); - } - else - { - if (etl_.getETLLoadBalancer().shouldPropagateStream(this)) - { - if (response.isMember(jss::transaction)) - { - etl_.getApplication().getOPs().forwardProposedTransaction( - response); - } - else if ( - response.isMember("type") && - response["type"] == "validationReceived") - { - etl_.getApplication().getOPs().forwardValidation(response); - } - else if ( - response.isMember("type") && - response["type"] == "manifestReceived") - { - etl_.getApplication().getOPs().forwardManifest(response); - } - } - - if (response.isMember("type") && response["type"] == "ledgerClosed") - { - JLOG(journal_.debug()) - << __func__ << " : " - << "Received a message on ledger " - << " subscription stream. Message : " - << response.toStyledString() << " - " << toString(); - if (response.isMember(jss::ledger_index)) - { - ledgerIndex = response[jss::ledger_index].asUInt(); - } - if (response.isMember(jss::validated_ledgers)) - { - setValidatedRange( - response[jss::validated_ledgers].asString()); - } - } - } - - if (ledgerIndex != 0) - { - JLOG(journal_.trace()) - << __func__ << " : " - << "Pushing ledger sequence = " << ledgerIndex << " - " - << toString(); - networkValidatedLedgers_.push(ledgerIndex); - } - return true; - } - catch (std::exception const& e) - { - JLOG(journal_.error()) << "Exception in handleMessage : " << e.what(); - return false; - } -} - -class AsyncCallData -{ - std::unique_ptr cur_; - std::unique_ptr next_; - - org::xrpl::rpc::v1::GetLedgerDataRequest request_; - std::unique_ptr context_; - - grpc::Status status_; - - unsigned char nextPrefix_; - - beast::Journal journal_; - -public: - AsyncCallData( - uint256& marker, - std::optional nextMarker, - uint32_t seq, - beast::Journal& j) - : journal_(j) - { - request_.mutable_ledger()->set_sequence(seq); - if (marker.isNonZero()) - { - request_.set_marker(marker.data(), marker.size()); - } - request_.set_user("ETL"); - nextPrefix_ = 0x00; - if (nextMarker) - nextPrefix_ = nextMarker->data()[0]; - - unsigned char prefix = marker.data()[0]; - - JLOG(journal_.debug()) - << "Setting up AsyncCallData. marker = " << strHex(marker) - << " . prefix = " << strHex(std::string(1, prefix)) - << " . nextPrefix_ = " << strHex(std::string(1, nextPrefix_)); - - assert(nextPrefix_ > prefix || nextPrefix_ == 0x00); - - cur_ = std::make_unique(); - - next_ = std::make_unique(); - - context_ = std::make_unique(); - } - - enum class CallStatus { MORE, DONE, ERRORED }; - CallStatus - process( - std::unique_ptr& stub, - grpc::CompletionQueue& cq, - ThreadSafeQueue>& queue, - bool abort = false) - { - JLOG(journal_.debug()) << "Processing calldata"; - if (abort) - { - JLOG(journal_.error()) << "AsyncCallData aborted"; - return CallStatus::ERRORED; - } - if (!status_.ok()) - { - JLOG(journal_.debug()) << "AsyncCallData status_ not ok: " - << " code = " << status_.error_code() - << " message = " << status_.error_message(); - return CallStatus::ERRORED; - } - if (!next_->is_unlimited()) - { - JLOG(journal_.warn()) - << "AsyncCallData is_unlimited is false. 
Make sure " - "secure_gateway is set correctly at the ETL source"; - assert(false); - } - - std::swap(cur_, next_); - - bool more = true; - - // if no marker returned, we are done - if (cur_->marker().size() == 0) - more = false; - - // if returned marker is greater than our end, we are done - unsigned char prefix = cur_->marker()[0]; - if (nextPrefix_ != 0x00 && prefix >= nextPrefix_) - more = false; - - // if we are not done, make the next async call - if (more) - { - request_.set_marker(std::move(cur_->marker())); - call(stub, cq); - } - - for (auto& obj : cur_->ledger_objects().objects()) - { - auto key = uint256::fromVoidChecked(obj.key()); - if (!key) - throw std::runtime_error("Received malformed object ID"); - - auto& data = obj.data(); - - SerialIter it{data.data(), data.size()}; - std::shared_ptr sle = std::make_shared(it, *key); - - queue.push(sle); - } - - return more ? CallStatus::MORE : CallStatus::DONE; - } - - void - call( - std::unique_ptr& stub, - grpc::CompletionQueue& cq) - { - context_ = std::make_unique(); - - std::unique_ptr> - rpc(stub->PrepareAsyncGetLedgerData(context_.get(), request_, &cq)); - - rpc->StartCall(); - - rpc->Finish(next_.get(), &status_, this); - } - - std::string - getMarkerPrefix() - { - if (next_->marker().size() == 0) - return ""; - else - return strHex(std::string{next_->marker().data()[0]}); - } -}; - -bool -ETLSource::loadInitialLedger( - uint32_t sequence, - ThreadSafeQueue>& writeQueue) -{ - if (!stub_) - return false; - - grpc::CompletionQueue cq; - - void* tag; - - bool ok = false; - - std::vector calls; - std::vector markers{getMarkers(etl_.getNumMarkers())}; - - for (size_t i = 0; i < markers.size(); ++i) - { - std::optional nextMarker; - if (i + 1 < markers.size()) - nextMarker = markers[i + 1]; - calls.emplace_back(markers[i], nextMarker, sequence, journal_); - } - - JLOG(journal_.debug()) << "Starting data download for ledger " << sequence - << ". Using source = " << toString(); - - for (auto& c : calls) - c.call(stub_, cq); - - size_t numFinished = 0; - bool abort = false; - while (numFinished < calls.size() && !etl_.isStopping() && - cq.Next(&tag, &ok)) - { - assert(tag); - - auto ptr = static_cast(tag); - - if (!ok) - { - JLOG(journal_.error()) << "loadInitialLedger - ok is false"; - return false; - // handle cancelled - } - else - { - JLOG(journal_.debug()) - << "Marker prefix = " << ptr->getMarkerPrefix(); - auto result = ptr->process(stub_, cq, writeQueue, abort); - if (result != AsyncCallData::CallStatus::MORE) - { - numFinished++; - JLOG(journal_.debug()) - << "Finished a marker. " - << "Current number of finished = " << numFinished; - } - if (result == AsyncCallData::CallStatus::ERRORED) - { - abort = true; - } - } - } - return !abort; -} - -std::pair -ETLSource::fetchLedger(uint32_t ledgerSequence, bool getObjects) -{ - org::xrpl::rpc::v1::GetLedgerResponse response; - if (!stub_) - return {{grpc::StatusCode::INTERNAL, "No Stub"}, response}; - - // ledger header with txns and metadata - org::xrpl::rpc::v1::GetLedgerRequest request; - grpc::ClientContext context; - request.mutable_ledger()->set_sequence(ledgerSequence); - request.set_transactions(true); - request.set_expand(true); - request.set_get_objects(getObjects); - request.set_user("ETL"); - grpc::Status status = stub_->GetLedger(&context, request, &response); - if (status.ok() && !response.is_unlimited()) - { - JLOG(journal_.warn()) << "ETLSource::fetchLedger - is_unlimited is " - "false. Make sure secure_gateway is set " - "correctly on the ETL source. 
source = " - << toString(); - assert(false); - } - return {status, std::move(response)}; -} - -ETLLoadBalancer::ETLLoadBalancer(ReportingETL& etl) - : etl_(etl) - , journal_(etl_.getApplication().journal("ReportingETL::LoadBalancer")) -{ -} - -void -ETLLoadBalancer::add( - std::string& host, - std::string& websocketPort, - std::string& grpcPort) -{ - std::unique_ptr ptr = - std::make_unique(host, websocketPort, grpcPort, etl_); - sources_.push_back(std::move(ptr)); - JLOG(journal_.info()) << __func__ << " : added etl source - " - << sources_.back()->toString(); -} - -void -ETLLoadBalancer::add(std::string& host, std::string& websocketPort) -{ - std::unique_ptr ptr = - std::make_unique(host, websocketPort, etl_); - sources_.push_back(std::move(ptr)); - JLOG(journal_.info()) << __func__ << " : added etl source - " - << sources_.back()->toString(); -} - -void -ETLLoadBalancer::loadInitialLedger( - uint32_t sequence, - ThreadSafeQueue>& writeQueue) -{ - execute( - [this, &sequence, &writeQueue](auto& source) { - bool res = source->loadInitialLedger(sequence, writeQueue); - if (!res) - { - JLOG(journal_.error()) << "Failed to download initial ledger. " - << " Sequence = " << sequence - << " source = " << source->toString(); - } - return res; - }, - sequence); -} - -std::optional -ETLLoadBalancer::fetchLedger(uint32_t ledgerSequence, bool getObjects) -{ - org::xrpl::rpc::v1::GetLedgerResponse response; - bool success = execute( - [&response, ledgerSequence, getObjects, this](auto& source) { - auto [status, data] = - source->fetchLedger(ledgerSequence, getObjects); - response = std::move(data); - if (status.ok() && response.validated()) - { - JLOG(journal_.info()) - << "Successfully fetched ledger = " << ledgerSequence - << " from source = " << source->toString(); - return true; - } - else - { - JLOG(journal_.warn()) - << "Error getting ledger = " << ledgerSequence - << " Reply : " << response.DebugString() - << " error_code : " << status.error_code() - << " error_msg : " << status.error_message() - << " source = " << source->toString(); - return false; - } - }, - ledgerSequence); - if (success) - return response; - else - return {}; -} - -std::unique_ptr -ETLLoadBalancer::getP2pForwardingStub() const -{ - if (sources_.size() == 0) - return nullptr; - srand((unsigned)time(0)); - auto sourceIdx = rand() % sources_.size(); - auto numAttempts = 0; - while (numAttempts < sources_.size()) - { - auto stub = sources_[sourceIdx]->getP2pForwardingStub(); - if (!stub) - { - sourceIdx = (sourceIdx + 1) % sources_.size(); - ++numAttempts; - continue; - } - return stub; - } - return nullptr; -} - -Json::Value -ETLLoadBalancer::forwardToP2p(RPC::JsonContext& context) const -{ - Json::Value res; - if (sources_.size() == 0) - return res; - srand((unsigned)time(0)); - auto sourceIdx = rand() % sources_.size(); - auto numAttempts = 0; - - auto mostRecent = etl_.getNetworkValidatedLedgers().tryGetMostRecent(); - while (numAttempts < sources_.size()) - { - auto increment = [&]() { - sourceIdx = (sourceIdx + 1) % sources_.size(); - ++numAttempts; - }; - auto& src = sources_[sourceIdx]; - if (mostRecent && !src->hasLedger(*mostRecent)) - { - increment(); - continue; - } - res = src->forwardToP2p(context); - if (!res.isMember("forwarded") || res["forwarded"] != true) - { - increment(); - continue; - } - return res; - } - RPC::Status err = {rpcFAILED_TO_FORWARD}; - err.inject(res); - return res; -} - -std::unique_ptr -ETLSource::getP2pForwardingStub() const -{ - if (!connected_) - return nullptr; - try - { - return 
org::xrpl::rpc::v1::XRPLedgerAPIService::NewStub( - grpc::CreateChannel( - beast::IP::Endpoint( - boost::asio::ip::make_address(ip_), std::stoi(grpcPort_)) - .to_string(), - grpc::InsecureChannelCredentials())); - } - catch (std::exception const&) - { - JLOG(journal_.error()) << "Failed to create grpc stub"; - return nullptr; - } -} - -Json::Value -ETLSource::forwardToP2p(RPC::JsonContext& context) const -{ - JLOG(journal_.debug()) << "Attempting to forward request to tx. " - << "request = " << context.params.toStyledString(); - - Json::Value response; - if (!connected_) - { - JLOG(journal_.error()) - << "Attempted to proxy but failed to connect to tx"; - return response; - } - namespace beast = boost::beast; // from - namespace http = beast::http; // from - namespace websocket = beast::websocket; // from - namespace net = boost::asio; // from - using tcp = boost::asio::ip::tcp; // from - Json::Value& request = context.params; - try - { - // The io_context is required for all I/O - net::io_context ioc; - - // These objects perform our I/O - tcp::resolver resolver{ioc}; - - JLOG(journal_.debug()) << "Creating websocket"; - auto ws = std::make_unique>(ioc); - - // Look up the domain name - auto const results = resolver.resolve(ip_, wsPort_); - - JLOG(journal_.debug()) << "Connecting websocket"; - // Make the connection on the IP address we get from a lookup - net::connect(ws->next_layer(), results.begin(), results.end()); - - // Set a decorator to change the User-Agent of the handshake - // and to tell rippled to charge the client IP for RPC - // resources. See "secure_gateway" in - // https://github.com/ripple/rippled/blob/develop/cfg/rippled-example.cfg - ws->set_option(websocket::stream_base::decorator( - [&context](websocket::request_type& req) { - req.set( - http::field::user_agent, - std::string(BOOST_BEAST_VERSION_STRING) + - " websocket-client-coro"); - req.set( - http::field::forwarded, - "for=" + context.consumer.to_string()); - })); - JLOG(journal_.debug()) << "client ip: " << context.consumer.to_string(); - - JLOG(journal_.debug()) << "Performing websocket handshake"; - // Perform the websocket handshake - ws->handshake(ip_, "/"); - - Json::FastWriter fastWriter; - - JLOG(journal_.debug()) << "Sending request"; - // Send the message - ws->write(net::buffer(fastWriter.write(request))); - - beast::flat_buffer buffer; - ws->read(buffer); - - Json::Reader reader; - if (!reader.parse( - static_cast(buffer.data().data()), response)) - { - JLOG(journal_.error()) << "Error parsing response"; - response[jss::error] = "Error parsing response from tx"; - } - JLOG(journal_.debug()) << "Successfully forward request"; - - response["forwarded"] = true; - return response; - } - catch (std::exception const& e) - { - JLOG(journal_.error()) << "Encountered exception : " << e.what(); - return response; - } -} - -template -bool -ETLLoadBalancer::execute(Func f, uint32_t ledgerSequence) -{ - srand((unsigned)time(0)); - auto sourceIdx = rand() % sources_.size(); - auto numAttempts = 0; - - while (!etl_.isStopping()) - { - auto& source = sources_[sourceIdx]; - - JLOG(journal_.debug()) - << __func__ << " : " - << "Attempting to execute func. 
ledger sequence = " - << ledgerSequence << " - source = " << source->toString(); - if (source->hasLedger(ledgerSequence)) - { - bool res = f(source); - if (res) - { - JLOG(journal_.debug()) - << __func__ << " : " - << "Successfully executed func at source = " - << source->toString() - << " - ledger sequence = " << ledgerSequence; - break; - } - else - { - JLOG(journal_.warn()) - << __func__ << " : " - << "Failed to execute func at source = " - << source->toString() - << " - ledger sequence = " << ledgerSequence; - } - } - else - { - JLOG(journal_.warn()) - << __func__ << " : " - << "Ledger not present at source = " << source->toString() - << " - ledger sequence = " << ledgerSequence; - } - sourceIdx = (sourceIdx + 1) % sources_.size(); - numAttempts++; - if (numAttempts % sources_.size() == 0) - { - // If another process loaded the ledger into the database, we can - // abort trying to fetch the ledger from a transaction processing - // process - if (etl_.getApplication().getLedgerMaster().getLedgerBySeq( - ledgerSequence)) - { - JLOG(journal_.warn()) - << __func__ << " : " - << "Error executing function. " - << " Tried all sources, but ledger was found in db." - << " Sequence = " << ledgerSequence; - return false; - } - JLOG(journal_.error()) - << __func__ << " : " - << "Error executing function " - << " - ledger sequence = " << ledgerSequence - << " - Tried all sources. Sleeping and trying again"; - std::this_thread::sleep_for(std::chrono::seconds(2)); - } - } - return !etl_.isStopping(); -} - -void -ETLLoadBalancer::start() -{ - for (auto& source : sources_) - source->start(); -} - -void -ETLLoadBalancer::stop() -{ - for (auto& source : sources_) - source->stop(); -} - -} // namespace ripple diff --git a/src/xrpld/app/reporting/ETLSource.h b/src/xrpld/app/reporting/ETLSource.h deleted file mode 100644 index 633b72afac1..00000000000 --- a/src/xrpld/app/reporting/ETLSource.h +++ /dev/null @@ -1,435 +0,0 @@ -//------------------------------------------------------------------------------ -/* - This file is part of rippled: https://github.com/ripple/rippled - Copyright (c) 2020 Ripple Labs Inc. - - Permission to use, copy, modify, and/or distribute this software for any - purpose with or without fee is hereby granted, provided that the above - copyright notice and this permission notice appear in all copies. - - THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. -*/ -//============================================================================== - -#ifndef RIPPLE_APP_REPORTING_ETLSOURCE_H_INCLUDED -#define RIPPLE_APP_REPORTING_ETLSOURCE_H_INCLUDED -#include -#include -#include -#include -#include - -#include -#include -#include -#include - -#include - -namespace ripple { - -class ReportingETL; - -/// This class manages a connection to a single ETL source. This is almost -/// always a p2p node, but really could be another reporting node. This class -/// subscribes to the ledgers and transactions_proposed streams of the -/// associated p2p node, and keeps track of which ledgers the p2p node has. 
This
-/// class also has methods for extracting said ledgers. Lastly this class
-/// forwards transactions received on the transactions_proposed streams to any
-/// subscribers.
-class ETLSource
-{
-    std::string ip_;
-
-    std::string wsPort_;
-
-    std::string grpcPort_;
-
-    ReportingETL& etl_;
-
-    // a reference to the application's io_service
-    boost::asio::io_context& ioc_;
-
-    std::unique_ptr<org::xrpl::rpc::v1::XRPLedgerAPIService::Stub> stub_;
-
-    std::unique_ptr<boost::beast::websocket::stream<boost::beast::tcp_stream>>
-        ws_;
-    boost::asio::ip::tcp::resolver resolver_;
-
-    boost::beast::flat_buffer readBuffer_;
-
-    std::vector<std::pair<uint32_t, uint32_t>> validatedLedgers_;
-
-    std::string validatedLedgersRaw_;
-
-    NetworkValidatedLedgers& networkValidatedLedgers_;
-
-    beast::Journal journal_;
-
-    Application& app_;
-
-    mutable std::mutex mtx_;
-
-    size_t numFailures_ = 0;
-
-    std::atomic_bool closing_ = false;
-
-    std::atomic_bool connected_ = false;
-
-    // true if this ETL source is forwarding transactions received on the
-    // transactions_proposed stream. There are usually multiple ETL sources,
-    // so to avoid forwarding the same transaction multiple times, we only
-    // forward from one particular ETL source at a time.
-    std::atomic_bool forwardingStream_ = false;
-
-    // The last time a message was received on the ledgers stream
-    std::chrono::system_clock::time_point lastMsgTime_;
-    mutable std::mutex lastMsgTimeMtx_;
-
-    // used for retrying connections
-    boost::asio::steady_timer timer_;
-
-public:
-    bool
-    isConnected() const
-    {
-        return connected_;
-    }
-
-    std::chrono::system_clock::time_point
-    getLastMsgTime() const
-    {
-        std::lock_guard lck(lastMsgTimeMtx_);
-        return lastMsgTime_;
-    }
-
-    void
-    setLastMsgTime()
-    {
-        std::lock_guard lck(lastMsgTimeMtx_);
-        lastMsgTime_ = std::chrono::system_clock::now();
-    }
-
-    /// Create ETL source without gRPC endpoint
-    /// Fetch ledger and load initial ledger will fail for this source
-    /// Primarily used in read-only mode, to monitor when ledgers are validated
-    ETLSource(std::string ip, std::string wsPort, ReportingETL& etl);
-
-    /// Create ETL source with gRPC endpoint
-    ETLSource(
-        std::string ip,
-        std::string wsPort,
-        std::string grpcPort,
-        ReportingETL& etl);
-
-    /// @param sequence ledger sequence to check for
-    /// @return true if this source has the desired ledger
-    bool
-    hasLedger(uint32_t sequence) const
-    {
-        std::lock_guard lck(mtx_);
-        for (auto& pair : validatedLedgers_)
-        {
-            if (sequence >= pair.first && sequence <= pair.second)
-            {
-                return true;
-            }
-            else if (sequence < pair.first)
-            {
-                // validatedLedgers_ is a sorted list of disjoint ranges
-                // if the sequence comes before this range, the sequence will
-                // come before all subsequent ranges
-                return false;
-            }
-        }
-        return false;
-    }
-
-    /// process the validated range received on the ledgers stream. 
set the - /// appropriate member variable - /// @param range validated range received on ledgers stream - void - setValidatedRange(std::string const& range) - { - std::vector> pairs; - std::vector ranges; - boost::split(ranges, range, boost::is_any_of(",")); - for (auto& pair : ranges) - { - std::vector minAndMax; - - boost::split(minAndMax, pair, boost::is_any_of("-")); - - if (minAndMax.size() == 1) - { - uint32_t sequence = std::stoll(minAndMax[0]); - pairs.push_back(std::make_pair(sequence, sequence)); - } - else - { - assert(minAndMax.size() == 2); - uint32_t min = std::stoll(minAndMax[0]); - uint32_t max = std::stoll(minAndMax[1]); - pairs.push_back(std::make_pair(min, max)); - } - } - std::sort(pairs.begin(), pairs.end(), [](auto left, auto right) { - return left.first < right.first; - }); - - // we only hold the lock here, to avoid blocking while string processing - std::lock_guard lck(mtx_); - validatedLedgers_ = std::move(pairs); - validatedLedgersRaw_ = range; - } - - /// @return the validated range of this source - /// @note this is only used by server_info - std::string - getValidatedRange() const - { - std::lock_guard lck(mtx_); - - return validatedLedgersRaw_; - } - - /// Close the underlying websocket - void - stop() - { - JLOG(journal_.debug()) << __func__ << " : " - << "Closing websocket"; - - assert(ws_); - close(false); - } - - /// Fetch the specified ledger - /// @param ledgerSequence sequence of the ledger to fetch - /// @getObjects whether to get the account state diff between this ledger - /// and the prior one - /// @return the extracted data and the result status - std::pair - fetchLedger(uint32_t ledgerSequence, bool getObjects = true); - - std::string - toString() const - { - return "{ validated_ledger : " + getValidatedRange() + - " , ip : " + ip_ + " , web socket port : " + wsPort_ + - ", grpc port : " + grpcPort_ + " }"; - } - - Json::Value - toJson() const - { - Json::Value result(Json::objectValue); - result["connected"] = connected_.load(); - result["validated_ledgers_range"] = getValidatedRange(); - result["ip"] = ip_; - result["websocket_port"] = wsPort_; - result["grpc_port"] = grpcPort_; - auto last = getLastMsgTime(); - if (last.time_since_epoch().count() != 0) - result["last_message_arrival_time"] = - to_string(std::chrono::floor(last)); - return result; - } - - /// Download a ledger in full - /// @param ledgerSequence sequence of the ledger to download - /// @param writeQueue queue to push downloaded ledger objects - /// @return true if the download was successful - bool - loadInitialLedger( - uint32_t ledgerSequence, - ThreadSafeQueue>& writeQueue); - - /// Begin sequence of operations to connect to the ETL source and subscribe - /// to ledgers and transactions_proposed - void - start(); - - /// Attempt to reconnect to the ETL source - void - reconnect(boost::beast::error_code ec); - - /// Callback - void - onResolve( - boost::beast::error_code ec, - boost::asio::ip::tcp::resolver::results_type results); - - /// Callback - void - onConnect( - boost::beast::error_code ec, - boost::asio::ip::tcp::resolver::results_type::endpoint_type endpoint); - - /// Callback - void - onHandshake(boost::beast::error_code ec); - - /// Callback - void - onWrite(boost::beast::error_code ec, size_t size); - - /// Callback - void - onRead(boost::beast::error_code ec, size_t size); - - /// Handle the most recently received message - /// @return true if the message was handled successfully. 
false on error - bool - handleMessage(); - - /// Close the websocket - /// @param startAgain whether to reconnect - void - close(bool startAgain); - - /// Get grpc stub to forward requests to p2p node - /// @return stub to send requests to ETL source - std::unique_ptr - getP2pForwardingStub() const; - - /// Forward a JSON RPC request to a p2p node - /// @param context context of RPC request - /// @return response received from ETL source - Json::Value - forwardToP2p(RPC::JsonContext& context) const; -}; - -/// This class is used to manage connections to transaction processing processes -/// This class spawns a listener for each etl source, which listens to messages -/// on the ledgers stream (to keep track of which ledgers have been validated by -/// the network, and the range of ledgers each etl source has). This class also -/// allows requests for ledger data to be load balanced across all possible etl -/// sources. -class ETLLoadBalancer -{ -private: - ReportingETL& etl_; - - beast::Journal journal_; - - std::vector> sources_; - -public: - ETLLoadBalancer(ReportingETL& etl); - - /// Add an ETL source - /// @param host host or ip of ETL source - /// @param websocketPort port where ETL source accepts websocket connections - /// @param grpcPort port where ETL source accepts gRPC requests - void - add(std::string& host, std::string& websocketPort, std::string& grpcPort); - - /// Add an ETL source without gRPC support. This source will send messages - /// on the ledgers and transactions_proposed streams, but will not be able - /// to handle the gRPC requests that are used for ETL - /// @param host host or ip of ETL source - /// @param websocketPort port where ETL source accepts websocket connections - void - add(std::string& host, std::string& websocketPort); - - /// Load the initial ledger, writing data to the queue - /// @param sequence sequence of ledger to download - /// @param writeQueue queue to push downloaded data to - void - loadInitialLedger( - uint32_t sequence, - ThreadSafeQueue>& writeQueue); - - /// Fetch data for a specific ledger. This function will continuously try - /// to fetch data for the specified ledger until the fetch succeeds, the - /// ledger is found in the database, or the server is shutting down. - /// @param ledgerSequence sequence of ledger to fetch data for - /// @param getObjects if true, fetch diff between specified ledger and - /// previous - /// @return the extracted data, if extraction was successful. If the ledger - /// was found in the database or the server is shutting down, the optional - /// will be empty - std::optional - fetchLedger(uint32_t ledgerSequence, bool getObjects); - - /// Setup all of the ETL sources and subscribe to the necessary streams - void - start(); - - void - stop(); - - /// Determine whether messages received on the transactions_proposed stream - /// should be forwarded to subscribing clients. The server subscribes to - /// transactions_proposed, validations, and manifests on multiple - /// ETLSources, yet only forwards messages from one source at any given time - /// (to avoid sending duplicate messages to clients). 
- /// @param in ETLSource in question - /// @return true if messages should be forwarded - bool - shouldPropagateStream(ETLSource* in) const - { - for (auto& src : sources_) - { - assert(src); - // We pick the first ETLSource encountered that is connected - if (src->isConnected()) - { - if (src.get() == in) - return true; - else - return false; - } - } - - // If no sources connected, then this stream has not been forwarded. - return true; - } - - Json::Value - toJson() const - { - Json::Value ret(Json::arrayValue); - for (auto& src : sources_) - { - ret.append(src->toJson()); - } - return ret; - } - - /// Randomly select a p2p node to forward a gRPC request to - /// @return gRPC stub to forward requests to p2p node - std::unique_ptr - getP2pForwardingStub() const; - - /// Forward a JSON RPC request to a randomly selected p2p node - /// @param context context of the request - /// @return response received from p2p node - Json::Value - forwardToP2p(RPC::JsonContext& context) const; - -private: - /// f is a function that takes an ETLSource as an argument and returns a - /// bool. Attempt to execute f for one randomly chosen ETLSource that has - /// the specified ledger. If f returns false, another randomly chosen - /// ETLSource is used. The process repeats until f returns true. - /// @param f function to execute. This function takes the ETL source as an - /// argument, and returns a bool. - /// @param ledgerSequence f is executed for each ETLSource that has this - /// ledger - /// @return true if f was eventually executed successfully. false if the - /// ledger was found in the database or the server is shutting down - template - bool - execute(Func f, uint32_t ledgerSequence); -}; - -} // namespace ripple -#endif diff --git a/src/xrpld/app/reporting/P2pProxy.cpp b/src/xrpld/app/reporting/P2pProxy.cpp deleted file mode 100644 index df699f4faf0..00000000000 --- a/src/xrpld/app/reporting/P2pProxy.cpp +++ /dev/null @@ -1,84 +0,0 @@ -//------------------------------------------------------------------------------ -/* - This file is part of rippled: https://github.com/ripple/rippled - Copyright (c) 2020 Ripple Labs Inc. - - Permission to use, copy, modify, and/or distribute this software for any - purpose with or without fee is hereby granted, provided that the above - copyright notice and this permission notice appear in all copies. - - THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
-*/ -//============================================================================== - -#include -#include -#include -#include - -namespace ripple { - -Json::Value -forwardToP2p(RPC::JsonContext& context) -{ - return context.app.getReportingETL().getETLLoadBalancer().forwardToP2p( - context); -} - -std::unique_ptr -getP2pForwardingStub(RPC::Context& context) -{ - return context.app.getReportingETL() - .getETLLoadBalancer() - .getP2pForwardingStub(); -} - -// We only forward requests where ledger_index is "current" or "closed" -// otherwise, attempt to handle here -bool -shouldForwardToP2p(RPC::JsonContext& context) -{ - if (!context.app.config().reporting()) - return false; - - Json::Value& params = context.params; - std::string strCommand = params.isMember(jss::command) - ? params[jss::command].asString() - : params[jss::method].asString(); - - JLOG(context.j.trace()) << "COMMAND:" << strCommand; - JLOG(context.j.trace()) << "REQUEST:" << params; - auto handler = RPC::getHandler( - context.apiVersion, context.app.config().BETA_RPC_API, strCommand); - if (!handler) - { - JLOG(context.j.error()) - << "Error getting handler. command = " << strCommand; - return false; - } - - if (handler->condition_ == RPC::NEEDS_CURRENT_LEDGER || - handler->condition_ == RPC::NEEDS_CLOSED_LEDGER) - { - return true; - } - - if (params.isMember(jss::ledger_index)) - { - auto indexValue = params[jss::ledger_index]; - if (indexValue.isString()) - { - auto index = indexValue.asString(); - return index == "current" || index == "closed"; - } - } - return false; -} - -} // namespace ripple diff --git a/src/xrpld/app/reporting/P2pProxy.h b/src/xrpld/app/reporting/P2pProxy.h deleted file mode 100644 index d49389f42d3..00000000000 --- a/src/xrpld/app/reporting/P2pProxy.h +++ /dev/null @@ -1,113 +0,0 @@ -//------------------------------------------------------------------------------ -/* - This file is part of rippled: https://github.com/ripple/rippled - Copyright (c) 2020 Ripple Labs Inc. - - Permission to use, copy, modify, and/or distribute this software for any - purpose with or without fee is hereby granted, provided that the above - copyright notice and this permission notice appear in all copies. - - THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
-*/ -//============================================================================== - -#ifndef RIPPLE_APP_REPORTING_P2PPROXY_H_INCLUDED -#define RIPPLE_APP_REPORTING_P2PPROXY_H_INCLUDED - -#include -#include -#include -#include - -#include -#include - -namespace ripple { -/// Forward a JSON request to a p2p node and return the response -/// @param context context of the request -/// @return response from p2p node -Json::Value -forwardToP2p(RPC::JsonContext& context); - -/// Whether a request should be forwarded, based on request parameters -/// @param context context of the request -/// @return true if should be forwarded -bool -shouldForwardToP2p(RPC::JsonContext& context); - -template -bool -needCurrentOrClosed(Request& request) -{ - // These are the only gRPC requests that specify a ledger - if constexpr ( - std::is_same::value || - std::is_same:: - value || - std::is_same::value) - { - if (request.ledger().ledger_case() == - org::xrpl::rpc::v1::LedgerSpecifier::LedgerCase::kShortcut) - { - if (request.ledger().shortcut() != - org::xrpl::rpc::v1::LedgerSpecifier::SHORTCUT_VALIDATED && - request.ledger().shortcut() != - org::xrpl::rpc::v1::LedgerSpecifier::SHORTCUT_UNSPECIFIED) - return true; - } - } - // GetLedgerDiff specifies two ledgers - else if constexpr (std::is_same< - Request, - org::xrpl::rpc::v1::GetLedgerDiffRequest>::value) - { - auto help = [](auto specifier) { - if (specifier.ledger_case() == - org::xrpl::rpc::v1::LedgerSpecifier::LedgerCase::kShortcut) - { - if (specifier.shortcut() != - org::xrpl::rpc::v1::LedgerSpecifier:: - SHORTCUT_VALIDATED && - specifier.shortcut() != - org::xrpl::rpc::v1::LedgerSpecifier:: - SHORTCUT_UNSPECIFIED) - return true; - } - return false; - }; - return help(request.base_ledger()) || help(request.desired_ledger()); - } - return false; -} - -/// Whether a request should be forwarded, based on request parameters -/// @param context context of the request -/// @condition required condition for the request -/// @return true if should be forwarded -template -bool -shouldForwardToP2p(RPC::GRPCContext& context, RPC::Condition condition) -{ - if (!context.app.config().reporting()) - return false; - if (condition == RPC::NEEDS_CURRENT_LEDGER || - condition == RPC::NEEDS_CLOSED_LEDGER) - return true; - - return needCurrentOrClosed(context.params); -} - -/// Get stub used to forward gRPC requests to a p2p node -/// @param context context of the request -/// @return stub to forward requests -std::unique_ptr -getP2pForwardingStub(RPC::Context& context); - -} // namespace ripple -#endif diff --git a/src/xrpld/app/reporting/README.md b/src/xrpld/app/reporting/README.md deleted file mode 100644 index f55b2d8d60d..00000000000 --- a/src/xrpld/app/reporting/README.md +++ /dev/null @@ -1,110 +0,0 @@ -# Reporting mode - -Reporting mode is a special operating mode of rippled, designed to handle RPCs -for validated data. A server running in reporting mode does not connect to the -p2p network, but rather extracts validated data from a node that is connected -to the p2p network. To run rippled in reporting mode, you must also run a -separate rippled node in p2p mode, to use as an ETL source. Multiple reporting -nodes can share access to the same network accessible databases (Postgres and -Cassandra); at any given time, only one reporting node will be performing ETL -and writing to the databases, while the others simply read from the databases. -A server running in reporting mode will forward any requests that require access -to the p2p network to a p2p node. 
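-
-For illustration, a reporting node is pointed at its ETL source(s) in the
-config file along these lines. This is only a sketch: the stanza layout follows
-the example reporting config that shipped with rippled, and the IP and ports
-here are hypothetical.
-```
-# Hypothetical example values; check the example reporting config for the
-# authoritative stanza and key names.
-[reporting]
-etl_source
-
-[etl_source]
-source_ip=127.0.0.1
-source_ws_port=6005
-source_grpc_port=50051
-```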
-
-# Reporting ETL
-A single reporting node has one or more ETL sources, specified in the config
-file. A reporting node will subscribe to the "ledgers" stream of each of the ETL
-sources. This stream sends a message whenever a new ledger is validated. Upon
-receiving a message on the stream, reporting will then fetch the data associated
-with the newly validated ledger from one of the ETL sources. The fetch is
-performed via a gRPC request ("GetLedger"). This request returns the ledger
-header, transactions+metadata blobs, and every ledger object
-added/modified/deleted as part of this ledger. ETL then writes all of this data
-to the databases, and moves on to the next ledger. ETL does not apply
-transactions, but rather extracts the already computed results of those
-transactions (all of the added/modified/deleted SHAMap leaf nodes of the state
-tree). The new SHAMap inner nodes are computed by the ETL writer; this
-computation mainly involves manipulating child pointers and recomputing hashes,
-logic which is buried inside of SHAMap.
-
-If the database is entirely empty, ETL must download an entire ledger in full
-(as opposed to just the diff, as described above). This download is done via the
-"GetLedgerData" gRPC request. "GetLedgerData" allows clients to page through an
-entire ledger over several RPC calls. ETL will page through an entire ledger,
-and write each object to the database.
-
-If the database is not empty, the reporting node will first come up in a "soft"
-read-only mode. In read-only mode, the server does not perform ETL and simply
-publishes new ledgers as they are written to the database.
-If the database is not updated within a certain time period
-(currently hard-coded at 20 seconds), the reporting node will begin the ETL
-process and start writing to the database. Postgres will report an error when
-trying to write a record with a key that already exists. ETL uses this error to
-determine that another process is writing to the database, and subsequently
-falls back to a soft read-only mode. Reporting nodes can also operate in strict
-read-only mode, in which case they will never write to the database.
-
-# Database Nuances
-The database schema for reporting mode does not allow any history gaps.
-Attempting to write a ledger to a non-empty database where the previous ledger
-does not exist will return an error.
-
-The databases must be set up prior to running reporting mode. This requires
-creating the Postgres database and setting up the Cassandra keyspace. Reporting
-mode will create the objects table in Cassandra if the table does not yet exist.
-
-Creating the Postgres database:
-```
-$ psql -h [host] -U [user]
-postgres=# create database [database];
-```
-Creating the keyspace:
-```
-$ cqlsh [host] [port]
-> CREATE KEYSPACE rippled WITH REPLICATION =
-  {'class' : 'SimpleStrategy', 'replication_factor' : 3 };
-```
-A replication factor of 3 is recommended. However, when running locally, only a
-replication factor of 1 is supported.
-
-Online delete is not supported by reporting mode and must be done manually. The
-easiest way to do this would be to set up a second Cassandra keyspace and
-Postgres database, bring up a single reporting mode instance that uses those
-databases, and start ETL at a ledger of your choosing (via --startReporting on
-the command line). Once this node is caught up, the other databases can be
-deleted.
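-
-For example, kicking off ETL at a chosen starting ledger might look like the
-following (the config path and ledger sequence here are hypothetical values,
-shown only to illustrate the flag mentioned above):
-```
-# Hypothetical config path and starting ledger sequence
-$ rippled --conf /etc/opt/ripple/reporting.cfg --startReporting 60000000
-```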
-
-To delete:
-```
-$ psql -h [host] -U [user] -d [database]
-reporting=$ truncate table ledgers cascade;
-```
-```
-$ cqlsh [host] [port]
-> truncate table objects;
-```
-# Proxy
-RPCs that require access to the p2p network and/or the open ledger are forwarded
-from the reporting node to one of the ETL sources. The request is not processed
-prior to forwarding, and the response is delivered as-is to the client.
-Reporting will forward any requests that always require p2p/open ledger access
-(fee and submit, for instance). In addition, any request that explicitly
-requests data from the open or closed ledger (via setting
-"ledger_index":"current" or "ledger_index":"closed") will be forwarded to a
-p2p node.
-
-For the stream "transactions_proposed" (AKA "rt_transactions"), reporting
-subscribes to the "transactions_proposed" streams of each ETL source, and then
-forwards those messages to any clients subscribed to the same stream on the
-reporting node. A reporting node will subscribe to the stream on each ETL
-source, but will only forward the messages from one of the streams at any given
-time (to avoid sending the same message more than once to the same client).
-
-# API changes
-A reporting node defaults to only returning validated data. If a ledger is not
-specified, the most recently validated ledger is used. This is in contrast to
-the normal rippled behavior, where the open ledger is used by default.
-
-Reporting will reject all subscribe requests for streams "server", "manifests",
-"validations", "peer_status" and "consensus".
-
diff --git a/src/xrpld/app/reporting/ReportingETL.cpp b/src/xrpld/app/reporting/ReportingETL.cpp
deleted file mode 100644
index 2f6411b0808..00000000000
--- a/src/xrpld/app/reporting/ReportingETL.cpp
+++ /dev/null
@@ -1,960 +0,0 @@
-//------------------------------------------------------------------------------
-/*
-    This file is part of rippled: https://github.com/ripple/rippled
-    Copyright (c) 2020 Ripple Labs Inc.
-
-    Permission to use, copy, modify, and/or distribute this software for any
-    purpose with or without fee is hereby granted, provided that the above
-    copyright notice and this permission notice appear in all copies.
-
-    THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
-    WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
-    MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
-    ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
-    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
-    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
-    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-*/ -//============================================================================== - -#include -#include - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -namespace ripple { - -namespace detail { -/// Convenience function for printing out basic ledger info -std::string -toString(LedgerInfo const& info) -{ - std::stringstream ss; - ss << "LedgerInfo { Sequence : " << info.seq - << " Hash : " << strHex(info.hash) << " TxHash : " << strHex(info.txHash) - << " AccountHash : " << strHex(info.accountHash) - << " ParentHash : " << strHex(info.parentHash) << " }"; - return ss.str(); -} -} // namespace detail - -void -ReportingETL::consumeLedgerData( - std::shared_ptr& ledger, - ThreadSafeQueue>& writeQueue) -{ - std::shared_ptr sle; - size_t num = 0; - while (!stopping_ && (sle = writeQueue.pop())) - { - assert(sle); - if (!ledger->exists(sle->key())) - ledger->rawInsert(sle); - - if (flushInterval_ != 0 && (num % flushInterval_) == 0) - { - JLOG(journal_.debug()) << "Flushing! key = " << strHex(sle->key()); - ledger->stateMap().flushDirty(hotACCOUNT_NODE); - } - ++num; - } -} - -std::vector -ReportingETL::insertTransactions( - std::shared_ptr& ledger, - org::xrpl::rpc::v1::GetLedgerResponse& data) -{ - std::vector accountTxData; - for (auto& txn : data.transactions_list().transactions()) - { - auto& raw = txn.transaction_blob(); - - SerialIter it{raw.data(), raw.size()}; - STTx sttx{it}; - - auto txSerializer = std::make_shared(sttx.getSerializer()); - - TxMeta txMeta{ - sttx.getTransactionID(), ledger->info().seq, txn.metadata_blob()}; - - auto metaSerializer = - std::make_shared(txMeta.getAsObject().getSerializer()); - - JLOG(journal_.trace()) - << __func__ << " : " - << "Inserting transaction = " << sttx.getTransactionID(); - uint256 nodestoreHash = ledger->rawTxInsertWithHash( - sttx.getTransactionID(), txSerializer, metaSerializer); - accountTxData.emplace_back(txMeta, std::move(nodestoreHash), journal_); - } - return accountTxData; -} - -std::shared_ptr -ReportingETL::loadInitialLedger(uint32_t startingSequence) -{ - // check that database is actually empty - auto ledger = std::const_pointer_cast( - app_.getLedgerMaster().getValidatedLedger()); - if (ledger) - { - JLOG(journal_.fatal()) << __func__ << " : " - << "Database is not empty"; - assert(false); - return {}; - } - - // fetch the ledger from the network. This function will not return until - // either the fetch is successful, or the server is being shutdown. This - // only fetches the ledger header and the transactions+metadata - std::optional ledgerData{ - fetchLedgerData(startingSequence)}; - if (!ledgerData) - return {}; - - LedgerInfo lgrInfo = - deserializeHeader(makeSlice(ledgerData->ledger_header()), true); - - JLOG(journal_.debug()) << __func__ << " : " - << "Deserialized ledger header. " - << detail::toString(lgrInfo); - - ledger = - std::make_shared(lgrInfo, app_.config(), app_.getNodeFamily()); - ledger->stateMap().clearSynching(); - ledger->txMap().clearSynching(); - -#ifdef RIPPLED_REPORTING - std::vector accountTxData = - insertTransactions(ledger, *ledgerData); -#endif - - auto start = std::chrono::system_clock::now(); - - ThreadSafeQueue> writeQueue; - std::thread asyncWriter{[this, &ledger, &writeQueue]() { - consumeLedgerData(ledger, writeQueue); - }}; - - // download the full account state map. This function downloads full ledger - // data and pushes the downloaded data into the writeQueue. 
asyncWriter - // consumes from the queue and inserts the data into the Ledger object. - // Once the below call returns, all data has been pushed into the queue - loadBalancer_.loadInitialLedger(startingSequence, writeQueue); - - // null is used to represent the end of the queue - std::shared_ptr null; - writeQueue.push(null); - // wait for the writer to finish - asyncWriter.join(); - - if (!stopping_) - { - flushLedger(ledger); - if (app_.config().reporting()) - { -#ifdef RIPPLED_REPORTING - dynamic_cast(&app_.getRelationalDatabase()) - ->writeLedgerAndTransactions(ledger->info(), accountTxData); -#endif - } - } - auto end = std::chrono::system_clock::now(); - JLOG(journal_.debug()) << "Time to download and store ledger = " - << ((end - start).count()) / 1000000000.0; - return ledger; -} - -void -ReportingETL::flushLedger(std::shared_ptr& ledger) -{ - JLOG(journal_.debug()) << __func__ << " : " - << "Flushing ledger. " - << detail::toString(ledger->info()); - // These are recomputed in setImmutable - auto& accountHash = ledger->info().accountHash; - auto& txHash = ledger->info().txHash; - auto& ledgerHash = ledger->info().hash; - - assert( - ledger->info().seq < XRP_LEDGER_EARLIEST_FEES || - ledger->read(keylet::fees())); - ledger->setImmutable(false); - auto start = std::chrono::system_clock::now(); - - auto numFlushed = ledger->stateMap().flushDirty(hotACCOUNT_NODE); - - auto numTxFlushed = ledger->txMap().flushDirty(hotTRANSACTION_NODE); - - { - Serializer s(128); - s.add32(HashPrefix::ledgerMaster); - addRaw(ledger->info(), s); - app_.getNodeStore().store( - hotLEDGER, - std::move(s.modData()), - ledger->info().hash, - ledger->info().seq); - } - - app_.getNodeStore().sync(); - - auto end = std::chrono::system_clock::now(); - - JLOG(journal_.debug()) << __func__ << " : " - << "Flushed " << numFlushed - << " nodes to nodestore from stateMap"; - JLOG(journal_.debug()) << __func__ << " : " - << "Flushed " << numTxFlushed - << " nodes to nodestore from txMap"; - - JLOG(journal_.debug()) << __func__ << " : " - << "Flush took " - << (end - start).count() / 1000000000.0 - << " seconds"; - - if (numFlushed == 0) - { - JLOG(journal_.fatal()) << __func__ << " : " - << "Flushed 0 nodes from state map"; - assert(false); - } - if (numTxFlushed == 0) - { - JLOG(journal_.warn()) << __func__ << " : " - << "Flushed 0 nodes from tx map"; - } - - // Make sure calculated hashes are correct - if (ledger->stateMap().getHash().as_uint256() != accountHash) - { - JLOG(journal_.fatal()) - << __func__ << " : " - << "State map hash does not match. " - << "Expected hash = " << strHex(accountHash) << "Actual hash = " - << strHex(ledger->stateMap().getHash().as_uint256()); - Throw("state map hash mismatch"); - } - - if (ledger->txMap().getHash().as_uint256() != txHash) - { - JLOG(journal_.fatal()) - << __func__ << " : " - << "Tx map hash does not match. " - << "Expected hash = " << strHex(txHash) << "Actual hash = " - << strHex(ledger->txMap().getHash().as_uint256()); - Throw("tx map hash mismatch"); - } - - if (ledger->info().hash != ledgerHash) - { - JLOG(journal_.fatal()) - << __func__ << " : " - << "Ledger hash does not match. " - << "Expected hash = " << strHex(ledgerHash) - << "Actual hash = " << strHex(ledger->info().hash); - Throw("ledger hash mismatch"); - } - - JLOG(journal_.info()) << __func__ << " : " - << "Successfully flushed ledger! 
" - << detail::toString(ledger->info()); -} - -void -ReportingETL::publishLedger(std::shared_ptr& ledger) -{ - app_.getOPs().pubLedger(ledger); - - setLastPublish(); -} - -bool -ReportingETL::publishLedger(uint32_t ledgerSequence, uint32_t maxAttempts) -{ - JLOG(journal_.info()) << __func__ << " : " - << "Attempting to publish ledger = " - << ledgerSequence; - size_t numAttempts = 0; - while (!stopping_) - { - auto ledger = app_.getLedgerMaster().getLedgerBySeq(ledgerSequence); - - if (!ledger) - { - JLOG(journal_.warn()) - << __func__ << " : " - << "Trying to publish. Could not find ledger with sequence = " - << ledgerSequence; - // We try maxAttempts times to publish the ledger, waiting one - // second in between each attempt. - // If the ledger is not present in the database after maxAttempts, - // we attempt to take over as the writer. If the takeover fails, - // doContinuousETL will return, and this node will go back to - // publishing. - // If the node is in strict read only mode, we simply - // skip publishing this ledger and return false indicating the - // publish failed - if (numAttempts >= maxAttempts) - { - JLOG(journal_.error()) << __func__ << " : " - << "Failed to publish ledger after " - << numAttempts << " attempts."; - if (!readOnly_) - { - JLOG(journal_.info()) << __func__ << " : " - << "Attempting to become ETL writer"; - return false; - } - else - { - JLOG(journal_.debug()) - << __func__ << " : " - << "In strict read-only mode. " - << "Skipping publishing this ledger. " - << "Beginning fast forward."; - return false; - } - } - else - { - std::this_thread::sleep_for(std::chrono::seconds(1)); - ++numAttempts; - } - continue; - } - - publishStrand_.post([this, ledger, fname = __func__]() { - app_.getOPs().pubLedger(ledger); - setLastPublish(); - JLOG(journal_.info()) - << fname << " : " - << "Published ledger. " << detail::toString(ledger->info()); - }); - return true; - } - return false; -} - -std::optional -ReportingETL::fetchLedgerData(uint32_t idx) -{ - JLOG(journal_.debug()) << __func__ << " : " - << "Attempting to fetch ledger with sequence = " - << idx; - - std::optional response = - loadBalancer_.fetchLedger(idx, false); - JLOG(journal_.trace()) << __func__ << " : " - << "GetLedger reply = " << response->DebugString(); - return response; -} - -std::optional -ReportingETL::fetchLedgerDataAndDiff(uint32_t idx) -{ - JLOG(journal_.debug()) << __func__ << " : " - << "Attempting to fetch ledger with sequence = " - << idx; - - std::optional response = - loadBalancer_.fetchLedger(idx, true); - JLOG(journal_.trace()) << __func__ << " : " - << "GetLedger reply = " << response->DebugString(); - return response; -} - -std::pair, std::vector> -ReportingETL::buildNextLedger( - std::shared_ptr& next, - org::xrpl::rpc::v1::GetLedgerResponse& rawData) -{ - JLOG(journal_.info()) << __func__ << " : " - << "Beginning ledger update"; - - LedgerInfo lgrInfo = - deserializeHeader(makeSlice(rawData.ledger_header()), true); - - JLOG(journal_.debug()) << __func__ << " : " - << "Deserialized ledger header. " - << detail::toString(lgrInfo); - - next->setLedgerInfo(lgrInfo); - - next->stateMap().clearSynching(); - next->txMap().clearSynching(); - - std::vector accountTxData{ - insertTransactions(next, rawData)}; - - JLOG(journal_.debug()) - << __func__ << " : " - << "Inserted all transactions. 
-
-std::optional<org::xrpl::rpc::v1::GetLedgerResponse>
-ReportingETL::fetchLedgerData(uint32_t idx)
-{
-    JLOG(journal_.debug()) << __func__ << " : "
-                           << "Attempting to fetch ledger with sequence = "
-                           << idx;
-
-    std::optional<org::xrpl::rpc::v1::GetLedgerResponse> response =
-        loadBalancer_.fetchLedger(idx, false);
-    JLOG(journal_.trace()) << __func__ << " : "
-                           << "GetLedger reply = " << response->DebugString();
-    return response;
-}
-
-std::optional<org::xrpl::rpc::v1::GetLedgerResponse>
-ReportingETL::fetchLedgerDataAndDiff(uint32_t idx)
-{
-    JLOG(journal_.debug()) << __func__ << " : "
-                           << "Attempting to fetch ledger with sequence = "
-                           << idx;
-
-    std::optional<org::xrpl::rpc::v1::GetLedgerResponse> response =
-        loadBalancer_.fetchLedger(idx, true);
-    JLOG(journal_.trace()) << __func__ << " : "
-                           << "GetLedger reply = " << response->DebugString();
-    return response;
-}
-
-std::pair<std::shared_ptr<Ledger>, std::vector<AccountTransactionsData>>
-ReportingETL::buildNextLedger(
-    std::shared_ptr<Ledger>& next,
-    org::xrpl::rpc::v1::GetLedgerResponse& rawData)
-{
-    JLOG(journal_.info()) << __func__ << " : "
-                          << "Beginning ledger update";
-
-    LedgerInfo lgrInfo =
-        deserializeHeader(makeSlice(rawData.ledger_header()), true);
-
-    JLOG(journal_.debug()) << __func__ << " : "
-                           << "Deserialized ledger header. "
-                           << detail::toString(lgrInfo);
-
-    next->setLedgerInfo(lgrInfo);
-
-    next->stateMap().clearSynching();
-    next->txMap().clearSynching();
-
-    std::vector<AccountTransactionsData> accountTxData{
-        insertTransactions(next, rawData)};
-
-    JLOG(journal_.debug())
-        << __func__ << " : "
-        << "Inserted all transactions. Number of transactions = "
-        << rawData.transactions_list().transactions_size();
-
-    for (auto& obj : rawData.ledger_objects().objects())
-    {
-        auto key = uint256::fromVoidChecked(obj.key());
-        if (!key)
-            throw std::runtime_error("Received malformed object ID");
-
-        auto& data = obj.data();
-
-        // indicates object was deleted
-        if (data.size() == 0)
-        {
-            JLOG(journal_.trace()) << __func__ << " : "
-                                   << "Erasing object = " << *key;
-            if (next->exists(*key))
-                next->rawErase(*key);
-        }
-        else
-        {
-            SerialIter it{data.data(), data.size()};
-            std::shared_ptr<SLE> sle = std::make_shared<SLE>(it, *key);
-
-            if (next->exists(*key))
-            {
-                JLOG(journal_.trace()) << __func__ << " : "
-                                       << "Replacing object = " << *key;
-                next->rawReplace(sle);
-            }
-            else
-            {
-                JLOG(journal_.trace()) << __func__ << " : "
-                                       << "Inserting object = " << *key;
-                next->rawInsert(sle);
-            }
-        }
-    }
-    JLOG(journal_.debug())
-        << __func__ << " : "
-        << "Inserted/modified/deleted all objects. Number of objects = "
-        << rawData.ledger_objects().objects_size();
-
-    if (!rawData.skiplist_included())
-    {
-        next->updateSkipList();
-        JLOG(journal_.warn())
-            << __func__ << " : "
-            << "tx process is not sending skiplist. This indicates that the "
-               "tx process is parsing metadata instead of doing a SHAMap "
-               "diff. Make sure the tx process is running the same code as "
-               "reporting to use SHAMap diff instead of parsing metadata";
-    }
-
-    JLOG(journal_.debug()) << __func__ << " : "
-                           << "Finished ledger update. "
-                           << detail::toString(next->info());
-    return {std::move(next), std::move(accountTxData)};
-}
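`buildNextLedger` applies the extracted diff with an empty-blob-means-deleted convention. A toy sketch of the same insert/replace/erase logic over a plain `std::map`, standing in for the SHAMap-backed ledger (all names hypothetical):

```cpp
#include <cstdint>
#include <iostream>
#include <map>
#include <string>
#include <vector>

// A ledger "diff" as a list of (key, data) pairs; empty data means the
// object was deleted, mirroring the empty-blob convention above.
using Diff = std::vector<std::pair<std::uint32_t, std::string>>;

void
applyDiff(std::map<std::uint32_t, std::string>& state, Diff const& diff)
{
    for (auto const& [key, data] : diff)
    {
        if (data.empty())
            state.erase(key);   // rawErase
        else
            state[key] = data;  // rawInsert or rawReplace
    }
}

int main()
{
    std::map<std::uint32_t, std::string> state{{1, "a"}, {2, "b"}};
    applyDiff(state, {{2, ""}, {3, "c"}});  // delete key 2, insert key 3
    for (auto const& [k, v] : state)
        std::cout << k << " -> " << v << '\n';
}
```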
-
-// Database must be populated when this starts
-std::optional<uint32_t>
-ReportingETL::runETLPipeline(uint32_t startSequence)
-{
-    /*
-     * Behold, mortals! This function spawns three separate threads, which
-     * talk to each other via 2 different thread safe queues and 1 atomic
-     * variable. All threads and queues are function local. This function
-     * returns when all of the threads exit. There are two termination
-     * conditions: the first is if the load thread encounters a write
-     * conflict. In this case, the load thread sets writeConflict, an atomic
-     * bool, to true, which signals the other threads to stop. The second
-     * termination condition is when the entire server is shutting down,
-     * which is detected in one of three ways:
-     * 1. isStopping() returns true if the server is shutting down
-     * 2. networkValidatedLedgers_.waitUntilValidatedByNetwork returns
-     *    false, signaling the wait was aborted.
-     * 3. fetchLedgerDataAndDiff returns an empty optional, signaling the
-     *    fetch was aborted.
-     * In all cases, the extract thread detects this condition, and pushes
-     * an empty optional onto the transform queue. The transform thread,
-     * upon popping an empty optional, pushes an empty optional onto the
-     * load queue, and then returns. The load thread, upon popping an empty
-     * optional, returns.
-     */
-
-    JLOG(journal_.debug()) << __func__ << " : "
-                           << "Starting etl pipeline";
-    writing_ = true;
-
-    std::shared_ptr<Ledger> parent = std::const_pointer_cast<Ledger>(
-        app_.getLedgerMaster().getLedgerBySeq(startSequence - 1));
-    if (!parent)
-    {
-        assert(false);
-        Throw<std::runtime_error>("runETLPipeline: parent ledger is null");
-    }
-
-    std::atomic_bool writeConflict = false;
-    std::optional<uint32_t> lastPublishedSequence;
-    constexpr uint32_t maxQueueSize = 1000;
-
-    ThreadSafeQueue<std::optional<org::xrpl::rpc::v1::GetLedgerResponse>>
-        transformQueue{maxQueueSize};
-
-    std::thread extracter{[this,
-                           &startSequence,
-                           &writeConflict,
-                           &transformQueue]() {
-        beast::setCurrentThreadName("rippled: ReportingETL extract");
-        uint32_t currentSequence = startSequence;
-
-        // there are two stopping conditions here.
-        // First, if there is a write conflict in the load thread, the ETL
-        // mechanism should stop.
-        // The other stopping condition is if the entire server is shutting
-        // down. This can be detected in a variety of ways. See the comment
-        // at the top of the function
-        while (networkValidatedLedgers_.waitUntilValidatedByNetwork(
-                   currentSequence) &&
-               !writeConflict && !isStopping())
-        {
-            auto start = std::chrono::system_clock::now();
-            std::optional<org::xrpl::rpc::v1::GetLedgerResponse>
-                fetchResponse{fetchLedgerDataAndDiff(currentSequence)};
-            // if the fetch is unsuccessful, stop. fetchLedgerDataAndDiff
-            // only returns an empty optional if the server is shutting
-            // down, or if the ledger was found in the database (which means
-            // another process already wrote the ledger that this process
-            // was trying to extract; this is a form of a write conflict).
-            // Otherwise, fetchLedgerDataAndDiff will keep trying to fetch
-            // the specified ledger until successful
-            if (!fetchResponse)
-            {
-                break;
-            }
-            auto end = std::chrono::system_clock::now();
-
-            auto time = ((end - start).count()) / 1000000000.0;
-            auto tps =
-                fetchResponse->transactions_list().transactions_size() / time;
-
-            JLOG(journal_.debug()) << "Extract phase time = " << time
-                                   << " . Extract phase tps = " << tps;
-
-            transformQueue.push(std::move(fetchResponse));
-            ++currentSequence;
-        }
-        // empty optional tells the transformer to shut down
-        transformQueue.push({});
-    }};
Extract phase tps = " << tps; - - transformQueue.push(std::move(fetchResponse)); - ++currentSequence; - } - // empty optional tells the transformer to shut down - transformQueue.push({}); - }}; - - ThreadSafeQueue, - std::vector>>> - loadQueue{maxQueueSize}; - std::thread transformer{[this, - &parent, - &writeConflict, - &loadQueue, - &transformQueue]() { - beast::setCurrentThreadName("rippled: ReportingETL transform"); - - assert(parent); - parent = std::make_shared(*parent, NetClock::time_point{}); - while (!writeConflict) - { - std::optional fetchResponse{ - transformQueue.pop()}; - // if fetchResponse is an empty optional, the extracter thread has - // stopped and the transformer should stop as well - if (!fetchResponse) - { - break; - } - if (isStopping()) - continue; - - auto start = std::chrono::system_clock::now(); - auto [next, accountTxData] = - buildNextLedger(parent, *fetchResponse); - auto end = std::chrono::system_clock::now(); - - auto duration = ((end - start).count()) / 1000000000.0; - JLOG(journal_.debug()) << "transform time = " << duration; - // The below line needs to execute before pushing to the queue, in - // order to prevent this thread and the loader thread from accessing - // the same SHAMap concurrently - parent = std::make_shared(*next, NetClock::time_point{}); - loadQueue.push( - std::make_pair(std::move(next), std::move(accountTxData))); - } - // empty optional tells the loader to shutdown - loadQueue.push({}); - }}; - - std::thread loader{[this, - &lastPublishedSequence, - &loadQueue, - &writeConflict]() { - beast::setCurrentThreadName("rippled: ReportingETL load"); - size_t totalTransactions = 0; - double totalTime = 0; - while (!writeConflict) - { - std::optional, - std::vector>> - result{loadQueue.pop()}; - // if result is an empty optional, the transformer thread has - // stopped and the loader should stop as well - if (!result) - break; - if (isStopping()) - continue; - - auto& ledger = result->first; - auto& accountTxData = result->second; - - auto start = std::chrono::system_clock::now(); - // write to the key-value store - flushLedger(ledger); - - auto mid = std::chrono::system_clock::now(); - // write to RDBMS - // if there is a write conflict, some other process has already - // written this ledger and has taken over as the ETL writer -#ifdef RIPPLED_REPORTING - if (!dynamic_cast(&app_.getRelationalDatabase()) - ->writeLedgerAndTransactions( - ledger->info(), accountTxData)) - writeConflict = true; -#endif - auto end = std::chrono::system_clock::now(); - - if (!writeConflict) - { - publishLedger(ledger); - lastPublishedSequence = ledger->info().seq; - } - // print some performance numbers - auto kvTime = ((mid - start).count()) / 1000000000.0; - auto relationalTime = ((end - mid).count()) / 1000000000.0; - - size_t numTxns = accountTxData.size(); - totalTime += kvTime; - totalTransactions += numTxns; - JLOG(journal_.info()) - << "Load phase of etl : " - << "Successfully published ledger! Ledger info: " - << detail::toString(ledger->info()) - << ". txn count = " << numTxns - << ". key-value write time = " << kvTime - << ". relational write time = " << relationalTime - << ". key-value tps = " << numTxns / kvTime - << ". relational tps = " << numTxns / relationalTime - << ". 
total key-value tps = " << totalTransactions / totalTime; - } - }}; - - // wait for all of the threads to stop - loader.join(); - extracter.join(); - transformer.join(); - writing_ = false; - - JLOG(journal_.debug()) << __func__ << " : " - << "Stopping etl pipeline"; - - return lastPublishedSequence; -} - -// main loop. The software begins monitoring the ledgers that are validated -// by the nework. The member networkValidatedLedgers_ keeps track of the -// sequences of ledgers validated by the network. Whenever a ledger is validated -// by the network, the software looks for that ledger in the database. Once the -// ledger is found in the database, the software publishes that ledger to the -// ledgers stream. If a network validated ledger is not found in the database -// after a certain amount of time, then the software attempts to take over -// responsibility of the ETL process, where it writes new ledgers to the -// database. The software will relinquish control of the ETL process if it -// detects that another process has taken over ETL. -void -ReportingETL::monitor() -{ - auto ledger = std::const_pointer_cast( - app_.getLedgerMaster().getValidatedLedger()); - if (!ledger) - { - JLOG(journal_.info()) << __func__ << " : " - << "Database is empty. Will download a ledger " - "from the network."; - if (startSequence_) - { - JLOG(journal_.info()) - << __func__ << " : " - << "ledger sequence specified in config. " - << "Will begin ETL process starting with ledger " - << *startSequence_; - ledger = loadInitialLedger(*startSequence_); - } - else - { - JLOG(journal_.info()) - << __func__ << " : " - << "Waiting for next ledger to be validated by network..."; - std::optional mostRecentValidated = - networkValidatedLedgers_.getMostRecent(); - if (mostRecentValidated) - { - JLOG(journal_.info()) << __func__ << " : " - << "Ledger " << *mostRecentValidated - << " has been validated. " - << "Downloading..."; - ledger = loadInitialLedger(*mostRecentValidated); - } - else - { - JLOG(journal_.info()) << __func__ << " : " - << "The wait for the next validated " - << "ledger has been aborted. " - << "Exiting monitor loop"; - return; - } - } - } - else - { - if (startSequence_) - { - Throw( - "start sequence specified but db is already populated"); - } - JLOG(journal_.info()) - << __func__ << " : " - << "Database already populated. Picking up from the tip of history"; - } - if (!ledger) - { - JLOG(journal_.error()) - << __func__ << " : " - << "Failed to load initial ledger. Exiting monitor loop"; - return; - } - else - { - publishLedger(ledger); - } - uint32_t nextSequence = ledger->info().seq + 1; - - JLOG(journal_.debug()) << __func__ << " : " - << "Database is populated. " - << "Starting monitor loop. sequence = " - << nextSequence; - while (!stopping_ && - networkValidatedLedgers_.waitUntilValidatedByNetwork(nextSequence)) - { - JLOG(journal_.info()) << __func__ << " : " - << "Ledger with sequence = " << nextSequence - << " has been validated by the network. " - << "Attempting to find in database and publish"; - // Attempt to take over responsibility of ETL writer after 10 failed - // attempts to publish the ledger. publishLedger() fails if the - // ledger that has been validated by the network is not found in the - // database after the specified number of attempts. 
-
-// main loop. The software begins monitoring the ledgers that are validated
-// by the network. The member networkValidatedLedgers_ keeps track of the
-// sequences of ledgers validated by the network. Whenever a ledger is
-// validated by the network, the software looks for that ledger in the
-// database. Once the ledger is found in the database, the software
-// publishes that ledger to the ledgers stream. If a network validated
-// ledger is not found in the database after a certain amount of time, then
-// the software attempts to take over responsibility of the ETL process,
-// where it writes new ledgers to the database. The software will relinquish
-// control of the ETL process if it detects that another process has taken
-// over ETL.
-void
-ReportingETL::monitor()
-{
-    auto ledger = std::const_pointer_cast<Ledger>(
-        app_.getLedgerMaster().getValidatedLedger());
-    if (!ledger)
-    {
-        JLOG(journal_.info()) << __func__ << " : "
-                              << "Database is empty. Will download a ledger "
-                                 "from the network.";
-        if (startSequence_)
-        {
-            JLOG(journal_.info())
-                << __func__ << " : "
-                << "ledger sequence specified in config. "
-                << "Will begin ETL process starting with ledger "
-                << *startSequence_;
-            ledger = loadInitialLedger(*startSequence_);
-        }
-        else
-        {
-            JLOG(journal_.info())
-                << __func__ << " : "
-                << "Waiting for next ledger to be validated by network...";
-            std::optional<uint32_t> mostRecentValidated =
-                networkValidatedLedgers_.getMostRecent();
-            if (mostRecentValidated)
-            {
-                JLOG(journal_.info()) << __func__ << " : "
-                                      << "Ledger " << *mostRecentValidated
-                                      << " has been validated. "
-                                      << "Downloading...";
-                ledger = loadInitialLedger(*mostRecentValidated);
-            }
-            else
-            {
-                JLOG(journal_.info()) << __func__ << " : "
-                                      << "The wait for the next validated "
-                                      << "ledger has been aborted. "
-                                      << "Exiting monitor loop";
-                return;
-            }
-        }
-    }
-    else
-    {
-        if (startSequence_)
-        {
-            Throw<std::runtime_error>(
-                "start sequence specified but db is already populated");
-        }
-        JLOG(journal_.info())
-            << __func__ << " : "
-            << "Database already populated. Picking up from the tip of "
-               "history";
-    }
-    if (!ledger)
-    {
-        JLOG(journal_.error())
-            << __func__ << " : "
-            << "Failed to load initial ledger. Exiting monitor loop";
-        return;
-    }
-    else
-    {
-        publishLedger(ledger);
-    }
-    uint32_t nextSequence = ledger->info().seq + 1;
-
-    JLOG(journal_.debug()) << __func__ << " : "
-                           << "Database is populated. "
-                           << "Starting monitor loop. sequence = "
-                           << nextSequence;
-    while (!stopping_ &&
-           networkValidatedLedgers_.waitUntilValidatedByNetwork(nextSequence))
-    {
-        JLOG(journal_.info()) << __func__ << " : "
-                              << "Ledger with sequence = " << nextSequence
-                              << " has been validated by the network. "
-                              << "Attempting to find in database and publish";
-        // Attempt to take over responsibility of ETL writer after 10 failed
-        // attempts to publish the ledger. publishLedger() fails if the
-        // ledger that has been validated by the network is not found in the
-        // database after the specified number of attempts. publishLedger()
-        // waits one second between each attempt to read the ledger from the
-        // database
-        //
-        // In strict read-only mode, when the software fails to find a
-        // ledger in the database that has been validated by the network,
-        // the software will only try to publish subsequent ledgers once,
-        // until one of those ledgers is found in the database. Once the
-        // software successfully publishes a ledger, the software will fall
-        // back to the normal behavior of trying several times to publish
-        // the ledger that has been validated by the network. In this
-        // manner, a reporting process running in read-only mode does not
-        // need to restart if the database is wiped.
-        constexpr size_t timeoutSeconds = 10;
-        bool success = publishLedger(nextSequence, timeoutSeconds);
-        if (!success)
-        {
-            JLOG(journal_.warn())
-                << __func__ << " : "
-                << "Failed to publish ledger with sequence = " << nextSequence
-                << " . Beginning ETL";
-            // runETLPipeline returns the most recent sequence published, or
-            // an empty optional if no sequence was published
-            std::optional<uint32_t> lastPublished =
-                runETLPipeline(nextSequence);
-            JLOG(journal_.info()) << __func__ << " : "
-                                  << "Aborting ETL. Falling back to "
-                                     "publishing";
-            // if no ledger was published, don't increment nextSequence
-            if (lastPublished)
-                nextSequence = *lastPublished + 1;
-        }
-        else
-        {
-            ++nextSequence;
-        }
-    }
-}
-
-void
-ReportingETL::monitorReadOnly()
-{
-    JLOG(journal_.debug()) << "Starting reporting in strict read only mode";
-    std::optional<uint32_t> mostRecent =
-        networkValidatedLedgers_.getMostRecent();
-    if (!mostRecent)
-        return;
-    uint32_t sequence = *mostRecent;
-    bool success = true;
-    while (!stopping_ &&
-           networkValidatedLedgers_.waitUntilValidatedByNetwork(sequence))
-    {
-        success = publishLedger(sequence, success ? 30 : 1);
-        ++sequence;
-    }
-}
-
-void
-ReportingETL::doWork()
-{
-    worker_ = std::thread([this]() {
-        beast::setCurrentThreadName("rippled: ReportingETL worker");
-        if (readOnly_)
-            monitorReadOnly();
-        else
-            monitor();
-    });
-}
-
-ReportingETL::ReportingETL(Application& app)
-    : app_(app)
-    , journal_(app.journal("ReportingETL"))
-    , publishStrand_(app_.getIOService())
-    , loadBalancer_(*this)
-{
-    // if present, get endpoint from config
-    if (app_.config().exists("reporting"))
-    {
-#ifndef RIPPLED_REPORTING
-        Throw<std::runtime_error>(
-            "Config file specifies reporting, but software was not built "
-            "with -Dreporting=1. To use reporting, configure CMake with "
-            "-Dreporting=1");
-#endif
-        if (!app_.config().useTxTables())
-            Throw<std::runtime_error>(
-                "Reporting requires tx tables. Set use_tx_tables=1 in config "
-                "file, under [ledger_tx_tables] section");
-        Section section = app_.config().section("reporting");
-
-        JLOG(journal_.debug()) << "Parsing config info";
-
-        auto& vals = section.values();
-        for (auto& v : vals)
-        {
-            JLOG(journal_.debug()) << "val is " << v;
-            Section source = app_.config().section(v);
-
-            auto optIp = source.get("source_ip");
-            if (!optIp)
-                continue;
-
-            auto optWsPort = source.get("source_ws_port");
-            if (!optWsPort)
-                continue;
-
-            auto optGrpcPort = source.get("source_grpc_port");
-            if (!optGrpcPort)
-            {
-                // add source without grpc port
-                // used in read-only mode to detect when new ledgers have
-                // been validated. Used for publishing
-                if (app_.config().reportingReadOnly())
-                    loadBalancer_.add(*optIp, *optWsPort);
-                continue;
-            }
-
-            loadBalancer_.add(*optIp, *optWsPort, *optGrpcPort);
-        }
-
-        // this is true iff --reportingReadOnly was passed via command line
-        readOnly_ = app_.config().reportingReadOnly();
-
-        // if --reportingReadOnly was not passed via command line, check the
-        // config file. Command line takes precedence
-        if (!readOnly_)
-        {
-            auto const optRO = section.get("read_only");
-            if (optRO)
-            {
-                readOnly_ = (*optRO == "true" || *optRO == "1");
-                app_.config().setReportingReadOnly(readOnly_);
-            }
-        }
-
-        // lambda throws a useful message if string to integer conversion
-        // fails
-        auto asciiToIntThrows =
-            [](auto& dest, std::string const& src, char const* onError) {
-                char const* const srcEnd = src.data() + src.size();
-                auto [ptr, err] = std::from_chars(src.data(), srcEnd, dest);
-
-                if (err == std::errc())
-                    // skip whitespace at end of string
-                    while (ptr != srcEnd &&
-                           std::isspace(static_cast<unsigned char>(*ptr)))
-                        ++ptr;
-
-                // throw if
-                //  o conversion error or
-                //  o entire string is not consumed
-                if (err != std::errc() || ptr != srcEnd)
-                    Throw<std::runtime_error>(onError + src);
-            };
-
-        // handle command line arguments
-        if (app_.config().START_UP == Config::StartUpType::FRESH && !readOnly_)
-        {
-            asciiToIntThrows(
-                *startSequence_,
-                app_.config().START_LEDGER,
-                "Expected integral START_LEDGER command line argument. Got: ");
-        }
-        // if not passed via command line, check config for start sequence
-        if (!startSequence_)
-        {
-            auto const optStartSeq = section.get("start_sequence");
-            if (optStartSeq)
-            {
-                // set a value so we can dereference
-                startSequence_ = 0;
-                asciiToIntThrows(
-                    *startSequence_,
-                    *optStartSeq,
-                    "Expected integral start_sequence config entry. Got: ");
-            }
-        }
-
-        auto const optFlushInterval = section.get("flush_interval");
-        if (optFlushInterval)
-            asciiToIntThrows(
-                flushInterval_,
-                *optFlushInterval,
-                "Expected integral flush_interval config entry. Got: ");
-
-        auto const optNumMarkers = section.get("num_markers");
-        if (optNumMarkers)
-            asciiToIntThrows(
-                numMarkers_,
-                *optNumMarkers,
-                "Expected integral num_markers config entry. Got: ");
-    }
-}
-
-}  // namespace ripple
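The `asciiToIntThrows` lambda above combines `std::from_chars` with a trailing-whitespace scan, so that `"42  "` parses but `"42x"` throws. The same check as a free function, runnable on its own:

```cpp
#include <cctype>
#include <charconv>
#include <iostream>
#include <stdexcept>
#include <string>

// Parse an integer, tolerating trailing whitespace but nothing else, in
// the spirit of the asciiToIntThrows lambda above.
std::size_t
parseOrThrow(std::string const& src)
{
    std::size_t dest = 0;
    char const* const srcEnd = src.data() + src.size();
    auto [ptr, err] = std::from_chars(src.data(), srcEnd, dest);

    if (err == std::errc())
        while (ptr != srcEnd && std::isspace(static_cast<unsigned char>(*ptr)))
            ++ptr;

    // reject on conversion error, or if the string is not fully consumed
    if (err != std::errc() || ptr != srcEnd)
        throw std::runtime_error("Expected integral value. Got: " + src);
    return dest;
}

int main()
{
    std::cout << parseOrThrow("42  ") << '\n';  // ok: trailing spaces skipped
    try
    {
        parseOrThrow("42x");                    // throws: unconsumed input
    }
    catch (std::exception const& e)
    {
        std::cout << e.what() << '\n';
    }
}
```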
diff --git a/src/xrpld/app/reporting/ReportingETL.h b/src/xrpld/app/reporting/ReportingETL.h
deleted file mode 100644
index fc15f90b43b..00000000000
--- a/src/xrpld/app/reporting/ReportingETL.h
+++ /dev/null
@@ -1,367 +0,0 @@
-//------------------------------------------------------------------------------
-/*
-    This file is part of rippled: https://github.com/ripple/rippled
-    Copyright (c) 2020 Ripple Labs Inc.
-
-    Permission to use, copy, modify, and/or distribute this software for any
-    purpose with or without fee is hereby granted, provided that the above
-    copyright notice and this permission notice appear in all copies.
-
-    THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
-    WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
-    MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
-    ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
-    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
-    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
-    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-*/
-//==============================================================================
-
-#ifndef RIPPLE_APP_REPORTING_REPORTINGETL_H_INCLUDED
-#define RIPPLE_APP_REPORTING_REPORTINGETL_H_INCLUDED
-
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-
-#include
-#include
-#include
-#include
-
-#include
-#include
-
-#include
-#include
-#include
-
-#include
-namespace ripple {
-
-using AccountTransactionsData = RelationalDatabase::AccountTransactionsData;
-
-/**
- * This class is responsible for continuously extracting data from a
- * p2p node, and writing that data to the databases. Usually, multiple
- * different processes share access to the same network accessible
- * databases, in which case only one such process is performing ETL and
- * writing to the database. The other processes simply monitor the database
- * for new ledgers, and publish those ledgers to the various subscription
- * streams. If a monitoring process determines that the ETL writer has
- * failed (no new ledgers written for some time), the process will attempt
- * to become the ETL writer. If there are multiple monitoring processes that
- * try to become the ETL writer at the same time, one will win out, and the
- * others will fall back to monitoring/publishing. In this sense, this class
- * dynamically transitions from monitoring to writing and from writing to
- * monitoring, based on the activity of other processes running on different
- * machines.
- */
-class ReportingETL
-{
-private:
-    Application& app_;
-
-    beast::Journal journal_;
-
-    std::thread worker_;
-
-    /// Strand to ensure that ledgers are published in order.
-    /// If ETL is started far behind the network, ledgers will be written and
-    /// published very rapidly. Monitoring processes will publish ledgers as
-    /// they are written. However, to publish a ledger, the monitoring
-    /// process needs to read all of the transactions for that ledger from
-    /// the database. Reading the transactions from the database requires
-    /// network calls, which can be slow. It is imperative however that the
-    /// monitoring processes keep up with the writer, else the monitoring
-    /// processes will not be able to detect if the writer failed. Therefore,
-    /// publishing each ledger (which includes reading all of the
-    /// transactions from the database) is done from the application wide
-    /// asio io_service, and a strand is used to ensure ledgers are published
-    /// in order
-    boost::asio::io_context::strand publishStrand_;
-
-    /// Mechanism for communicating with ETL sources. ETLLoadBalancer wraps
-    /// an arbitrary number of ETL sources and load balances ETL requests
-    /// across those sources.
-    ETLLoadBalancer loadBalancer_;
-
-    /// Mechanism for detecting when the network has validated a new ledger.
-    /// This class provides a way to wait for a specific ledger to be
-    /// validated
-    NetworkValidatedLedgers networkValidatedLedgers_;
-
-    /// Whether the software is stopping
-    std::atomic_bool stopping_ = false;
-
-    /// Used to determine when to write to the database during the initial
-    /// ledger download. By default, the software downloads an entire ledger
-    /// and then writes to the database. If flushInterval_ is non-zero, the
-    /// software will write to the database as new ledger data (SHAMap leaf
-    /// nodes) arrives. It is not necessarily more efficient to write the
-    /// data as it arrives, as different SHAMap leaf nodes share the same
-    /// SHAMap inner nodes; flushing prematurely can result in the same
-    /// SHAMap inner node being written to the database more than once. It
-    /// is recommended to use the default value of 0 for this variable;
-    /// however, different values can be experimented with if better
-    /// performance is desired.
-    size_t flushInterval_ = 0;
-
-    /// This variable controls the number of GetLedgerData calls that will
-    /// be executed in parallel during the initial ledger download.
-    /// GetLedgerData allows clients to page through a ledger over many RPC
-    /// calls. GetLedgerData returns a marker that is used as an offset in a
-    /// subsequent call. If numMarkers_ is greater than 1, there will be
-    /// multiple chains of GetLedgerData calls iterating over different
-    /// parts of the same ledger in parallel. This can dramatically speed up
-    /// the time to download the initial ledger. However, a higher value for
-    /// this member variable puts more load on the ETL source.
-    size_t numMarkers_ = 2;
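To illustrate the `numMarkers_` idea: each marker chain pages through its own slice of the key space, so several slices download concurrently. A loose analogy using threads over a one-byte key prefix (this is not the GetLedgerData API, which is defined by the gRPC interface; names and sizes are illustrative):

```cpp
#include <iostream>
#include <mutex>
#include <thread>
#include <vector>

// Each worker pages through its own slice of a 0x00-0xFF key space,
// advancing a per-worker "marker", so numMarkers slices download in
// parallel.
int main()
{
    constexpr unsigned numMarkers = 4;
    constexpr unsigned pageSize = 16;
    std::mutex out;
    std::vector<std::thread> workers;

    for (unsigned i = 0; i < numMarkers; ++i)
        workers.emplace_back([i, &out] {
            unsigned const begin = i * (256 / numMarkers);
            unsigned const end = begin + (256 / numMarkers);
            // marker = resume point returned by the previous "call"
            for (unsigned marker = begin; marker < end; marker += pageSize)
            {
                std::lock_guard lk(out);
                std::cout << "worker " << i << " fetched keys ["
                          << marker << ", " << marker + pageSize << ")\n";
            }
        });

    for (auto& w : workers)
        w.join();
}
```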
-
-    /// Whether the process is in strict read-only mode. In strict read-only
-    /// mode, the process will never attempt to become the ETL writer, and
-    /// will only publish ledgers as they are written to the database.
-    bool readOnly_ = false;
-
-    /// Whether the process is writing to the database. Used by server_info
-    std::atomic_bool writing_ = false;
-
-    /// Ledger sequence to start ETL from. If this is empty, ETL will start
-    /// from the next ledger validated by the network. If this is set, and
-    /// the database is already populated, an error is thrown.
-    std::optional<uint32_t> startSequence_;
-
-    /// The time that the most recently published ledger was published. Used
-    /// by server_info
-    std::chrono::time_point<std::chrono::system_clock> lastPublish_;
-
-    std::mutex publishTimeMtx_;
-
-    std::chrono::time_point<std::chrono::system_clock>
-    getLastPublish()
-    {
-        std::unique_lock lck(publishTimeMtx_);
-        return lastPublish_;
-    }
-
-    void
-    setLastPublish()
-    {
-        std::unique_lock lck(publishTimeMtx_);
-        lastPublish_ = std::chrono::system_clock::now();
-    }
-
-    /// Download a ledger with the specified sequence in full, via
-    /// GetLedgerData, and write the data to the databases. This takes
-    /// several minutes or longer.
-    /// @param sequence the sequence of the ledger to download
-    /// @return The ledger downloaded, with a full transaction and account
-    /// state map
-    std::shared_ptr<Ledger>
-    loadInitialLedger(uint32_t sequence);
-
-    /// Run ETL. Extracts ledgers and writes them to the database, until a
-    /// write conflict occurs (or the server shuts down).
-    /// @note database must already be populated when this function is called
-    /// @param startSequence the first ledger to extract
-    /// @return the last ledger written to the database, if any
-    std::optional<uint32_t>
-    runETLPipeline(uint32_t startSequence);
-
-    /// Monitor the network for newly validated ledgers. Also monitor the
-    /// database to see if any process is writing those ledgers. This
-    /// function is called when the application starts, and will only return
-    /// when the application is shutting down. If the software detects the
-    /// database is empty, this function will call loadInitialLedger(). If
-    /// the software detects ledgers are not being written, this function
-    /// calls runETLPipeline(). Otherwise, this function publishes ledgers
-    /// as they are written to the database.
-    void
-    monitor();
-
-    /// Monitor the database for newly written ledgers.
-    /// Similar to monitor(), except this function will never call
-    /// runETLPipeline() or loadInitialLedger(). This function only
-    /// publishes ledgers as they are written to the database.
-    void
-    monitorReadOnly();
-
-    /// Extract data for a particular ledger from an ETL source. This
-    /// function continuously tries to extract the specified ledger (using
-    /// all available ETL sources) until the extraction succeeds, or the
-    /// server shuts down.
-    /// @param sequence sequence of the ledger to extract
-    /// @return ledger header and transaction+metadata blobs. Empty optional
-    /// if the server is shutting down
-    std::optional<org::xrpl::rpc::v1::GetLedgerResponse>
-    fetchLedgerData(uint32_t sequence);
-
-    /// Extract data for a particular ledger from an ETL source. This
-    /// function continuously tries to extract the specified ledger (using
-    /// all available ETL sources) until the extraction succeeds, or the
-    /// server shuts down.
-    /// @param sequence sequence of the ledger to extract
-    /// @return ledger header, transaction+metadata blobs, and all ledger
-    /// objects created, modified or deleted between this ledger and the
-    /// parent. Empty optional if the server is shutting down
-    std::optional<org::xrpl::rpc::v1::GetLedgerResponse>
-    fetchLedgerDataAndDiff(uint32_t sequence);
-
-    /// Insert all of the extracted transactions into the ledger
-    /// @param ledger ledger to insert transactions into
-    /// @param data data extracted from an ETL source
-    /// @return struct that contains the necessary info to write to the
-    /// transactions and account_transactions tables in Postgres (mostly
-    /// transaction hashes, corresponding nodestore hashes and affected
-    /// accounts)
-    std::vector<AccountTransactionsData>
-    insertTransactions(
-        std::shared_ptr<Ledger>& ledger,
-        org::xrpl::rpc::v1::GetLedgerResponse& data);
-
-    /// Build the next ledger using the previous ledger and the extracted
-    /// data. This function calls insertTransactions()
-    /// @note rawData should be data that corresponds to the ledger
-    /// immediately following parent
-    /// @param parent the previous ledger
-    /// @param rawData data extracted from an ETL source
-    /// @return the newly built ledger and data to write to Postgres
-    std::pair<std::shared_ptr<Ledger>, std::vector<AccountTransactionsData>>
-    buildNextLedger(
-        std::shared_ptr<Ledger>& parent,
-        org::xrpl::rpc::v1::GetLedgerResponse& rawData);
-
-    /// Write all new data to the key-value store
-    /// @param ledger ledger with new data to write
-    void
-    flushLedger(std::shared_ptr<Ledger>& ledger);
-
-    /// Attempt to read the specified ledger from the database, and then
-    /// publish that ledger to the ledgers stream.
-    /// @param ledgerSequence the sequence of the ledger to publish
-    /// @param maxAttempts the number of times to attempt to read the ledger
-    /// from the database. 1 attempt per second
-    /// @return whether the ledger was found in the database and published
-    bool
-    publishLedger(uint32_t ledgerSequence, uint32_t maxAttempts = 10);
-
-    /// Publish the passed in ledger
-    /// @param ledger the ledger to publish
-    void
-    publishLedger(std::shared_ptr<Ledger>& ledger);
-
-    /// Consume data from a queue and insert that data into the ledger
-    /// This function will continue to pull from the queue until the queue
-    /// returns nullptr. This is used during the initial ledger download
-    /// @param ledger the ledger to insert data into
-    /// @param writeQueue the queue with extracted data
-    void
-    consumeLedgerData(
-        std::shared_ptr<Ledger>& ledger,
-        ThreadSafeQueue<std::shared_ptr<SLE>>& writeQueue);
-
-public:
-    explicit ReportingETL(Application& app);
-
-    ~ReportingETL()
-    {
-    }
-
-    NetworkValidatedLedgers&
-    getNetworkValidatedLedgers()
-    {
-        return networkValidatedLedgers_;
-    }
-
-    bool
-    isStopping() const
-    {
-        return stopping_;
-    }
-
-    /// Get the number of markers to use during the initial ledger download.
-    /// This is equivalent to the degree of parallelism during the initial
-    /// ledger download
-    /// @return the number of markers
-    uint32_t
-    getNumMarkers()
-    {
-        return numMarkers_;
-    }
-
-    Application&
-    getApplication()
-    {
-        return app_;
-    }
-
-    beast::Journal&
-    getJournal()
-    {
-        return journal_;
-    }
-
-    Json::Value
-    getInfo()
-    {
-        Json::Value result(Json::objectValue);
-
-        result["etl_sources"] = loadBalancer_.toJson();
-        result["is_writer"] = writing_.load();
-        auto last = getLastPublish();
-        if (last.time_since_epoch().count() != 0)
-            result["last_publish_time"] =
-                to_string(std::chrono::floor<std::chrono::seconds>(
-                    getLastPublish()));
-        return result;
-    }
-
-    /// start all of the necessary components and begin ETL
-    void
-    start()
-    {
-        JLOG(journal_.info()) << "Starting reporting etl";
-        assert(app_.config().reporting());
-        assert(app_.config().standalone());
-        assert(app_.config().reportingReadOnly() == readOnly_);
-
-        stopping_ = false;
-
-        loadBalancer_.start();
-        doWork();
-    }
-
-    void
-    stop()
-    {
-        JLOG(journal_.info()) << "onStop called";
-        JLOG(journal_.debug()) << "Stopping Reporting ETL";
-        stopping_ = true;
-        networkValidatedLedgers_.stop();
-        loadBalancer_.stop();
-
-        JLOG(journal_.debug()) << "Stopped loadBalancer";
-        if (worker_.joinable())
-            worker_.join();
-
-        JLOG(journal_.debug()) << "Joined worker thread";
-    }
-
-    ETLLoadBalancer&
-    getETLLoadBalancer()
-    {
-        return loadBalancer_;
-    }
-
-private:
-    void
-    doWork();
-};
-
-}  // namespace ripple
-#endif
diff --git a/src/xrpld/core/Config.h b/src/xrpld/core/Config.h
index e63e6d2f356..2d8340e0747 100644
--- a/src/xrpld/core/Config.h
+++ b/src/xrpld/core/Config.h
@@ -127,10 +127,6 @@ class Config : public BasicConfig
      */
     bool RUN_STANDALONE = false;
 
-    bool RUN_REPORTING = false;
-
-    bool REPORTING_READ_ONLY = false;
-
     bool USE_TX_TABLES = true;
 
     /** Determines if the server will sign a tx, given an account's secret seed.
@@ -347,11 +343,6 @@ class Config : public BasicConfig
     {
         return RUN_STANDALONE;
     }
-    bool
-    reporting() const
-    {
-        return RUN_REPORTING;
-    }
 
     bool
     useTxTables() const
@@ -359,18 +350,6 @@ class Config : public BasicConfig
         return USE_TX_TABLES;
     }
 
-    bool
-    reportingReadOnly() const
-    {
-        return REPORTING_READ_ONLY;
-    }
-
-    void
-    setReportingReadOnly(bool b)
-    {
-        REPORTING_READ_ONLY = b;
-    }
-
     bool
     canSign() const
     {
diff --git a/src/xrpld/core/DatabaseCon.h b/src/xrpld/core/DatabaseCon.h
index d2f6b0a4f05..0ded37b1739 100644
--- a/src/xrpld/core/DatabaseCon.h
+++ b/src/xrpld/core/DatabaseCon.h
@@ -87,7 +87,6 @@ class DatabaseCon
         Config::StartUpType startUp = Config::NORMAL;
         bool standAlone = false;
-        bool reporting = false;
         boost::filesystem::path dataDir;
         // Indicates whether or not to return the `globalPragma`
         // from commonPragma()
@@ -118,8 +117,7 @@
         std::array const& initSQL)
         // Use temporary files or regular DB files?
         : DatabaseCon(
-              setup.standAlone && !setup.reporting &&
-              setup.startUp != Config::LOAD &&
+              setup.standAlone && setup.startUp != Config::LOAD &&
               setup.startUp != Config::LOAD_FILE &&
              setup.startUp != Config::REPLAY
                   ? ""
diff --git a/src/xrpld/core/Pg.cpp b/src/xrpld/core/Pg.cpp
deleted file mode 100644
index 829e17658c1..00000000000
--- a/src/xrpld/core/Pg.cpp
+++ /dev/null
@@ -1,1415 +0,0 @@
-//------------------------------------------------------------------------------
-/*
-    This file is part of rippled: https://github.com/ripple/rippled
-    Copyright (c) 2020 Ripple Labs Inc.
-
-    Permission to use, copy, modify, and/or distribute this software for any
-    purpose with or without fee is hereby granted, provided that the above
-    copyright notice and this permission notice appear in all copies.
-
-    THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
-    WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
-    MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
-    ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
-    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
-    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
-    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-*/
-//==============================================================================
-
-#ifdef RIPPLED_REPORTING
-// Need raw socket manipulation to determine if postgres socket IPv4 or 6.
-#if defined(_WIN32)
-#include
-#include
-#else
-#include
-#include
-#include
-#include
-#endif
-
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-
-namespace ripple {
-
-static void
-noticeReceiver(void* arg, PGresult const* res)
-{
-    beast::Journal& j = *static_cast<beast::Journal*>(arg);
-    JLOG(j.info()) << "server message: " << PQresultErrorMessage(res);
-}
-
-//-----------------------------------------------------------------------------
-
-std::string
-PgResult::msg() const
-{
-    if (error_.has_value())
-    {
-        std::stringstream ss;
-        ss << error_->first << ": " << error_->second;
-        return ss.str();
-    }
-    if (result_)
-        return "ok";
-
-    // Must be stopping.
-    return "stopping";
-}
-
-//-----------------------------------------------------------------------------
-
-/*
-    Connecting described in:
-    https://www.postgresql.org/docs/10/libpq-connect.html
- */
-void
-Pg::connect()
-{
-    if (conn_)
-    {
-        // Nothing to do if we already have a good connection.
-        if (PQstatus(conn_.get()) == CONNECTION_OK)
-            return;
-        /* Try resetting connection. */
-        PQreset(conn_.get());
-    }
-    else  // Make new connection.
-    {
-        conn_.reset(PQconnectdbParams(
-            reinterpret_cast<char const* const*>(&config_.keywordsIdx[0]),
-            reinterpret_cast<char const* const*>(&config_.valuesIdx[0]),
-            0));
-        if (!conn_)
-            Throw<std::runtime_error>("No db connection struct");
-    }
-
-    /** Results from a synchronous connection attempt can only be either
-     *  CONNECTION_OK or CONNECTION_BAD. */
-    if (PQstatus(conn_.get()) == CONNECTION_BAD)
-    {
-        std::stringstream ss;
-        ss << "DB connection status " << PQstatus(conn_.get()) << ": "
-           << PQerrorMessage(conn_.get());
-        Throw<std::runtime_error>(ss.str());
-    }
-
-    // Log server session console messages.
-    PQsetNoticeReceiver(
-        conn_.get(), noticeReceiver, const_cast<beast::Journal*>(&j_));
-}
-
-PgResult
-Pg::query(char const* command, std::size_t nParams, char const* const* values)
-{
-    // The result object must be freed using the libpq API PQclear() call.
-    pg_result_type ret{nullptr, [](PGresult* result) { PQclear(result); }};
-    // Connect then submit query.
-    while (true)
-    {
-        {
-            std::lock_guard lock(mutex_);
-            if (stop_)
-                return PgResult();
-        }
-        try
-        {
-            connect();
-            if (nParams)
-            {
-                // PQexecParams can process only a single command.
-                ret.reset(PQexecParams(
-                    conn_.get(),
-                    command,
-                    nParams,
-                    nullptr,
-                    values,
-                    nullptr,
-                    nullptr,
-                    0));
-            }
-            else
-            {
-                // PQexec can process multiple commands separated by
-                // semi-colons. Returns the response from the last
-                // command processed.
-                ret.reset(PQexec(conn_.get(), command));
-            }
-            if (!ret)
-                Throw<std::runtime_error>("no result structure returned");
-            break;
-        }
-        catch (std::exception const& e)
-        {
-            // Sever connection and retry until successful.
-            disconnect();
-            JLOG(j_.error()) << "database error, retrying: " << e.what();
-            std::this_thread::sleep_for(std::chrono::seconds(1));
-        }
-    }
-
-    // Ensure proper query execution.
-    switch (PQresultStatus(ret.get()))
-    {
-        case PGRES_TUPLES_OK:
-        case PGRES_COMMAND_OK:
-        case PGRES_COPY_IN:
-        case PGRES_COPY_OUT:
-        case PGRES_COPY_BOTH:
-            break;
-        default: {
-            std::stringstream ss;
-            ss << "bad query result: "
-               << PQresStatus(PQresultStatus(ret.get()))
-               << " error message: " << PQerrorMessage(conn_.get())
-               << ", number of tuples: " << PQntuples(ret.get())
-               << ", number of fields: " << PQnfields(ret.get());
-            JLOG(j_.error()) << ss.str();
-            PgResult retRes(ret.get(), conn_.get());
-            disconnect();
-            return retRes;
-        }
-    }
-
-    return PgResult(std::move(ret));
-}
-
-static pg_formatted_params
-formatParams(pg_params const& dbParams, beast::Journal const& j)
-{
-    std::vector<std::optional<std::string>> const& values = dbParams.second;
-    /* Convert vector to C-style array of C-strings for postgres API.
-       std::nullopt is a proxy for NULL since an empty std::string is
-       0 length but not NULL. */
-    std::vector<char const*> valuesIdx;
-    valuesIdx.reserve(values.size());
-    std::stringstream ss;
-    bool first = true;
-    for (auto const& value : values)
-    {
-        if (value)
-        {
-            valuesIdx.push_back(value->c_str());
-            ss << value->c_str();
-        }
-        else
-        {
-            valuesIdx.push_back(nullptr);
-            ss << "(null)";
-        }
-        if (first)
-            first = false;
-        else
-            ss << ',';
-    }
-
-    JLOG(j.trace()) << "query: " << dbParams.first << ". params: " << ss.str();
-    return valuesIdx;
-}
-
-PgResult
-Pg::query(pg_params const& dbParams)
-{
-    char const* const& command = dbParams.first;
-    auto const formattedParams = formatParams(dbParams, j_);
-    return query(
-        command,
-        formattedParams.size(),
-        formattedParams.size()
-            ? reinterpret_cast<char const* const*>(&formattedParams[0])
-            : nullptr);
-}
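`pg_result_type` above is a `unique_ptr` whose deleter calls `PQclear()`, so every exit path releases the libpq result. A self-contained sketch of the same RAII pattern for both results and connections (the connection string is illustrative):

```cpp
#include <libpq-fe.h>
#include <memory>
#include <stdexcept>

// Owning handle types: the deleters guarantee PQclear()/PQfinish() run on
// every path, which is what pg_result_type/pg_connection_type do above.
using pg_result_type = std::unique_ptr<PGresult, void (*)(PGresult*)>;
using pg_connection_type = std::unique_ptr<PGconn, void (*)(PGconn*)>;

int main()
{
    pg_connection_type conn{
        PQconnectdb("dbname=test"), [](PGconn* c) { PQfinish(c); }};
    if (!conn || PQstatus(conn.get()) != CONNECTION_OK)
        throw std::runtime_error("connection failed");

    pg_result_type res{
        PQexec(conn.get(), "SELECT 1"), [](PGresult* r) { PQclear(r); }};
    if (PQresultStatus(res.get()) != PGRES_TUPLES_OK)
        throw std::runtime_error(PQerrorMessage(conn.get()));
    // res and conn are cleaned up automatically here.
}
```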
-
-void
-Pg::bulkInsert(char const* table, std::string const& records)
-{
-    // https://www.postgresql.org/docs/12/libpq-copy.html#LIBPQ-COPY-SEND
-    assert(conn_.get());
-    static auto copyCmd = boost::format(R"(COPY %s FROM stdin)");
-    auto res = query(boost::str(copyCmd % table).c_str());
-    if (!res || res.status() != PGRES_COPY_IN)
-    {
-        std::stringstream ss;
-        ss << "bulkInsert to " << table
-           << ". Postgres insert error: " << res.msg();
-        if (res)
-            ss << ". Query status not PGRES_COPY_IN: " << res.status();
-        Throw<std::runtime_error>(ss.str());
-    }
-
-    if (PQputCopyData(conn_.get(), records.c_str(), records.size()) == -1)
-    {
-        std::stringstream ss;
-        ss << "bulkInsert to " << table
-           << ". PQputCopyData error: " << PQerrorMessage(conn_.get());
-        disconnect();
-        Throw<std::runtime_error>(ss.str());
-    }
-
-    if (PQputCopyEnd(conn_.get(), nullptr) == -1)
-    {
-        std::stringstream ss;
-        ss << "bulkInsert to " << table
-           << ". PQputCopyEnd error: " << PQerrorMessage(conn_.get());
-        disconnect();
-        Throw<std::runtime_error>(ss.str());
-    }
-
-    // The result object must be freed using the libpq API PQclear() call.
-    pg_result_type copyEndResult{
-        nullptr, [](PGresult* result) { PQclear(result); }};
-    copyEndResult.reset(PQgetResult(conn_.get()));
-    ExecStatusType status = PQresultStatus(copyEndResult.get());
-    if (status != PGRES_COMMAND_OK)
-    {
-        std::stringstream ss;
-        ss << "bulkInsert to " << table
-           << ". PQputCopyEnd status not PGRES_COMMAND_OK: " << status;
-        disconnect();
-        Throw<std::runtime_error>(ss.str());
-    }
-}
-
-bool
-Pg::clear()
-{
-    if (!conn_)
-        return false;
-
-    // The result object must be freed using the libpq API PQclear() call.
-    pg_result_type res{nullptr, [](PGresult* result) { PQclear(result); }};
-
-    // Consume results until no more, or until the connection is severed.
-    do
-    {
-        res.reset(PQgetResult(conn_.get()));
-        if (!res)
-            break;
-
-        // Pending bulk copy operations may leave the connection in such a
-        // state that it must be disconnected.
-        switch (PQresultStatus(res.get()))
-        {
-            case PGRES_COPY_IN:
-                if (PQputCopyEnd(conn_.get(), nullptr) != -1)
-                    break;
-                [[fallthrough]];  // avoids compiler warning
-            case PGRES_COPY_OUT:
-            case PGRES_COPY_BOTH:
-                conn_.reset();
-            default:;
-        }
-    } while (res && conn_);
-
-    return conn_ != nullptr;
-}
-
-//-----------------------------------------------------------------------------
-
-PgPool::PgPool(Section const& pgConfig, beast::Journal j) : j_(j)
-{
-    // Make sure that boost::asio initializes the SSL library.
-    {
-        static boost::asio::ssl::detail::openssl_init<> initSsl;
-    }
-    // Don't have postgres client initialize SSL.
-    PQinitOpenSSL(0, 0);
-
-    /*
-        Connect to postgres to create low level connection parameters with
-        optional caching of network address info for subsequent connections.
-        See https://www.postgresql.org/docs/10/libpq-connect.html
-
-        For bounds checking of postgres connection data received from
-        the network: the largest size for any connection field in
-        PG source code is 64 bytes as of 5/2019. There are 29 fields.
-    */
-    constexpr std::size_t maxFieldSize = 1024;
-    constexpr std::size_t maxFields = 1000;
-
-    // The connection object must be freed using the libpq API PQfinish()
-    // call.
-    pg_connection_type conn(
-        PQconnectdb(get(pgConfig, "conninfo").c_str()),
-        [](PGconn* conn) { PQfinish(conn); });
-    if (!conn)
-        Throw<std::runtime_error>("Can't create DB connection.");
-    if (PQstatus(conn.get()) != CONNECTION_OK)
-    {
-        std::stringstream ss;
-        ss << "Initial DB connection failed: " << PQerrorMessage(conn.get());
-        Throw<std::runtime_error>(ss.str());
-    }
-
-    int const sockfd = PQsocket(conn.get());
-    if (sockfd == -1)
-        Throw<std::runtime_error>("No DB socket is open.");
-    struct sockaddr_storage addr;
-    socklen_t len = sizeof(addr);
-    if (getpeername(sockfd, reinterpret_cast<struct sockaddr*>(&addr), &len) ==
-        -1)
-    {
-        Throw<std::system_error>(
-            errno, std::generic_category(), "Can't get server address info.");
-    }
-
-    // Set "port" and "hostaddr" if we're caching it.
-    bool const remember_ip = get(pgConfig, "remember_ip", true);
-
-    if (remember_ip)
-    {
-        config_.keywords.push_back("port");
-        config_.keywords.push_back("hostaddr");
-        std::string port;
-        std::string hostaddr;
-
-        if (addr.ss_family == AF_INET)
-        {
-            hostaddr.assign(INET_ADDRSTRLEN, '\0');
-            struct sockaddr_in const& ainfo =
-                reinterpret_cast<struct sockaddr_in const&>(addr);
-            port = std::to_string(ntohs(ainfo.sin_port));
-            if (!inet_ntop(
-                    AF_INET, &ainfo.sin_addr, &hostaddr[0], hostaddr.size()))
-            {
-                Throw<std::system_error>(
-                    errno,
-                    std::generic_category(),
-                    "Can't get IPv4 address string.");
-            }
-        }
-        else if (addr.ss_family == AF_INET6)
-        {
-            hostaddr.assign(INET6_ADDRSTRLEN, '\0');
-            struct sockaddr_in6 const& ainfo =
-                reinterpret_cast<struct sockaddr_in6 const&>(addr);
-            port = std::to_string(ntohs(ainfo.sin6_port));
-            if (!inet_ntop(
-                    AF_INET6,
-                    &ainfo.sin6_addr,
-                    &hostaddr[0],
-                    hostaddr.size()))
-            {
-                Throw<std::system_error>(
-                    errno,
-                    std::generic_category(),
-                    "Can't get IPv6 address string.");
-            }
-        }
-
-        config_.values.push_back(port.c_str());
-        config_.values.push_back(hostaddr.c_str());
-    }
-    std::unique_ptr<PQconninfoOption, void (*)(PQconninfoOption*)> connOptions(
-        PQconninfo(conn.get()),
-        [](PQconninfoOption* opts) { PQconninfoFree(opts); });
-    if (!connOptions)
-        Throw<std::runtime_error>("Can't get DB connection options.");
-
-    std::size_t nfields = 0;
-    for (PQconninfoOption* option = connOptions.get();
-         option->keyword != nullptr;
-         ++option)
-    {
-        if (++nfields > maxFields)
-        {
-            std::stringstream ss;
-            ss << "DB returned connection options with > " << maxFields
-               << " fields.";
-            Throw<std::runtime_error>(ss.str());
-        }
-
-        if (!option->val ||
-            (remember_ip &&
-             (!strcmp(option->keyword, "hostaddr") ||
-              !strcmp(option->keyword, "port"))))
-        {
-            continue;
-        }
-
-        if (strlen(option->keyword) > maxFieldSize ||
-            strlen(option->val) > maxFieldSize)
-        {
-            std::stringstream ss;
-            ss << "DB returned a connection option name or value with\n";
-            ss << "excessive size (>" << maxFieldSize << " bytes).\n";
-            ss << "option (possibly truncated): "
-               << std::string_view(
-                      option->keyword,
-                      std::min(strlen(option->keyword), maxFieldSize))
-               << '\n';
-            ss << " value (possibly truncated): "
-               << std::string_view(
-                      option->val,
-                      std::min(strlen(option->val), maxFieldSize));
-            Throw<std::runtime_error>(ss.str());
-        }
-        config_.keywords.push_back(option->keyword);
-        config_.values.push_back(option->val);
-    }
-
-    config_.keywordsIdx.reserve(config_.keywords.size() + 1);
-    config_.valuesIdx.reserve(config_.values.size() + 1);
-    for (std::size_t n = 0; n < config_.keywords.size(); ++n)
-    {
-        config_.keywordsIdx.push_back(config_.keywords[n].c_str());
-        config_.valuesIdx.push_back(config_.values[n].c_str());
-    }
-    config_.keywordsIdx.push_back(nullptr);
-    config_.valuesIdx.push_back(nullptr);
-
-    get_if_exists(pgConfig, "max_connections", config_.max_connections);
-    std::size_t timeout;
-    if (get_if_exists(pgConfig, "timeout", timeout))
-        config_.timeout = std::chrono::seconds(timeout);
-}
"*" : config_.values[i]); - } - JLOG(j_.debug()) << ss.str(); - } -} - -void -PgPool::stop() -{ - std::lock_guard lock(mutex_); - stop_ = true; - cond_.notify_all(); - idle_.clear(); - JLOG(j_.info()) << "stopped"; -} - -void -PgPool::idleSweeper() -{ - std::size_t before, after; - { - std::lock_guard lock(mutex_); - before = idle_.size(); - if (config_.timeout != std::chrono::seconds(0)) - { - auto const found = - idle_.upper_bound(clock_type::now() - config_.timeout); - for (auto it = idle_.begin(); it != found;) - { - it = idle_.erase(it); - --connections_; - } - } - after = idle_.size(); - } - - JLOG(j_.info()) << "Idle sweeper. connections: " << connections_ - << ". checked out: " << connections_ - after - << ". idle before, after sweep: " << before << ", " - << after; -} - -std::unique_ptr -PgPool::checkout() -{ - std::unique_ptr ret; - std::unique_lock lock(mutex_); - do - { - if (stop_) - return {}; - - // If there is a connection in the pool, return the most recent. - if (idle_.size()) - { - auto entry = idle_.rbegin(); - ret = std::move(entry->second); - idle_.erase(std::next(entry).base()); - } - // Otherwise, return a new connection unless over threshold. - else if (connections_ < config_.max_connections) - { - ++connections_; - ret = std::make_unique(config_, j_, stop_, mutex_); - } - // Otherwise, wait until a connection becomes available or we stop. - else - { - JLOG(j_.error()) << "No database connections available."; - cond_.wait(lock); - } - } while (!ret && !stop_); - lock.unlock(); - - return ret; -} - -void -PgPool::checkin(std::unique_ptr& pg) -{ - if (pg) - { - std::lock_guard lock(mutex_); - if (!stop_ && pg->clear()) - { - idle_.emplace(clock_type::now(), std::move(pg)); - } - else - { - --connections_; - pg.reset(); - } - } - - cond_.notify_all(); -} - -//----------------------------------------------------------------------------- - -std::shared_ptr -make_PgPool(Section const& pgConfig, beast::Journal j) -{ - auto ret = std::make_shared(pgConfig, j); - ret->setup(); - return ret; -} - -//----------------------------------------------------------------------------- - -/** Postgres Schema Management - * - * The postgres schema has several properties to facilitate - * consistent deployments, including upgrades. It is not recommended to - * upgrade the schema concurrently. - * - * Initial deployment should be against a completely fresh database. The - * postgres user must have the CREATE TABLE privilege. - * - * With postgres configured, the first step is to apply the version_query - * schema and consume the results. This script returns the currently - * installed schema version, if configured, or 0 if not. It is idempotent. - * - * If the version installed on the database is equal to the - * LATEST_SCHEMA_VERSION, then no action should take place. - * - * If the version on the database is 0, then the entire latest schema - * should be deployed with the applySchema() function. - * Each version that is developed is fully - * represented in the full_schemata array with each version equal to the - * text in the array's index position. For example, index position 1 - * contains the full schema version 1. Position 2 contains schema version 2. - * Index 0 should never be referenced and its value only a placeholder. - * If a fresh installation is aborted, then subsequent fresh installations - * should install the same version previously attempted, even if there - * exists a newer version. The initSchema() function performs this task. 
-
-/** Postgres Schema Management
- *
- * The postgres schema has several properties to facilitate
- * consistent deployments, including upgrades. It is not recommended to
- * upgrade the schema concurrently.
- *
- * Initial deployment should be against a completely fresh database. The
- * postgres user must have the CREATE TABLE privilege.
- *
- * With postgres configured, the first step is to apply the version_query
- * schema and consume the results. This script returns the currently
- * installed schema version, if configured, or 0 if not. It is idempotent.
- *
- * If the version installed on the database is equal to the
- * LATEST_SCHEMA_VERSION, then no action should take place.
- *
- * If the version on the database is 0, then the entire latest schema
- * should be deployed with the applySchema() function.
- * Each version that is developed is fully
- * represented in the full_schemata array with each version equal to the
- * text in the array's index position. For example, index position 1
- * contains the full schema version 1. Position 2 contains schema version 2.
- * Index 0 should never be referenced and its value is only a placeholder.
- * If a fresh installation is aborted, then subsequent fresh installations
- * should install the same version previously attempted, even if there
- * exists a newer version. The initSchema() function performs this task.
- * Therefore, previous schema versions should remain in the array
- * without modification as new versions are developed and placed after
- * them. Once the schema is successfully deployed, applySchema() persists
- * the schema version to the database.
- *
- * If the current version of the database is greater than 0, then it means
- * that a previous schema version is already present. In this case, the
- * database schema needs to be updated incrementally for each subsequent
- * version. Again, applySchema() is used to upgrade the schema. Schema
- * upgrades are in the upgrade_schemata array. Each entry by index position
- * represents the database schema version from which the upgrade begins.
- * Each upgrade sets the database to the next version. Schema upgrades can
- * only safely happen from one version to the next. To upgrade several
- * versions of schema, upgrade incrementally for each version that
- * separates the current from the latest. For example, to upgrade from
- * version 5 to version 6 of the schema, use upgrade_schemata[5]. To
- * upgrade from version 1 to version 4, use upgrade_schemata[1],
- * upgrade_schemata[2], and upgrade_schemata[3] in sequence.
- *
- * To upgrade the schema past version 1, the following variables must be
- * updated:
- * 1) LATEST_SCHEMA_VERSION must be set to the new version.
- * 2) A new entry must be placed at the end of the full_schemata array. This
- *    entry should have the entire schema so that fresh installations can
- *    be performed with it. The index position must be equal to the
- *    LATEST_SCHEMA_VERSION.
- * 3) A new entry must be placed at the end of the upgrade_schemata array.
- *    This entry should only contain commands to upgrade the schema from
- *    the immediately previous version to the new version.
- *
- * It is up to the developer to ensure that all schema commands are
- * idempotent. This protects against 2 things:
- * 1) Resuming schema installation after a problem.
- * 2) Concurrent schema updates from multiple processes.
- *
- * There are several things that must be considered for upgrading existing
- * schemata to avoid stability and performance problems. Some examples and
- * suggestions follow.
- * - Schema changes such as creating new columns and indices can consume
- *   a lot of time. Therefore, before such changes, a separate script
- *   should be executed by the user to perform the schema upgrade prior to
- *   restarting rippled.
- * - Stored functions cannot be dropped while being accessed. Also,
- *   dropping stored functions can be ambiguous if multiple functions with
- *   the same name but different signatures exist. Further, stored function
- *   behavior from one schema version to the other would likely be handled
- *   differently by rippled. In this case, it is likely that the functions
- *   themselves should be versioned such as by appending a number to the
- *   end of the name (abcf becomes abcf_2, abcf_3, etc.)
- *
- * Essentially, each schema upgrade will have its own factors to impact
- * service availability and function.
- */
-
-#define LATEST_SCHEMA_VERSION 1
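A sketch of how the versioning scheme described above can be driven: deploy the full latest schema on a fresh database, otherwise walk `upgrade_schemata` one step at a time. `executeSql()` and `currentVersion()` are stand-ins for the real database plumbing, and the SQL strings are placeholders:

```cpp
#include <array>
#include <cstddef>
#include <stdexcept>
#include <string>

constexpr std::size_t LATEST_SCHEMA_VERSION = 1;

// Placeholder arrays in the spirit of full_schemata / upgrade_schemata.
std::array<char const*, LATEST_SCHEMA_VERSION + 1> full_schemata = {
    "-- index 0 is never used", "-- full schema, version 1"};
std::array<char const*, LATEST_SCHEMA_VERSION + 1> upgrade_schemata = {
    "-- index 0 is never used", "-- upgrade from version 1 to 2"};

void executeSql(std::string const&) { /* run against the database */ }
std::size_t currentVersion() { return 0; /* query the version table */ }

void
applySchemata()
{
    std::size_t version = currentVersion();
    if (version > LATEST_SCHEMA_VERSION)
        throw std::runtime_error("database schema is newer than this build");

    if (version == 0)
    {
        // Fresh install: deploy the complete latest schema in one shot.
        executeSql(full_schemata[LATEST_SCHEMA_VERSION]);
        return;
    }
    // Existing install: step through one upgrade at a time, since
    // upgrades are only safe from version n to n + 1.
    for (; version < LATEST_SCHEMA_VERSION; ++version)
        executeSql(upgrade_schemata[version]);
}
```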
-
-char const* version_query = R"(
-CREATE TABLE IF NOT EXISTS version (version int NOT NULL,
-    fresh_pending int NOT NULL);
-
--- Version 0 means that no schema has been fully deployed.
-DO $$
-BEGIN
-    IF NOT EXISTS (SELECT 1 FROM version) THEN
-        INSERT INTO version VALUES (0, 0);
-    END IF;
-END $$;
-
--- Function to set the schema version. _in_pending should only be set to
--- non-zero prior to an attempt to initialize the schema from scratch.
--- After successful initialization, this should be set to 0.
--- _in_version should be set to the version of schema that has been applied
--- once successful application has occurred.
-CREATE OR REPLACE FUNCTION set_schema_version (
-    _in_version int,
-    _in_pending int
-) RETURNS void AS $$
-DECLARE
-    _current_version int;
-BEGIN
-    IF _in_version IS NULL OR _in_pending IS NULL THEN RETURN; END IF;
-    IF EXISTS (SELECT 1 FROM version) THEN DELETE FROM version; END IF;
-    INSERT INTO version VALUES (_in_version, _in_pending);
-    RETURN;
-END;
-$$ LANGUAGE plpgsql;
-
--- PQexec() returns the output of the last statement in its response.
-SELECT * FROM version;
-)";
-
-std::array<char const*, LATEST_SCHEMA_VERSION + 1> full_schemata = {
-    // version 0:
-    "There is no such thing as schema version 0."
-
-    // version 1:
-    ,
-    R"(
--- Table to store ledger headers.
-CREATE TABLE IF NOT EXISTS ledgers (
-    ledger_seq bigint PRIMARY KEY,
-    ledger_hash bytea NOT NULL,
-    prev_hash bytea NOT NULL,
-    total_coins bigint NOT NULL,
-    closing_time bigint NOT NULL,
-    prev_closing_time bigint NOT NULL,
-    close_time_res bigint NOT NULL,
-    close_flags bigint NOT NULL,
-    account_set_hash bytea NOT NULL,
-    trans_set_hash bytea NOT NULL
-);
-
--- Index for lookups by ledger hash.
-CREATE INDEX IF NOT EXISTS ledgers_ledger_hash_idx ON ledgers
-    USING hash (ledger_hash);
-
--- Transactions table. Deletes from the ledger table
--- cascade here based on ledger_seq.
-CREATE TABLE IF NOT EXISTS transactions (
-    ledger_seq bigint NOT NULL,
-    transaction_index bigint NOT NULL,
-    trans_id bytea NOT NULL,
-    nodestore_hash bytea NOT NULL,
-    constraint transactions_pkey PRIMARY KEY (ledger_seq, transaction_index),
-    constraint transactions_fkey FOREIGN KEY (ledger_seq)
-        REFERENCES ledgers (ledger_seq) ON DELETE CASCADE
-);
-
--- Index for lookups by transaction hash.
-CREATE INDEX IF NOT EXISTS transactions_trans_id_idx ON transactions
-    USING hash (trans_id);
-
--- Table that maps accounts to transactions affecting them. Deletes from the
--- ledger table by way of transactions table cascade here based on ledger_seq.
-CREATE TABLE IF NOT EXISTS account_transactions (
-    account bytea NOT NULL,
-    ledger_seq bigint NOT NULL,
-    transaction_index bigint NOT NULL,
-    constraint account_transactions_pkey PRIMARY KEY (account, ledger_seq,
-        transaction_index),
-    constraint account_transactions_fkey FOREIGN KEY (ledger_seq,
-        transaction_index) REFERENCES transactions (
-        ledger_seq, transaction_index) ON DELETE CASCADE
-);
-
--- Index to allow for fast cascading deletions and referential integrity.
-CREATE INDEX IF NOT EXISTS fki_account_transactions_idx ON
-    account_transactions USING btree (ledger_seq, transaction_index);
-
--- Avoid inadvertent administrative tampering with committed data.
-CREATE OR REPLACE RULE ledgers_update_protect AS ON UPDATE TO
-    ledgers DO INSTEAD NOTHING;
-CREATE OR REPLACE RULE transactions_update_protect AS ON UPDATE TO
-    transactions DO INSTEAD NOTHING;
-CREATE OR REPLACE RULE account_transactions_update_protect AS ON UPDATE TO
-    account_transactions DO INSTEAD NOTHING;
-
--- Stored procedure to assist with the tx() RPC call. Takes a transaction
--- hash as input. If found, returns the ledger sequence in which it was
--- applied. If not, returns the range of ledgers searched.
-CREATE OR REPLACE FUNCTION tx (
-    _in_trans_id bytea
-) RETURNS jsonb AS $$
-DECLARE
-    _min_seq bigint := min_ledger();
-    _max_seq bigint := max_ledger();
-    _ledger_seq bigint;
-    _nodestore_hash bytea;
-BEGIN
-
-    IF _min_seq IS NULL THEN
-        RETURN jsonb_build_object('error', 'empty database');
-    END IF;
-    IF length(_in_trans_id) != 32 THEN
-        RETURN jsonb_build_object('error', '_in_trans_id size: '
-            || to_char(length(_in_trans_id), '999'));
-    END IF;
-
-    EXECUTE 'SELECT nodestore_hash, ledger_seq
-        FROM transactions
-        WHERE trans_id = $1
-        AND ledger_seq BETWEEN $2 AND $3
-    ' INTO _nodestore_hash, _ledger_seq USING _in_trans_id, _min_seq, _max_seq;
-    IF _nodestore_hash IS NULL THEN
-        RETURN jsonb_build_object('min_seq', _min_seq, 'max_seq', _max_seq);
-    END IF;
-    RETURN jsonb_build_object('nodestore_hash', _nodestore_hash, 'ledger_seq',
-        _ledger_seq);
-END;
-$$ LANGUAGE plpgsql;
-
--- Return the earliest ledger sequence intended for range operations
--- that protect the bottom of the range from deletion. Return NULL if empty.
-CREATE OR REPLACE FUNCTION min_ledger () RETURNS bigint AS $$
-DECLARE
-    _min_seq bigint := (SELECT ledger_seq from min_seq);
-BEGIN
-    IF _min_seq IS NULL THEN
-        RETURN (SELECT ledger_seq FROM ledgers ORDER BY ledger_seq ASC LIMIT 1);
-    ELSE
-        RETURN _min_seq;
-    END IF;
-END;
-$$ LANGUAGE plpgsql;
-
--- Return the latest ledger sequence in the database, or NULL if empty.
-CREATE OR REPLACE FUNCTION max_ledger () RETURNS bigint AS $$
-BEGIN
-    RETURN (SELECT ledger_seq FROM ledgers ORDER BY ledger_seq DESC LIMIT 1);
-END;
-$$ LANGUAGE plpgsql;
-
--- account_tx() RPC helper. From the rippled reporting process, only the
--- parameters without defaults are required. For the parameters with
--- defaults, validation should be done by rippled, such as:
--- _in_account_id should be a valid xrp base58 address.
--- _in_forward should be either true or false according to the published API.
--- _in_limit should be validated and not simply passed through from the
--- client.
---
--- For _in_ledger_index_min and _in_ledger_index_max, if passed in the
--- request, verify that their type is int and pass through as is.
--- For _in_ledger_hash, verify and convert from hex length 32 bytes and
--- prepend with \x (\\x in C++).
---
--- For _in_ledger_index, if the input type is integer, then pass through
--- as is. If the type is string and contents = validated, then do not
--- set _in_ledger_index. Instead set _in_validated to TRUE.
---
--- There is no need for rippled to do any type of lookup on max/min
--- ledger range, lookup of hash, or the like. This function does those
--- things, including error responses for bad input. Only the above must
--- be done to set the correct search range.
---
--- If a marker is present in the request, verify the members 'ledger'
--- and 'seq' are integers and that they correspond to _in_marker_seq and
--- _in_marker_index.
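---
--- For example (illustrative values only), a request marker of
---     "marker": {"ledger": 1000000, "seq": 5}
--- maps to _in_marker_seq = 1000000 and _in_marker_index = 5.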
--- To reiterate: --- JSON input field 'ledger' corresponds to _in_marker_seq --- JSON input field 'seq' corresponds to _in_marker_index -CREATE OR REPLACE FUNCTION account_tx ( - _in_account_id bytea, - _in_forward bool, - _in_limit bigint, - _in_ledger_index_min bigint = NULL, - _in_ledger_index_max bigint = NULL, - _in_ledger_hash bytea = NULL, - _in_ledger_index bigint = NULL, - _in_validated bool = NULL, - _in_marker_seq bigint = NULL, - _in_marker_index bigint = NULL -) RETURNS jsonb AS $$ -DECLARE - _min bigint; - _max bigint; - _sort_order text := (SELECT CASE WHEN _in_forward IS TRUE THEN - 'ASC' ELSE 'DESC' END); - _marker bool; - _between_min bigint; - _between_max bigint; - _sql text; - _cursor refcursor; - _result jsonb; - _record record; - _tally bigint := 0; - _ret_marker jsonb; - _transactions jsonb[] := '{}'; -BEGIN - IF _in_ledger_index_min IS NOT NULL OR - _in_ledger_index_max IS NOT NULL THEN - _min := (SELECT CASE WHEN _in_ledger_index_min IS NULL - THEN min_ledger() ELSE greatest( - _in_ledger_index_min, min_ledger()) END); - _max := (SELECT CASE WHEN _in_ledger_index_max IS NULL OR - _in_ledger_index_max = -1 THEN max_ledger() ELSE - least(_in_ledger_index_max, max_ledger()) END); - - IF _max < _min THEN - RETURN jsonb_build_object('error', 'max is less than min ledger'); - END IF; - - ELSIF _in_ledger_hash IS NOT NULL OR _in_ledger_index IS NOT NULL - OR _in_validated IS TRUE THEN - IF _in_ledger_hash IS NOT NULL THEN - IF length(_in_ledger_hash) != 32 THEN - RETURN jsonb_build_object('error', '_in_ledger_hash size: ' - || to_char(length(_in_ledger_hash), '999')); - END IF; - EXECUTE 'SELECT ledger_seq - FROM ledgers - WHERE ledger_hash = $1' - INTO _min USING _in_ledger_hash::bytea; - ELSE - IF _in_ledger_index IS NOT NULL AND _in_validated IS TRUE THEN - RETURN jsonb_build_object('error', - '_in_ledger_index cannot be set and _in_validated true'); - END IF; - IF _in_validated IS TRUE THEN - _in_ledger_index := max_ledger(); - END IF; - _min := (SELECT ledger_seq - FROM ledgers - WHERE ledger_seq = _in_ledger_index); - END IF; - IF _min IS NULL THEN - RETURN jsonb_build_object('error', 'ledger not found'); - END IF; - _max := _min; - ELSE - _min := min_ledger(); - _max := max_ledger(); - END IF; - - IF _in_marker_seq IS NOT NULL OR _in_marker_index IS NOT NULL THEN - _marker := TRUE; - IF _in_marker_seq IS NULL OR _in_marker_index IS NULL THEN - -- The rippled implementation returns no transaction results - -- if either of these values are missing. 
- _between_min := 0; - _between_max := 0; - ELSE - IF _in_forward IS TRUE THEN - _between_min := _in_marker_seq; - _between_max := _max; - ELSE - _between_min := _min; - _between_max := _in_marker_seq; - END IF; - END IF; - ELSE - _marker := FALSE; - _between_min := _min; - _between_max := _max; - END IF; - IF _between_max < _between_min THEN - RETURN jsonb_build_object('error', 'ledger search range is ' - || to_char(_between_min, '999') || '-' - || to_char(_between_max, '999')); - END IF; - - _sql := format(' - SELECT transactions.ledger_seq, transactions.transaction_index, - transactions.trans_id, transactions.nodestore_hash - FROM transactions - INNER JOIN account_transactions - ON transactions.ledger_seq = - account_transactions.ledger_seq - AND transactions.transaction_index = - account_transactions.transaction_index - WHERE account_transactions.account = $1 - AND account_transactions.ledger_seq BETWEEN $2 AND $3 - ORDER BY transactions.ledger_seq %s, transactions.transaction_index %s - ', _sort_order, _sort_order); - - OPEN _cursor FOR EXECUTE _sql USING _in_account_id, _between_min, - _between_max; - LOOP - FETCH _cursor INTO _record; - IF _record IS NULL THEN EXIT; END IF; - IF _marker IS TRUE THEN - IF _in_marker_seq = _record.ledger_seq THEN - IF _in_forward IS TRUE THEN - IF _in_marker_index > _record.transaction_index THEN - CONTINUE; - END IF; - ELSE - IF _in_marker_index < _record.transaction_index THEN - CONTINUE; - END IF; - END IF; - END IF; - _marker := FALSE; - END IF; - - _tally := _tally + 1; - IF _tally > _in_limit THEN - _ret_marker := jsonb_build_object( - 'ledger', _record.ledger_seq, - 'seq', _record.transaction_index); - EXIT; - END IF; - - -- Is the transaction index in the tx object? - _transactions := _transactions || jsonb_build_object( - 'ledger_seq', _record.ledger_seq, - 'transaction_index', _record.transaction_index, - 'trans_id', _record.trans_id, - 'nodestore_hash', _record.nodestore_hash); - - END LOOP; - CLOSE _cursor; - - _result := jsonb_build_object('ledger_index_min', _min, - 'ledger_index_max', _max, - 'transactions', _transactions); - IF _ret_marker IS NOT NULL THEN - _result := _result || jsonb_build_object('marker', _ret_marker); - END IF; - RETURN _result; -END; -$$ LANGUAGE plpgsql; - --- Trigger prior to insert on ledgers table. Validates length of hash fields. --- Verifies ancestry based on ledger_hash & prev_hash as follows: --- 1) If ledgers is empty, allows insert. --- 2) For each new row, check for previous and later ledgers by a single --- sequence. For each that exist, confirm ancestry based on hashes. --- 3) Disallow inserts with no prior or next ledger by sequence if any --- ledgers currently exist. This disallows gaps to be introduced by --- way of inserting. 
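---
--- Illustration (not part of the original schema): with ledgers 100-200
--- present, inserting ledger 99 or 201 succeeds when the hashes chain
--- correctly, but inserting ledger 300 is rejected as an orphan.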
-CREATE OR REPLACE FUNCTION insert_ancestry() RETURNS TRIGGER AS $$ -DECLARE - _parent bytea; - _child bytea; -BEGIN - IF length(NEW.ledger_hash) != 32 OR length(NEW.prev_hash) != 32 THEN - RAISE 'ledger_hash and prev_hash must each be 32 bytes: %', NEW; - END IF; - - IF (SELECT ledger_hash - FROM ledgers - ORDER BY ledger_seq DESC - LIMIT 1) = NEW.prev_hash THEN RETURN NEW; END IF; - - IF NOT EXISTS (SELECT 1 FROM LEDGERS) THEN RETURN NEW; END IF; - - _parent := (SELECT ledger_hash - FROM ledgers - WHERE ledger_seq = NEW.ledger_seq - 1); - _child := (SELECT prev_hash - FROM ledgers - WHERE ledger_seq = NEW.ledger_seq + 1); - IF _parent IS NULL AND _child IS NULL THEN - RAISE 'Ledger Ancestry error: orphan.'; - END IF; - IF _parent != NEW.prev_hash THEN - RAISE 'Ledger Ancestry error: bad parent.'; - END IF; - IF _child != NEW.ledger_hash THEN - RAISE 'Ledger Ancestry error: bad child.'; - END IF; - - RETURN NEW; -END; -$$ LANGUAGE plpgsql; - --- Trigger function prior to delete on ledgers table. Disallow gaps from --- forming. Do not allow deletions if both the previous and next ledgers --- are present. In other words, only allow either the least or greatest --- to be deleted. -CREATE OR REPLACE FUNCTION delete_ancestry () RETURNS TRIGGER AS $$ -BEGIN - IF EXISTS (SELECT 1 - FROM ledgers - WHERE ledger_seq = OLD.ledger_seq + 1) - AND EXISTS (SELECT 1 - FROM ledgers - WHERE ledger_seq = OLD.ledger_seq - 1) THEN - RAISE 'Ledger Ancestry error: Can only delete the least or greatest ' - 'ledger.'; - END IF; - RETURN OLD; -END; -$$ LANGUAGE plpgsql; - --- Track the minimum sequence that should be used for ranged queries --- with protection against deletion during the query. This should --- be updated before calling online_delete() to not block deleting that --- range. -CREATE TABLE IF NOT EXISTS min_seq ( - ledger_seq bigint NOT NULL -); - --- Set the minimum sequence for use in ranged queries with protection --- against deletion greater than or equal to the input parameter. This --- should be called prior to online_delete() with the same parameter --- value so that online_delete() is not blocked by range queries --- that are protected against concurrent deletion of the ledger at --- the bottom of the range. This function needs to be called from a --- separate transaction from that which executes online_delete(). -CREATE OR REPLACE FUNCTION prepare_delete ( - _in_last_rotated bigint -) RETURNS void AS $$ -BEGIN - IF EXISTS (SELECT 1 FROM min_seq) THEN - DELETE FROM min_seq; - END IF; - INSERT INTO min_seq VALUES (_in_last_rotated + 1); -END; -$$ LANGUAGE plpgsql; - --- Function to delete old data. All data belonging to ledgers prior to and --- equal to the _in_seq parameter will be deleted. This should be --- called with the input parameter equivalent to the value of lastRotated --- in rippled's online_delete routine. -CREATE OR REPLACE FUNCTION online_delete ( - _in_seq bigint -) RETURNS void AS $$ -BEGIN - DELETE FROM LEDGERS WHERE ledger_seq <= _in_seq; -END; -$$ LANGUAGE plpgsql; - --- Function to delete data from the top of the ledger range. Delete --- everything greater than the input parameter. --- It doesn't do a normal range delete because of the trigger protecting --- deletions causing gaps. Instead, it walks back from the greatest ledger. 
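---
--- Illustrative usage (not part of the original schema): with ledgers
--- 100-200 present, SELECT delete_above(150) deletes ledgers 200 down
--- through 151, one row at a time.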
-CREATE OR REPLACE FUNCTION delete_above (
-    _in_seq bigint
-) RETURNS void AS $$
-DECLARE
-    _max_seq bigint := max_ledger();
-    _i bigint := _max_seq;
-BEGIN
-    IF _max_seq IS NULL THEN RETURN; END IF;
-    LOOP
-        IF _i <= _in_seq THEN RETURN; END IF;
-        EXECUTE 'DELETE FROM ledgers WHERE ledger_seq = $1' USING _i;
-        _i := _i - 1;
-    END LOOP;
-END;
-$$ LANGUAGE plpgsql;
-
--- Verify correct ancestry of ledgers in database:
--- Table to persist last-confirmed latest ledger with proper ancestry.
-CREATE TABLE IF NOT EXISTS ancestry_verified (
-    ledger_seq bigint NOT NULL
-);
-
--- Function to verify ancestry of ledgers based on ledger_hash and prev_hash.
--- Upon failure, returns the ledger sequence failing the ancestry check.
--- Otherwise, returns NULL.
--- _in_full: If TRUE, verify entire table. Else verify starting from
---           value in ancestry_verified table. If no value, then start
---           from lowest ledger.
--- _in_persist: If TRUE, persist the latest ledger with correct ancestry.
---              If an exception was raised because of failure, persist
---              the latest ledger prior to that which failed.
--- _in_min: If set and _in_full is not true, the starting ledger from which
---          to verify.
--- _in_max: If set and _in_full is not true, the latest ledger to verify.
-CREATE OR REPLACE FUNCTION check_ancestry (
-    _in_full bool = FALSE,
-    _in_persist bool = TRUE,
-    _in_min bigint = NULL,
-    _in_max bigint = NULL
-) RETURNS bigint AS $$
-DECLARE
-    _min bigint;
-    _max bigint;
-    _last_verified bigint;
-    _parent ledgers;
-    _current ledgers;
-    _cursor refcursor;
-BEGIN
-    IF _in_full IS TRUE AND
-            ((_in_min IS NOT NULL) OR (_in_max IS NOT NULL)) THEN
-        RAISE 'Cannot specify manual range and do full check.';
-    END IF;
-
-    IF _in_min IS NOT NULL THEN
-        _min := _in_min;
-    ELSIF _in_full IS NOT TRUE THEN
-        _last_verified := (SELECT ledger_seq FROM ancestry_verified);
-        IF _last_verified IS NULL THEN
-            _min := min_ledger();
-        ELSE
-            _min := _last_verified + 1;
-        END IF;
-    ELSE
-        _min := min_ledger();
-    END IF;
-    EXECUTE 'SELECT * FROM ledgers WHERE ledger_seq = $1'
-        INTO _parent USING _min - 1;
-    IF _last_verified IS NOT NULL AND _parent IS NULL THEN
-        RAISE 'Verified ledger % doesn''t exist.', _last_verified;
-    END IF;
-
-    IF _in_max IS NOT NULL THEN
-        _max := _in_max;
-    ELSE
-        _max := max_ledger();
-    END IF;
-
-    OPEN _cursor FOR EXECUTE 'SELECT *
-        FROM ledgers
-        WHERE ledger_seq BETWEEN $1 AND $2
-        ORDER BY ledger_seq ASC'
-        USING _min, _max;
-    LOOP
-        FETCH _cursor INTO _current;
-        IF _current IS NULL THEN EXIT; END IF;
-        IF _parent IS NOT NULL THEN
-            IF _current.prev_hash != _parent.ledger_hash THEN
-                CLOSE _cursor;
-                RETURN _current.ledger_seq;
-            END IF;
-        END IF;
-        _parent := _current;
-    END LOOP;
-    CLOSE _cursor;
-
-    IF _in_persist IS TRUE AND _parent IS NOT NULL THEN
-        DELETE FROM ancestry_verified;
-        INSERT INTO ancestry_verified VALUES (_parent.ledger_seq);
-    END IF;
-
-    RETURN NULL;
-END;
-$$ LANGUAGE plpgsql;
-
--- Return number of whole seconds since the latest ledger was inserted, based
--- on ledger close time (not wall clock) of the insert.
--- Note that ledgers.closing_time is the number of seconds since the XRP
--- epoch, which is 01/01/2000 00:00:00. This in turn is 946684800 seconds
--- after the UNIX epoch. This conforms to the "age" field in the
--- server_info RPC call.
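---
--- Worked example (illustrative values): for a closing_time of 600000000,
--- the latest ledger closed at UNIX time 946684800 + 600000000 = 1546684800,
--- so age() returns (EXTRACT(EPOCH FROM now()) - 1546684800)::bigint.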
-CREATE OR REPLACE FUNCTION age () RETURNS bigint AS $$
-BEGIN
-    RETURN (EXTRACT(EPOCH FROM (now())) -
-        (946684800 + (SELECT closing_time
-                        FROM ledgers
-                       ORDER BY ledger_seq DESC
-                       LIMIT 1)))::bigint;
-END;
-$$ LANGUAGE plpgsql;
-
--- Return range of ledgers, or empty if none. This conforms to the
--- "complete_ledgers" field of the server_info RPC call. Note
--- that ledger gaps are prevented for reporting mode so the range
--- is simply the set between the least and greatest ledgers.
-CREATE OR REPLACE FUNCTION complete_ledgers () RETURNS text AS $$
-DECLARE
-    _min bigint := min_ledger();
-    _max bigint := max_ledger();
-BEGIN
-    IF _min IS NULL THEN RETURN 'empty'; END IF;
-    IF _min = _max THEN RETURN _min; END IF;
-    RETURN _min || '-' || _max;
-END;
-$$ LANGUAGE plpgsql;
-
-)"
-
-    // version 2:
-    // , R"(Full idempotent text of schema version 2)"
-
-    // version 3:
-    // , R"(Full idempotent text of schema version 3)"
-
-    // version 4:
-    // , R"(Full idempotent text of schema version 4)"
-
-    // ...
-
-    // version n:
-    // , R"(Full idempotent text of schema version n)"
-};
-
-std::array<char const*, LATEST_SCHEMA_VERSION> upgrade_schemata = {
-    // upgrade from version 0:
-    "There is no upgrade path from version 0. Instead, install "
-    "from full_schemata."
-    // upgrade from version 1 to 2:
-    //, R"(Text to idempotently upgrade from version 1 to 2)"
-    // upgrade from version 2 to 3:
-    //, R"(Text to idempotently upgrade from version 2 to 3)"
-    // upgrade from version 3 to 4:
-    //, R"(Text to idempotently upgrade from version 3 to 4)"
-    // ...
-    // upgrade from version n-1 to n:
-    //, R"(Text to idempotently upgrade from version n-1 to n)"
-};
-
-/** Apply schema to postgres.
- *
- * The schema text should contain idempotent SQL & plpgSQL statements.
- * Once completed, the version of the schema will be persisted.
- *
- * Throws upon error.
- *
- * @param pool Postgres connection pool manager.
- * @param schema SQL commands separated by semi-colons.
- * @param currentVersion The current version of the schema on the database.
- * @param schemaVersion The version that will be in place once the schema
- *        has been applied.
- */
-void
-applySchema(
-    std::shared_ptr<PgPool> const& pool,
-    char const* schema,
-    std::uint32_t currentVersion,
-    std::uint32_t schemaVersion)
-{
-    if (currentVersion != 0 && schemaVersion != currentVersion + 1)
-    {
-        assert(false);
-        std::stringstream ss;
-        ss << "Schema upgrade versions past initial deployment must increase "
-              "monotonically. Versions: current, target: "
-           << currentVersion << ", " << schemaVersion;
-        Throw<std::runtime_error>(ss.str());
-    }
-
-    auto res = PgQuery(pool)({schema, {}});
-    if (!res)
-    {
-        std::stringstream ss;
-        ss << "Error applying schema from version " << currentVersion
-           << " to " << schemaVersion << ": " << res.msg();
-        Throw<std::runtime_error>(ss.str());
-    }
-
-    auto cmd = boost::format(R"(SELECT set_schema_version(%u, 0))");
-    res = PgQuery(pool)({boost::str(cmd % schemaVersion).c_str(), {}});
-    if (!res)
-    {
-        std::stringstream ss;
-        ss << "Error setting schema version from " << currentVersion << " to "
-           << schemaVersion << ": " << res.msg();
-        Throw<std::runtime_error>(ss.str());
-    }
-}
-
-void
-initSchema(std::shared_ptr<PgPool> const& pool)
-{
-    // Figure out what schema version, if any, is already installed.
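-    // version_query ends with SELECT * FROM version, so the first row of the
-    // result holds (version, fresh_pending); e.g. (0, 1) records a fresh
-    // install of schema version 1 that was started but not completed.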
-    auto res = PgQuery(pool)({version_query, {}});
-    if (!res)
-    {
-        std::stringstream ss;
-        ss << "Error getting database schema version: " << res.msg();
-        Throw<std::runtime_error>(ss.str());
-    }
-    std::uint32_t currentSchemaVersion = res.asInt();
-    std::uint32_t const pendingSchemaVersion = res.asInt(0, 1);
-
-    // Nothing to do if we are on the latest schema.
-    if (currentSchemaVersion == LATEST_SCHEMA_VERSION)
-        return;
-
-    if (currentSchemaVersion == 0)
-    {
-        // If a fresh install has not been completed, then re-attempt
-        // the install of the same schema version.
-        std::uint32_t const freshVersion =
-            pendingSchemaVersion ? pendingSchemaVersion : LATEST_SCHEMA_VERSION;
-        // Persist that we are attempting a fresh install to the latest version.
-        // This protects against corruption in an aborted install that is
-        // followed by a fresh installation attempt with a new schema.
-        auto cmd = boost::format(R"(SELECT set_schema_version(0, %u))");
-        res = PgQuery(pool)({boost::str(cmd % freshVersion).c_str(), {}});
-        if (!res)
-        {
-            std::stringstream ss;
-            ss << "Error setting schema version from " << currentSchemaVersion
-               << " to " << freshVersion << ": " << res.msg();
-            Throw<std::runtime_error>(ss.str());
-        }
-
-        // Install the full latest schema.
-        applySchema(
-            pool,
-            full_schemata[freshVersion],
-            currentSchemaVersion,
-            freshVersion);
-        currentSchemaVersion = freshVersion;
-    }
-
-    // Incrementally upgrade one version at a time until latest.
-    for (; currentSchemaVersion < LATEST_SCHEMA_VERSION; ++currentSchemaVersion)
-    {
-        applySchema(
-            pool,
-            upgrade_schemata[currentSchemaVersion],
-            currentSchemaVersion,
-            currentSchemaVersion + 1);
-    }
-}
-
-} // namespace ripple
-#endif
diff --git a/src/xrpld/core/Pg.h b/src/xrpld/core/Pg.h
deleted file mode 100644
index c1333ed66fb..00000000000
--- a/src/xrpld/core/Pg.h
+++ /dev/null
@@ -1,520 +0,0 @@
-//------------------------------------------------------------------------------
-/*
-    This file is part of rippled: https://github.com/ripple/rippled
-    Copyright (c) 2020 Ripple Labs Inc.
-
-    Permission to use, copy, modify, and/or distribute this software for any
-    purpose with or without fee is hereby granted, provided that the above
-    copyright notice and this permission notice appear in all copies.
-
-    THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
-    WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
-    MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
-    ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
-    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
-    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
-    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-*/
-//==============================================================================
-
-#ifdef RIPPLED_REPORTING
-#ifndef RIPPLE_CORE_PG_H_INCLUDED
-#define RIPPLE_CORE_PG_H_INCLUDED
-
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-
-namespace ripple {
-
-// These postgres structs must be freed only by the postgres API.
-using pg_result_type = std::unique_ptr<PGresult, void (*)(PGresult*)>;
-using pg_connection_type = std::unique_ptr<PGconn, void (*)(PGconn*)>;
-
-/** first: command
- * second: parameter values
- *
- * The 2nd member takes an optional string to
- * distinguish between NULL parameters and empty strings. An empty
- * item corresponds to a NULL parameter.
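- *
- * For example (illustrative only, not from the original header):
- *     pg_params params{"SELECT $1, $2", {{"hello"}, std::nullopt}};
- * binds 'hello' to $1 and NULL to $2.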
- *
- * Postgres reads each parameter as a c-string, regardless of actual type.
- * Binary types (bytea) need to be converted to hex and prepended with
- * \x ("\\x").
- */
-using pg_params =
-    std::pair<char const*, std::vector<std::optional<std::string>>>;
-
-/** Parameter values for pg API. */
-using pg_formatted_params = std::vector<char const*>;
-
-/** Parameters for managing postgres connections. */
-struct PgConfig
-{
-    /** Maximum connections allowed to db. */
-    std::size_t max_connections{std::numeric_limits<std::size_t>::max()};
-    /** Close idle connections past this duration. */
-    std::chrono::seconds timeout{600};
-
-    /** Index of DB connection parameter names. */
-    std::vector<char const*> keywordsIdx;
-    /** DB connection parameter names. */
-    std::vector<std::string> keywords;
-    /** Index of DB connection parameter values. */
-    std::vector<char const*> valuesIdx;
-    /** DB connection parameter values. */
-    std::vector<std::string> values;
-};
-
-//-----------------------------------------------------------------------------
-
-/** Class that operates on postgres query results.
- *
- * The functions that return results do not check first whether the
- * expected results are actually there. Therefore, the caller first needs
- * to check whether or not a valid response was returned using the operator
- * bool() overload. If the number of tuples or fields is unknown, then check
- * those. Each result field should be checked for null before attempting
- * to return results. Finally, the caller must know the type of the field
- * before calling the corresponding function to return a field. Postgres
- * internally stores each result field as null-terminated strings.
- */
-class PgResult
-{
-    // The result object must be freed using the libpq API PQclear() call.
-    pg_result_type result_{nullptr, [](PGresult* result) { PQclear(result); }};
-    std::optional<std::pair<ExecStatusType, std::string>> error_;
-
-public:
-    /** Constructor for when the process is stopping.
-     *
-     */
-    PgResult()
-    {
-    }
-
-    /** Constructor for successful query results.
-     *
-     * @param result Query result.
-     */
-    explicit PgResult(pg_result_type&& result) : result_(std::move(result))
-    {
-    }
-
-    /** Constructor for failed query results.
-     *
-     * @param result Query result that contains error information.
-     * @param conn Postgres connection that contains error information.
-     */
-    PgResult(PGresult* result, PGconn* conn)
-        : error_({PQresultStatus(result), PQerrorMessage(conn)})
-    {
-    }
-
-    /** Return field as a null-terminated string pointer.
-     *
-     * Note that this function does not guarantee that the result struct
-     * exists, or that the row and fields exist, or that the field is
-     * not null.
-     *
-     * @param ntuple Row number.
-     * @param nfield Field number.
-     * @return Field contents.
-     */
-    char const*
-    c_str(int ntuple = 0, int nfield = 0) const
-    {
-        return PQgetvalue(result_.get(), ntuple, nfield);
-    }
-
-    /** Return field as equivalent to Postgres' INT type (32 bit signed).
-     *
-     * Note that this function does not guarantee that the result struct
-     * exists, or that the row and fields exist, or that the field is
-     * not null, or that the type is that requested.
-
-     * @param ntuple Row number.
-     * @param nfield Field number.
-     * @return Field contents.
-     */
-    std::int32_t
-    asInt(int ntuple = 0, int nfield = 0) const
-    {
-        return boost::lexical_cast<std::int32_t>(
-            PQgetvalue(result_.get(), ntuple, nfield));
-    }
-
-    /** Return field as equivalent to Postgres' BIGINT type (64 bit signed).
-     *
-     * Note that this function does not guarantee that the result struct
-     * exists, or that the row and fields exist, or that the field is
-     * not null, or that the type is that requested.
- - * @param ntuple Row number. - * @param nfield Field number. - * @return Field contents. - */ - std::int64_t - asBigInt(int ntuple = 0, int nfield = 0) const - { - return boost::lexical_cast( - PQgetvalue(result_.get(), ntuple, nfield)); - } - - /** Returns whether the field is NULL or not. - * - * Note that this function does not guarantee that the result struct - * exists, or that the row and fields exist. - * - * @param ntuple Row number. - * @param nfield Field number. - * @return Whether field is NULL. - */ - bool - isNull(int ntuple = 0, int nfield = 0) const - { - return PQgetisnull(result_.get(), ntuple, nfield); - } - - /** Check whether a valid response occurred. - * - * @return Whether or not the query returned a valid response. - */ - operator bool() const - { - return result_ != nullptr; - } - - /** Message describing the query results suitable for diagnostics. - * - * If error, then the postgres error type and message are returned. - * Otherwise, "ok" - * - * @return Query result message. - */ - std::string - msg() const; - - /** Get number of rows in result. - * - * Note that this function does not guarantee that the result struct - * exists. - * - * @return Number of result rows. - */ - int - ntuples() const - { - return PQntuples(result_.get()); - } - - /** Get number of fields in result. - * - * Note that this function does not guarantee that the result struct - * exists. - * - * @return Number of result fields. - */ - int - nfields() const - { - return PQnfields(result_.get()); - } - - /** Return result status of the command. - * - * Note that this function does not guarantee that the result struct - * exists. - * - * @return - */ - ExecStatusType - status() const - { - return PQresultStatus(result_.get()); - } -}; - -/* Class that contains and operates upon a postgres connection. */ -class Pg -{ - friend class PgPool; - friend class PgQuery; - - PgConfig const& config_; - beast::Journal const j_; - bool& stop_; - std::mutex& mutex_; - - // The connection object must be freed using the libpq API PQfinish() call. - pg_connection_type conn_{nullptr, [](PGconn* conn) { PQfinish(conn); }}; - - /** Clear results from the connection. - * - * Results from previous commands must be cleared before new commands - * can be processed. This function should be called on connections - * that weren't processed completely before being reused, such as - * when being checked-in. - * - * @return whether or not connection still exists. - */ - bool - clear(); - - /** Connect to postgres. - * - * Idempotently connects to postgres by first checking whether an - * existing connection is already present. If connection is not present - * or in an errored state, reconnects to the database. - */ - void - connect(); - - /** Disconnect from postgres. */ - void - disconnect() - { - conn_.reset(); - } - - /** Execute postgres query. - * - * If parameters are included, then the command should contain only a - * single SQL statement. If no parameters, then multiple SQL statements - * delimited by semi-colons can be processed. The response is from - * the last command executed. - * - * @param command postgres API command string. - * @param nParams postgres API number of parameters. - * @param values postgres API array of parameter. - * @return Query result object. - */ - PgResult - query(char const* command, std::size_t nParams, char const* const* values); - - /** Execute postgres query with no parameters. - * - * @param command Query string. 
- * @return Query result object; - */ - PgResult - query(char const* command) - { - return query(command, 0, nullptr); - } - - /** Execute postgres query with parameters. - * - * @param dbParams Database command and parameter values. - * @return Query result object. - */ - PgResult - query(pg_params const& dbParams); - - /** Insert multiple records into a table using Postgres' bulk COPY. - * - * Throws upon error. - * - * @param table Name of table for import. - * @param records Records in the COPY IN format. - */ - void - bulkInsert(char const* table, std::string const& records); - -public: - /** Constructor for Pg class. - * - * @param config Config parameters. - * @param j Logger object. - * @param stop Reference to connection pool's stop flag. - * @param mutex Reference to connection pool's mutex. - */ - Pg(PgConfig const& config, - beast::Journal const j, - bool& stop, - std::mutex& mutex) - : config_(config), j_(j), stop_(stop), mutex_(mutex) - { - } -}; - -//----------------------------------------------------------------------------- - -/** Database connection pool. - * - * Allow re-use of postgres connections. Postgres connections are created - * as needed until configurable limit is reached. After use, each connection - * is placed in a container ordered by time of use. Each request for - * a connection grabs the most recently used connection from the container. - * If none are available, a new connection is used (up to configured limit). - * Idle connections are destroyed periodically after configurable - * timeout duration. - * - * This should be stored as a shared pointer so PgQuery objects can safely - * outlive it. - */ -class PgPool -{ - friend class PgQuery; - - using clock_type = std::chrono::steady_clock; - - PgConfig config_; - beast::Journal const j_; - std::mutex mutex_; - std::condition_variable cond_; - std::size_t connections_{}; - bool stop_{false}; - - /** Idle database connections ordered by timestamp to allow timing out. */ - std::multimap, std::unique_ptr> - idle_; - - /** Get a postgres connection object. - * - * Return the most recent idle connection in the pool, if available. - * Otherwise, return a new connection unless we're at the threshold. - * If so, then wait until a connection becomes available. - * - * @return Postgres object. - */ - std::unique_ptr - checkout(); - - /** Return a postgres object to the pool for reuse. - * - * If connection is healthy, place in pool for reuse. After calling this, - * the container no longer have a connection unless checkout() is called. - * - * @param pg Pg object. - */ - void - checkin(std::unique_ptr& pg); - -public: - /** Connection pool constructor. - * - * @param pgConfig Postgres config. - * @param j Logger object. - */ - PgPool(Section const& pgConfig, beast::Journal j); - - /** Initiate idle connection timer. - * - * The PgPool object needs to be fully constructed to support asynchronous - * operations. - */ - void - setup(); - - /** Prepare for process shutdown. */ - void - stop(); - - /** Disconnect idle postgres connections. */ - void - idleSweeper(); -}; - -//----------------------------------------------------------------------------- - -/** Class to query postgres. - * - * This class should be used by functions outside of this - * compilation unit for querying postgres. It automatically acquires and - * relinquishes a database connection to handle each query. 
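- *
- * Hypothetical usage sketch (identifiers illustrative, not from the
- * original sources):
- *
- *     auto pool = make_PgPool(pgConfig, journal);
- *     PgQuery query(pool);
- *     auto res = query("SELECT count(*) FROM ledgers");
- *     std::int64_t count = res ? res.asBigInt() : 0;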
- */ -class PgQuery -{ -private: - std::shared_ptr pool_; - std::unique_ptr pg_; - -public: - PgQuery() = delete; - - PgQuery(std::shared_ptr const& pool) - : pool_(pool), pg_(pool->checkout()) - { - } - - ~PgQuery() - { - pool_->checkin(pg_); - } - - /** Execute postgres query with parameters. - * - * @param dbParams Database command with parameters. - * @return Result of query, including errors. - */ - PgResult - operator()(pg_params const& dbParams) - { - if (!pg_) // It means we're stopping. Return empty result. - return PgResult(); - return pg_->query(dbParams); - } - - /** Execute postgres query with only command statement. - * - * @param command Command statement. - * @return Result of query, including errors. - */ - PgResult - operator()(char const* command) - { - return operator()(pg_params{command, {}}); - } - - /** Insert multiple records into a table using Postgres' bulk COPY. - * - * Throws upon error. - * - * @param table Name of table for import. - * @param records Records in the COPY IN format. - */ - void - bulkInsert(char const* table, std::string const& records) - { - pg_->bulkInsert(table, records); - } -}; - -//----------------------------------------------------------------------------- - -/** Create Postgres connection pool manager. - * - * @param pgConfig Configuration for Postgres. - * @param j Logger object. - * @return Postgres connection pool manager - */ -std::shared_ptr -make_PgPool(Section const& pgConfig, beast::Journal j); - -/** Initialize the Postgres schema. - * - * This function ensures that the database is running the latest version - * of the schema. - * - * @param pool Postgres connection pool manager. - */ -void -initSchema(std::shared_ptr const& pool); - -} // namespace ripple - -#endif // RIPPLE_CORE_PG_H_INCLUDED -#endif // RIPPLED_REPORTING diff --git a/src/xrpld/core/detail/Config.cpp b/src/xrpld/core/detail/Config.cpp index 07d269883e2..885951111c0 100644 --- a/src/xrpld/core/detail/Config.cpp +++ b/src/xrpld/core/detail/Config.cpp @@ -382,11 +382,6 @@ Config::setup( // Update default values load(); - if (exists("reporting")) - { - RUN_REPORTING = true; - RUN_STANDALONE = true; - } { // load() may have set a new value for the dataDir std::string const dbPath(legacy("database_path")); diff --git a/src/xrpld/core/detail/DatabaseCon.cpp b/src/xrpld/core/detail/DatabaseCon.cpp index c23c7491b04..10b34efd41e 100644 --- a/src/xrpld/core/detail/DatabaseCon.cpp +++ b/src/xrpld/core/detail/DatabaseCon.cpp @@ -109,7 +109,6 @@ setup_DatabaseCon(Config const& c, std::optional j) setup.startUp = c.START_UP; setup.standAlone = c.standalone(); - setup.reporting = c.reporting(); setup.dataDir = c.legacy("database_path"); if (!setup.standAlone && setup.dataDir.empty()) { diff --git a/src/xrpld/nodestore/Backend.h b/src/xrpld/nodestore/Backend.h index 85d38ec0a7b..29f37553327 100644 --- a/src/xrpld/nodestore/Backend.h +++ b/src/xrpld/nodestore/Backend.h @@ -39,29 +39,6 @@ namespace NodeStore { class Backend { public: - template - struct Counters - { - Counters() = default; - Counters(Counters const&) = default; - - template - Counters(Counters const& other) - : writeDurationUs(other.writeDurationUs) - , writeRetries(other.writeRetries) - , writesDelayed(other.writesDelayed) - , readRetries(other.readRetries) - , readErrors(other.readErrors) - { - } - - T writeDurationUs = {}; - T writeRetries = {}; - T writesDelayed = {}; - T readRetries = {}; - T readErrors = {}; - }; - /** Destroy the backend. All open files are closed and flushed. 
If there are batched writes @@ -174,17 +151,6 @@ class Backend /** Returns the number of file descriptors the backend expects to need. */ virtual int fdRequired() const = 0; - - /** Returns read and write stats. - - @note The Counters struct is specific to and only used - by CassandraBackend. - */ - virtual std::optional> - counters() const - { - return std::nullopt; - } }; } // namespace NodeStore diff --git a/src/xrpld/nodestore/Database.h b/src/xrpld/nodestore/Database.h index daf0483e890..bd25046fee2 100644 --- a/src/xrpld/nodestore/Database.h +++ b/src/xrpld/nodestore/Database.h @@ -302,17 +302,6 @@ class Database virtual void for_each(std::function)> f) = 0; - /** Retrieve backend read and write stats. - - @note The Counters struct is specific to and only used - by CassandraBackend. - */ - virtual std::optional> - getCounters() const - { - return std::nullopt; - } - void threadEntry(); }; diff --git a/src/xrpld/nodestore/backend/CassandraFactory.cpp b/src/xrpld/nodestore/backend/CassandraFactory.cpp deleted file mode 100644 index c53e7709587..00000000000 --- a/src/xrpld/nodestore/backend/CassandraFactory.cpp +++ /dev/null @@ -1,983 +0,0 @@ -//------------------------------------------------------------------------------ -/* - This file is part of rippled: https://github.com/ripple/rippled - Copyright (c) 2020 Ripple Labs Inc. - - Permission to use, copy, modify, and/or distribute this software for any - purpose with or without fee is hereby granted, provided that the above - copyright notice and this permission notice appear in all copies. - - THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. -*/ -//============================================================================== - -#ifdef RIPPLED_REPORTING - -#include -#include - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -namespace ripple { -namespace NodeStore { - -void -writeCallback(CassFuture* fut, void* cbData); -void -readCallback(CassFuture* fut, void* cbData); - -class CassandraBackend : public Backend -{ -private: - // convenience function for one-off queries. 
For normal reads and writes, - // use the prepared statements insert_ and select_ - CassStatement* - makeStatement(char const* query, std::size_t params) - { - CassStatement* ret = cass_statement_new(query, params); - CassError rc = - cass_statement_set_consistency(ret, CASS_CONSISTENCY_QUORUM); - if (rc != CASS_OK) - { - std::stringstream ss; - ss << "nodestore: Error setting query consistency: " << query - << ", result: " << rc << ", " << cass_error_desc(rc); - Throw(ss.str()); - } - return ret; - } - - beast::Journal const j_; - // size of a key - size_t const keyBytes_; - - Section const config_; - - std::atomic open_{false}; - - // mutex used for open() and close() - std::mutex mutex_; - - std::unique_ptr session_{ - nullptr, - [](CassSession* session) { - // Try to disconnect gracefully. - CassFuture* fut = cass_session_close(session); - cass_future_wait(fut); - cass_future_free(fut); - cass_session_free(session); - }}; - - // Database statements cached server side. Using these is more efficient - // than making a new statement - const CassPrepared* insert_ = nullptr; - const CassPrepared* select_ = nullptr; - - // io_context used for exponential backoff for write retries - boost::asio::io_context ioContext_; - std::optional work_; - std::thread ioThread_; - - // maximum number of concurrent in flight requests. New requests will wait - // for earlier requests to finish if this limit is exceeded - uint32_t maxRequestsOutstanding = 10000000; - std::atomic_uint32_t numRequestsOutstanding_ = 0; - - // mutex and condition_variable to limit the number of concurrent in flight - // requests - std::mutex throttleMutex_; - std::condition_variable throttleCv_; - - // writes are asynchronous. This mutex and condition_variable is used to - // wait for all writes to finish - std::mutex syncMutex_; - std::condition_variable syncCv_; - - Counters> counters_; - -public: - CassandraBackend( - size_t keyBytes, - Section const& keyValues, - beast::Journal journal) - : j_(journal), keyBytes_(keyBytes), config_(keyValues) - { - } - - ~CassandraBackend() override - { - close(); - } - - std::string - getName() override - { - return "cassandra"; - } - - bool - isOpen() override - { - return open_; - } - - // Setup all of the necessary components for talking to the database. 
- // Create the table if it doesn't exist already - // @param createIfMissing ignored - void - open(bool createIfMissing) override - { - if (open_) - { - assert(false); - JLOG(j_.error()) << "database is already open"; - return; - } - - std::lock_guard lock(mutex_); - CassCluster* cluster = cass_cluster_new(); - if (!cluster) - Throw( - "nodestore:: Failed to create CassCluster"); - - std::string secureConnectBundle = get(config_, "secure_connect_bundle"); - - if (!secureConnectBundle.empty()) - { - /* Setup driver to connect to the cloud using the secure connection - * bundle */ - if (cass_cluster_set_cloud_secure_connection_bundle( - cluster, secureConnectBundle.c_str()) != CASS_OK) - { - JLOG(j_.error()) << "Unable to configure cloud using the " - "secure connection bundle: " - << secureConnectBundle; - Throw( - "nodestore: Failed to connect using secure connection " - "bundle"); - return; - } - } - else - { - std::string contact_points = get(config_, "contact_points"); - if (contact_points.empty()) - { - Throw( - "nodestore: Missing contact_points in Cassandra config"); - } - CassError rc = cass_cluster_set_contact_points( - cluster, contact_points.c_str()); - if (rc != CASS_OK) - { - std::stringstream ss; - ss << "nodestore: Error setting Cassandra contact_points: " - << contact_points << ", result: " << rc << ", " - << cass_error_desc(rc); - - Throw(ss.str()); - } - - int port = get(config_, "port"); - if (port) - { - rc = cass_cluster_set_port(cluster, port); - if (rc != CASS_OK) - { - std::stringstream ss; - ss << "nodestore: Error setting Cassandra port: " << port - << ", result: " << rc << ", " << cass_error_desc(rc); - - Throw(ss.str()); - } - } - } - cass_cluster_set_token_aware_routing(cluster, cass_true); - CassError rc = cass_cluster_set_protocol_version( - cluster, CASS_PROTOCOL_VERSION_V4); - if (rc != CASS_OK) - { - std::stringstream ss; - ss << "nodestore: Error setting cassandra protocol version: " - << ", result: " << rc << ", " << cass_error_desc(rc); - - Throw(ss.str()); - } - - std::string username = get(config_, "username"); - if (username.size()) - { - std::cout << "user = " << username - << " password = " << get(config_, "password") - << std::endl; - cass_cluster_set_credentials( - cluster, username.c_str(), get(config_, "password").c_str()); - } - - unsigned int const ioThreads = get(config_, "io_threads", 4); - maxRequestsOutstanding = - get(config_, "max_requests_outstanding", 10000000); - JLOG(j_.info()) << "Configuring Cassandra driver to use " << ioThreads - << " IO threads. 
Capping maximum pending requests at " - << maxRequestsOutstanding; - rc = cass_cluster_set_num_threads_io(cluster, ioThreads); - if (rc != CASS_OK) - { - std::stringstream ss; - ss << "nodestore: Error setting Cassandra io threads to " - << ioThreads << ", result: " << rc << ", " - << cass_error_desc(rc); - Throw(ss.str()); - } - - rc = cass_cluster_set_queue_size_io( - cluster, - maxRequestsOutstanding); // This number needs to scale w/ the - // number of request per sec - if (rc != CASS_OK) - { - std::stringstream ss; - ss << "nodestore: Error setting Cassandra max core connections per " - "host" - << ", result: " << rc << ", " << cass_error_desc(rc); - std::cout << ss.str() << std::endl; - return; - ; - } - cass_cluster_set_request_timeout(cluster, 2000); - - std::string certfile = get(config_, "certfile"); - if (certfile.size()) - { - std::ifstream fileStream( - boost::filesystem::path(certfile).string(), std::ios::in); - if (!fileStream) - { - std::stringstream ss; - ss << "opening config file " << certfile; - Throw( - errno, std::generic_category(), ss.str()); - } - std::string cert( - std::istreambuf_iterator{fileStream}, - std::istreambuf_iterator{}); - if (fileStream.bad()) - { - std::stringstream ss; - ss << "reading config file " << certfile; - Throw( - errno, std::generic_category(), ss.str()); - } - - CassSsl* context = cass_ssl_new(); - cass_ssl_set_verify_flags(context, CASS_SSL_VERIFY_NONE); - rc = cass_ssl_add_trusted_cert(context, cert.c_str()); - if (rc != CASS_OK) - { - std::stringstream ss; - ss << "nodestore: Error setting Cassandra ssl context: " << rc - << ", " << cass_error_desc(rc); - Throw(ss.str()); - } - - cass_cluster_set_ssl(cluster, context); - cass_ssl_free(context); - } - - std::string keyspace = get(config_, "keyspace"); - if (keyspace.empty()) - { - Throw( - "nodestore: Missing keyspace in Cassandra config"); - } - - std::string tableName = get(config_, "table_name"); - if (tableName.empty()) - { - Throw( - "nodestore: Missing table name in Cassandra config"); - } - - cass_cluster_set_connect_timeout(cluster, 10000); - - CassStatement* statement; - CassFuture* fut; - bool setupSessionAndTable = false; - while (!setupSessionAndTable) - { - std::this_thread::sleep_for(std::chrono::seconds(1)); - session_.reset(cass_session_new()); - assert(session_); - - fut = cass_session_connect_keyspace( - session_.get(), cluster, keyspace.c_str()); - rc = cass_future_error_code(fut); - cass_future_free(fut); - if (rc != CASS_OK) - { - std::stringstream ss; - ss << "nodestore: Error connecting Cassandra session keyspace: " - << rc << ", " << cass_error_desc(rc); - JLOG(j_.error()) << ss.str(); - continue; - } - - std::stringstream query; - query << "CREATE TABLE IF NOT EXISTS " << tableName - << " ( hash blob PRIMARY KEY, object blob)"; - - statement = makeStatement(query.str().c_str(), 0); - fut = cass_session_execute(session_.get(), statement); - rc = cass_future_error_code(fut); - cass_future_free(fut); - cass_statement_free(statement); - if (rc != CASS_OK && rc != CASS_ERROR_SERVER_INVALID_QUERY) - { - std::stringstream ss; - ss << "nodestore: Error creating Cassandra table: " << rc - << ", " << cass_error_desc(rc); - JLOG(j_.error()) << ss.str(); - continue; - } - - query.str(""); - query << "SELECT * FROM " << tableName << " LIMIT 1"; - statement = makeStatement(query.str().c_str(), 0); - fut = cass_session_execute(session_.get(), statement); - rc = cass_future_error_code(fut); - cass_future_free(fut); - cass_statement_free(statement); - if (rc != CASS_OK) - { - 
if (rc == CASS_ERROR_SERVER_INVALID_QUERY) - { - JLOG(j_.warn()) << "table not here yet, sleeping 1s to " - "see if table creation propagates"; - continue; - } - else - { - std::stringstream ss; - ss << "nodestore: Error checking for table: " << rc << ", " - << cass_error_desc(rc); - JLOG(j_.error()) << ss.str(); - continue; - } - } - - setupSessionAndTable = true; - } - - cass_cluster_free(cluster); - - bool setupPreparedStatements = false; - while (!setupPreparedStatements) - { - std::this_thread::sleep_for(std::chrono::seconds(1)); - std::stringstream query; - query << "INSERT INTO " << tableName - << " (hash, object) VALUES (?, ?)"; - CassFuture* prepare_future = - cass_session_prepare(session_.get(), query.str().c_str()); - - /* Wait for the statement to prepare and get the result */ - rc = cass_future_error_code(prepare_future); - - if (rc != CASS_OK) - { - /* Handle error */ - cass_future_free(prepare_future); - - std::stringstream ss; - ss << "nodestore: Error preparing insert : " << rc << ", " - << cass_error_desc(rc); - JLOG(j_.error()) << ss.str(); - continue; - } - - /* Get the prepared object from the future */ - insert_ = cass_future_get_prepared(prepare_future); - - /* The future can be freed immediately after getting the prepared - * object - */ - cass_future_free(prepare_future); - - query.str(""); - query << "SELECT object FROM " << tableName << " WHERE hash = ?"; - prepare_future = - cass_session_prepare(session_.get(), query.str().c_str()); - - /* Wait for the statement to prepare and get the result */ - rc = cass_future_error_code(prepare_future); - - if (rc != CASS_OK) - { - /* Handle error */ - cass_future_free(prepare_future); - - std::stringstream ss; - ss << "nodestore: Error preparing select : " << rc << ", " - << cass_error_desc(rc); - JLOG(j_.error()) << ss.str(); - continue; - } - - /* Get the prepared object from the future */ - select_ = cass_future_get_prepared(prepare_future); - - /* The future can be freed immediately after getting the prepared - * object - */ - cass_future_free(prepare_future); - setupPreparedStatements = true; - } - - work_.emplace(ioContext_); - ioThread_ = std::thread{[this]() { ioContext_.run(); }}; - open_ = true; - } - - // Close the connection to the database - void - close() override - { - { - std::lock_guard lock(mutex_); - if (insert_) - { - cass_prepared_free(insert_); - insert_ = nullptr; - } - if (select_) - { - cass_prepared_free(select_); - select_ = nullptr; - } - work_.reset(); - ioThread_.join(); - } - open_ = false; - } - - // Synchronously fetch the object with key key and store the result in pno - // @param key the key of the object - // @param pno object in which to store the result - // @return result status of query - Status - fetch(void const* key, std::shared_ptr* pno) override - { - JLOG(j_.trace()) << "Fetching from cassandra"; - CassStatement* statement = cass_prepared_bind(select_); - cass_statement_set_consistency(statement, CASS_CONSISTENCY_QUORUM); - CassError rc = cass_statement_bind_bytes( - statement, 0, static_cast(key), keyBytes_); - if (rc != CASS_OK) - { - cass_statement_free(statement); - JLOG(j_.error()) << "Binding Cassandra fetch query: " << rc << ", " - << cass_error_desc(rc); - pno->reset(); - return backendError; - } - CassFuture* fut; - do - { - fut = cass_session_execute(session_.get(), statement); - rc = cass_future_error_code(fut); - if (rc != CASS_OK) - { - std::stringstream ss; - ss << "Cassandra fetch error"; - ss << ", retrying"; - ++counters_.readRetries; - ss << ": " << 
cass_error_desc(rc); - JLOG(j_.warn()) << ss.str(); - } - } while (rc != CASS_OK); - - CassResult const* res = cass_future_get_result(fut); - cass_statement_free(statement); - cass_future_free(fut); - - CassRow const* row = cass_result_first_row(res); - if (!row) - { - cass_result_free(res); - pno->reset(); - return notFound; - } - cass_byte_t const* buf; - std::size_t bufSize; - rc = cass_value_get_bytes(cass_row_get_column(row, 0), &buf, &bufSize); - if (rc != CASS_OK) - { - cass_result_free(res); - pno->reset(); - JLOG(j_.error()) << "Cassandra fetch result error: " << rc << ", " - << cass_error_desc(rc); - ++counters_.readErrors; - return backendError; - } - - nudb::detail::buffer bf; - std::pair uncompressed = - nodeobject_decompress(buf, bufSize, bf); - DecodedBlob decoded(key, uncompressed.first, uncompressed.second); - cass_result_free(res); - - if (!decoded.wasOk()) - { - pno->reset(); - JLOG(j_.error()) << "Cassandra error decoding result: " << rc - << ", " << cass_error_desc(rc); - ++counters_.readErrors; - return dataCorrupt; - } - *pno = decoded.createObject(); - return ok; - } - - struct ReadCallbackData - { - CassandraBackend& backend; - const void* const key; - std::shared_ptr& result; - std::condition_variable& cv; - - std::atomic_uint32_t& numFinished; - size_t batchSize; - - ReadCallbackData( - CassandraBackend& backend, - const void* const key, - std::shared_ptr& result, - std::condition_variable& cv, - std::atomic_uint32_t& numFinished, - size_t batchSize) - : backend(backend) - , key(key) - , result(result) - , cv(cv) - , numFinished(numFinished) - , batchSize(batchSize) - { - } - - ReadCallbackData(ReadCallbackData const& other) = default; - }; - - std::pair>, Status> - fetchBatch(std::vector const& hashes) override - { - std::size_t const numHashes = hashes.size(); - JLOG(j_.trace()) << "Fetching " << numHashes - << " records from Cassandra"; - std::atomic_uint32_t numFinished = 0; - std::condition_variable cv; - std::mutex mtx; - std::vector> results{numHashes}; - std::vector> cbs; - cbs.reserve(numHashes); - for (std::size_t i = 0; i < hashes.size(); ++i) - { - cbs.push_back(std::make_shared( - *this, - static_cast(hashes[i]), - results[i], - cv, - numFinished, - numHashes)); - read(*cbs[i]); - } - assert(results.size() == cbs.size()); - - std::unique_lock lck(mtx); - cv.wait(lck, [&numFinished, &numHashes]() { - return numFinished == numHashes; - }); - - JLOG(j_.trace()) << "Fetched " << numHashes - << " records from Cassandra"; - return {results, ok}; - } - - void - read(ReadCallbackData& data) - { - CassStatement* statement = cass_prepared_bind(select_); - cass_statement_set_consistency(statement, CASS_CONSISTENCY_QUORUM); - CassError rc = cass_statement_bind_bytes( - statement, 0, static_cast(data.key), keyBytes_); - if (rc != CASS_OK) - { - size_t batchSize = data.batchSize; - if (++(data.numFinished) == batchSize) - data.cv.notify_all(); - cass_statement_free(statement); - JLOG(j_.error()) << "Binding Cassandra fetch query: " << rc << ", " - << cass_error_desc(rc); - return; - } - - CassFuture* fut = cass_session_execute(session_.get(), statement); - - cass_statement_free(statement); - - cass_future_set_callback(fut, readCallback, static_cast(&data)); - cass_future_free(fut); - } - - struct WriteCallbackData - { - CassandraBackend* backend; - // The shared pointer to the node object must exist until it's - // confirmed persisted. Otherwise, it can become deleted - // prematurely if other copies are removed from caches. 
- std::shared_ptr no; - std::optional e; - std::pair compressed; - std::chrono::steady_clock::time_point begin; - // The data is stored in this buffer. The void* in the above member - // is a pointer into the below buffer - nudb::detail::buffer bf; - std::atomic& totalWriteRetries; - - uint32_t currentRetries = 0; - - WriteCallbackData( - CassandraBackend* f, - std::shared_ptr const& nobj, - std::atomic& retries) - : backend(f), no(nobj), totalWriteRetries(retries) - { - e.emplace(no); - - compressed = - NodeStore::nodeobject_compress(e->getData(), e->getSize(), bf); - } - }; - - void - write(WriteCallbackData& data, bool isRetry) - { - { - // We limit the total number of concurrent inflight writes. This is - // a client side throttling to prevent overloading the database. - // This is mostly useful when the very first ledger is being written - // in full, which is several millions records. On sufficiently large - // Cassandra clusters, this throttling is not needed; the default - // value of maxRequestsOutstanding is 10 million, which is more - // records than are present in any single ledger - std::unique_lock lck(throttleMutex_); - if (!isRetry && numRequestsOutstanding_ > maxRequestsOutstanding) - { - JLOG(j_.trace()) << __func__ << " : " - << "Max outstanding requests reached. " - << "Waiting for other requests to finish"; - ++counters_.writesDelayed; - throttleCv_.wait(lck, [this]() { - return numRequestsOutstanding_ < maxRequestsOutstanding; - }); - } - } - - CassStatement* statement = cass_prepared_bind(insert_); - cass_statement_set_consistency(statement, CASS_CONSISTENCY_QUORUM); - CassError rc = cass_statement_bind_bytes( - statement, - 0, - static_cast(data.e->getKey()), - keyBytes_); - if (rc != CASS_OK) - { - cass_statement_free(statement); - std::stringstream ss; - ss << "Binding cassandra insert hash: " << rc << ", " - << cass_error_desc(rc); - JLOG(j_.error()) << __func__ << " : " << ss.str(); - Throw(ss.str()); - } - rc = cass_statement_bind_bytes( - statement, - 1, - static_cast(data.compressed.first), - data.compressed.second); - if (rc != CASS_OK) - { - cass_statement_free(statement); - std::stringstream ss; - ss << "Binding cassandra insert object: " << rc << ", " - << cass_error_desc(rc); - JLOG(j_.error()) << __func__ << " : " << ss.str(); - Throw(ss.str()); - } - data.begin = std::chrono::steady_clock::now(); - CassFuture* fut = cass_session_execute(session_.get(), statement); - cass_statement_free(statement); - - cass_future_set_callback(fut, writeCallback, static_cast(&data)); - cass_future_free(fut); - } - - void - store(std::shared_ptr const& no) override - { - JLOG(j_.trace()) << "Writing to cassandra"; - WriteCallbackData* data = - new WriteCallbackData(this, no, counters_.writeRetries); - - ++numRequestsOutstanding_; - write(*data, false); - } - - void - storeBatch(Batch const& batch) override - { - for (auto const& no : batch) - { - store(no); - } - } - - void - sync() override - { - std::unique_lock lck(syncMutex_); - - syncCv_.wait(lck, [this]() { return numRequestsOutstanding_ == 0; }); - } - - // Iterate through entire table and execute f(). Used for import only, - // with database not being written to, so safe to paginate through - // objects table with LIMIT x OFFSET y. 
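-    // Illustrative sketch only (this method was never implemented): CQL has
-    // no OFFSET, so a real implementation would page by partition-key token,
-    // e.g. SELECT hash, object FROM <table> WHERE token(hash) > token(?)
-    // LIMIT 1000, feeding each row to f().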
- void - for_each(std::function<void(std::shared_ptr<NodeObject>)> f) override - { - assert(false); - Throw<std::runtime_error>("not implemented"); - } - - int - getWriteLoad() override - { - return 0; - } - - void - setDeletePath() override - { - } - - int - fdRequired() const override - { - return 0; - } - - std::optional<Counters<std::uint64_t>> - counters() const override - { - return counters_; - } - - friend void - writeCallback(CassFuture* fut, void* cbData); - - friend void - readCallback(CassFuture* fut, void* cbData); -}; - -// Process the result of an asynchronous read. Retry on error -// @param fut cassandra future associated with the read -// @param cbData struct that holds the request parameters -void -readCallback(CassFuture* fut, void* cbData) -{ - CassandraBackend::ReadCallbackData& requestParams = - *static_cast<CassandraBackend::ReadCallbackData*>(cbData); - - CassError rc = cass_future_error_code(fut); - - if (rc != CASS_OK) - { - ++(requestParams.backend.counters_.readRetries); - JLOG(requestParams.backend.j_.warn()) - << "Cassandra fetch error : " << rc << " : " << cass_error_desc(rc) - << " - retrying"; - // Retry right away. The only time the cluster should ever be overloaded - // is when the very first ledger is being written in full (millions of - // writes at once), during which no reads should be occurring. If reads - // are timing out, the code/architecture should be modified to handle - // greater read load, as opposed to just exponential backoff - requestParams.backend.read(requestParams); - } - else - { - auto finish = [&requestParams]() { - size_t batchSize = requestParams.batchSize; - if (++(requestParams.numFinished) == batchSize) - requestParams.cv.notify_all(); - }; - CassResult const* res = cass_future_get_result(fut); - - CassRow const* row = cass_result_first_row(res); - if (!row) - { - cass_result_free(res); - JLOG(requestParams.backend.j_.error()) - << "Cassandra fetch get row error : " << rc << ", " - << cass_error_desc(rc); - finish(); - return; - } - cass_byte_t const* buf; - std::size_t bufSize; - rc = cass_value_get_bytes(cass_row_get_column(row, 0), &buf, &bufSize); - if (rc != CASS_OK) - { - cass_result_free(res); - JLOG(requestParams.backend.j_.error()) - << "Cassandra fetch get bytes error : " << rc << ", " - << cass_error_desc(rc); - ++requestParams.backend.counters_.readErrors; - finish(); - return; - } - nudb::detail::buffer bf; - std::pair<void const*, std::size_t> uncompressed = - nodeobject_decompress(buf, bufSize, bf); - DecodedBlob decoded( - requestParams.key, uncompressed.first, uncompressed.second); - cass_result_free(res); - - if (!decoded.wasOk()) - { - JLOG(requestParams.backend.j_.fatal()) - << "Cassandra fetch error - data corruption : " << rc << ", " - << cass_error_desc(rc); - ++requestParams.backend.counters_.readErrors; - finish(); - return; - } - requestParams.result = decoded.createObject(); - finish(); - } -} - -// Process the result of an asynchronous write. Retry on error -// @param fut cassandra future associated with the write -// @param cbData struct that holds the request parameters -void -writeCallback(CassFuture* fut, void* cbData) -{ - CassandraBackend::WriteCallbackData& requestParams = - *static_cast<CassandraBackend::WriteCallbackData*>(cbData); - CassandraBackend& backend = *requestParams.backend; - auto rc = cass_future_error_code(fut); - if (rc != CASS_OK) - { - JLOG(backend.j_.error()) - << "ERROR!!!
Cassandra insert error: " << rc << ", " - << cass_error_desc(rc) << ", retrying "; - ++requestParams.totalWriteRetries; - // exponential backoff with a max wait of 2^10 ms (about 1 second) - auto wait = std::chrono::milliseconds( - lround(std::pow(2, std::min(10u, requestParams.currentRetries)))); - ++requestParams.currentRetries; - std::shared_ptr<boost::asio::steady_timer> timer = - std::make_shared<boost::asio::steady_timer>( - backend.ioContext_, std::chrono::steady_clock::now() + wait); - timer->async_wait([timer, &requestParams, &backend]( - const boost::system::error_code& error) { - backend.write(requestParams, true); - }); - } - else - { - backend.counters_.writeDurationUs += - std::chrono::duration_cast<std::chrono::microseconds>( - std::chrono::steady_clock::now() - requestParams.begin) - .count(); - --(backend.numRequestsOutstanding_); - - backend.throttleCv_.notify_all(); - if (backend.numRequestsOutstanding_ == 0) - backend.syncCv_.notify_all(); - delete &requestParams; - } -} - -//------------------------------------------------------------------------------ - -class CassandraFactory : public Factory -{ -public: - CassandraFactory() - { - Manager::instance().insert(*this); - } - - ~CassandraFactory() override - { - Manager::instance().erase(*this); - } - - std::string - getName() const override - { - return "cassandra"; - } - - std::unique_ptr<Backend> - createInstance( - size_t keyBytes, - Section const& keyValues, - std::size_t burstSize, - Scheduler& scheduler, - beast::Journal journal) override - { - return std::make_unique<CassandraBackend>(keyBytes, keyValues, journal); - } -}; - -static CassandraFactory cassandraFactory; - -} // namespace NodeStore -} // namespace ripple -#endif diff --git a/src/xrpld/nodestore/detail/Database.cpp b/src/xrpld/nodestore/detail/Database.cpp index 60cfb35051c..da15088e895 100644 --- a/src/xrpld/nodestore/detail/Database.cpp +++ b/src/xrpld/nodestore/detail/Database.cpp @@ -273,15 +273,6 @@ Database::getCountsJson(Json::Value& obj) obj[jss::node_written_bytes] = std::to_string(storeSz_); obj[jss::node_read_bytes] = std::to_string(fetchSz_); obj[jss::node_reads_duration_us] = std::to_string(fetchDurationUs_); - - if (auto c = getCounters()) - { - obj[jss::node_read_errors] = std::to_string(c->readErrors); - obj[jss::node_read_retries] = std::to_string(c->readRetries); - obj[jss::node_write_retries] = std::to_string(c->writeRetries); - obj[jss::node_writes_delayed] = std::to_string(c->writesDelayed); - obj[jss::node_writes_duration_us] = std::to_string(c->writeDurationUs); - } } } // namespace NodeStore diff --git a/src/xrpld/nodestore/detail/DatabaseNodeImp.h b/src/xrpld/nodestore/detail/DatabaseNodeImp.h index c2bf237b943..b8a9a3fa2b4 100644 --- a/src/xrpld/nodestore/detail/DatabaseNodeImp.h +++ b/src/xrpld/nodestore/detail/DatabaseNodeImp.h @@ -150,12 +150,6 @@ class DatabaseNodeImp : public Database { backend_->for_each(f); } - - std::optional<Counters<std::uint64_t>> - getCounters() const override - { - return backend_->counters(); - } }; } // namespace NodeStore diff --git a/src/xrpld/nodestore/detail/ManagerImp.cpp b/src/xrpld/nodestore/detail/ManagerImp.cpp index 019dd1f8122..56dc66ee644 100644 --- a/src/xrpld/nodestore/detail/ManagerImp.cpp +++ b/src/xrpld/nodestore/detail/ManagerImp.cpp @@ -55,12 +55,6 @@ ManagerImp::make_Backend( auto factory{find(type)}; if (!factory) { -#ifndef RIPPLED_REPORTING - if (boost::iequals(type, "cassandra")) - Throw<std::runtime_error>( - "To use Cassandra as a nodestore, build rippled with " - "-Dreporting=ON"); -#endif missing_backend(); } diff --git a/src/xrpld/peerfinder/detail/Logic.h b/src/xrpld/peerfinder/detail/Logic.h index
49b71a6a545..0403530ecf2 100644 --- a/src/xrpld/peerfinder/detail/Logic.h +++ b/src/xrpld/peerfinder/detail/Logic.h @@ -26,7 +26,6 @@ #include #include #include -#include #include #include #include diff --git a/src/xrpld/peerfinder/detail/Reporting.h b/src/xrpld/peerfinder/detail/Reporting.h deleted file mode 100644 index 25c36ea3f27..00000000000 --- a/src/xrpld/peerfinder/detail/Reporting.h +++ /dev/null @@ -1,49 +0,0 @@ -//------------------------------------------------------------------------------ -/* - This file is part of rippled: https://github.com/ripple/rippled - Copyright (c) 2012, 2013 Ripple Labs Inc. - - Permission to use, copy, modify, and/or distribute this software for any - purpose with or without fee is hereby granted, provided that the above - copyright notice and this permission notice appear in all copies. - - THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. -*/ -//============================================================================== - -#ifndef RIPPLE_PEERFINDER_REPORTING_H_INCLUDED -#define RIPPLE_PEERFINDER_REPORTING_H_INCLUDED - -namespace ripple { -namespace PeerFinder { - -/** Severity levels for test reporting. - This allows more fine grained control over reporting for diagnostics. -*/ -struct Reporting -{ - explicit Reporting() = default; - - // Report simulation parameters - static bool const params = true; - - // Report simulation crawl time-evolution - static bool const crawl = true; - - // Report nodes aggregate statistics - static bool const nodes = true; - - // Report nodes detailed information - static bool const dump_nodes = false; -}; - -} // namespace PeerFinder -} // namespace ripple - -#endif diff --git a/src/xrpld/peerfinder/sim/FunctionQueue.h b/src/xrpld/peerfinder/sim/FunctionQueue.h deleted file mode 100644 index d2fbb6dc26b..00000000000 --- a/src/xrpld/peerfinder/sim/FunctionQueue.h +++ /dev/null @@ -1,100 +0,0 @@ -//------------------------------------------------------------------------------ -/* - This file is part of rippled: https://github.com/ripple/rippled - Copyright (c) 2012, 2013 Ripple Labs Inc. - - Permission to use, copy, modify, and/or distribute this software for any - purpose with or without fee is hereby granted, provided that the above - copyright notice and this permission notice appear in all copies. - - THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
-*/ -//============================================================================== - -#ifndef RIPPLE_PEERFINDER_SIM_FUNCTIONQUEUE_H_INCLUDED -#define RIPPLE_PEERFINDER_SIM_FUNCTIONQUEUE_H_INCLUDED - -namespace ripple { -namespace PeerFinder { -namespace Sim { - -/** Maintains a queue of functors that can be called later. */ -class FunctionQueue -{ -public: - explicit FunctionQueue() = default; - -private: - class BasicWork - { - public: - virtual ~BasicWork() - { - } - virtual void - operator()() = 0; - }; - - template <class Function> - class Work : public BasicWork - { - public: - explicit Work(Function f) : m_f(f) - { - } - void - operator()() - { - (m_f)(); - } - - private: - Function m_f; - }; - - std::list<std::unique_ptr<BasicWork>> m_work; - -public: - /** Returns `true` if there is no remaining work */ - bool - empty() - { - return m_work.empty(); - } - - /** Queue a function. - Function must be callable with this signature: - void (void) - */ - template <class Function> - void - post(Function f) - { - m_work.emplace_back(std::make_unique<Work<Function>>(f)); - } - - /** Run all pending functions. - The functions will be invoked in the order they were queued. - */ - void - run() - { - while (!m_work.empty()) - { - (*m_work.front())(); - m_work.pop_front(); - } - } -}; - -} // namespace Sim -} // namespace PeerFinder -} // namespace ripple - -#endif diff --git a/src/xrpld/peerfinder/sim/GraphAlgorithms.h b/src/xrpld/peerfinder/sim/GraphAlgorithms.h deleted file mode 100644 index b11ba42a7a7..00000000000 --- a/src/xrpld/peerfinder/sim/GraphAlgorithms.h +++ /dev/null @@ -1,76 +0,0 @@ -//------------------------------------------------------------------------------ -/* - This file is part of rippled: https://github.com/ripple/rippled - Copyright (c) 2012, 2013 Ripple Labs Inc. - - Permission to use, copy, modify, and/or distribute this software for any - purpose with or without fee is hereby granted, provided that the above - copyright notice and this permission notice appear in all copies. - - THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. -*/ -//============================================================================== - -#ifndef RIPPLE_PEERFINDER_SIM_GRAPHALGORITHMS_H_INCLUDED -#define RIPPLE_PEERFINDER_SIM_GRAPHALGORITHMS_H_INCLUDED - -namespace ripple { -namespace PeerFinder { -namespace Sim { - -template <class Vertex> -struct VertexTraits; - -/** Call a function for each vertex in a connected graph.
- Function will be called with this signature: - void (Vertex&, std::size_t diameter); -*/ - -template <class Vertex, class Function> -void -breadth_first_traverse(Vertex& start, Function f) -{ - using Traits = VertexTraits<Vertex>; - using Edges = typename Traits::Edges; - using Edge = typename Traits::Edge; - - using Probe = std::pair<Vertex*, int>; - using Work = std::deque<Probe>; - using Visited = std::set<Vertex*>; - Work work; - Visited visited; - work.emplace_back(&start, 0); - int diameter(0); - while (!work.empty()) - { - Probe const p(work.front()); - work.pop_front(); - if (visited.find(p.first) != visited.end()) - continue; - diameter = std::max(p.second, diameter); - visited.insert(p.first); - for (typename Edges::iterator iter(Traits::edges(*p.first).begin()); - iter != Traits::edges(*p.first).end(); - ++iter) - { - Vertex* v(Traits::vertex(*iter)); - if (visited.find(v) != visited.end()) - continue; - if (!iter->closed()) - work.emplace_back(v, p.second + 1); - } - f(*p.first, diameter); - } -} - -} // namespace Sim -} // namespace PeerFinder -} // namespace ripple - -#endif diff --git a/src/xrpld/peerfinder/sim/Message.h b/src/xrpld/peerfinder/sim/Message.h deleted file mode 100644 index 69be553c01a..00000000000 --- a/src/xrpld/peerfinder/sim/Message.h +++ /dev/null @@ -1,47 +0,0 @@ -//------------------------------------------------------------------------------ -/* - This file is part of rippled: https://github.com/ripple/rippled - Copyright (c) 2012, 2013 Ripple Labs Inc. - - Permission to use, copy, modify, and/or distribute this software for any - purpose with or without fee is hereby granted, provided that the above - copyright notice and this permission notice appear in all copies. - - THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - MERCHANTABILITY AND FITNESS.
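The `breadth_first_traverse` deleted above pairs each queued vertex with its depth, so the running maximum depth it passes to `f` is really the start vertex's eccentricity, a lower bound on the graph's diameter. A standalone version over a plain adjacency list, with hypothetical types in place of the simulator's `VertexTraits`:

```cpp
#include <algorithm>
#include <deque>
#include <iostream>
#include <set>
#include <utility>
#include <vector>

// Sketch: BFS that hands each visited vertex to f together with the
// maximum depth seen so far, mirroring the deleted traversal.
template <class Function>
void
breadthFirst(std::vector<std::vector<int>> const& adj, int start, Function f)
{
    std::deque<std::pair<int, int>> work{{start, 0}};  // (vertex, depth)
    std::set<int> visited;
    int diameter = 0;
    while (!work.empty())
    {
        auto const [v, d] = work.front();
        work.pop_front();
        if (!visited.insert(v).second)
            continue;  // already seen
        diameter = std::max(d, diameter);
        for (int w : adj[v])
            if (visited.count(w) == 0)
                work.emplace_back(w, d + 1);
        f(v, diameter);
    }
}

int main()
{
    // Edges 0-1, 0-2, 1-3: the farthest vertex from 0 is at depth 2.
    std::vector<std::vector<int>> adj{{1, 2}, {0, 3}, {0}, {1}};
    breadthFirst(adj, 0, [](int v, int depth) {
        std::cout << "vertex " << v << ", max depth so far " << depth << '\n';
    });
}
```

The deletion continues with the remaining simulator headers.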
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. -*/ -//============================================================================== - -#ifndef RIPPLE_PEERFINDER_SIM_NODESNAPSHOT_H_INCLUDED -#define RIPPLE_PEERFINDER_SIM_NODESNAPSHOT_H_INCLUDED - -namespace ripple { -namespace PeerFinder { -namespace Sim { - -/** A snapshot of a Node in the network simulator. */ -struct NodeSnapshot -{ - explicit NodeSnapshot() = default; -}; - -} // namespace Sim -} // namespace PeerFinder -} // namespace ripple - -#endif diff --git a/src/xrpld/peerfinder/sim/Params.h b/src/xrpld/peerfinder/sim/Params.h deleted file mode 100644 index c3c288bb985..00000000000 --- a/src/xrpld/peerfinder/sim/Params.h +++ /dev/null @@ -1,45 +0,0 @@ -//------------------------------------------------------------------------------ -/* - This file is part of rippled: https://github.com/ripple/rippled - Copyright (c) 2012, 2013 Ripple Labs Inc. - - Permission to use, copy, modify, and/or distribute this software for any - purpose with or without fee is hereby granted, provided that the above - copyright notice and this permission notice appear in all copies. - - THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. -*/ -//============================================================================== - -#ifndef RIPPLE_PEERFINDER_SIM_PARAMS_H_INCLUDED -#define RIPPLE_PEERFINDER_SIM_PARAMS_H_INCLUDED - -namespace ripple { -namespace PeerFinder { -namespace Sim { - -/** Defines the parameters for a network simulation. */ -struct Params -{ - Params() : steps(50), nodes(10), maxPeers(20), outPeers(9.5), firewalled(0) - { - } - - int steps; - int nodes; - int maxPeers; - double outPeers; - double firewalled; // [0, 1) -}; - -} // namespace Sim -} // namespace PeerFinder -} // namespace ripple - -#endif diff --git a/src/xrpld/peerfinder/sim/Predicates.h b/src/xrpld/peerfinder/sim/Predicates.h deleted file mode 100644 index 7bf125b383a..00000000000 --- a/src/xrpld/peerfinder/sim/Predicates.h +++ /dev/null @@ -1,87 +0,0 @@ -//------------------------------------------------------------------------------ -/* - This file is part of rippled: https://github.com/ripple/rippled - Copyright (c) 2012, 2013 Ripple Labs Inc. - - Permission to use, copy, modify, and/or distribute this software for any - purpose with or without fee is hereby granted, provided that the above - copyright notice and this permission notice appear in all copies. - - THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. -*/ -//============================================================================== - -#ifndef RIPPLE_PEERFINDER_SIM_PREDICATES_H_INCLUDED -#define RIPPLE_PEERFINDER_SIM_PREDICATES_H_INCLUDED - -namespace ripple { -namespace PeerFinder { -namespace Sim { - -/** UnaryPredicate, returns `true` if the 'to' node on a Link matches. */ -/** @{ */ -template -class is_remote_node_pred -{ -public: - is_remote_node_pred(Node const& n) : node(n) - { - } - template - bool - operator()(Link const& l) const - { - return &node == &l.remote_node(); - } - -private: - Node const& node; -}; - -template -is_remote_node_pred -is_remote_node(Node const& node) -{ - return is_remote_node_pred(node); -} - -template -is_remote_node_pred -is_remote_node(Node const* node) -{ - return is_remote_node_pred(*node); -} -/** @} */ - -//------------------------------------------------------------------------------ - -/** UnaryPredicate, `true` if the remote address matches. */ -class is_remote_endpoint -{ -public: - explicit is_remote_endpoint(beast::IP::Endpoint const& address) - : m_endpoint(address) - { - } - template - bool - operator()(Link const& link) const - { - return link.remote_endpoint() == m_endpoint; - } - -private: - beast::IP::Endpoint const m_endpoint; -}; - -} // namespace Sim -} // namespace PeerFinder -} // namespace ripple - -#endif diff --git a/src/xrpld/rpc/detail/DeliveredAmount.cpp b/src/xrpld/rpc/detail/DeliveredAmount.cpp index 4396fb291c7..7cf9a033fb8 100644 --- a/src/xrpld/rpc/detail/DeliveredAmount.cpp +++ b/src/xrpld/rpc/detail/DeliveredAmount.cpp @@ -119,20 +119,10 @@ canHaveDeliveredAmount( { // These lambdas are used to compute the values lazily auto const getFix1623Enabled = [&context]() -> bool { - if (context.app.config().reporting()) - { - auto const view = context.ledgerMaster.getValidatedLedger(); - if (!view) - return false; - return view->rules().enabled(fix1623); - } - else - { - auto const view = context.app.openLedger().current(); - if (!view) - return false; - return view->rules().enabled(fix1623); - } + auto const view = context.app.openLedger().current(); + if (!view) + return false; + return view->rules().enabled(fix1623); }; return canHaveDeliveredAmountHelp( diff --git a/src/xrpld/rpc/detail/Handler.cpp b/src/xrpld/rpc/detail/Handler.cpp index d4a3fda380f..90dee4475a1 100644 --- a/src/xrpld/rpc/detail/Handler.cpp +++ b/src/xrpld/rpc/detail/Handler.cpp @@ -106,11 +106,7 @@ Handler const handlerArray[]{ {"feature", byRef(&doFeature), Role::USER, NO_CONDITION}, {"fee", byRef(&doFee), Role::USER, NEEDS_CURRENT_LEDGER}, {"fetch_info", byRef(&doFetchInfo), Role::ADMIN, NO_CONDITION}, -#ifdef RIPPLED_REPORTING - {"gateway_balances", byRef(&doGatewayBalances), Role::ADMIN, NO_CONDITION}, -#else {"gateway_balances", byRef(&doGatewayBalances), Role::USER, NO_CONDITION}, -#endif {"get_counts", byRef(&doGetCounts), Role::ADMIN, NO_CONDITION}, {"get_aggregate_price", byRef(&doGetAggregatePrice), diff --git a/src/xrpld/rpc/detail/Handler.h b/src/xrpld/rpc/detail/Handler.h index 81fbc2be321..cb1a2579ecb 100644 --- a/src/xrpld/rpc/detail/Handler.h +++ b/src/xrpld/rpc/detail/Handler.h @@ -81,22 +81,6 @@ template error_code_i conditionMet(Condition condition_required, 
T& context) { - if (context.app.config().reporting()) - { - if (condition_required == NEEDS_CURRENT_LEDGER) - { - return rpcNO_CURRENT; - } - else if (condition_required == NEEDS_CLOSED_LEDGER) - { - return rpcNO_CLOSED; - } - else - { - return rpcSUCCESS; - } - } - if (context.app.getOPs().isAmendmentBlocked() && (condition_required != NO_CONDITION)) { diff --git a/src/xrpld/rpc/detail/RPCHandler.cpp b/src/xrpld/rpc/detail/RPCHandler.cpp index 8504fe72a83..19b33709c83 100644 --- a/src/xrpld/rpc/detail/RPCHandler.cpp +++ b/src/xrpld/rpc/detail/RPCHandler.cpp @@ -22,7 +22,6 @@ #include #include #include -#include #include #include #include @@ -206,11 +205,6 @@ callMethod( perfLog.rpcFinish(name, curId); return ret; } - catch (ReportingShouldProxy&) - { - result = forwardToP2p(context); - return rpcSUCCESS; - } catch (std::exception& e) { perfLog.rpcError(name, curId); @@ -226,36 +220,9 @@ callMethod( } // namespace -void -injectReportingWarning(RPC::JsonContext& context, Json::Value& result) -{ - if (context.app.config().reporting()) - { - Json::Value warnings{Json::arrayValue}; - Json::Value& w = warnings.append(Json::objectValue); - w[jss::id] = warnRPC_REPORTING; - w[jss::message] = - "This is a reporting server. " - " The default behavior of a reporting server is to only" - " return validated data. If you are looking for not yet" - " validated data, include \"ledger_index : current\"" - " in your request, which will cause this server to forward" - " the request to a p2p node. If the forward is successful" - " the response will include \"forwarded\" : \"true\""; - result[jss::warnings] = std::move(warnings); - } -} - Status doCommand(RPC::JsonContext& context, Json::Value& result) { - if (shouldForwardToP2p(context)) - { - result = forwardToP2p(context); - injectReportingWarning(context, result); - // this return value is ignored - return rpcSUCCESS; - } Handler const* handler = nullptr; if (auto error = fillHandler(context, handler)) { @@ -285,7 +252,6 @@ doCommand(RPC::JsonContext& context, Json::Value& result) else { auto ret = callMethod(context, method, handler->name_, result); - injectReportingWarning(context, result); return ret; } } diff --git a/src/xrpld/rpc/detail/RPCHelpers.cpp b/src/xrpld/rpc/detail/RPCHelpers.cpp index 687c33c45a4..87ec9ff03f4 100644 --- a/src/xrpld/rpc/detail/RPCHelpers.cpp +++ b/src/xrpld/rpc/detail/RPCHelpers.cpp @@ -335,9 +335,9 @@ getAccountObjects( namespace { bool -isValidatedOld(LedgerMaster& ledgerMaster, bool standaloneOrReporting) +isValidatedOld(LedgerMaster& ledgerMaster, bool standalone) { - if (standaloneOrReporting) + if (standalone) return false; return ledgerMaster.getValidatedLedgerAge() > Tuning::maxValidatedLedgerAge; @@ -377,12 +377,10 @@ ledgerFromRequest(T& ledger, JsonContext& context) auto const index = indexValue.asString(); - if (index == "current" || - (index.empty() && !context.app.config().reporting())) + if (index == "current" || index.empty()) return getLedger(ledger, LedgerShortcut::CURRENT, context); - if (index == "validated" || - (index.empty() && context.app.config().reporting())) + if (index == "validated") return getLedger(ledger, LedgerShortcut::VALIDATED, context); if (index == "closed") @@ -448,13 +446,8 @@ ledgerFromSpecifier( [[fallthrough]]; case LedgerCase::LEDGER_NOT_SET: { auto const shortcut = specifier.shortcut(); - // note, unspecified defaults to validated in reporting mode if (shortcut == - org::xrpl::rpc::v1::LedgerSpecifier::SHORTCUT_VALIDATED || - (shortcut == - org::xrpl::rpc::v1::LedgerSpecifier:: - 
SHORTCUT_UNSPECIFIED && - context.app.config().reporting())) + org::xrpl::rpc::v1::LedgerSpecifier::SHORTCUT_VALIDATED) { return getLedger(ledger, LedgerShortcut::VALIDATED, context); } @@ -498,8 +491,6 @@ getLedger(T& ledger, uint32_t ledgerIndex, Context& context) ledger = context.ledgerMaster.getLedgerBySeq(ledgerIndex); if (ledger == nullptr) { - if (context.app.config().reporting()) - return {rpcLGR_NOT_FOUND, "ledgerNotFound"}; auto cur = context.ledgerMaster.getCurrentLedger(); if (cur->info().seq == ledgerIndex) { @@ -526,10 +517,7 @@ template Status getLedger(T& ledger, LedgerShortcut shortcut, Context& context) { - if (isValidatedOld( - context.ledgerMaster, - context.app.config().standalone() || - context.app.config().reporting())) + if (isValidatedOld(context.ledgerMaster, context.app.config().standalone())) { if (context.apiVersion == 1) return {rpcNO_NETWORK, "InsufficientNetworkMode"}; @@ -552,18 +540,11 @@ getLedger(T& ledger, LedgerShortcut shortcut, Context& context) { if (shortcut == LedgerShortcut::CURRENT) { - if (context.app.config().reporting()) - return { - rpcLGR_NOT_FOUND, - "Reporting does not track current ledger"}; ledger = context.ledgerMaster.getCurrentLedger(); assert(ledger->open()); } else if (shortcut == LedgerShortcut::CLOSED) { - if (context.app.config().reporting()) - return { - rpcLGR_NOT_FOUND, "Reporting does not track closed ledger"}; ledger = context.ledgerMaster.getClosedLedger(); assert(!ledger->open()); } @@ -1038,9 +1019,6 @@ getAPIVersionNumber(Json::Value const& jv, bool betaEnabled) std::variant, Json::Value> getLedgerByContext(RPC::JsonContext& context) { - if (context.app.config().reporting()) - return rpcError(rpcREPORTING_UNSUPPORTED); - auto const hasHash = context.params.isMember(jss::ledger_hash); auto const hasIndex = context.params.isMember(jss::ledger_index); std::uint32_t ledgerIndex = 0; diff --git a/src/xrpld/rpc/detail/TransactionSign.cpp b/src/xrpld/rpc/detail/TransactionSign.cpp index 77938b4924a..c2a0a0c4546 100644 --- a/src/xrpld/rpc/detail/TransactionSign.cpp +++ b/src/xrpld/rpc/detail/TransactionSign.cpp @@ -828,11 +828,7 @@ transactionSign( if (!preprocResult.second) return preprocResult.first; - std::shared_ptr ledger; - if (app.config().reporting()) - ledger = app.getLedgerMaster().getValidatedLedger(); - else - ledger = app.openLedger().current(); + std::shared_ptr ledger = app.openLedger().current(); // Make sure the STTx makes a legitimate Transaction. 
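The RPCHelpers hunks above leave ledger selection with a single, mode-independent mapping: an empty or "current" `ledger_index` always resolves to the open ledger, and the validated ledger must be requested explicitly (the gRPC `SHORTCUT_UNSPECIFIED` case likewise no longer defaults to validated). The resulting dispatch, reduced to a sketch rather than the actual rippled helpers:

```cpp
#include <optional>
#include <string>

enum class LedgerShortcut { CURRENT, VALIDATED, CLOSED };

// Sketch of the post-change resolution in ledgerFromRequest: no
// reporting-mode branch, so the empty default is always CURRENT.
std::optional<LedgerShortcut>
resolveShortcut(std::string const& index)
{
    if (index == "current" || index.empty())
        return LedgerShortcut::CURRENT;
    if (index == "validated")
        return LedgerShortcut::VALIDATED;
    if (index == "closed")
        return LedgerShortcut::CLOSED;
    return std::nullopt;  // numeric index or error, handled elsewhere
}
```

The TransactionSign hunk above makes the same simplification: signing always works against the open ledger, with no validated-ledger fallback.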
std::pair txn = transactionConstructImpl(preprocResult.second, ledger->rules(), app); diff --git a/src/xrpld/rpc/handlers/AccountTx.cpp b/src/xrpld/rpc/handlers/AccountTx.cpp index e4d78a4a03c..887694daf21 100644 --- a/src/xrpld/rpc/handlers/AccountTx.cpp +++ b/src/xrpld/rpc/handlers/AccountTx.cpp @@ -22,9 +22,7 @@ #include #include #include -#include #include -#include #include #include #include @@ -219,16 +217,6 @@ std::pair doAccountTxHelp(RPC::Context& context, AccountTxArgs const& args) { context.loadType = Resource::feeMediumBurdenRPC; - if (context.app.config().reporting()) - { - auto const db = dynamic_cast( - &context.app.getRelationalDatabase()); - - if (!db) - Throw("Failed to get relational database"); - - return db->getAccountTx(args); - } AccountTxResult result; @@ -394,8 +382,6 @@ populateJsonResponse( response[jss::marker][jss::ledger] = result.marker->ledgerSeq; response[jss::marker][jss::seq] = result.marker->txnSeq; } - if (context.app.config().reporting()) - response["used_postgres"] = true; } JLOG(context.j.debug()) << __func__ << " : finished"; diff --git a/src/xrpld/rpc/handlers/CanDelete.cpp b/src/xrpld/rpc/handlers/CanDelete.cpp index db9fdf7c5d0..df2301d03e0 100644 --- a/src/xrpld/rpc/handlers/CanDelete.cpp +++ b/src/xrpld/rpc/handlers/CanDelete.cpp @@ -34,9 +34,6 @@ namespace ripple { Json::Value doCanDelete(RPC::JsonContext& context) { - if (context.app.config().reporting()) - return RPC::make_error(rpcREPORTING_UNSUPPORTED); - if (!context.app.getSHAMapStore().advisoryDelete()) return RPC::make_error(rpcNOT_ENABLED); diff --git a/src/xrpld/rpc/handlers/Connect.cpp b/src/xrpld/rpc/handlers/Connect.cpp index dadf0a0515e..c564319dc8b 100644 --- a/src/xrpld/rpc/handlers/Connect.cpp +++ b/src/xrpld/rpc/handlers/Connect.cpp @@ -37,9 +37,6 @@ namespace ripple { Json::Value doConnect(RPC::JsonContext& context) { - if (context.app.config().reporting()) - return rpcError(rpcREPORTING_UNSUPPORTED); - if (context.app.config().standalone()) return "cannot connect in standalone mode"; diff --git a/src/xrpld/rpc/handlers/ConsensusInfo.cpp b/src/xrpld/rpc/handlers/ConsensusInfo.cpp index 42fbb60ba76..ce727bb4006 100644 --- a/src/xrpld/rpc/handlers/ConsensusInfo.cpp +++ b/src/xrpld/rpc/handlers/ConsensusInfo.cpp @@ -30,9 +30,6 @@ namespace ripple { Json::Value doConsensusInfo(RPC::JsonContext& context) { - if (context.app.config().reporting()) - return rpcError(rpcREPORTING_UNSUPPORTED); - Json::Value ret(Json::objectValue); ret[jss::info] = context.netOps.getConsensusInfo(); diff --git a/src/xrpld/rpc/handlers/Feature1.cpp b/src/xrpld/rpc/handlers/Feature1.cpp index c06756ca00a..75e583a352c 100644 --- a/src/xrpld/rpc/handlers/Feature1.cpp +++ b/src/xrpld/rpc/handlers/Feature1.cpp @@ -35,9 +35,6 @@ namespace ripple { Json::Value doFeature(RPC::JsonContext& context) { - if (context.app.config().reporting()) - return rpcError(rpcREPORTING_UNSUPPORTED); - if (context.params.isMember(jss::feature)) { // ensure that the `feature` param is a string diff --git a/src/xrpld/rpc/handlers/FetchInfo.cpp b/src/xrpld/rpc/handlers/FetchInfo.cpp index 113ae78a35c..a4287266e52 100644 --- a/src/xrpld/rpc/handlers/FetchInfo.cpp +++ b/src/xrpld/rpc/handlers/FetchInfo.cpp @@ -30,9 +30,6 @@ namespace ripple { Json::Value doFetchInfo(RPC::JsonContext& context) { - if (context.app.config().reporting()) - return rpcError(rpcREPORTING_UNSUPPORTED); - Json::Value ret(Json::objectValue); if (context.params.isMember(jss::clear) && diff --git a/src/xrpld/rpc/handlers/GetCounts.cpp 
b/src/xrpld/rpc/handlers/GetCounts.cpp index 035d698a5d4..690106ebbd2 100644 --- a/src/xrpld/rpc/handlers/GetCounts.cpp +++ b/src/xrpld/rpc/handlers/GetCounts.cpp @@ -71,7 +71,7 @@ getCountsJson(Application& app, int minObjectCount) ret[k] = v; } - if (!app.config().reporting() && app.config().useTxTables()) + if (app.config().useTxTables()) { auto const db = dynamic_cast(&app.getRelationalDatabase()); diff --git a/src/xrpld/rpc/handlers/LedgerAccept.cpp b/src/xrpld/rpc/handlers/LedgerAccept.cpp index 742a84fbb4e..dbd7eb9f1ca 100644 --- a/src/xrpld/rpc/handlers/LedgerAccept.cpp +++ b/src/xrpld/rpc/handlers/LedgerAccept.cpp @@ -36,7 +36,7 @@ doLedgerAccept(RPC::JsonContext& context) { Json::Value jvResult; - if (!context.app.config().standalone() || context.app.config().reporting()) + if (!context.app.config().standalone()) { jvResult[jss::error] = "notStandAlone"; } diff --git a/src/xrpld/rpc/handlers/LedgerHandler.cpp b/src/xrpld/rpc/handlers/LedgerHandler.cpp index 6d695abc85f..2bf4fb09f94 100644 --- a/src/xrpld/rpc/handlers/LedgerHandler.cpp +++ b/src/xrpld/rpc/handlers/LedgerHandler.cpp @@ -40,8 +40,7 @@ LedgerHandler::check() { auto const& params = context_.params; bool needsLedger = params.isMember(jss::ledger) || - params.isMember(jss::ledger_hash) || - params.isMember(jss::ledger_index) || context_.app.config().reporting(); + params.isMember(jss::ledger_hash) || params.isMember(jss::ledger_index); if (!needsLedger) return Status::OK; diff --git a/src/xrpld/rpc/handlers/Manifest.cpp b/src/xrpld/rpc/handlers/Manifest.cpp index 700d6ab39df..1debd48422a 100644 --- a/src/xrpld/rpc/handlers/Manifest.cpp +++ b/src/xrpld/rpc/handlers/Manifest.cpp @@ -29,9 +29,6 @@ namespace ripple { Json::Value doManifest(RPC::JsonContext& context) { - if (context.app.config().reporting()) - return rpcError(rpcREPORTING_UNSUPPORTED); - auto& params = context.params; if (!params.isMember(jss::public_key)) diff --git a/src/xrpld/rpc/handlers/Peers.cpp b/src/xrpld/rpc/handlers/Peers.cpp index 718070ec927..f3be0df558e 100644 --- a/src/xrpld/rpc/handlers/Peers.cpp +++ b/src/xrpld/rpc/handlers/Peers.cpp @@ -31,9 +31,6 @@ namespace ripple { Json::Value doPeers(RPC::JsonContext& context) { - if (context.app.config().reporting()) - return rpcError(rpcREPORTING_UNSUPPORTED); - Json::Value jvResult(Json::objectValue); jvResult[jss::peers] = context.app.overlay().json(); diff --git a/src/xrpld/rpc/handlers/Reservations.cpp b/src/xrpld/rpc/handlers/Reservations.cpp index 57a8fb3664a..1ff2d506afa 100644 --- a/src/xrpld/rpc/handlers/Reservations.cpp +++ b/src/xrpld/rpc/handlers/Reservations.cpp @@ -34,9 +34,6 @@ namespace ripple { Json::Value doPeerReservationsAdd(RPC::JsonContext& context) { - if (context.app.config().reporting()) - return rpcError(rpcREPORTING_UNSUPPORTED); - auto const& params = context.params; if (!params.isMember(jss::public_key)) @@ -90,9 +87,6 @@ doPeerReservationsAdd(RPC::JsonContext& context) Json::Value doPeerReservationsDel(RPC::JsonContext& context) { - if (context.app.config().reporting()) - return rpcError(rpcREPORTING_UNSUPPORTED); - auto const& params = context.params; // We repeat much of the parameter parsing from `doPeerReservationsAdd`. 
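The `get_counts` hunk above keeps a pattern these handlers use to reach SQLite-specific queries: downcast the abstract relational database with `dynamic_cast` and treat a null result as a hard error, as the deleted `account_tx` path also did for its Postgres variant. In isolation, with generic stand-in types rather than the rippled classes:

```cpp
#include <stdexcept>

// Sketch: reach a backend-specific interface through a checked
// downcast, treating failure as "this backend lacks the feature".
struct RelationalDatabase
{
    virtual ~RelationalDatabase() = default;
};

struct SQLiteDatabase : RelationalDatabase
{
    int txCount() const { return 42; }
};

int
countTx(RelationalDatabase& db)
{
    auto* sqlite = dynamic_cast<SQLiteDatabase*>(&db);
    if (!sqlite)
        throw std::runtime_error("Failed to get relational database");
    return sqlite->txCount();
}

int main()
{
    SQLiteDatabase db;
    return countTx(db) == 42 ? 0 : 1;
}
```

The remaining handler hunks below each delete the same three-line reporting guard, leaving one code path per handler.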
@@ -120,9 +114,6 @@ doPeerReservationsDel(RPC::JsonContext& context) Json::Value doPeerReservationsList(RPC::JsonContext& context) { - if (context.app.config().reporting()) - return rpcError(rpcREPORTING_UNSUPPORTED); - auto const& reservations = context.app.peerReservations().list(); // Enumerate the reservations in context.app.peerReservations() // as a Json::Value. diff --git a/src/xrpld/rpc/handlers/ServerInfo.cpp b/src/xrpld/rpc/handlers/ServerInfo.cpp index 769974985da..72beb37ed64 100644 --- a/src/xrpld/rpc/handlers/ServerInfo.cpp +++ b/src/xrpld/rpc/handlers/ServerInfo.cpp @@ -19,7 +19,6 @@ #include #include -#include #include #include #include @@ -330,14 +329,6 @@ doServerInfo(RPC::JsonContext& context) context.params.isMember(jss::counters) && context.params[jss::counters].asBool()); - if (context.app.config().reporting()) - { - Json::Value const proxied = forwardToP2p(context); - auto const lf = proxied[jss::result][jss::info][jss::load_factor]; - auto const vq = proxied[jss::result][jss::info][jss::validation_quorum]; - ret[jss::info][jss::validation_quorum] = vq.isNull() ? 1 : vq; - ret[jss::info][jss::load_factor] = lf.isNull() ? 1 : lf; - } return ret; } diff --git a/src/xrpld/rpc/handlers/Subscribe.cpp b/src/xrpld/rpc/handlers/Subscribe.cpp index 9f9181e1ab2..66fe89dea04 100644 --- a/src/xrpld/rpc/handlers/Subscribe.cpp +++ b/src/xrpld/rpc/handlers/Subscribe.cpp @@ -128,8 +128,6 @@ doSubscribe(RPC::JsonContext& context) std::string streamName = it.asString(); if (streamName == "server") { - if (context.app.config().reporting()) - return rpcError(rpcREPORTING_UNSUPPORTED); context.netOps.subServer( ispSub, jvResult, context.role == Role::ADMIN); } @@ -161,16 +159,12 @@ doSubscribe(RPC::JsonContext& context) } else if (streamName == "peer_status") { - if (context.app.config().reporting()) - return rpcError(rpcREPORTING_UNSUPPORTED); if (context.role != Role::ADMIN) return rpcError(rpcNO_PERMISSION); context.netOps.subPeerStatus(ispSub); } else if (streamName == "consensus") { - if (context.app.config().reporting()) - return rpcError(rpcREPORTING_UNSUPPORTED); context.netOps.subConsensus(ispSub); } else diff --git a/src/xrpld/rpc/handlers/Tx.cpp b/src/xrpld/rpc/handlers/Tx.cpp index e57f8b3852d..98af3a809bf 100644 --- a/src/xrpld/rpc/handlers/Tx.cpp +++ b/src/xrpld/rpc/handlers/Tx.cpp @@ -71,128 +71,9 @@ struct TxArgs std::optional> ledgerRange; }; -std::pair -doTxPostgres(RPC::Context& context, TxArgs const& args) -{ - if (!context.app.config().reporting()) - { - assert(false); - Throw( - "Called doTxPostgres yet not in reporting mode"); - } - - TxResult res; - res.searchedAll = TxSearched::unknown; - - if (!args.hash) - return { - res, - {rpcNOT_IMPL, - "Use of CTIDs on reporting mode is not currently supported."}}; - - JLOG(context.j.debug()) << "Fetching from postgres"; - Transaction::Locator locator = - Transaction::locate(*(args.hash), context.app); - - std::pair, std::shared_ptr> - pair; - // database returned the nodestore hash. Fetch the txn directly from the - // nodestore. 
Don't traverse the transaction SHAMap - if (locator.isFound()) - { - auto start = std::chrono::system_clock::now(); - if (auto obj = context.app.getNodeFamily().db().fetchNodeObject( - locator.getNodestoreHash(), locator.getLedgerSequence())) - { - auto node = SHAMapTreeNode::makeFromPrefix( - makeSlice(obj->getData()), - SHAMapHash{locator.getNodestoreHash()}); - if (!node) - { - assert(false); - return {res, {rpcINTERNAL, "Error making SHAMap node"}}; - } - auto item = (static_cast(node.get()))->peekItem(); - if (!item) - { - assert(false); - return {res, {rpcINTERNAL, "Error reading SHAMap node"}}; - } - - auto [sttx, meta] = deserializeTxPlusMeta(*item); - JLOG(context.j.debug()) << "Successfully fetched from db"; - - if (!sttx || !meta) - { - assert(false); - return {res, {rpcINTERNAL, "Error deserializing SHAMap node"}}; - } - std::string reason; - res.txn = std::make_shared(sttx, reason, context.app); - res.txn->setLedger(locator.getLedgerSequence()); - res.txn->setStatus(COMMITTED); - if (args.binary) - { - SerialIter it(item->slice()); - it.skip(it.getVLDataLength()); // skip transaction - Blob blob = it.getVL(); - res.meta = std::move(blob); - } - else - { - res.meta = std::make_shared( - *(args.hash), res.txn->getLedger(), *meta); - } - res.validated = true; - - auto const ledgerInfo = - context.app.getRelationalDatabase().getLedgerInfoByIndex( - locator.getLedgerSequence()); - res.closeTime = ledgerInfo->closeTime; - res.ledgerHash = ledgerInfo->hash; - - return {res, rpcSUCCESS}; - } - else - { - JLOG(context.j.error()) << "Failed to fetch from db"; - assert(false); - return {res, {rpcINTERNAL, "Containing SHAMap node not found"}}; - } - auto end = std::chrono::system_clock::now(); - JLOG(context.j.debug()) << "tx flat fetch time : " - << ((end - start).count() / 1000000000.0); - } - // database did not find the transaction, and returned the ledger range - // that was searched - else - { - if (args.ledgerRange) - { - auto range = locator.getLedgerRangeSearched(); - auto min = args.ledgerRange->first; - auto max = args.ledgerRange->second; - if (min >= range.lower() && max <= range.upper()) - { - res.searchedAll = TxSearched::all; - } - else - { - res.searchedAll = TxSearched::some; - } - } - return {res, rpcTXN_NOT_FOUND}; - } - // database didn't return anything. 
This shouldn't happen - assert(false); - return {res, {rpcINTERNAL, "unexpected Postgres response"}}; -} - std::pair doTxHelp(RPC::Context& context, TxArgs args) { - if (context.app.config().reporting()) - return doTxPostgres(context, args); TxResult result; ClosedInterval range; @@ -345,7 +226,7 @@ populateJsonResponse( } // Note, result.ledgerHash is only set in a closed or validated - // ledger - as seen in `doTxHelp` and `doTxPostgres` + // ledger - as seen in `doTxHelp` if (result.ledgerHash) response[jss::ledger_hash] = to_string(*result.ledgerHash); diff --git a/src/xrpld/rpc/handlers/TxHistory.cpp b/src/xrpld/rpc/handlers/TxHistory.cpp index de86b182534..1122eab51c3 100644 --- a/src/xrpld/rpc/handlers/TxHistory.cpp +++ b/src/xrpld/rpc/handlers/TxHistory.cpp @@ -23,7 +23,6 @@ #include #include #include -#include #include #include #include @@ -60,8 +59,6 @@ doTxHistory(RPC::JsonContext& context) Json::Value obj; Json::Value& txs = obj[jss::txs]; obj[jss::index] = startIndex; - if (context.app.config().reporting()) - obj["used_postgres"] = true; for (auto const& t : trans) { diff --git a/src/xrpld/rpc/handlers/UnlList.cpp b/src/xrpld/rpc/handlers/UnlList.cpp index 78bd3f14eab..b3394534372 100644 --- a/src/xrpld/rpc/handlers/UnlList.cpp +++ b/src/xrpld/rpc/handlers/UnlList.cpp @@ -29,8 +29,6 @@ namespace ripple { Json::Value doUnlList(RPC::JsonContext& context) { - if (context.app.config().reporting()) - return rpcError(rpcREPORTING_UNSUPPORTED); Json::Value obj(Json::objectValue); context.app.validators().for_each_listed( diff --git a/src/xrpld/rpc/handlers/ValidatorListSites.cpp b/src/xrpld/rpc/handlers/ValidatorListSites.cpp index 902c373766f..39bc4e36471 100644 --- a/src/xrpld/rpc/handlers/ValidatorListSites.cpp +++ b/src/xrpld/rpc/handlers/ValidatorListSites.cpp @@ -28,9 +28,6 @@ namespace ripple { Json::Value doValidatorListSites(RPC::JsonContext& context) { - if (context.app.config().reporting()) - return rpcError(rpcREPORTING_UNSUPPORTED); - return context.app.validatorSites().getJson(); } diff --git a/src/xrpld/rpc/handlers/Validators.cpp b/src/xrpld/rpc/handlers/Validators.cpp index 4048e8962de..599e76f847a 100644 --- a/src/xrpld/rpc/handlers/Validators.cpp +++ b/src/xrpld/rpc/handlers/Validators.cpp @@ -28,9 +28,6 @@ namespace ripple { Json::Value doValidators(RPC::JsonContext& context) { - if (context.app.config().reporting()) - return rpcError(rpcREPORTING_UNSUPPORTED); - return context.app.validators().getJson(); } diff --git a/src/xrpld/shamap/Family.h b/src/xrpld/shamap/Family.h index 6559ce5059b..bbb22c273d0 100644 --- a/src/xrpld/shamap/Family.h +++ b/src/xrpld/shamap/Family.h @@ -65,8 +65,6 @@ class Family sweep() = 0; /** Acquire ledger that has a missing node by ledger sequence - * - * Throw if in reporting mode. * * @param refNum Sequence of ledger to acquire. * @param nodeHash Hash of missing node to report in throw. diff --git a/src/xrpld/shamap/detail/NodeFamily.cpp b/src/xrpld/shamap/detail/NodeFamily.cpp index 01440a48799..bf95003aef8 100644 --- a/src/xrpld/shamap/detail/NodeFamily.cpp +++ b/src/xrpld/shamap/detail/NodeFamily.cpp @@ -69,14 +69,6 @@ void NodeFamily::missingNodeAcquireBySeq(std::uint32_t seq, uint256 const& nodeHash) { JLOG(j_.error()) << "Missing node in " << seq; - if (app_.config().reporting()) - { - std::stringstream ss; - ss << "Node not read, likely a Cassandra error in ledger seq " << seq - << " object hash " << nodeHash; - Throw(ss.str()); - } - std::unique_lock lock(maxSeqMutex_); if (maxSeq_ == 0) {
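One last note on a pattern removed earlier in this diff: the Cassandra `writeCallback` retried failed inserts with exponential backoff, doubling the wait per attempt and capping it at 2^10 ms (about one second). The arithmetic, as a synchronous sketch; the deleted code armed a `boost::asio::steady_timer` instead of sleeping, and the names here are illustrative:

```cpp
#include <algorithm>
#include <chrono>
#include <cmath>
#include <thread>

// Sketch: wait 2^min(10, n) milliseconds before the next attempt,
// i.e. 1ms, 2ms, 4ms, ... capped at ~1s, as in the deleted callback.
void
backoff(unsigned currentRetries)
{
    auto const wait = std::chrono::milliseconds(
        std::lround(std::pow(2, std::min(10u, currentRetries))));
    std::this_thread::sleep_for(wait);
}

int main()
{
    for (unsigned attempt = 0; attempt < 4; ++attempt)
    {
        // ... issue the write; on failure, back off and retry ...
        backoff(attempt);
    }
}
```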