From 33e1c42599857336d792effc753795911bdb13f0 Mon Sep 17 00:00:00 2001
From: Vlad <129996061+vvysokikh1@users.noreply.github.com>
Date: Fri, 31 Jan 2025 18:40:33 +0000
Subject: [PATCH 01/29] Add deep freeze feature (XLS-77d) (#5187)

- spec: XRPLF/XRPL-Standards#220
- amendment: "DeepFreeze"
- implemented the deep freeze spec to allow token issuers to prevent
  currency holders from acquiring more of these tokens.
- in combination with normal freeze, deep freeze effectively prevents any
  trust line balance change of a currency holder (except direct
  issuer <-> holder payments).
- added 2 new invariant checks to verify that deep freeze cannot be enacted
  without normal freeze and that frozen funds are not transferred.
- made some fixes to existing freeze handling.

Co-authored-by: Ed Hennis
Co-authored-by: Howard Hinnant
---
 include/xrpl/protocol/Feature.h               |    2 +-
 include/xrpl/protocol/LedgerFormats.h         |   10 +-
 include/xrpl/protocol/TxFlags.h               |    4 +-
 include/xrpl/protocol/detail/features.macro   |    2 +
 include/xrpl/protocol/jss.h                   |    2 +
 src/test/app/Freeze_test.cpp                  | 1520 ++++++++++++++++-
 src/test/ledger/Invariants_test.cpp           |  179 ++
 src/test/rpc/AccountLines_test.cpp            |   16 +-
 src/xrpld/app/paths/TrustLine.h               |   14 +
 src/xrpld/app/paths/detail/StepChecks.h       |    6 +
 src/xrpld/app/tx/detail/CashCheck.cpp         |    1 +
 src/xrpld/app/tx/detail/CreateOffer.cpp       |   26 +
 src/xrpld/app/tx/detail/InvariantCheck.cpp    |  316 ++++
 src/xrpld/app/tx/detail/InvariantCheck.h      |  110 ++
 .../app/tx/detail/NFTokenAcceptOffer.cpp      |   72 +
 src/xrpld/app/tx/detail/NFTokenAcceptOffer.h  |    8 +
 src/xrpld/app/tx/detail/OfferStream.cpp       |   14 +
 src/xrpld/app/tx/detail/SetTrust.cpp          |  144 +-
 src/xrpld/ledger/View.h                       |    8 +
 src/xrpld/ledger/detail/View.cpp              |   70 +-
 src/xrpld/rpc/handlers/AccountLines.cpp       |    4 +
 21 files changed, 2479 insertions(+), 49 deletions(-)

diff --git a/include/xrpl/protocol/Feature.h b/include/xrpl/protocol/Feature.h
index 369ec3304ef..bff3e57597f 100644
--- a/include/xrpl/protocol/Feature.h
+++ b/include/xrpl/protocol/Feature.h
@@ -80,7 +80,7 @@ namespace detail {
 // Feature.cpp. Because it's only used to reserve storage, and determine how
 // large to make the FeatureBitset, it MAY be larger. It MUST NOT be less than
 // the actual number of amendments. A LogicError on startup will verify this.
-static constexpr std::size_t numFeatures = 85;
+static constexpr std::size_t numFeatures = 86;
 
 /** Amendments that this server supports and the default voting behavior.
     Whether they are enabled depends on the Rules defined in the validated
diff --git a/include/xrpl/protocol/LedgerFormats.h b/include/xrpl/protocol/LedgerFormats.h
index 502d66bde6e..5f3cca53ac8 100644
--- a/include/xrpl/protocol/LedgerFormats.h
+++ b/include/xrpl/protocol/LedgerFormats.h
@@ -160,10 +160,12 @@ enum LedgerSpecificFlags {
     lsfHighAuth = 0x00080000,
     lsfLowNoRipple = 0x00100000,
     lsfHighNoRipple = 0x00200000,
-    lsfLowFreeze = 0x00400000,   // True, low side has set freeze flag
-    lsfHighFreeze = 0x00800000,  // True, high side has set freeze flag
-    lsfAMMNode = 0x01000000,     // True, trust line to AMM. Used by client
-                                 // apps to identify payments via AMM.
+    lsfLowFreeze = 0x00400000,       // True, low side has set freeze flag
+    lsfHighFreeze = 0x00800000,      // True, high side has set freeze flag
+    lsfLowDeepFreeze = 0x02000000,   // True, low side has set deep freeze flag
+    lsfHighDeepFreeze = 0x04000000,  // True, high side has set deep freeze flag
+    lsfAMMNode = 0x01000000,         // True, trust line to AMM. Used by client
+                                     // apps to identify payments via AMM.
// ltSIGNER_LIST lsfOneOwnerCount = 0x00010000, // True, uses only one OwnerCount diff --git a/include/xrpl/protocol/TxFlags.h b/include/xrpl/protocol/TxFlags.h index 8d6ff09b763..f0f6c7f223c 100644 --- a/include/xrpl/protocol/TxFlags.h +++ b/include/xrpl/protocol/TxFlags.h @@ -114,9 +114,11 @@ constexpr std::uint32_t tfSetNoRipple = 0x00020000; constexpr std::uint32_t tfClearNoRipple = 0x00040000; constexpr std::uint32_t tfSetFreeze = 0x00100000; constexpr std::uint32_t tfClearFreeze = 0x00200000; +constexpr std::uint32_t tfSetDeepFreeze = 0x00400000; +constexpr std::uint32_t tfClearDeepFreeze = 0x00800000; constexpr std::uint32_t tfTrustSetMask = ~(tfUniversal | tfSetfAuth | tfSetNoRipple | tfClearNoRipple | tfSetFreeze | - tfClearFreeze); + tfClearFreeze | tfSetDeepFreeze | tfClearDeepFreeze); // EnableAmendment flags: constexpr std::uint32_t tfGotMajority = 0x00010000; diff --git a/include/xrpl/protocol/detail/features.macro b/include/xrpl/protocol/detail/features.macro index d90dc327780..322670c5170 100644 --- a/include/xrpl/protocol/detail/features.macro +++ b/include/xrpl/protocol/detail/features.macro @@ -29,6 +29,7 @@ // If you add an amendment here, then do not forget to increment `numFeatures` // in include/xrpl/protocol/Feature.h. +XRPL_FEATURE(DeepFreeze, Supported::yes, VoteBehavior::DefaultNo) XRPL_FEATURE(PermissionedDomains, Supported::no, VoteBehavior::DefaultNo) XRPL_FEATURE(DynamicNFT, Supported::yes, VoteBehavior::DefaultNo) XRPL_FEATURE(Credentials, Supported::yes, VoteBehavior::DefaultNo) @@ -116,3 +117,4 @@ XRPL_FIX (NFTokenNegOffer, Supported::yes, VoteBehavior::Obsolete) XRPL_FIX (NFTokenDirV1, Supported::yes, VoteBehavior::Obsolete) XRPL_FEATURE(NonFungibleTokensV1, Supported::yes, VoteBehavior::Obsolete) XRPL_FEATURE(CryptoConditionsSuite, Supported::yes, VoteBehavior::Obsolete) + diff --git a/include/xrpl/protocol/jss.h b/include/xrpl/protocol/jss.h index f21da5817d9..4db8e0e32d6 100644 --- a/include/xrpl/protocol/jss.h +++ b/include/xrpl/protocol/jss.h @@ -284,6 +284,8 @@ JSS(flags); // out: AccountOffers, JSS(forward); // in: AccountTx JSS(freeze); // out: AccountLines JSS(freeze_peer); // out: AccountLines +JSS(deep_freeze); // out: AccountLines +JSS(deep_freeze_peer); // out: AccountLines JSS(frozen_balances); // out: GatewayBalances JSS(full); // in: LedgerClearer, handlers/Ledger JSS(full_reply); // out: PathFind diff --git a/src/test/app/Freeze_test.cpp b/src/test/app/Freeze_test.cpp index 0c54f0e1f39..99696c11f6e 100644 --- a/src/test/app/Freeze_test.cpp +++ b/src/test/app/Freeze_test.cpp @@ -17,6 +17,7 @@ */ //============================================================================== #include +#include #include #include #include @@ -186,6 +187,193 @@ class Freeze_test : public beast::unit_test::suite } } + void + testDeepFreeze(FeatureBitset features) + { + testcase("Deep Freeze"); + + using namespace test::jtx; + Env env(*this, features); + + Account G1{"G1"}; + Account A1{"A1"}; + + env.fund(XRP(10000), G1, A1); + env.close(); + + env.trust(G1["USD"](1000), A1); + env.close(); + + if (features[featureDeepFreeze]) + { + // test: Issuer deep freezing the trust line in a single + // transaction + env(trust(G1, A1["USD"](0), tfSetFreeze | tfSetDeepFreeze)); + { + auto const flags = getTrustlineFlags(env, 2u, 1u); + BEAST_EXPECT(flags & lsfLowFreeze); + BEAST_EXPECT(flags & lsfLowDeepFreeze); + BEAST_EXPECT(!(flags & (lsfHighFreeze | lsfHighDeepFreeze))); + env.close(); + } + + // test: Issuer clearing deep freeze and normal freeze in a single + // 
transaction + env(trust(G1, A1["USD"](0), tfClearFreeze | tfClearDeepFreeze)); + { + auto const flags = getTrustlineFlags(env, 2u, 1u); + BEAST_EXPECT(!(flags & (lsfLowFreeze | lsfLowDeepFreeze))); + BEAST_EXPECT(!(flags & (lsfHighFreeze | lsfHighDeepFreeze))); + env.close(); + } + + // test: Issuer deep freezing not already frozen line must fail + env(trust(G1, A1["USD"](0), tfSetDeepFreeze), + ter(tecNO_PERMISSION)); + + env(trust(G1, A1["USD"](0), tfSetFreeze)); + env.close(); + + // test: Issuer deep freezing already frozen trust line + env(trust(G1, A1["USD"](0), tfSetDeepFreeze)); + { + auto const flags = getTrustlineFlags(env, 2u, 1u); + BEAST_EXPECT(flags & lsfLowFreeze); + BEAST_EXPECT(flags & lsfLowDeepFreeze); + BEAST_EXPECT(!(flags & (lsfHighFreeze | lsfHighDeepFreeze))); + env.close(); + } + + // test: Holder clearing freeze flags has no effect. Each sides' + // flags are independent + env(trust(A1, G1["USD"](0), tfClearFreeze | tfClearDeepFreeze)); + { + auto const flags = getTrustlineFlags(env, 2u, 1u); + BEAST_EXPECT(flags & lsfLowFreeze); + BEAST_EXPECT(flags & lsfLowDeepFreeze); + BEAST_EXPECT(!(flags & (lsfHighFreeze | lsfHighDeepFreeze))); + env.close(); + } + + // test: Issuer can't clear normal freeze when line is deep frozen + env(trust(G1, A1["USD"](0), tfClearFreeze), ter(tecNO_PERMISSION)); + + // test: Issuer clearing deep freeze but normal freeze is still in + // effect + env(trust(G1, A1["USD"](0), tfClearDeepFreeze)); + { + auto const flags = getTrustlineFlags(env, 2u, 1u); + BEAST_EXPECT(flags & lsfLowFreeze); + BEAST_EXPECT(!(flags & lsfLowDeepFreeze)); + BEAST_EXPECT(!(flags & (lsfHighFreeze | lsfHighDeepFreeze))); + env.close(); + } + } + else + { + // test: applying deep freeze before amendment fails + env(trust(G1, A1["USD"](0), tfSetDeepFreeze), ter(temINVALID_FLAG)); + + // test: clearing deep freeze before amendment fails + env(trust(G1, A1["USD"](0), tfClearDeepFreeze), + ter(temINVALID_FLAG)); + } + } + + void + testCreateFrozenTrustline(FeatureBitset features) + { + testcase("Create Frozen Trustline"); + + using namespace test::jtx; + Env env(*this, features); + + Account G1{"G1"}; + Account A1{"A1"}; + + env.fund(XRP(10000), G1, A1); + env.close(); + + // test: can create frozen trustline + { + env(trust(G1, A1["USD"](1000), tfSetFreeze)); + auto const flags = getTrustlineFlags(env, 5u, 3u, false); + BEAST_EXPECT(flags & lsfLowFreeze); + env.close(); + env.require(lines(A1, 1)); + } + + // Cleanup + env(trust(G1, A1["USD"](0), tfClearFreeze)); + env.close(); + env.require(lines(G1, 0)); + env.require(lines(A1, 0)); + + // test: cannot create deep frozen trustline without normal freeze + if (features[featureDeepFreeze]) + { + env(trust(G1, A1["USD"](1000), tfSetDeepFreeze), + ter(tecNO_PERMISSION)); + env.close(); + env.require(lines(A1, 0)); + } + + // test: can create deep frozen trustline together with normal freeze + if (features[featureDeepFreeze]) + { + env(trust(G1, A1["USD"](1000), tfSetFreeze | tfSetDeepFreeze)); + auto const flags = getTrustlineFlags(env, 5u, 3u, false); + BEAST_EXPECT(flags & lsfLowFreeze); + BEAST_EXPECT(flags & lsfLowDeepFreeze); + env.close(); + env.require(lines(A1, 1)); + } + } + + void + testSetAndClear(FeatureBitset features) + { + testcase("Freeze Set and Clear"); + + using namespace test::jtx; + Env env(*this, features); + + Account G1{"G1"}; + Account A1{"A1"}; + + env.fund(XRP(10000), G1, A1); + env.close(); + + env.trust(G1["USD"](1000), A1); + env.close(); + + if (features[featureDeepFreeze]) + { + // test: 
can't have both set and clear flag families in the same + // transaction + env(trust(G1, A1["USD"](0), tfSetFreeze | tfClearFreeze), + ter(tecNO_PERMISSION)); + env(trust(G1, A1["USD"](0), tfSetFreeze | tfClearDeepFreeze), + ter(tecNO_PERMISSION)); + env(trust(G1, A1["USD"](0), tfSetDeepFreeze | tfClearFreeze), + ter(tecNO_PERMISSION)); + env(trust(G1, A1["USD"](0), tfSetDeepFreeze | tfClearDeepFreeze), + ter(tecNO_PERMISSION)); + } + else + { + // test: old behavior, transaction succeed with no effect on a + // trust line + env(trust(G1, A1["USD"](0), tfSetFreeze | tfClearFreeze)); + { + auto affected = env.meta()->getJson( + JsonOptions::none)[sfAffectedNodes.fieldName]; + BEAST_EXPECT(checkArraySize( + affected, 1u)); // means no trustline changes + } + } + } + void testGlobalFreeze(FeatureBitset features) { @@ -354,15 +542,43 @@ class Freeze_test : public beast::unit_test::suite Account G1{"G1"}; Account A1{"A1"}; + Account frozenAcc{"A2"}; + Account deepFrozenAcc{"A3"}; env.fund(XRP(12000), G1); env.fund(XRP(1000), A1); + env.fund(XRP(1000), frozenAcc); + env.fund(XRP(1000), deepFrozenAcc); env.close(); env.trust(G1["USD"](1000), A1); + env.trust(G1["USD"](1000), frozenAcc); + env.trust(G1["USD"](1000), deepFrozenAcc); env.close(); env(pay(G1, A1, G1["USD"](1000))); + env(pay(G1, frozenAcc, G1["USD"](1000))); + env(pay(G1, deepFrozenAcc, G1["USD"](1000))); + + // Freezing and deep freezing some of the trust lines to check deep + // freeze and clearing of freeze separately + env(trust(G1, frozenAcc["USD"](0), tfSetFreeze)); + { + auto const flags = getTrustlineFlags(env, 2u, 1u); + BEAST_EXPECT(flags & lsfLowFreeze); + BEAST_EXPECT(!(flags & lsfHighFreeze)); + } + if (features[featureDeepFreeze]) + { + env(trust( + G1, deepFrozenAcc["USD"](0), tfSetFreeze | tfSetDeepFreeze)); + { + auto const flags = getTrustlineFlags(env, 2u, 1u); + BEAST_EXPECT(!(flags & (lsfLowFreeze | lsfLowDeepFreeze))); + BEAST_EXPECT(flags & lsfHighFreeze); + BEAST_EXPECT(flags & lsfHighDeepFreeze); + } + } env.close(); // TrustSet NoFreeze @@ -387,16 +603,48 @@ class Freeze_test : public beast::unit_test::suite env.require(flags(G1, asfNoFreeze)); env.require(flags(G1, asfGlobalFreeze)); - // test: trustlines can't be frozen - env(trust(G1, A1["USD"](0), tfSetFreeze)); - auto affected = - env.meta()->getJson(JsonOptions::none)[sfAffectedNodes.fieldName]; - if (!BEAST_EXPECT(checkArraySize(affected, 1u))) - return; + // test: trustlines can't be frozen when no freeze enacted + if (features[featureDeepFreeze]) + { + env(trust(G1, A1["USD"](0), tfSetFreeze), ter(tecNO_PERMISSION)); + + // test: cannot deep freeze already frozen line when no freeze + // enacted + env(trust(G1, frozenAcc["USD"](0), tfSetDeepFreeze), + ter(tecNO_PERMISSION)); + } + else + { + // test: previous functionality, checking there's no changes to a + // trust line + env(trust(G1, A1["USD"](0), tfSetFreeze)); + auto affected = env.meta()->getJson( + JsonOptions::none)[sfAffectedNodes.fieldName]; + if (!BEAST_EXPECT(checkArraySize(affected, 1u))) + return; + + auto let = affected[0u][sfModifiedNode.fieldName] + [sfLedgerEntryType.fieldName]; + BEAST_EXPECT(let == jss::AccountRoot); + } + + // test: can clear freeze on account + env(trust(G1, frozenAcc["USD"](0), tfClearFreeze)); + { + auto const flags = getTrustlineFlags(env, 2u, 1u); + BEAST_EXPECT(!(flags & lsfLowFreeze)); + } - auto let = - affected[0u][sfModifiedNode.fieldName][sfLedgerEntryType.fieldName]; - BEAST_EXPECT(let == jss::AccountRoot); + if (features[featureDeepFreeze]) + { + 
// test: can clear deep freeze on account + env(trust(G1, deepFrozenAcc["USD"](0), tfClearDeepFreeze)); + { + auto const flags = getTrustlineFlags(env, 2u, 1u); + BEAST_EXPECT(flags & lsfHighFreeze); + BEAST_EXPECT(!(flags & lsfHighDeepFreeze)); + } + } } void @@ -506,19 +754,1273 @@ class Freeze_test : public beast::unit_test::suite return; } + void + testOffersWhenDeepFrozen(FeatureBitset features) + { + testcase("Offers on frozen trust lines"); + + using namespace test::jtx; + Env env(*this, features); + + Account G1{"G1"}; + Account A1{"A1"}; + Account A2{"A2"}; + Account A3{"A3"}; + auto const USD{G1["USD"]}; + + env.fund(XRP(10000), G1, A1, A2, A3); + env.close(); + + auto const limit = USD(10000); + env.trust(limit, A1, A2, A3); + env.close(); + + env(pay(G1, A1, USD(1000))); + env(pay(G1, A2, USD(1000))); + env.close(); + + // Making large passive sell offer + // Wants to sell 50 USD for 100 XRP + env(offer(A2, XRP(100), USD(50)), txflags(tfPassive)); + env.close(); + // Making large passive buy offer + // Wants to buy 100 USD for 100 XRP + env(offer(A3, USD(100), XRP(100)), txflags(tfPassive)); + env.close(); + env.require(offers(A2, 1), offers(A3, 1)); + + // Checking A1 can buy from A2 by crossing it's offer + env(offer(A1, USD(1), XRP(2)), txflags(tfFillOrKill)); + env.close(); + env.require(balance(A1, USD(1001)), balance(A2, USD(999))); + + // Checking A1 can sell to A3 by crossing it's offer + env(offer(A1, XRP(1), USD(1)), txflags(tfFillOrKill)); + env.close(); + env.require(balance(A1, USD(1000)), balance(A3, USD(1))); + + // Testing aggressive and passive offer placing, trustline frozen by + // the issuer + { + env(trust(G1, A1["USD"](0), tfSetFreeze)); + env.close(); + + // test: can still make passive buy offer + env(offer(A1, USD(1), XRP(0.5)), txflags(tfPassive)); + env.close(); + env.require(balance(A1, USD(1000)), offers(A1, 1)); + // Cleanup + env(offer_cancel(A1, env.seq(A1) - 1)); + env.require(offers(A1, 0)); + env.close(); + + // test: can still buy from A2 + env(offer(A1, USD(1), XRP(2)), txflags(tfFillOrKill)); + env.close(); + env.require( + balance(A1, USD(1001)), balance(A2, USD(998)), offers(A1, 0)); + + // test: cannot create passive sell offer + env(offer(A1, XRP(2), USD(1)), + txflags(tfPassive), + ter(tecUNFUNDED_OFFER)); + env.close(); + env.require(balance(A1, USD(1001)), offers(A1, 0)); + + // test: cannot sell to A3 + env(offer(A1, XRP(1), USD(1)), + txflags(tfFillOrKill), + ter(tecUNFUNDED_OFFER)); + env.close(); + env.require(balance(A1, USD(1001)), offers(A1, 0)); + + env(trust(G1, A1["USD"](0), tfClearFreeze)); + env.close(); + } + + // Testing aggressive and passive offer placing, trustline deep frozen + // by the issuer + if (features[featureDeepFreeze]) + { + env(trust(G1, A1["USD"](0), tfSetFreeze | tfSetDeepFreeze)); + env.close(); + + // test: cannot create passive buy offer + env(offer(A1, USD(1), XRP(0.5)), + txflags(tfPassive), + ter(tecFROZEN)); + env.close(); + + // test: cannot buy from A2 + env(offer(A1, USD(1), XRP(2)), + txflags(tfFillOrKill), + ter(tecFROZEN)); + env.close(); + + // test: cannot create passive sell offer + env(offer(A1, XRP(2), USD(1)), + txflags(tfPassive), + ter(tecUNFUNDED_OFFER)); + env.close(); + + // test: cannot sell to A3 + env(offer(A1, XRP(1), USD(1)), + txflags(tfFillOrKill), + ter(tecUNFUNDED_OFFER)); + env.close(); + + env(trust(G1, A1["USD"](0), tfClearFreeze | tfClearDeepFreeze)); + env.close(); + env.require(balance(A1, USD(1001)), offers(A1, 0)); + } + + // Testing already existing offers 
behavior after trustline is frozen by + // the issuer + { + env.require(balance(A1, USD(1001))); + env(offer(A1, XRP(1.9), USD(1))); + env(offer(A1, USD(1), XRP(1.1))); + env.close(); + env.require(balance(A1, USD(1001)), offers(A1, 2)); + + env(trust(G1, A1["USD"](0), tfSetFreeze)); + env.close(); + + // test: A2 wants to sell to A1, must succeed + env.require(balance(A1, USD(1001)), balance(A2, USD(998))); + env(offer(A2, XRP(1.1), USD(1)), txflags(tfFillOrKill)); + env.close(); + env.require( + balance(A1, USD(1002)), balance(A2, USD(997)), offers(A1, 1)); + + // test: A3 wants to buy from A1, must fail + env.require( + balance(A1, USD(1002)), balance(A3, USD(1)), offers(A1, 1)); + env(offer(A3, USD(1), XRP(1.9)), + txflags(tfFillOrKill), + ter(tecKILLED)); + env.close(); + env.require( + balance(A1, USD(1002)), balance(A3, USD(1)), offers(A1, 0)); + + env(trust(G1, A1["USD"](0), tfClearFreeze)); + env.close(); + } + + // Testing existing offers behavior after trustline is deep frozen by + // the issuer + if (features[featureDeepFreeze]) + { + env.require(balance(A1, USD(1002))); + env(offer(A1, XRP(1.9), USD(1))); + env(offer(A1, USD(1), XRP(1.1))); + env.close(); + env.require(balance(A1, USD(1002)), offers(A1, 2)); + + env(trust(G1, A1["USD"](0), tfSetFreeze | tfSetDeepFreeze)); + env.close(); + + // test: A2 wants to sell to A1, must fail + env.require(balance(A1, USD(1002)), balance(A2, USD(997))); + env(offer(A2, XRP(1.1), USD(1)), + txflags(tfFillOrKill), + ter(tecKILLED)); + env.close(); + env.require( + balance(A1, USD(1002)), balance(A2, USD(997)), offers(A1, 1)); + + // test: A3 wants to buy from A1, must fail + env.require( + balance(A1, USD(1002)), balance(A3, USD(1)), offers(A1, 1)); + env(offer(A3, USD(1), XRP(1.9)), + txflags(tfFillOrKill), + ter(tecKILLED)); + env.close(); + env.require( + balance(A1, USD(1002)), balance(A3, USD(1)), offers(A1, 0)); + + env(trust(G1, A1["USD"](0), tfClearFreeze | tfClearDeepFreeze)); + env.close(); + } + + // Testing aggressive and passive offer placing, trustline frozen by + // the holder + { + env(trust(A1, limit, tfSetFreeze)); + env.close(); + + // test: A1 can make passive buy offer + env(offer(A1, USD(1), XRP(0.5)), txflags(tfPassive)); + env.close(); + env.require(balance(A1, USD(1002)), offers(A1, 1)); + // Cleanup + env(offer_cancel(A1, env.seq(A1) - 1)); + env.require(offers(A1, 0)); + env.close(); + + // test: A1 wants to buy, must fail + if (features[featureFlowCross]) + { + env(offer(A1, USD(1), XRP(2)), + txflags(tfFillOrKill), + ter(tecKILLED)); + env.close(); + env.require( + balance(A1, USD(1002)), + balance(A2, USD(997)), + offers(A1, 0)); + } + else + { + // The transaction that should be here would succeed. + // I don't want to adjust balances in following tests. Flow + // cross feature flag is not relevant to this particular test + // case so we're not missing out some corner cases checks. 
+ } + + // test: A1 can create passive sell offer + env(offer(A1, XRP(2), USD(1)), txflags(tfPassive)); + env.close(); + env.require(balance(A1, USD(1002)), offers(A1, 1)); + // Cleanup + env(offer_cancel(A1, env.seq(A1) - 1)); + env.require(offers(A1, 0)); + env.close(); + + // test: A1 can sell to A3 + env(offer(A1, XRP(1), USD(1)), txflags(tfFillOrKill)); + env.close(); + env.require(balance(A1, USD(1001)), offers(A1, 0)); + + env(trust(A1, limit, tfClearFreeze)); + env.close(); + } + + // Testing aggressive and passive offer placing, trustline deep frozen + // by the holder + if (features[featureDeepFreeze]) + { + env(trust(A1, limit, tfSetFreeze | tfSetDeepFreeze)); + env.close(); + + // test: A1 cannot create passive buy offer + env(offer(A1, USD(1), XRP(0.5)), + txflags(tfPassive), + ter(tecFROZEN)); + env.close(); + + // test: A1 cannot buy, must fail + env(offer(A1, USD(1), XRP(2)), + txflags(tfFillOrKill), + ter(tecFROZEN)); + env.close(); + + // test: A1 cannot create passive sell offer + env(offer(A1, XRP(2), USD(1)), + txflags(tfPassive), + ter(tecUNFUNDED_OFFER)); + env.close(); + + // test: A1 cannot sell to A3 + env(offer(A1, XRP(1), USD(1)), + txflags(tfFillOrKill), + ter(tecUNFUNDED_OFFER)); + env.close(); + + env(trust(A1, limit, tfClearFreeze | tfClearDeepFreeze)); + env.close(); + } + } + + void + testPathsWhenFrozen(FeatureBitset features) + { + testcase("Longer paths payment on frozen trust lines"); + using namespace test::jtx; + using path = test::jtx::path; + + Env env(*this, features); + Account G1{"G1"}; + Account A1{"A1"}; + Account A2{"A2"}; + auto const USD{G1["USD"]}; + + env.fund(XRP(10000), G1, A1, A2); + env.close(); + + auto const limit = USD(10000); + env.trust(limit, A1, A2); + env.close(); + + env(pay(G1, A1, USD(1000))); + env(pay(G1, A2, USD(1000))); + env.close(); + + env(offer(A2, XRP(100), USD(100)), txflags(tfPassive)); + env.close(); + + // Testing payments A1 <-> G1 using offer from A2 frozen by issuer. + { + env(trust(G1, A2["USD"](0), tfSetFreeze)); + env.close(); + + // test: A1 cannot send USD using XRP through A2 offer + env(pay(A1, G1, USD(10)), + path(~USD), + sendmax(XRP(11)), + txflags(tfNoRippleDirect), + ter(tecPATH_PARTIAL)); + env.close(); + + // test: G1 cannot send USD using XRP through A2 offer + env(pay(G1, A1, USD(10)), + path(~USD), + sendmax(XRP(11)), + txflags(tfNoRippleDirect), + ter(tecPATH_PARTIAL)); + env.close(); + + env(trust(G1, A2["USD"](0), tfClearFreeze)); + env.close(); + } + + // Testing payments A1 <-> G1 using offer from A2 deep frozen by issuer. + if (features[featureDeepFreeze]) + { + env(trust(G1, A2["USD"](0), tfSetFreeze | tfSetDeepFreeze)); + env.close(); + + // test: A1 cannot send USD using XRP through A2 offer + env(pay(A1, G1, USD(10)), + path(~USD), + sendmax(XRP(11)), + txflags(tfNoRippleDirect), + ter(tecPATH_PARTIAL)); + env.close(); + + // test: G1 cannot send USD using XRP through A2 offer + env(pay(G1, A1, USD(10)), + path(~USD), + sendmax(XRP(11)), + txflags(tfNoRippleDirect), + ter(tecPATH_PARTIAL)); + env.close(); + + env(trust(G1, A2["USD"](0), tfClearFreeze | tfClearDeepFreeze)); + env.close(); + } + + // Testing payments A1 <-> G1 using offer from A2 frozen by currency + // holder. 
+ { + env(trust(A2, limit, tfSetFreeze)); + env.close(); + + // test: A1 can send USD using XRP through A2 offer + env(pay(A1, G1, USD(10)), + path(~USD), + sendmax(XRP(11)), + txflags(tfNoRippleDirect)); + env.close(); + + // test: G1 can send USD using XRP through A2 offer + env(pay(G1, A1, USD(10)), + path(~USD), + sendmax(XRP(11)), + txflags(tfNoRippleDirect)); + env.close(); + + env(trust(A2, limit, tfClearFreeze)); + env.close(); + } + + // Testing payments A1 <-> G1 using offer from A2 deep frozen by + // currency holder. + if (features[featureDeepFreeze]) + { + env(trust(A2, limit, tfSetFreeze | tfSetDeepFreeze)); + env.close(); + + // test: A1 cannot send USD using XRP through A2 offer + env(pay(A1, G1, USD(10)), + path(~USD), + sendmax(XRP(11)), + txflags(tfNoRippleDirect), + ter(tecPATH_PARTIAL)); + env.close(); + + // test: G1 cannot send USD using XRP through A2 offer + env(pay(G1, A1, USD(10)), + path(~USD), + sendmax(XRP(11)), + txflags(tfNoRippleDirect), + ter(tecPATH_PARTIAL)); + env.close(); + + env(trust(A2, limit, tfClearFreeze | tfClearDeepFreeze)); + env.close(); + } + + // Cleanup + env(offer_cancel(A1, env.seq(A1) - 1)); + env.require(offers(A1, 0)); + env.close(); + + env(offer(A2, USD(100), XRP(100)), txflags(tfPassive)); + env.close(); + + // Testing payments A1 <-> G1 using offer from A2 frozen by issuer. + { + env(trust(G1, A2["USD"](0), tfSetFreeze)); + env.close(); + + // test: A1 can send XRP using USD through A2 offer + env(pay(A1, G1, XRP(10)), + path(~XRP), + sendmax(USD(11)), + txflags(tfNoRippleDirect)); + env.close(); + + // test: G1 can send XRP using USD through A2 offer + env(pay(G1, A1, XRP(10)), + path(~XRP), + sendmax(USD(11)), + txflags(tfNoRippleDirect)); + env.close(); + + env(trust(G1, A2["USD"](0), tfClearFreeze)); + env.close(); + } + + // Testing payments A1 <-> G1 using offer from A2 deep frozen by + // issuer. + if (features[featureDeepFreeze]) + { + env(trust(G1, A2["USD"](0), tfSetFreeze | tfSetDeepFreeze)); + env.close(); + + // test: A1 cannot send XRP using USD through A2 offer + env(pay(A1, G1, XRP(10)), + path(~XRP), + sendmax(USD(11)), + txflags(tfNoRippleDirect), + ter(tecPATH_PARTIAL)); + env.close(); + + // test: G1 cannot send XRP using USD through A2 offer + env(pay(G1, A1, XRP(10)), + path(~XRP), + sendmax(USD(11)), + txflags(tfNoRippleDirect), + ter(tecPATH_PARTIAL)); + env.close(); + + env(trust(G1, A2["USD"](0), tfClearFreeze | tfClearDeepFreeze)); + env.close(); + } + + // Testing payments A1 <-> G1 using offer from A2 frozen by currency + // holder. + { + env(trust(A2, limit, tfSetFreeze)); + env.close(); + + // test: A1 can send XRP using USD through A2 offer + env(pay(A1, G1, XRP(10)), + path(~XRP), + sendmax(USD(11)), + txflags(tfNoRippleDirect)); + env.close(); + + // test: G1 can send XRP using USD through A2 offer + env(pay(G1, A1, XRP(10)), + path(~XRP), + sendmax(USD(11)), + txflags(tfNoRippleDirect)); + env.close(); + + env(trust(A2, limit, tfClearFreeze)); + env.close(); + } + + // Testing payments A1 <-> G1 using offer from A2 deep frozen by + // currency holder. 
+ if (features[featureDeepFreeze]) + { + env(trust(A2, limit, tfSetFreeze | tfSetDeepFreeze)); + env.close(); + + // test: A1 cannot send XRP using USD through A2 offer + env(pay(A1, G1, XRP(10)), + path(~XRP), + sendmax(USD(11)), + txflags(tfNoRippleDirect), + ter(tecPATH_PARTIAL)); + env.close(); + + // test: G1 cannot send XRP using USD through A2 offer + env(pay(G1, A1, XRP(10)), + path(~XRP), + sendmax(USD(11)), + txflags(tfNoRippleDirect), + ter(tecPATH_PARTIAL)); + env.close(); + + env(trust(A2, limit, tfClearFreeze | tfClearDeepFreeze)); + env.close(); + } + + // Cleanup + env(offer_cancel(A1, env.seq(A1) - 1)); + env.require(offers(A1, 0)); + env.close(); + } + + void + testPaymentsWhenDeepFrozen(FeatureBitset features) + { + testcase("Direct payments on frozen trust lines"); + + using namespace test::jtx; + Env env(*this, features); + + Account G1{"G1"}; + Account A1{"A1"}; + Account A2{"A2"}; + auto const USD{G1["USD"]}; + + env.fund(XRP(10000), G1, A1, A2); + env.close(); + + auto const limit = USD(10000); + env.trust(limit, A1, A2); + env.close(); + + env(pay(G1, A1, USD(1000))); + env(pay(G1, A2, USD(1000))); + env.close(); + + // Checking payments before freeze + // To issuer: + env(pay(A1, G1, USD(1))); + env(pay(A2, G1, USD(1))); + env.close(); + + // To each other: + env(pay(A1, A2, USD(1))); + env(pay(A2, A1, USD(1))); + env.close(); + + // Freeze A1 + env(trust(G1, A1["USD"](0), tfSetFreeze)); + env.close(); + + // Issuer and A1 can send payments to each other + env(pay(A1, G1, USD(1))); + env(pay(G1, A1, USD(1))); + env.close(); + + // A1 cannot send tokens to A2 + env(pay(A1, A2, USD(1)), ter(tecPATH_DRY)); + + // A2 can still send to A1 + env(pay(A2, A1, USD(1))); + env.close(); + + if (features[featureDeepFreeze]) + { + // Deep freeze A1 + env(trust(G1, A1["USD"](0), tfSetDeepFreeze)); + env.close(); + + // Issuer and A1 can send payments to each other + env(pay(A1, G1, USD(1))); + env(pay(G1, A1, USD(1))); + env.close(); + + // A1 cannot send tokens to A2 + env(pay(A1, A2, USD(1)), ter(tecPATH_DRY)); + + // A2 cannot send tokens to A1 + env(pay(A2, A1, USD(1)), ter(tecPATH_DRY)); + + // Clear deep freeze on A1 + env(trust(G1, A1["USD"](0), tfClearDeepFreeze)); + env.close(); + } + + // Clear freeze on A1 + env(trust(G1, A1["USD"](0), tfClearFreeze)); + env.close(); + + // A1 freezes trust line + env(trust(A1, limit, tfSetFreeze)); + env.close(); + + // Issuer and A2 must not be affected + env(pay(A2, G1, USD(1))); + env(pay(G1, A2, USD(1))); + env.close(); + + // A1 can send tokens to the issuer + env(pay(A1, G1, USD(1))); + env.close(); + // A1 can send tokens to A2 + env(pay(A1, A2, USD(1))); + env.close(); + + // Issuer can sent tokens to A1 + env(pay(G1, A1, USD(1))); + // A2 cannot send tokens to A1 + env(pay(A2, A1, USD(1)), ter(tecPATH_DRY)); + + if (features[featureDeepFreeze]) + { + // A1 deep freezes trust line + env(trust(A1, limit, tfSetDeepFreeze)); + env.close(); + + // Issuer and A2 must not be affected + env(pay(A2, G1, USD(1))); + env(pay(G1, A2, USD(1))); + env.close(); + + // A1 can still send token to issuer + env(pay(A1, G1, USD(1))); + env.close(); + + // Issuer can send tokens to A1 + env(pay(G1, A1, USD(1))); + // A2 cannot send tokens to A1 + env(pay(A2, A1, USD(1)), ter(tecPATH_DRY)); + // A1 cannot send tokens to A2 + env(pay(A1, A2, USD(1)), ter(tecPATH_DRY)); + } + } + + void + testChecksWhenFrozen(FeatureBitset features) + { + testcase("Checks on frozen trust lines"); + + using namespace test::jtx; + Env env(*this, features); + + 
Account G1{"G1"}; + Account A1{"A1"}; + Account A2{"A2"}; + auto const USD{G1["USD"]}; + + env.fund(XRP(10000), G1, A1, A2); + env.close(); + + auto const limit = USD(10000); + env.trust(limit, A1, A2); + env.close(); + + env(pay(G1, A1, USD(1000))); + env(pay(G1, A2, USD(1000))); + env.close(); + + // Confirming we can write and cash checks + { + uint256 const checkId{getCheckIndex(G1, env.seq(G1))}; + env(check::create(G1, A1, USD(10))); + env.close(); + env(check::cash(A1, checkId, USD(10))); + env.close(); + } + + { + uint256 const checkId{getCheckIndex(G1, env.seq(G1))}; + env(check::create(G1, A2, USD(10))); + env.close(); + env(check::cash(A2, checkId, USD(10))); + env.close(); + } + + { + uint256 const checkId{getCheckIndex(A1, env.seq(A1))}; + env(check::create(A1, G1, USD(10))); + env.close(); + env(check::cash(G1, checkId, USD(10))); + env.close(); + } + + { + uint256 const checkId{getCheckIndex(A1, env.seq(A1))}; + env(check::create(A1, A2, USD(10))); + env.close(); + env(check::cash(A2, checkId, USD(10))); + env.close(); + } + + { + uint256 const checkId{getCheckIndex(A2, env.seq(A2))}; + env(check::create(A2, G1, USD(10))); + env.close(); + env(check::cash(G1, checkId, USD(10))); + env.close(); + } + + { + uint256 const checkId{getCheckIndex(A2, env.seq(A2))}; + env(check::create(A2, A1, USD(10))); + env.close(); + env(check::cash(A1, checkId, USD(10))); + env.close(); + } + + // Testing creation and cashing of checks on a trustline frozen by + // issuer + { + env(trust(G1, A1["USD"](0), tfSetFreeze)); + env.close(); + + // test: issuer writes check to A1. + { + uint256 const checkId{getCheckIndex(G1, env.seq(G1))}; + env(check::create(G1, A1, USD(10))); + env.close(); + env(check::cash(A1, checkId, USD(10)), ter(tecFROZEN)); + env.close(); + } + + // test: A2 writes check to A1. + { + uint256 const checkId{getCheckIndex(A2, env.seq(A2))}; + env(check::create(A2, A1, USD(10))); + env.close(); + // Same as previous test + env(check::cash(A1, checkId, USD(10)), ter(tecFROZEN)); + env.close(); + } + + // test: A1 writes check to issuer + { + env(check::create(A1, G1, USD(10)), ter(tecFROZEN)); + env.close(); + } + + // test: A1 writes check to A2 + { + // Same as previous test + env(check::create(A1, A2, USD(10)), ter(tecFROZEN)); + env.close(); + } + + // Unfreeze the trustline to create a couple of checks so that we + // could try to cash them later when the trustline is frozen again. + env(trust(G1, A1["USD"](0), tfClearFreeze)); + env.close(); + + uint256 const checkId1{getCheckIndex(A1, env.seq(A1))}; + env(check::create(A1, G1, USD(10))); + env.close(); + uint256 const checkId2{getCheckIndex(A1, env.seq(A1))}; + env(check::create(A1, A2, USD(10))); + env.close(); + + env(trust(G1, A1["USD"](0), tfSetFreeze)); + env.close(); + + // test: issuer tries to cash the check from A1 + { + env(check::cash(G1, checkId1, USD(10)), ter(tecPATH_PARTIAL)); + env.close(); + } + + // test: A2 tries to cash the check from A1 + { + env(check::cash(A2, checkId2, USD(10)), ter(tecPATH_PARTIAL)); + env.close(); + } + + env(trust(G1, A1["USD"](0), tfClearFreeze)); + env.close(); + } + + // Testing creation and cashing of checks on a trustline deep frozen by + // issuer + if (features[featureDeepFreeze]) + { + env(trust(G1, A1["USD"](0), tfSetFreeze | tfSetDeepFreeze)); + env.close(); + + // test: issuer writes check to A1. 
+ { + uint256 const checkId{getCheckIndex(G1, env.seq(G1))}; + env(check::create(G1, A1, USD(10))); + env.close(); + + env(check::cash(A1, checkId, USD(10)), ter(tecFROZEN)); + env.close(); + } + + // test: A2 writes check to A1. + { + uint256 const checkId{getCheckIndex(A2, env.seq(A2))}; + env(check::create(A2, A1, USD(10))); + env.close(); + // Same as previous test + env(check::cash(A1, checkId, USD(10)), ter(tecFROZEN)); + env.close(); + } + + // test: A1 writes check to issuer + { + env(check::create(A1, G1, USD(10)), ter(tecFROZEN)); + env.close(); + } + + // test: A1 writes check to A2 + { + // Same as previous test + env(check::create(A1, A2, USD(10)), ter(tecFROZEN)); + env.close(); + } + + // Unfreeze the trustline to create a couple of checks so that we + // could try to cash them later when the trustline is frozen again. + env(trust(G1, A1["USD"](0), tfClearFreeze | tfClearDeepFreeze)); + env.close(); + + uint256 const checkId1{getCheckIndex(A1, env.seq(A1))}; + env(check::create(A1, G1, USD(10))); + env.close(); + uint256 const checkId2{getCheckIndex(A1, env.seq(A1))}; + env(check::create(A1, A2, USD(10))); + env.close(); + + env(trust(G1, A1["USD"](0), tfSetFreeze | tfSetDeepFreeze)); + env.close(); + + // test: issuer tries to cash the check from A1 + { + env(check::cash(G1, checkId1, USD(10)), ter(tecPATH_PARTIAL)); + env.close(); + } + + // test: A2 tries to cash the check from A1 + { + env(check::cash(A2, checkId2, USD(10)), ter(tecPATH_PARTIAL)); + env.close(); + } + + env(trust(G1, A1["USD"](0), tfClearFreeze | tfClearDeepFreeze)); + env.close(); + } + + // Testing creation and cashing of checks on a trustline frozen by + // a currency holder + { + env(trust(A1, limit, tfSetFreeze)); + env.close(); + + // test: issuer writes check to A1. + { + env(check::create(G1, A1, USD(10)), ter(tecFROZEN)); + env.close(); + } + + // test: A2 writes check to A1. + { + env(check::create(A2, A1, USD(10)), ter(tecFROZEN)); + env.close(); + } + + // test: A1 writes check to issuer + { + uint256 const checkId{getCheckIndex(A1, env.seq(A1))}; + env(check::create(A1, G1, USD(10))); + env.close(); + env(check::cash(G1, checkId, USD(10))); + env.close(); + } + + // test: A1 writes check to A2 + { + uint256 const checkId{getCheckIndex(A1, env.seq(A1))}; + env(check::create(A1, A2, USD(10))); + env.close(); + env(check::cash(A2, checkId, USD(10))); + env.close(); + } + + env(trust(A1, limit, tfClearFreeze)); + env.close(); + } + + // Testing creation and cashing of checks on a trustline deep frozen by + // a currency holder + if (features[featureDeepFreeze]) + { + env(trust(A1, limit, tfSetFreeze | tfSetDeepFreeze)); + env.close(); + + // test: issuer writes check to A1. + { + env(check::create(G1, A1, USD(10)), ter(tecFROZEN)); + env.close(); + } + + // test: A2 writes check to A1. 
+ { + env(check::create(A2, A1, USD(10)), ter(tecFROZEN)); + env.close(); + } + + // test: A1 writes check to issuer + { + uint256 const checkId{getCheckIndex(A1, env.seq(A1))}; + env(check::create(A1, G1, USD(10))); + env.close(); + env(check::cash(G1, checkId, USD(10)), ter(tecPATH_PARTIAL)); + env.close(); + } + + // test: A1 writes check to A2 + { + uint256 const checkId{getCheckIndex(A1, env.seq(A1))}; + env(check::create(A1, A2, USD(10))); + env.close(); + env(check::cash(A2, checkId, USD(10)), ter(tecPATH_PARTIAL)); + env.close(); + } + + env(trust(A1, limit, tfClearFreeze | tfClearDeepFreeze)); + env.close(); + } + } + + void + testAMMWhenFreeze(FeatureBitset features) + { + testcase("AMM payments on frozen trust lines"); + using namespace test::jtx; + using path = test::jtx::path; + + Env env(*this, features); + Account G1{"G1"}; + Account A1{"A1"}; + Account A2{"A2"}; + auto const USD{G1["USD"]}; + + env.fund(XRP(10000), G1, A1, A2); + env.close(); + + env.trust(G1["USD"](10000), A1, A2); + env.close(); + + env(pay(G1, A1, USD(1000))); + env(pay(G1, A2, USD(1000))); + env.close(); + + AMM ammG1(env, G1, XRP(1'000), USD(1'000)); + env.close(); + + // Testing basic payment using AMM when freezing one of the trust lines. + { + env(trust(G1, A1["USD"](0), tfSetFreeze)); + env.close(); + + // test: can still use XRP to make payment + env(pay(A1, A2, USD(10)), + path(~USD), + sendmax(XRP(11)), + txflags(tfNoRippleDirect)); + env.close(); + + // test: cannot use USD to make payment + env(pay(A1, A2, XRP(10)), + path(~XRP), + sendmax(USD(11)), + txflags(tfNoRippleDirect), + ter(tecPATH_DRY)); + env.close(); + + // test: can still receive USD payments. + env(pay(A2, A1, USD(10)), + path(~USD), + sendmax(XRP(11)), + txflags(tfNoRippleDirect)); + env.close(); + + // test: can still receive XRP payments. + env(pay(A2, A1, XRP(10)), + path(~XRP), + sendmax(USD(11)), + txflags(tfNoRippleDirect)); + env.close(); + + env(trust(G1, A1["USD"](0), tfClearFreeze)); + env.close(); + } + + // Testing basic payment using AMM when deep freezing one of the trust + // lines. + if (features[featureDeepFreeze]) + { + env(trust(G1, A1["USD"](0), tfSetFreeze | tfSetDeepFreeze)); + env.close(); + + // test: can still use XRP to make payment + env(pay(A1, A2, USD(10)), + path(~USD), + sendmax(XRP(11)), + txflags(tfNoRippleDirect)); + env.close(); + + // test: cannot use USD to make payment + env(pay(A1, A2, XRP(10)), + path(~XRP), + sendmax(USD(11)), + txflags(tfNoRippleDirect), + ter(tecPATH_DRY)); + env.close(); + + // test: cannot receive USD payments. + env(pay(A2, A1, USD(10)), + path(~USD), + sendmax(XRP(11)), + txflags(tfNoRippleDirect), + ter(tecPATH_DRY)); + env.close(); + + // test: can still receive XRP payments. 
+ env(pay(A2, A1, XRP(10)), + path(~XRP), + sendmax(USD(11)), + txflags(tfNoRippleDirect)); + env.close(); + + env(trust(G1, A1["USD"](0), tfClearFreeze | tfClearDeepFreeze)); + env.close(); + } + } + + void + testNFTOffersWhenFreeze(FeatureBitset features) + { + testcase("NFT offers on frozen trust lines"); + using namespace test::jtx; + + Env env(*this, features); + Account G1{"G1"}; + Account A1{"A1"}; + Account A2{"A2"}; + auto const USD{G1["USD"]}; + + env.fund(XRP(10000), G1, A1, A2); + env.close(); + + auto const limit = USD(10000); + env.trust(limit, A1, A2); + env.close(); + + env(pay(G1, A1, USD(1000))); + env(pay(G1, A2, USD(1000))); + env.close(); + + // Testing A2 nft offer sell when A2 frozen by issuer + { + auto const sellOfferIndex = createNFTSellOffer(env, A2, USD(10)); + env(trust(G1, A2["USD"](0), tfSetFreeze)); + env.close(); + + // test: A2 can still receive USD for his NFT + env(token::acceptSellOffer(A1, sellOfferIndex)); + env.close(); + + env(trust(G1, A2["USD"](0), tfClearFreeze)); + env.close(); + } + + // Testing A2 nft offer sell when A2 deep frozen by issuer + if (features[featureDeepFreeze]) + { + auto const sellOfferIndex = createNFTSellOffer(env, A2, USD(10)); + + env(trust(G1, A2["USD"](0), tfSetFreeze | tfSetDeepFreeze)); + env.close(); + + // test: A2 cannot receive USD for his NFT + env(token::acceptSellOffer(A1, sellOfferIndex), ter(tecFROZEN)); + env.close(); + + env(trust(G1, A2["USD"](0), tfClearFreeze | tfClearDeepFreeze)); + env.close(); + } + + // Testing A1 nft offer sell when A2 frozen by issuer + { + auto const sellOfferIndex = createNFTSellOffer(env, A1, USD(10)); + env(trust(G1, A2["USD"](0), tfSetFreeze)); + env.close(); + + // test: A2 cannot send USD for NFT + env(token::acceptSellOffer(A2, sellOfferIndex), + ter(tecINSUFFICIENT_FUNDS)); + env.close(); + + env(trust(G1, A2["USD"](0), tfClearFreeze)); + env.close(); + } + + // Testing A1 nft offer sell when A2 deep frozen by issuer + if (features[featureDeepFreeze]) + { + auto const sellOfferIndex = createNFTSellOffer(env, A1, USD(10)); + env(trust(G1, A2["USD"](0), tfSetFreeze | tfSetDeepFreeze)); + env.close(); + + // test: A2 cannot send USD for NFT + env(token::acceptSellOffer(A2, sellOfferIndex), + ter(tecINSUFFICIENT_FUNDS)); + env.close(); + + env(trust(G1, A2["USD"](0), tfClearFreeze | tfClearDeepFreeze)); + env.close(); + } + + // Testing A2 nft offer sell when A2 frozen by currency holder + { + auto const sellOfferIndex = createNFTSellOffer(env, A2, USD(10)); + env(trust(A2, limit, tfSetFreeze)); + env.close(); + + // test: offer can still be accepted. 
+ env(token::acceptSellOffer(A1, sellOfferIndex)); + env.close(); + + env(trust(A2, limit, tfClearFreeze)); + env.close(); + } + + // Testing A2 nft offer sell when A2 deep frozen by currency holder + if (features[featureDeepFreeze]) + { + auto const sellOfferIndex = createNFTSellOffer(env, A2, USD(10)); + + env(trust(A2, limit, tfSetFreeze | tfSetDeepFreeze)); + env.close(); + + // test: A2 cannot receive USD for his NFT + env(token::acceptSellOffer(A1, sellOfferIndex), ter(tecFROZEN)); + env.close(); + + env(trust(A2, limit, tfClearFreeze | tfClearDeepFreeze)); + env.close(); + } + + // Testing A1 nft offer sell when A2 frozen by currency holder + { + auto const sellOfferIndex = createNFTSellOffer(env, A1, USD(10)); + env(trust(A2, limit, tfSetFreeze)); + env.close(); + + // test: A2 cannot send USD for NFT + env(token::acceptSellOffer(A2, sellOfferIndex)); + env.close(); + + env(trust(A2, limit, tfClearFreeze)); + env.close(); + } + + // Testing A1 nft offer sell when A2 deep frozen by currency holder + if (features[featureDeepFreeze]) + { + auto const sellOfferIndex = createNFTSellOffer(env, A1, USD(10)); + env(trust(A2, limit, tfSetFreeze | tfSetDeepFreeze)); + env.close(); + + // test: A2 cannot send USD for NFT + env(token::acceptSellOffer(A2, sellOfferIndex), + ter(tecINSUFFICIENT_FUNDS)); + env.close(); + + env(trust(A2, limit, tfClearFreeze | tfClearDeepFreeze)); + env.close(); + } + } + + // Helper function to extract trustline flags from open ledger + uint32_t + getTrustlineFlags( + test::jtx::Env& env, + size_t expectedArraySize, + size_t expectedArrayIndex, + bool modified = true) + { + using namespace test::jtx; + auto const affected = + env.meta()->getJson(JsonOptions::none)[sfAffectedNodes.fieldName]; + if (!BEAST_EXPECT(checkArraySize(affected, expectedArraySize))) + return 0; + + if (modified) + { + return affected[expectedArrayIndex][sfModifiedNode.fieldName] + [sfFinalFields.fieldName][jss::Flags] + .asUInt(); + } + + return affected[expectedArrayIndex][sfCreatedNode.fieldName] + [sfNewFields.fieldName][jss::Flags] + .asUInt(); + } + + // Helper function that returns the index of the next check on account + uint256 + getCheckIndex(AccountID const& account, std::uint32_t uSequence) + { + return keylet::check(account, uSequence).key; + } + + uint256 + createNFTSellOffer( + test::jtx::Env& env, + test::jtx::Account const& account, + test::jtx::PrettyAmount const& currency) + { + using namespace test::jtx; + uint256 const nftID{token::getNextID(env, account, 0u, tfTransferable)}; + env(token::mint(account, 0), txflags(tfTransferable)); + env.close(); + + uint256 const sellOfferIndex = + keylet::nftoffer(account, env.seq(account)).key; + env(token::createOffer(account, nftID, currency), + txflags(tfSellNFToken)); + env.close(); + + return sellOfferIndex; + } + public: void run() override { auto testAll = [this](FeatureBitset features) { testRippleState(features); + testDeepFreeze(features); + testCreateFrozenTrustline(features); + testSetAndClear(features); testGlobalFreeze(features); testNoFreeze(features); testOffersWhenFrozen(features); + testOffersWhenDeepFrozen(features); + testPaymentsWhenDeepFrozen(features); + testChecksWhenFrozen(features); + testAMMWhenFreeze(features); + testPathsWhenFrozen(features); + testNFTOffersWhenFreeze(features); }; using namespace test::jtx; auto const sa = supported_amendments(); + testAll(sa - featureFlowCross - featureDeepFreeze); testAll(sa - featureFlowCross); + testAll(sa - featureDeepFreeze); testAll(sa); } }; diff --git 
a/src/test/ledger/Invariants_test.cpp b/src/test/ledger/Invariants_test.cpp index 993104d6ad8..ecf1c8e3979 100644 --- a/src/test/ledger/Invariants_test.cpp +++ b/src/test/ledger/Invariants_test.cpp @@ -408,6 +408,183 @@ class Invariants_test : public beast::unit_test::suite }); } + void + testNoDeepFreezeTrustLinesWithoutFreeze() + { + using namespace test::jtx; + testcase << "trust lines with deep freeze flag without freeze " + "not allowed"; + doInvariantCheck( + {{"a trust line with deep freeze flag without normal freeze was " + "created"}}, + [](Account const& A1, Account const& A2, ApplyContext& ac) { + auto const sleNew = std::make_shared( + keylet::line(A1, A2, A1["USD"].currency)); + sleNew->setFieldAmount(sfLowLimit, A1["USD"](0)); + sleNew->setFieldAmount(sfHighLimit, A1["USD"](0)); + + std::uint32_t uFlags = 0u; + uFlags |= lsfLowDeepFreeze; + sleNew->setFieldU32(sfFlags, uFlags); + ac.view().insert(sleNew); + return true; + }); + + doInvariantCheck( + {{"a trust line with deep freeze flag without normal freeze was " + "created"}}, + [](Account const& A1, Account const& A2, ApplyContext& ac) { + auto const sleNew = std::make_shared( + keylet::line(A1, A2, A1["USD"].currency)); + sleNew->setFieldAmount(sfLowLimit, A1["USD"](0)); + sleNew->setFieldAmount(sfHighLimit, A1["USD"](0)); + std::uint32_t uFlags = 0u; + uFlags |= lsfHighDeepFreeze; + sleNew->setFieldU32(sfFlags, uFlags); + ac.view().insert(sleNew); + return true; + }); + + doInvariantCheck( + {{"a trust line with deep freeze flag without normal freeze was " + "created"}}, + [](Account const& A1, Account const& A2, ApplyContext& ac) { + auto const sleNew = std::make_shared( + keylet::line(A1, A2, A1["USD"].currency)); + sleNew->setFieldAmount(sfLowLimit, A1["USD"](0)); + sleNew->setFieldAmount(sfHighLimit, A1["USD"](0)); + std::uint32_t uFlags = 0u; + uFlags |= lsfLowDeepFreeze | lsfHighDeepFreeze; + sleNew->setFieldU32(sfFlags, uFlags); + ac.view().insert(sleNew); + return true; + }); + + doInvariantCheck( + {{"a trust line with deep freeze flag without normal freeze was " + "created"}}, + [](Account const& A1, Account const& A2, ApplyContext& ac) { + auto const sleNew = std::make_shared( + keylet::line(A1, A2, A1["USD"].currency)); + sleNew->setFieldAmount(sfLowLimit, A1["USD"](0)); + sleNew->setFieldAmount(sfHighLimit, A1["USD"](0)); + std::uint32_t uFlags = 0u; + uFlags |= lsfLowDeepFreeze | lsfHighFreeze; + sleNew->setFieldU32(sfFlags, uFlags); + ac.view().insert(sleNew); + return true; + }); + + doInvariantCheck( + {{"a trust line with deep freeze flag without normal freeze was " + "created"}}, + [](Account const& A1, Account const& A2, ApplyContext& ac) { + auto const sleNew = std::make_shared( + keylet::line(A1, A2, A1["USD"].currency)); + sleNew->setFieldAmount(sfLowLimit, A1["USD"](0)); + sleNew->setFieldAmount(sfHighLimit, A1["USD"](0)); + std::uint32_t uFlags = 0u; + uFlags |= lsfLowFreeze | lsfHighDeepFreeze; + sleNew->setFieldU32(sfFlags, uFlags); + ac.view().insert(sleNew); + return true; + }); + } + + void + testTransfersNotFrozen() + { + using namespace test::jtx; + testcase << "transfers when frozen"; + + Account G1{"G1"}; + // Helper function to establish the trustlines + auto const createTrustlines = + [&](Account const& A1, Account const& A2, Env& env) { + // Preclose callback to establish trust lines with gateway + env.fund(XRP(1000), G1); + + env.trust(G1["USD"](10000), A1); + env.trust(G1["USD"](10000), A2); + env.close(); + + env(pay(G1, A1, G1["USD"](1000))); + env(pay(G1, A2, G1["USD"](1000))); + 
env.close(); + + return true; + }; + + auto const A1FrozenByIssuer = + [&](Account const& A1, Account const& A2, Env& env) { + createTrustlines(A1, A2, env); + env(trust(G1, A1["USD"](10000), tfSetFreeze)); + env.close(); + + return true; + }; + + auto const A1DeepFrozenByIssuer = + [&](Account const& A1, Account const& A2, Env& env) { + A1FrozenByIssuer(A1, A2, env); + env(trust(G1, A1["USD"](10000), tfSetDeepFreeze)); + env.close(); + + return true; + }; + + auto const changeBalances = [&](Account const& A1, + Account const& A2, + ApplyContext& ac, + int A1Balance, + int A2Balance) { + auto const sleA1 = ac.view().peek(keylet::line(A1, G1["USD"])); + auto const sleA2 = ac.view().peek(keylet::line(A2, G1["USD"])); + + sleA1->setFieldAmount(sfBalance, G1["USD"](A1Balance)); + sleA2->setFieldAmount(sfBalance, G1["USD"](A2Balance)); + + ac.view().update(sleA1); + ac.view().update(sleA2); + }; + + // test: imitating frozen A1 making a payment to A2. + doInvariantCheck( + {{"Attempting to move frozen funds"}}, + [&](Account const& A1, Account const& A2, ApplyContext& ac) { + changeBalances(A1, A2, ac, -900, -1100); + return true; + }, + XRPAmount{}, + STTx{ttPAYMENT, [](STObject& tx) {}}, + {tecINVARIANT_FAILED, tefINVARIANT_FAILED}, + A1FrozenByIssuer); + + // test: imitating deep frozen A1 making a payment to A2. + doInvariantCheck( + {{"Attempting to move frozen funds"}}, + [&](Account const& A1, Account const& A2, ApplyContext& ac) { + changeBalances(A1, A2, ac, -900, -1100); + return true; + }, + XRPAmount{}, + STTx{ttPAYMENT, [](STObject& tx) {}}, + {tecINVARIANT_FAILED, tefINVARIANT_FAILED}, + A1DeepFrozenByIssuer); + + // test: imitating A2 making a payment to deep frozen A1. + doInvariantCheck( + {{"Attempting to move frozen funds"}}, + [&](Account const& A1, Account const& A2, ApplyContext& ac) { + changeBalances(A1, A2, ac, -1100, -900); + return true; + }, + XRPAmount{}, + STTx{ttPAYMENT, [](STObject& tx) {}}, + {tecINVARIANT_FAILED, tefINVARIANT_FAILED}, + A1DeepFrozenByIssuer); + } + void testXRPBalanceCheck() { @@ -1061,6 +1238,8 @@ class Invariants_test : public beast::unit_test::suite testAccountRootsDeletedClean(); testTypesMatch(); testNoXRPTrustLine(); + testNoDeepFreezeTrustLinesWithoutFreeze(); + testTransfersNotFrozen(); testXRPBalanceCheck(); testTransactionFeeCheck(); testNoBadOffers(); diff --git a/src/test/rpc/AccountLines_test.cpp b/src/test/rpc/AccountLines_test.cpp index d104ea14b0a..bae00d7a769 100644 --- a/src/test/rpc/AccountLines_test.cpp +++ b/src/test/rpc/AccountLines_test.cpp @@ -167,7 +167,11 @@ class AccountLines_test : public beast::unit_test::suite env.close(); // Set flags on gw2 trust lines so we can look for them. 
- env(trust(alice, gw2Currency(0), gw2, tfSetNoRipple | tfSetFreeze)); + env(trust( + alice, + gw2Currency(0), + gw2, + tfSetNoRipple | tfSetFreeze | tfSetDeepFreeze)); } env.close(); LedgerInfo const ledger58Info = env.closed()->info(); @@ -344,6 +348,7 @@ class AccountLines_test : public beast::unit_test::suite gw2.human() + R"("})"); auto const& line = lines[jss::result][jss::lines][0u]; BEAST_EXPECT(line[jss::freeze].asBool() == true); + BEAST_EXPECT(line[jss::deep_freeze].asBool() == true); BEAST_EXPECT(line[jss::no_ripple].asBool() == true); BEAST_EXPECT(line[jss::peer_authorized].asBool() == true); } @@ -359,6 +364,7 @@ class AccountLines_test : public beast::unit_test::suite alice.human() + R"("})"); auto const& lineA = linesA[jss::result][jss::lines][0u]; BEAST_EXPECT(lineA[jss::freeze_peer].asBool() == true); + BEAST_EXPECT(lineA[jss::deep_freeze_peer].asBool() == true); BEAST_EXPECT(lineA[jss::no_ripple_peer].asBool() == true); BEAST_EXPECT(lineA[jss::authorized].asBool() == true); @@ -981,7 +987,11 @@ class AccountLines_test : public beast::unit_test::suite env.close(); // Set flags on gw2 trust lines so we can look for them. - env(trust(alice, gw2Currency(0), gw2, tfSetNoRipple | tfSetFreeze)); + env(trust( + alice, + gw2Currency(0), + gw2, + tfSetNoRipple | tfSetFreeze | tfSetDeepFreeze)); } env.close(); LedgerInfo const ledger58Info = env.closed()->info(); @@ -1311,6 +1321,7 @@ class AccountLines_test : public beast::unit_test::suite gw2.human() + R"("}})"); auto const& line = lines[jss::result][jss::lines][0u]; BEAST_EXPECT(line[jss::freeze].asBool() == true); + BEAST_EXPECT(line[jss::deep_freeze].asBool() == true); BEAST_EXPECT(line[jss::no_ripple].asBool() == true); BEAST_EXPECT(line[jss::peer_authorized].asBool() == true); BEAST_EXPECT( @@ -1338,6 +1349,7 @@ class AccountLines_test : public beast::unit_test::suite alice.human() + R"("}})"); auto const& lineA = linesA[jss::result][jss::lines][0u]; BEAST_EXPECT(lineA[jss::freeze_peer].asBool() == true); + BEAST_EXPECT(lineA[jss::deep_freeze_peer].asBool() == true); BEAST_EXPECT(lineA[jss::no_ripple_peer].asBool() == true); BEAST_EXPECT(lineA[jss::authorized].asBool() == true); BEAST_EXPECT( diff --git a/src/xrpld/app/paths/TrustLine.h b/src/xrpld/app/paths/TrustLine.h index 381ef471875..4189f7ff481 100644 --- a/src/xrpld/app/paths/TrustLine.h +++ b/src/xrpld/app/paths/TrustLine.h @@ -139,6 +139,13 @@ class TrustLineBase return mFlags & (mViewLowest ? lsfLowFreeze : lsfHighFreeze); } + /** Have we set the deep freeze flag on our peer */ + bool + getDeepFreeze() const + { + return mFlags & (mViewLowest ? lsfLowDeepFreeze : lsfHighDeepFreeze); + } + /** Has the peer set the freeze flag on us */ bool getFreezePeer() const @@ -146,6 +153,13 @@ class TrustLineBase return mFlags & (!mViewLowest ? lsfLowFreeze : lsfHighFreeze); } + /** Has the peer set the deep freeze flag on us */ + bool + getDeepFreezePeer() const + { + return mFlags & (!mViewLowest ? 
lsfLowDeepFreeze : lsfHighDeepFreeze); + } + STAmount const& getBalance() const { diff --git a/src/xrpld/app/paths/detail/StepChecks.h b/src/xrpld/app/paths/detail/StepChecks.h index 2c2fee91cf9..9cbc3ef0f99 100644 --- a/src/xrpld/app/paths/detail/StepChecks.h +++ b/src/xrpld/app/paths/detail/StepChecks.h @@ -52,6 +52,12 @@ checkFreeze( { return terNO_LINE; } + // Unlike normal freeze, a deep frozen trust line acts the same + // regardless of which side froze it + if (sle->isFlag(lsfHighDeepFreeze) || sle->isFlag(lsfLowDeepFreeze)) + { + return terNO_LINE; + } } return tesSUCCESS; diff --git a/src/xrpld/app/tx/detail/CashCheck.cpp b/src/xrpld/app/tx/detail/CashCheck.cpp index 8b5ef79b6d4..f6e5f6f3e3f 100644 --- a/src/xrpld/app/tx/detail/CashCheck.cpp +++ b/src/xrpld/app/tx/detail/CashCheck.cpp @@ -392,6 +392,7 @@ CashCheck::doApply() false, // authorize account (sleDst->getFlags() & lsfDefaultRipple) == 0, false, // freeze trust line + false, // deep freeze trust line initialBalance, // zero initial balance Issue(currency, account_), // limit of zero 0, // quality in diff --git a/src/xrpld/app/tx/detail/CreateOffer.cpp b/src/xrpld/app/tx/detail/CreateOffer.cpp index 52ca602b956..f1b66468840 100644 --- a/src/xrpld/app/tx/detail/CreateOffer.cpp +++ b/src/xrpld/app/tx/detail/CreateOffer.cpp @@ -259,6 +259,32 @@ CreateOffer::checkAcceptAsset( } } + // An account can not create a trustline to itself, so no line can exist + // to be frozen. Additionally, an issuer can always accept its own + // issuance. + if (issue.account == id) + { + return tesSUCCESS; + } + + auto const trustLine = + view.read(keylet::line(id, issue.account, issue.currency)); + + if (!trustLine) + { + return tesSUCCESS; + } + + // There's no difference which side enacted deep freeze, accepting + // tokens shouldn't be possible. + bool const deepFrozen = + (*trustLine)[sfFlags] & (lsfLowDeepFreeze | lsfHighDeepFreeze); + + if (deepFrozen) + { + return tecFROZEN; + } + return tesSUCCESS; } diff --git a/src/xrpld/app/tx/detail/InvariantCheck.cpp b/src/xrpld/app/tx/detail/InvariantCheck.cpp index 63794023d40..d39492c1085 100644 --- a/src/xrpld/app/tx/detail/InvariantCheck.cpp +++ b/src/xrpld/app/tx/detail/InvariantCheck.cpp @@ -556,6 +556,322 @@ NoXRPTrustLines::finalize( //------------------------------------------------------------------------------ +void +NoDeepFreezeTrustLinesWithoutFreeze::visitEntry( + bool, + std::shared_ptr const&, + std::shared_ptr const& after) +{ + if (after && after->getType() == ltRIPPLE_STATE) + { + std::uint32_t const uFlags = after->getFieldU32(sfFlags); + bool const lowFreeze = uFlags & lsfLowFreeze; + bool const lowDeepFreeze = uFlags & lsfLowDeepFreeze; + + bool const highFreeze = uFlags & lsfHighFreeze; + bool const highDeepFreeze = uFlags & lsfHighDeepFreeze; + + deepFreezeWithoutFreeze_ = + (lowDeepFreeze && !lowFreeze) || (highDeepFreeze && !highFreeze); + } +} + +bool +NoDeepFreezeTrustLinesWithoutFreeze::finalize( + STTx const&, + TER const, + XRPAmount const, + ReadView const&, + beast::Journal const& j) +{ + if (!deepFreezeWithoutFreeze_) + return true; + + JLOG(j.fatal()) << "Invariant failed: a trust line with deep freeze flag " + "without normal freeze was created"; + return false; +} + +//------------------------------------------------------------------------------ + +void +TransfersNotFrozen::visitEntry( + bool isDelete, + std::shared_ptr const& before, + std::shared_ptr const& after) +{ + /* + * A trust line freeze state alone doesn't determine if a transfer is + * frozen. 
The transfer must be examined "end-to-end" because both sides of + * the transfer may have different freeze states and freeze impact depends + * on the transfer direction. This is why first we need to track the + * transfers using IssuerChanges senders/receivers. + * + * Only in validateIssuerChanges, after we collected all changes can we + * determine if the transfer is valid. + */ + if (!isValidEntry(before, after)) + { + return; + } + + auto const balanceChange = calculateBalanceChange(before, after, isDelete); + if (balanceChange.signum() == 0) + { + return; + } + + recordBalanceChanges(after, balanceChange); +} + +bool +TransfersNotFrozen::finalize( + STTx const& tx, + TER const ter, + XRPAmount const fee, + ReadView const& view, + beast::Journal const& j) +{ + /* + * We check this invariant regardless of deep freeze amendment status, + * allowing for detection and logging of potential issues even when the + * amendment is disabled. + * + * If an exploit that allows moving frozen assets is discovered, + * we can alert operators who monitor fatal messages and trigger assert in + * debug builds for an early warning. + * + * In an unlikely event that an exploit is found, this early detection + * enables encouraging the UNL to expedite deep freeze amendment activation + * or deploy hotfixes via new amendments. In case of a new amendment, we'd + * only have to change this line setting 'enforce' variable. + * enforce = view.rules().enabled(featureDeepFreeze) || + * view.rules().enabled(fixFreezeExploit); + */ + [[maybe_unused]] bool const enforce = + view.rules().enabled(featureDeepFreeze); + + for (auto const& [issue, changes] : balanceChanges_) + { + auto const issuerSle = findIssuer(issue.account, view); + // It should be impossible for the issuer to not be found, but check + // just in case so rippled doesn't crash in release. + if (!issuerSle) + { + XRPL_ASSERT( + enforce, + "ripple::TransfersNotFrozen::finalize : enforce " + "invariant."); + if (enforce) + { + return false; + } + continue; + } + + if (!validateIssuerChanges(issuerSle, changes, tx, j, enforce)) + { + return false; + } + } + + return true; +} + +bool +TransfersNotFrozen::isValidEntry( + std::shared_ptr const& before, + std::shared_ptr const& after) +{ + // `after` can never be null, even if the trust line is deleted. + XRPL_ASSERT( + after, "ripple::TransfersNotFrozen::isValidEntry : valid after."); + if (!after) + { + return false; + } + + if (after->getType() == ltACCOUNT_ROOT) + { + possibleIssuers_.emplace(after->at(sfAccount), after); + return false; + } + + /* While LedgerEntryTypesMatch invariant also checks types, all invariants + * are processed regardless of previous failures. + * + * This type check is still necessary here because it prevents potential + * issues in subsequent processing. + */ + return after->getType() == ltRIPPLE_STATE && + (!before || before->getType() == ltRIPPLE_STATE); +} + +STAmount +TransfersNotFrozen::calculateBalanceChange( + std::shared_ptr const& before, + std::shared_ptr const& after, + bool isDelete) +{ + auto const getBalance = [](auto const& line, auto const& other, bool zero) { + STAmount amt = + line ? line->at(sfBalance) : other->at(sfBalance).zeroed(); + return zero ? amt.zeroed() : amt; + }; + + /* Trust lines can be created dynamically by other transactions such as + * Payment and OfferCreate that cross offers. Such trust line won't be + * created frozen, but the sender might be, so the starting balance must be + * treated as zero. 
+ */ + auto const balanceBefore = getBalance(before, after, false); + + /* Same as above, trust lines can be dynamically deleted, and for frozen + * trust lines, payments not involving the issuer must be blocked. This is + * achieved by treating the final balance as zero when isDelete=true to + * ensure frozen line restrictions are enforced even during deletion. + */ + auto const balanceAfter = getBalance(after, before, isDelete); + + return balanceAfter - balanceBefore; +} + +void +TransfersNotFrozen::recordBalance(Issue const& issue, BalanceChange change) +{ + XRPL_ASSERT( + change.balanceChangeSign, + "ripple::TransfersNotFrozen::recordBalance : valid trustline " + "balance sign."); + auto& changes = balanceChanges_[issue]; + if (change.balanceChangeSign < 0) + changes.senders.emplace_back(std::move(change)); + else + changes.receivers.emplace_back(std::move(change)); +} + +void +TransfersNotFrozen::recordBalanceChanges( + std::shared_ptr const& after, + STAmount const& balanceChange) +{ + auto const balanceChangeSign = balanceChange.signum(); + auto const currency = after->at(sfBalance).getCurrency(); + + // Change from low account's perspective, which is trust line default + recordBalance( + {currency, after->at(sfHighLimit).getIssuer()}, + {after, balanceChangeSign}); + + // Change from high account's perspective, which reverses the sign. + recordBalance( + {currency, after->at(sfLowLimit).getIssuer()}, + {after, -balanceChangeSign}); +} + +std::shared_ptr +TransfersNotFrozen::findIssuer(AccountID const& issuerID, ReadView const& view) +{ + if (auto it = possibleIssuers_.find(issuerID); it != possibleIssuers_.end()) + { + return it->second; + } + + return view.read(keylet::account(issuerID)); +} + +bool +TransfersNotFrozen::validateIssuerChanges( + std::shared_ptr const& issuer, + IssuerChanges const& changes, + STTx const& tx, + beast::Journal const& j, + bool enforce) +{ + if (!issuer) + { + return false; + } + + bool const globalFreeze = issuer->isFlag(lsfGlobalFreeze); + if (changes.receivers.empty() || changes.senders.empty()) + { + /* If there are no receivers, then the holder(s) are returning + * their tokens to the issuer. Likewise, if there are no + * senders, then the issuer is issuing tokens to the holder(s). + * This is allowed regardless of the issuer's freeze flags. (The + * holder may have contradicting freeze flags, but that will be + * checked when the holder is treated as issuer.) + */ + return true; + } + + for (auto const& actors : {changes.senders, changes.receivers}) + { + for (auto const& change : actors) + { + bool const high = change.line->at(sfLowLimit).getIssuer() == + issuer->at(sfAccount); + + if (!validateFrozenState( + change, high, tx, j, enforce, globalFreeze)) + { + return false; + } + } + } + return true; +} + +bool +TransfersNotFrozen::validateFrozenState( + BalanceChange const& change, + bool high, + STTx const& tx, + beast::Journal const& j, + bool enforce, + bool globalFreeze) +{ + bool const freeze = change.balanceChangeSign < 0 && + change.line->isFlag(high ? lsfLowFreeze : lsfHighFreeze); + bool const deepFreeze = + change.line->isFlag(high ? 
lsfLowDeepFreeze : lsfHighDeepFreeze); + bool const frozen = globalFreeze || deepFreeze || freeze; + + bool const isAMMLine = change.line->isFlag(lsfAMMNode); + + if (!frozen) + { + return true; + } + + // AMMClawbacks are allowed to override some freeze rules + if ((!isAMMLine || globalFreeze) && tx.getTxnType() == ttAMM_CLAWBACK) + { + JLOG(j.debug()) << "Invariant check allowing funds to be moved " + << (change.balanceChangeSign > 0 ? "to" : "from") + << " a frozen trustline for AMMClawback " + << tx.getTransactionID(); + return true; + } + + JLOG(j.fatal()) << "Invariant failed: Attempting to move frozen funds for " + << tx.getTransactionID(); + XRPL_ASSERT( + enforce, + "ripple::TransfersNotFrozen::validateFrozenState : enforce " + "invariant."); + + if (enforce) + { + return false; + } + + return true; +} + +//------------------------------------------------------------------------------ + void ValidNewAccountRoot::visitEntry( bool, diff --git a/src/xrpld/app/tx/detail/InvariantCheck.h b/src/xrpld/app/tx/detail/InvariantCheck.h index 19c4ef3e23f..cb06b0fb054 100644 --- a/src/xrpld/app/tx/detail/InvariantCheck.h +++ b/src/xrpld/app/tx/detail/InvariantCheck.h @@ -270,6 +270,114 @@ class NoXRPTrustLines beast::Journal const&); }; +/** + * @brief Invariant: Trust lines with deep freeze flag are not allowed if normal + * freeze flag is not set. + * + * We iterate all the trust lines created by this transaction and ensure + * that they don't have deep freeze flag set without normal freeze flag set. + */ +class NoDeepFreezeTrustLinesWithoutFreeze +{ + bool deepFreezeWithoutFreeze_ = false; + +public: + void + visitEntry( + bool, + std::shared_ptr const&, + std::shared_ptr const&); + + bool + finalize( + STTx const&, + TER const, + XRPAmount const, + ReadView const&, + beast::Journal const&); +}; + +/** + * @brief Invariant: frozen trust line balance change is not allowed. + * + * We iterate all affected trust lines and ensure that they don't have + * unexpected change of balance if they're frozen. + */ +class TransfersNotFrozen +{ + struct BalanceChange + { + std::shared_ptr const line; + int const balanceChangeSign; + }; + + struct IssuerChanges + { + std::vector senders; + std::vector receivers; + }; + + using ByIssuer = std::map; + ByIssuer balanceChanges_; + + std::map const> possibleIssuers_; + +public: + void + visitEntry( + bool, + std::shared_ptr const&, + std::shared_ptr const&); + + bool + finalize( + STTx const&, + TER const, + XRPAmount const, + ReadView const&, + beast::Journal const&); + +private: + bool + isValidEntry( + std::shared_ptr const& before, + std::shared_ptr const& after); + + STAmount + calculateBalanceChange( + std::shared_ptr const& before, + std::shared_ptr const& after, + bool isDelete); + + void + recordBalance(Issue const& issue, BalanceChange change); + + void + recordBalanceChanges( + std::shared_ptr const& after, + STAmount const& balanceChange); + + std::shared_ptr + findIssuer(AccountID const& issuerID, ReadView const& view); + + bool + validateIssuerChanges( + std::shared_ptr const& issuer, + IssuerChanges const& changes, + STTx const& tx, + beast::Journal const& j, + bool enforce); + + bool + validateFrozenState( + BalanceChange const& change, + bool high, + STTx const& tx, + beast::Journal const& j, + bool enforce, + bool globalFreeze); +}; + /** * @brief Invariant: offers should be for non-negative amounts and must not * be XRP to XRP. 
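The TransfersNotFrozen invariant declared above is a backstop: by the time it runs, the payment engine should already have refused to move frozen funds. A hedged jtx-style sketch of the kind of scenario it guards, patterned on the tests elsewhere in this patch (the accounts, amounts, and result code are illustrative assumptions, not part of the change):

    // gw issues USD to alice, then freezes and deep freezes her line.
    Env env(*this);
    Account const gw{"gw"}, alice{"alice"}, bob{"bob"};
    env.fund(XRP(10000), gw, alice, bob);
    env.trust(gw["USD"](1000), alice, bob);
    env(pay(gw, alice, gw["USD"](100)));
    env(trust(gw, alice["USD"](0), tfSetFreeze | tfSetDeepFreeze));
    env.close();
    // alice (a sender on a frozen line) and bob (a receiver) would both be
    // recorded under issuer gw in balanceChanges_, so finalize() would flag
    // this payment if it ever applied; the engine rejects it first.
    env(pay(alice, bob, gw["USD"](10)), ter(tecPATH_DRY));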
@@ -518,6 +626,8 @@ using InvariantChecks = std::tuple< XRPBalanceChecks, XRPNotCreated, NoXRPTrustLines, + NoDeepFreezeTrustLinesWithoutFreeze, + TransfersNotFrozen, NoBadOffers, NoZeroEscrow, ValidNewAccountRoot, diff --git a/src/xrpld/app/tx/detail/NFTokenAcceptOffer.cpp b/src/xrpld/app/tx/detail/NFTokenAcceptOffer.cpp index b884a791e78..9ae6616e382 100644 --- a/src/xrpld/app/tx/detail/NFTokenAcceptOffer.cpp +++ b/src/xrpld/app/tx/detail/NFTokenAcceptOffer.cpp @@ -268,6 +268,20 @@ NFTokenAcceptOffer::preclaim(PreclaimContext const& ctx) ctx.j) < needed) return tecINSUFFICIENT_FUNDS; } + + // Make sure that we are allowed to hold what the taker will pay us. + // This is a similar approach taken by usual offers. + if (!needed.native()) + { + auto const result = checkAcceptAsset( + ctx.view, + ctx.flags, + (*so)[sfOwner], + ctx.j, + needed.asset().get()); + if (result != tesSUCCESS) + return result; + } } // Fix a bug where the transfer of an NFToken with a transfer fee could @@ -510,4 +524,62 @@ NFTokenAcceptOffer::doApply() return tecINTERNAL; } +TER +NFTokenAcceptOffer::checkAcceptAsset( + ReadView const& view, + ApplyFlags const flags, + AccountID const id, + beast::Journal const j, + Issue const& issue) +{ + // Only valid for custom currencies + + if (!view.rules().enabled(featureDeepFreeze)) + { + return tesSUCCESS; + } + + XRPL_ASSERT( + !isXRP(issue.currency), + "NFTokenAcceptOffer::checkAcceptAsset : valid to check."); + auto const issuerAccount = view.read(keylet::account(issue.account)); + + if (!issuerAccount) + { + JLOG(j.debug()) + << "delay: can't receive IOUs from non-existent issuer: " + << to_string(issue.account); + + return tecNO_ISSUER; + } + + // An account can not create a trustline to itself, so no line can exist + // to be frozen. Additionally, an issuer can always accept its own + // issuance. + if (issue.account == id) + { + return tesSUCCESS; + } + + auto const trustLine = + view.read(keylet::line(id, issue.account, issue.currency)); + + if (!trustLine) + { + return tesSUCCESS; + } + + // There's no difference which side enacted deep freeze, accepting + // tokens shouldn't be possible. 
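+    // (Same guard as the one added to CreateOffer::checkAcceptAsset above:
+    // the offer owner must be allowed to hold what the taker will pay.)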
+ bool const deepFrozen = + (*trustLine)[sfFlags] & (lsfLowDeepFreeze | lsfHighDeepFreeze); + + if (deepFrozen) + { + return tecFROZEN; + } + + return tesSUCCESS; +} + } // namespace ripple diff --git a/src/xrpld/app/tx/detail/NFTokenAcceptOffer.h b/src/xrpld/app/tx/detail/NFTokenAcceptOffer.h index dff3febbb21..6a594e2b2c8 100644 --- a/src/xrpld/app/tx/detail/NFTokenAcceptOffer.h +++ b/src/xrpld/app/tx/detail/NFTokenAcceptOffer.h @@ -44,6 +44,14 @@ class NFTokenAcceptOffer : public Transactor AccountID const& seller, uint256 const& nfTokenID); + static TER + checkAcceptAsset( + ReadView const& view, + ApplyFlags const flags, + AccountID const id, + beast::Journal const j, + Issue const& issue); + public: static constexpr ConsequencesFactoryType ConsequencesFactory{Normal}; diff --git a/src/xrpld/app/tx/detail/OfferStream.cpp b/src/xrpld/app/tx/detail/OfferStream.cpp index ea18306234b..4e1cdd9b238 100644 --- a/src/xrpld/app/tx/detail/OfferStream.cpp +++ b/src/xrpld/app/tx/detail/OfferStream.cpp @@ -273,6 +273,20 @@ TOfferStreamBase::step() continue; } + bool const deepFrozen = isDeepFrozen( + view_, + offer_.owner(), + offer_.issueIn().currency, + offer_.issueIn().account); + if (deepFrozen) + { + JLOG(j_.trace()) + << "Removing deep frozen unfunded offer " << entry->key(); + permRmOffer(entry->key()); + offer_ = TOffer{}; + continue; + } + // Calculate owner funds ownerFunds_ = accountFundsHelper( view_, diff --git a/src/xrpld/app/tx/detail/SetTrust.cpp b/src/xrpld/app/tx/detail/SetTrust.cpp index 954fc6543f1..b1e0494ba46 100644 --- a/src/xrpld/app/tx/detail/SetTrust.cpp +++ b/src/xrpld/app/tx/detail/SetTrust.cpp @@ -26,6 +26,42 @@ #include #include +namespace { + +uint32_t +computeFreezeFlags( + uint32_t uFlags, + bool bHigh, + bool bNoFreeze, + bool bSetFreeze, + bool bClearFreeze, + bool bSetDeepFreeze, + bool bClearDeepFreeze) +{ + if (bSetFreeze && !bClearFreeze && !bNoFreeze) + { + uFlags |= (bHigh ? ripple::lsfHighFreeze : ripple::lsfLowFreeze); + } + else if (bClearFreeze && !bSetFreeze) + { + uFlags &= ~(bHigh ? ripple::lsfHighFreeze : ripple::lsfLowFreeze); + } + if (bSetDeepFreeze && !bClearDeepFreeze && !bNoFreeze) + { + uFlags |= + (bHigh ? ripple::lsfHighDeepFreeze : ripple::lsfLowDeepFreeze); + } + else if (bClearDeepFreeze && !bSetDeepFreeze) + { + uFlags &= + ~(bHigh ? ripple::lsfHighDeepFreeze : ripple::lsfLowDeepFreeze); + } + + return uFlags; +} + +} // namespace + namespace ripple { NotTEC @@ -45,6 +81,16 @@ SetTrust::preflight(PreflightContext const& ctx) return temINVALID_FLAG; } + if (!ctx.rules.enabled(featureDeepFreeze)) + { + // Even though the deep freeze flags are included in the + // `tfTrustSetMask`, they are not valid if the amendment is not enabled. + if (uTxFlags & (tfSetDeepFreeze | tfClearDeepFreeze)) + { + return temINVALID_FLAG; + } + } + STAmount const saLimitAmount(tx.getFieldAmount(sfLimitAmount)); if (!isLegalNet(saLimitAmount)) @@ -182,6 +228,58 @@ SetTrust::preclaim(PreclaimContext const& ctx) } } + // Checking all freeze/deep freeze flag invariants. 
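+    // In short: an issuer that set lsfNoFreeze can never (deep) freeze,
+    // setting and clearing freeze bits in one transaction is rejected, and
+    // the resulting line state may not be deep frozen on a side that is
+    // not also frozen.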
+ if (ctx.view.rules().enabled(featureDeepFreeze)) + { + bool const bNoFreeze = sle->isFlag(lsfNoFreeze); + bool const bSetFreeze = (uTxFlags & tfSetFreeze); + bool const bSetDeepFreeze = (uTxFlags & tfSetDeepFreeze); + + if (bNoFreeze && (bSetFreeze || bSetDeepFreeze)) + { + // Cannot freeze the trust line if NoFreeze is set + return tecNO_PERMISSION; + } + + bool const bClearFreeze = (uTxFlags & tfClearFreeze); + bool const bClearDeepFreeze = (uTxFlags & tfClearDeepFreeze); + if ((bSetFreeze || bSetDeepFreeze) && + (bClearFreeze || bClearDeepFreeze)) + { + // Freezing and unfreezing in the same transaction should be + // illegal + return tecNO_PERMISSION; + } + + bool const bHigh = id > uDstAccountID; + // Fetching current state of trust line + auto const sleRippleState = + ctx.view.read(keylet::line(id, uDstAccountID, currency)); + std::uint32_t uFlags = + sleRippleState ? sleRippleState->getFieldU32(sfFlags) : 0u; + // Computing expected trust line state + uFlags = computeFreezeFlags( + uFlags, + bHigh, + bNoFreeze, + bSetFreeze, + bClearFreeze, + bSetDeepFreeze, + bClearDeepFreeze); + + auto const frozen = uFlags & (bHigh ? lsfHighFreeze : lsfLowFreeze); + auto const deepFrozen = + uFlags & (bHigh ? lsfHighDeepFreeze : lsfLowDeepFreeze); + + // Trying to set deep freeze on not already frozen trust line must + // fail. This also checks that clearing normal freeze while deep + // frozen must not work + if (deepFrozen && !frozen) + { + return tecNO_PERMISSION; + } + } + return tesSUCCESS; } @@ -197,7 +295,7 @@ SetTrust::doApply() Currency const currency(saLimitAmount.getCurrency()); AccountID uDstAccountID(saLimitAmount.getIssuer()); - // true, iff current is high account. + // true, if current is high account. bool const bHigh = account_ > uDstAccountID; auto const sle = view().peek(keylet::account(account_)); @@ -242,13 +340,15 @@ SetTrust::doApply() bool const bClearNoRipple = (uTxFlags & tfClearNoRipple); bool const bSetFreeze = (uTxFlags & tfSetFreeze); bool const bClearFreeze = (uTxFlags & tfClearFreeze); + bool const bSetDeepFreeze = (uTxFlags & tfSetDeepFreeze); + bool const bClearDeepFreeze = (uTxFlags & tfClearDeepFreeze); auto viewJ = ctx_.app.journal("View"); - // Trust lines to self are impossible but because of the old bug there are - // two on 19-02-2022. This code was here to allow those trust lines to be - // deleted. The fixTrustLinesToSelf fix amendment will remove them when it - // enables so this code will no longer be needed. + // Trust lines to self are impossible but because of the old bug there + // are two on 19-02-2022. This code was here to allow those trust lines + // to be deleted. The fixTrustLinesToSelf fix amendment will remove them + // when it enables so this code will no longer be needed. if (!view().rules().enabled(fixTrustLinesToSelf) && account_ == uDstAccountID) { @@ -408,14 +508,16 @@ SetTrust::doApply() uFlagsOut &= ~(bHigh ? lsfHighNoRipple : lsfLowNoRipple); } - if (bSetFreeze && !bClearFreeze && !sle->isFlag(lsfNoFreeze)) - { - uFlagsOut |= (bHigh ? lsfHighFreeze : lsfLowFreeze); - } - else if (bClearFreeze && !bSetFreeze) - { - uFlagsOut &= ~(bHigh ? 
lsfHighFreeze : lsfLowFreeze); - } + // Have to use lsfNoFreeze to maintain pre-deep freeze behavior + bool const bNoFreeze = sle->isFlag(lsfNoFreeze); + uFlagsOut = computeFreezeFlags( + uFlagsOut, + bHigh, + bNoFreeze, + bSetFreeze, + bClearFreeze, + bSetDeepFreeze, + bClearDeepFreeze); if (QUALITY_ONE == uLowQualityOut) uLowQualityOut = 0; @@ -498,8 +600,8 @@ SetTrust::doApply() // Reserve is not scaled by load. else if (bReserveIncrease && mPriorBalance < reserveCreate) { - JLOG(j_.trace()) - << "Delay transaction: Insufficent reserve to add trust line."; + JLOG(j_.trace()) << "Delay transaction: Insufficent reserve to " + "add trust line."; // Another transaction could provide XRP to the account and then // this transaction would succeed. @@ -515,17 +617,18 @@ SetTrust::doApply() // Line does not exist. else if ( !saLimitAmount && // Setting default limit. - (!bQualityIn || !uQualityIn) && // Not setting quality in or setting - // default quality in. - (!bQualityOut || !uQualityOut) && // Not setting quality out or setting - // default quality out. + (!bQualityIn || !uQualityIn) && // Not setting quality in or + // setting default quality in. + (!bQualityOut || !uQualityOut) && // Not setting quality out or + // setting default quality out. (!bSetAuth)) { JLOG(j_.trace()) << "Redundant: Setting non-existent ripple line to defaults."; return tecNO_LINE_REDUNDANT; } - else if (mPriorBalance < reserveCreate) // Reserve is not scaled by load. + else if (mPriorBalance < reserveCreate) // Reserve is not scaled by + // load. { JLOG(j_.trace()) << "Delay transaction: Line does not exist. " "Insufficent reserve to create line."; @@ -555,6 +658,7 @@ SetTrust::doApply() bSetAuth, bSetNoRipple && !bClearNoRipple, bSetFreeze && !bClearFreeze, + bSetDeepFreeze, saBalance, saLimitAllow, // Limit for who is being charged. uQualityIn, diff --git a/src/xrpld/ledger/View.h b/src/xrpld/ledger/View.h index 74027752486..b964fc0ee76 100644 --- a/src/xrpld/ledger/View.h +++ b/src/xrpld/ledger/View.h @@ -153,6 +153,13 @@ isFrozen(ReadView const& view, AccountID const& account, Asset const& asset) asset.value()); } +[[nodiscard]] bool +isDeepFrozen( + ReadView const& view, + AccountID const& account, + Currency const& currency, + AccountID const& issuer); + // Returns the amount an account can spend without going into debt. // // <-- saAmount: amount of currency held by account. May be negative. @@ -438,6 +445,7 @@ trustCreate( const bool bAuth, // --> authorize account. const bool bNoRipple, // --> others cannot ripple through const bool bFreeze, // --> funds cannot leave + bool bDeepFreeze, // --> can neither receive nor send funds STAmount const& saBalance, // --> balance of account being set. // Issuer should be noAccount() STAmount const& saLimit, // --> limit for account being set. 
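For reference, a hedged jtx-style sketch (not part of the patch) of TrustSet combinations that the preclaim rules added in SetTrust.cpp above reject; it assumes `gw` and `alice` already share a USD trust line that is not yet frozen:

    // Deep freeze cannot be set on a line that is not (also being) frozen.
    env(trust(gw, alice["USD"](0), tfSetDeepFreeze), ter(tecNO_PERMISSION));
    // Setting and clearing freeze bits in one transaction is rejected.
    env(trust(gw, alice["USD"](0), tfSetFreeze | tfClearDeepFreeze),
        ter(tecNO_PERMISSION));
    // Once deep frozen, normal freeze cannot be cleared on its own.
    env(trust(gw, alice["USD"](0), tfSetFreeze | tfSetDeepFreeze));
    env(trust(gw, alice["USD"](0), tfClearFreeze), ter(tecNO_PERMISSION));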
diff --git a/src/xrpld/ledger/detail/View.cpp b/src/xrpld/ledger/detail/View.cpp index ebf307f1535..1422a50a3ad 100644 --- a/src/xrpld/ledger/detail/View.cpp +++ b/src/xrpld/ledger/detail/View.cpp @@ -267,6 +267,32 @@ isFrozen( isIndividualFrozen(view, account, mptIssue); } +bool +isDeepFrozen( + ReadView const& view, + AccountID const& account, + Currency const& currency, + AccountID const& issuer) +{ + if (isXRP(currency)) + { + return false; + } + + if (issuer == account) + { + return false; + } + + auto const sle = view.read(keylet::line(account, issuer, currency)); + if (!sle) + { + return false; + } + + return sle->isFlag(lsfHighDeepFreeze) || sle->isFlag(lsfLowDeepFreeze); +} + STAmount accountHolds( ReadView const& view, @@ -284,17 +310,25 @@ accountHolds( // IOU: Return balance on trust line modulo freeze auto const sle = view.read(keylet::line(account, issuer, currency)); - if (!sle) - { - amount.clear(Issue{currency, issuer}); - } - else if ( - (zeroIfFrozen == fhZERO_IF_FROZEN) && - isFrozen(view, account, currency, issuer)) - { - amount.clear(Issue{currency, issuer}); - } - else + auto const allowBalance = [&]() { + if (!sle) + { + return false; + } + + if (zeroIfFrozen == fhZERO_IF_FROZEN) + { + if (isFrozen(view, account, currency, issuer) || + isDeepFrozen(view, account, currency, issuer)) + { + return false; + } + } + + return true; + }(); + + if (allowBalance) { amount = sle->getFieldAmount(sfBalance); if (account > issuer) @@ -304,6 +338,11 @@ accountHolds( } amount.setIssuer(issuer); } + else + { + amount.clear(Issue{currency, issuer}); + } + JLOG(j.trace()) << "accountHolds:" << " account=" << to_string(account) << " amount=" << amount.getFullText(); @@ -863,6 +902,7 @@ trustCreate( const bool bAuth, // --> authorize account. const bool bNoRipple, // --> others cannot ripple through const bool bFreeze, // --> funds cannot leave + bool bDeepFreeze, // --> can neither receive nor send funds STAmount const& saBalance, // --> balance of account being set. // Issuer should be noAccount() STAmount const& saLimit, // --> limit for account being set. @@ -944,7 +984,11 @@ trustCreate( } if (bFreeze) { - uFlags |= (!bSetHigh ? lsfLowFreeze : lsfHighFreeze); + uFlags |= (bSetHigh ? lsfHighFreeze : lsfLowFreeze); + } + if (bDeepFreeze) + { + uFlags |= (bSetHigh ? lsfHighDeepFreeze : lsfLowDeepFreeze); } if ((slePeer->getFlags() & lsfDefaultRipple) == 0) @@ -1189,6 +1233,7 @@ rippleCreditIOU( false, noRipple, false, + false, saBalance, saReceiverLimit, 0, @@ -1688,6 +1733,7 @@ issueIOU( false, noRipple, false, + false, final_balance, limit, 0, diff --git a/src/xrpld/rpc/handlers/AccountLines.cpp b/src/xrpld/rpc/handlers/AccountLines.cpp index e2e6ce19ded..5170342eb99 100644 --- a/src/xrpld/rpc/handlers/AccountLines.cpp +++ b/src/xrpld/rpc/handlers/AccountLines.cpp @@ -62,6 +62,10 @@ addLine(Json::Value& jsonLines, RPCTrustLine const& line) jPeer[jss::freeze] = true; if (line.getFreezePeer()) jPeer[jss::freeze_peer] = true; + if (line.getDeepFreeze()) + jPeer[jss::deep_freeze] = true; + if (line.getDeepFreezePeer()) + jPeer[jss::deep_freeze_peer] = true; } // { From f6d63082c0037e0198be5bed17cd24648917b36f Mon Sep 17 00:00:00 2001 From: Ed Hennis Date: Wed, 5 Feb 2025 11:36:43 -0500 Subject: [PATCH 02/29] Improve git commit hash lookup (#5225) - Also get the branch name. - Use rev-parse instead of describe to get a clean hash. - Return the git hash and branch name in server_info for admin connections. - Include git hash and branch name on separate lines in --version. 
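A hedged usage sketch (jtx-style, mirroring the test added below) of reading the new fields on an admin connection; the `git` object only appears when the build defined the corresponding macros:

    auto const info = env.rpc("server_info")[jss::result][jss::info];
    if (info.isMember(jss::git))  // admin connections only, and only when
                                  // git information was available at build
    {
        auto const& git = info[jss::git];
        // Either field may be absent on its own; `hash` is the full
        // 40-character id from `git rev-parse HEAD`, `branch` comes from
        // `git rev-parse --abbrev-ref HEAD`.
        std::string const hash =
            git.isMember(jss::hash) ? git[jss::hash].asString() : "";
        std::string const branch =
            git.isMember(jss::branch) ? git[jss::branch].asString() : "";
    }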
--- CMakeLists.txt | 10 +++++++++- include/xrpl/protocol/jss.h | 2 ++ src/test/rpc/ServerInfo_test.cpp | 31 ++++++++++++++++++++++++++----- src/xrpld/app/main/Main.cpp | 6 ++++++ src/xrpld/app/misc/NetworkOPs.cpp | 12 ++++++++++++ 5 files changed, 55 insertions(+), 6 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 49ecd192b7b..03dba51d0c5 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -19,13 +19,21 @@ set(CMAKE_CXX_STANDARD_REQUIRED ON) # make GIT_COMMIT_HASH define available to all sources find_package(Git) if(Git_FOUND) - execute_process(COMMAND ${GIT_EXECUTABLE} --git-dir=${CMAKE_CURRENT_SOURCE_DIR}/.git describe --always --abbrev=40 + execute_process(COMMAND ${GIT_EXECUTABLE} --git-dir=${CMAKE_CURRENT_SOURCE_DIR}/.git rev-parse HEAD OUTPUT_STRIP_TRAILING_WHITESPACE OUTPUT_VARIABLE gch) if(gch) set(GIT_COMMIT_HASH "${gch}") message(STATUS gch: ${GIT_COMMIT_HASH}) add_definitions(-DGIT_COMMIT_HASH="${GIT_COMMIT_HASH}") endif() + + execute_process(COMMAND ${GIT_EXECUTABLE} --git-dir=${CMAKE_CURRENT_SOURCE_DIR}/.git rev-parse --abbrev-ref HEAD + OUTPUT_STRIP_TRAILING_WHITESPACE OUTPUT_VARIABLE gb) + if(gb) + set(GIT_BRANCH "${gb}") + message(STATUS gb: ${GIT_BRANCH}) + add_definitions(-DGIT_BRANCH="${GIT_BRANCH}") + endif() endif() #git if(thread_safety_analysis) diff --git a/include/xrpl/protocol/jss.h b/include/xrpl/protocol/jss.h index 4db8e0e32d6..483b69a962f 100644 --- a/include/xrpl/protocol/jss.h +++ b/include/xrpl/protocol/jss.h @@ -167,6 +167,7 @@ JSS(blobs_v2); // out: ValidatorList JSS(books); // in: Subscribe, Unsubscribe JSS(both); // in: Subscribe, Unsubscribe JSS(both_sides); // in: Subscribe, Unsubscribe +JSS(branch); // out: server_info JSS(broadcast); // out: SubmitTransaction JSS(bridge_account); // in: LedgerEntry JSS(build_path); // in: TransactionSign @@ -290,6 +291,7 @@ JSS(frozen_balances); // out: GatewayBalances JSS(full); // in: LedgerClearer, handlers/Ledger JSS(full_reply); // out: PathFind JSS(fullbelow_size); // out: GetCounts +JSS(git); // out: server_info JSS(good); // out: RPCVersion JSS(hash); // out: NetworkOPs, InboundLedger, // LedgerToJson, STTx; field diff --git a/src/test/rpc/ServerInfo_test.cpp b/src/test/rpc/ServerInfo_test.cpp index 2f0cdee77ef..5e202f275ae 100644 --- a/src/test/rpc/ServerInfo_test.cpp +++ b/src/test/rpc/ServerInfo_test.cpp @@ -86,21 +86,42 @@ admin = 127.0.0.1 { Env env(*this); - auto const result = env.rpc("server_info"); - BEAST_EXPECT(!result[jss::result].isMember(jss::error)); - BEAST_EXPECT(result[jss::result][jss::status] == "success"); - BEAST_EXPECT(result[jss::result].isMember(jss::info)); + auto const serverinfo = env.rpc("server_info"); + BEAST_EXPECT(serverinfo.isMember(jss::result)); + auto const& result = serverinfo[jss::result]; + BEAST_EXPECT(!result.isMember(jss::error)); + BEAST_EXPECT(result[jss::status] == "success"); + BEAST_EXPECT(result.isMember(jss::info)); + auto const& info = result[jss::info]; + BEAST_EXPECT(info.isMember(jss::build_version)); + // Git info is not guaranteed to be present + if (info.isMember(jss::git)) + { + auto const& git = info[jss::git]; + BEAST_EXPECT( + git.isMember(jss::hash) || git.isMember(jss::branch)); + BEAST_EXPECT( + !git.isMember(jss::hash) || + (git[jss::hash].isString() && + git[jss::hash].asString().size() == 40)); + BEAST_EXPECT( + !git.isMember(jss::branch) || + (git[jss::branch].isString() && + git[jss::branch].asString().size() != 0)); + } } { Env env(*this); // Call NetworkOPs directly and set the admin flag to false. 
- // Expect that the admin ports are not included in the result. auto const result = env.app().getOPs().getServerInfo(true, false, 0); + // Expect that the admin ports are not included in the result. auto const& ports = result[jss::ports]; BEAST_EXPECT(ports.isArray() && ports.size() == 0); + // Expect that git info is absent + BEAST_EXPECT(!result.isMember(jss::git)); } { diff --git a/src/xrpld/app/main/Main.cpp b/src/xrpld/app/main/Main.cpp index c945cfa85ea..533cda75b55 100644 --- a/src/xrpld/app/main/Main.cpp +++ b/src/xrpld/app/main/Main.cpp @@ -521,6 +521,12 @@ run(int argc, char** argv) { std::cout << "rippled version " << BuildInfo::getVersionString() << std::endl; +#ifdef GIT_COMMIT_HASH + std::cout << "Git commit hash: " << GIT_COMMIT_HASH << std::endl; +#endif +#ifdef GIT_BRANCH + std::cout << "Git build branch: " << GIT_BRANCH << std::endl; +#endif return 0; } diff --git a/src/xrpld/app/misc/NetworkOPs.cpp b/src/xrpld/app/misc/NetworkOPs.cpp index cd653120c7b..996a1fdf748 100644 --- a/src/xrpld/app/misc/NetworkOPs.cpp +++ b/src/xrpld/app/misc/NetworkOPs.cpp @@ -2493,6 +2493,18 @@ NetworkOPsImp::getServerInfo(bool human, bool admin, bool counters) x[jss::expiration] = "unknown"; } } + +#if defined(GIT_COMMIT_HASH) || defined(GIT_BRANCH) + { + auto& x = (info[jss::git] = Json::objectValue); +#ifdef GIT_COMMIT_HASH + x[jss::hash] = GIT_COMMIT_HASH; +#endif +#ifdef GIT_BRANCH + x[jss::branch] = GIT_BRANCH; +#endif + } +#endif } info[jss::io_latency_ms] = static_cast(app_.getIOLatency().count()); From fb3713bc2507d577aebe56fbc38872346811d76d Mon Sep 17 00:00:00 2001 From: Shawn Xie <35279399+shawnxie999@users.noreply.github.com> Date: Wed, 5 Feb 2025 10:05:24 -0800 Subject: [PATCH 03/29] Amendment `fixFrozenLPTokenTransfer` (#5227) Prohibits LPToken holders from sending LPToken to others if they have been frozen by one of the assets in AMM pool. --- .github/actions/dependencies/action.yml | 1 + include/xrpl/protocol/Feature.h | 2 +- include/xrpl/protocol/detail/features.macro | 1 + src/test/app/LPTokenTransfer_test.cpp | 486 ++++++++++++++++++++ src/xrpld/app/misc/detail/AMMUtils.cpp | 46 +- src/xrpld/app/paths/detail/DirectStep.cpp | 3 +- src/xrpld/app/paths/detail/StepChecks.h | 21 + src/xrpld/ledger/View.h | 7 + src/xrpld/ledger/detail/View.cpp | 40 +- 9 files changed, 597 insertions(+), 10 deletions(-) create mode 100644 src/test/app/LPTokenTransfer_test.cpp diff --git a/.github/actions/dependencies/action.yml b/.github/actions/dependencies/action.yml index 50e2999018a..d3c67e86685 100644 --- a/.github/actions/dependencies/action.yml +++ b/.github/actions/dependencies/action.yml @@ -16,6 +16,7 @@ runs: conan export external/snappy snappy/1.1.10@ conan export external/rocksdb rocksdb/6.29.5@ conan export external/soci soci/4.0.3@ + conan export external/nudb nudb/2.0.8@ - name: add Ripple Conan remote shell: bash run: | diff --git a/include/xrpl/protocol/Feature.h b/include/xrpl/protocol/Feature.h index bff3e57597f..c52f312cbfa 100644 --- a/include/xrpl/protocol/Feature.h +++ b/include/xrpl/protocol/Feature.h @@ -80,7 +80,7 @@ namespace detail { // Feature.cpp. Because it's only used to reserve storage, and determine how // large to make the FeatureBitset, it MAY be larger. It MUST NOT be less than // the actual number of amendments. A LogicError on startup will verify this. -static constexpr std::size_t numFeatures = 86; +static constexpr std::size_t numFeatures = 87; /** Amendments that this server supports and the default voting behavior. 
Whether they are enabled depends on the Rules defined in the validated diff --git a/include/xrpl/protocol/detail/features.macro b/include/xrpl/protocol/detail/features.macro index 322670c5170..f82a05a7c17 100644 --- a/include/xrpl/protocol/detail/features.macro +++ b/include/xrpl/protocol/detail/features.macro @@ -29,6 +29,7 @@ // If you add an amendment here, then do not forget to increment `numFeatures` // in include/xrpl/protocol/Feature.h. +XRPL_FIX (FrozenLPTokenTransfer, Supported::yes, VoteBehavior::DefaultNo) XRPL_FEATURE(DeepFreeze, Supported::yes, VoteBehavior::DefaultNo) XRPL_FEATURE(PermissionedDomains, Supported::no, VoteBehavior::DefaultNo) XRPL_FEATURE(DynamicNFT, Supported::yes, VoteBehavior::DefaultNo) diff --git a/src/test/app/LPTokenTransfer_test.cpp b/src/test/app/LPTokenTransfer_test.cpp new file mode 100644 index 00000000000..96e621dccfe --- /dev/null +++ b/src/test/app/LPTokenTransfer_test.cpp @@ -0,0 +1,486 @@ +//------------------------------------------------------------------------------ +/* + This file is part of rippled: https://github.com/ripple/rippled + Copyright (c) 2024 Ripple Labs Inc. + + Permission to use, copy, modify, and/or distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+*/ +//============================================================================== + +#include +#include +#include + +namespace ripple { +namespace test { + +class LPTokenTransfer_test : public jtx::AMMTest +{ + void + testDirectStep(FeatureBitset features) + { + testcase("DirectStep"); + + using namespace jtx; + Env env{*this, features}; + fund(env, gw, {alice}, {USD(20'000), BTC(0.5)}, Fund::All); + env.close(); + + AMM ammAlice(env, alice, USD(20'000), BTC(0.5)); + BEAST_EXPECT( + ammAlice.expectBalances(USD(20'000), BTC(0.5), IOUAmount{100, 0})); + + fund(env, gw, {carol}, {USD(4'000), BTC(1)}, Fund::Acct); + ammAlice.deposit(carol, 10); + BEAST_EXPECT( + ammAlice.expectBalances(USD(22'000), BTC(0.55), IOUAmount{110, 0})); + + fund(env, gw, {bob}, {USD(4'000), BTC(1)}, Fund::Acct); + ammAlice.deposit(bob, 10); + BEAST_EXPECT( + ammAlice.expectBalances(USD(24'000), BTC(0.60), IOUAmount{120, 0})); + + auto const lpIssue = ammAlice.lptIssue(); + env.trust(STAmount{lpIssue, 500}, alice); + env.trust(STAmount{lpIssue, 500}, bob); + env.trust(STAmount{lpIssue, 500}, carol); + env.close(); + + // gateway freezes carol's USD + env(trust(gw, carol["USD"](0), tfSetFreeze)); + env.close(); + + // bob can still send lptoken to carol even tho carol's USD is + // frozen, regardless of whether fixFrozenLPTokenTransfer is enabled or + // not + // Note: Deep freeze is not considered for LPToken transfer + env(pay(bob, carol, STAmount{lpIssue, 5})); + env.close(); + + // cannot transfer to an amm account + env(pay(carol, lpIssue.getIssuer(), STAmount{lpIssue, 5}), + ter(tecNO_PERMISSION)); + env.close(); + + if (features[fixFrozenLPTokenTransfer]) + { + // carol is frozen on USD and therefore can't send lptoken to bob + env(pay(carol, bob, STAmount{lpIssue, 5}), ter(tecPATH_DRY)); + } + else + { + // carol can still send lptoken with frozen USD + env(pay(carol, bob, STAmount{lpIssue, 5})); + } + } + + void + testBookStep(FeatureBitset features) + { + testcase("BookStep"); + + using namespace jtx; + Env env{*this, features}; + + fund( + env, + gw, + {alice, bob, carol}, + {USD(10'000), EUR(10'000)}, + Fund::All); + AMM ammAlice(env, alice, USD(10'000), EUR(10'000)); + ammAlice.deposit(carol, 1'000); + ammAlice.deposit(bob, 1'000); + + auto const lpIssue = ammAlice.lptIssue(); + + // carols creates an offer to sell lptoken + env(offer(carol, XRP(10), STAmount{lpIssue, 10}), txflags(tfPassive)); + env.close(); + BEAST_EXPECT(expectOffers(env, carol, 1)); + + env.trust(STAmount{lpIssue, 1'000'000'000}, alice); + env.trust(STAmount{lpIssue, 1'000'000'000}, bob); + env.trust(STAmount{lpIssue, 1'000'000'000}, carol); + env.close(); + + // gateway freezes carol's USD + env(trust(gw, carol["USD"](0), tfSetFreeze)); + env.close(); + + // exercises alice's ability to consume carol's offer to sell lptoken + // when carol's USD is frozen pre/post fixFrozenLPTokenTransfer + // amendment + if (features[fixFrozenLPTokenTransfer]) + { + // with fixFrozenLPTokenTransfer, alice fails to consume carol's + // offer since carol's USD is frozen + env(pay(alice, bob, STAmount{lpIssue, 10}), + txflags(tfPartialPayment), + sendmax(XRP(10)), + ter(tecPATH_DRY)); + env.close(); + BEAST_EXPECT(expectOffers(env, carol, 1)); + + // gateway unfreezes carol's USD + env(trust(gw, carol["USD"](1'000'000'000), tfClearFreeze)); + env.close(); + + // alice successfully consumes carol's offer + env(pay(alice, bob, STAmount{lpIssue, 10}), + txflags(tfPartialPayment), + sendmax(XRP(10))); + env.close(); + BEAST_EXPECT(expectOffers(env, 
carol, 0)); + } + else + { + // without fixFrozenLPTokenTransfer, alice can consume carol's offer + // even when carol's USD is frozen + env(pay(alice, bob, STAmount{lpIssue, 10}), + txflags(tfPartialPayment), + sendmax(XRP(10))); + env.close(); + BEAST_EXPECT(expectOffers(env, carol, 0)); + } + + // make sure carol's USD is not frozen + env(trust(gw, carol["USD"](1'000'000'000), tfClearFreeze)); + env.close(); + + // ensure that carol's offer to buy lptoken can be consumed by alice + // even when carol's USD is frozen + { + // carol creates an offer to buy lptoken + env(offer(carol, STAmount{lpIssue, 10}, XRP(10)), + txflags(tfPassive)); + env.close(); + BEAST_EXPECT(expectOffers(env, carol, 1)); + + // gateway freezes carol's USD + env(trust(gw, carol["USD"](0), tfSetFreeze)); + env.close(); + + // alice successfully consumes carol's offer + env(pay(alice, bob, XRP(10)), + txflags(tfPartialPayment), + sendmax(STAmount{lpIssue, 10})); + env.close(); + BEAST_EXPECT(expectOffers(env, carol, 0)); + } + } + + void + testOfferCreation(FeatureBitset features) + { + testcase("Create offer"); + + using namespace jtx; + Env env{*this, features}; + + fund( + env, + gw, + {alice, bob, carol}, + {USD(10'000), EUR(10'000)}, + Fund::All); + AMM ammAlice(env, alice, USD(10'000), EUR(10'000)); + ammAlice.deposit(carol, 1'000); + ammAlice.deposit(bob, 1'000); + + auto const lpIssue = ammAlice.lptIssue(); + + // gateway freezes carol's USD + env(trust(gw, carol["USD"](0), tfSetFreeze)); + env.close(); + + // exercises carol's ability to create a new offer to sell lptoken with + // frozen USD, before and after fixFrozenLPTokenTransfer + if (features[fixFrozenLPTokenTransfer]) + { + // with fixFrozenLPTokenTransfer, carol can't create an offer to + // sell lptoken when one of the assets is frozen + + // carol can't create an offer to sell lptoken + env(offer(carol, XRP(10), STAmount{lpIssue, 10}), + txflags(tfPassive), + ter(tecUNFUNDED_OFFER)); + env.close(); + BEAST_EXPECT(expectOffers(env, carol, 0)); + + // gateway unfreezes carol's USD + env(trust(gw, carol["USD"](1'000'000'000), tfClearFreeze)); + env.close(); + + // carol can create an offer to sell lptoken after USD is unfrozen + env(offer(carol, XRP(10), STAmount{lpIssue, 10}), + txflags(tfPassive)); + env.close(); + BEAST_EXPECT(expectOffers(env, carol, 1)); + } + else + { + // without fixFrozenLPTokenTransfer, carol can create an offer + env(offer(carol, XRP(10), STAmount{lpIssue, 10}), + txflags(tfPassive)); + env.close(); + BEAST_EXPECT(expectOffers(env, carol, 1)); + } + + // gateway freezes carol's USD + env(trust(gw, carol["USD"](0), tfSetFreeze)); + env.close(); + + // carol can create offer to buy lptoken even if USD is frozen + env(offer(carol, STAmount{lpIssue, 10}, XRP(5)), txflags(tfPassive)); + env.close(); + BEAST_EXPECT(expectOffers(env, carol, 2)); + } + + void + testOfferCrossing(FeatureBitset features) + { + testcase("Offer crossing"); + + using namespace jtx; + Env env{*this, features}; + + // Offer crossing with two AMM LPTokens. 
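+        // carol deposits into both pools, so she holds two different
+        // LPTokens; freezing her USD later makes token1 (the XRP/USD
+        // pool's token) the one she can no longer give up once the fix
+        // is active.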
+ fund(env, gw, {alice, carol}, {USD(10'000)}, Fund::All); + AMM ammAlice1(env, alice, XRP(10'000), USD(10'000)); + ammAlice1.deposit(carol, 10'000'000); + + fund(env, gw, {alice, carol}, {EUR(10'000)}, Fund::IOUOnly); + AMM ammAlice2(env, alice, XRP(10'000), EUR(10'000)); + ammAlice2.deposit(carol, 10'000'000); + auto const token1 = ammAlice1.lptIssue(); + auto const token2 = ammAlice2.lptIssue(); + + // carol creates offer + env(offer(carol, STAmount{token2, 100}, STAmount{token1, 100})); + env.close(); + BEAST_EXPECT(expectOffers(env, carol, 1)); + + // gateway freezes carol's USD, carol's token1 should be frozen as well + env(trust(gw, carol["USD"](0), tfSetFreeze)); + env.close(); + + // alice creates an offer which exhibits different behavior on offer + // crossing depending on if fixFrozenLPTokenTransfer is enabled + env(offer(alice, STAmount{token1, 100}, STAmount{token2, 100})); + env.close(); + + // exercises carol's offer's ability to cross with alice's offer when + // carol's USD is frozen, before and after fixFrozenLPTokenTransfer + if (features[fixFrozenLPTokenTransfer]) + { + // with fixFrozenLPTokenTransfer enabled, alice's offer can no + // longer cross with carol's offer + BEAST_EXPECT( + expectLine(env, alice, STAmount{token1, 10'000'000}) && + expectLine(env, alice, STAmount{token2, 10'000'000})); + BEAST_EXPECT( + expectLine(env, carol, STAmount{token2, 10'000'000}) && + expectLine(env, carol, STAmount{token1, 10'000'000})); + BEAST_EXPECT( + expectOffers(env, alice, 1) && expectOffers(env, carol, 0)); + } + else + { + // alice's offer still crosses with carol's offer despite carol's + // token1 is frozen + BEAST_EXPECT( + expectLine(env, alice, STAmount{token1, 10'000'100}) && + expectLine(env, alice, STAmount{token2, 9'999'900})); + BEAST_EXPECT( + expectLine(env, carol, STAmount{token2, 10'000'100}) && + expectLine(env, carol, STAmount{token1, 9'999'900})); + BEAST_EXPECT( + expectOffers(env, alice, 0) && expectOffers(env, carol, 0)); + } + } + + void + testCheck(FeatureBitset features) + { + testcase("Check"); + + using namespace jtx; + Env env{*this, features}; + + fund( + env, + gw, + {alice, bob, carol}, + {USD(10'000), EUR(10'000)}, + Fund::All); + AMM ammAlice(env, alice, USD(10'000), EUR(10'000)); + ammAlice.deposit(carol, 1'000); + ammAlice.deposit(bob, 1'000); + + auto const lpIssue = ammAlice.lptIssue(); + + // gateway freezes carol's USD + env(trust(gw, carol["USD"](0), tfSetFreeze)); + env.close(); + + // carol can always create a check with lptoken that has frozen + // token + uint256 const carolChkId{keylet::check(carol, env.seq(carol)).key}; + env(check::create(carol, bob, STAmount{lpIssue, 10})); + env.close(); + + // with fixFrozenLPTokenTransfer enabled, bob fails to cash the check + if (features[fixFrozenLPTokenTransfer]) + env(check::cash(bob, carolChkId, STAmount{lpIssue, 10}), + ter(tecPATH_PARTIAL)); + else + env(check::cash(bob, carolChkId, STAmount{lpIssue, 10})); + + env.close(); + + // bob creates a check + uint256 const bobChkId{keylet::check(bob, env.seq(bob)).key}; + env(check::create(bob, carol, STAmount{lpIssue, 10})); + env.close(); + + // carol cashes the bob's check. 
Even though carol is frozen, she can + // still receive LPToken + env(check::cash(carol, bobChkId, STAmount{lpIssue, 10})); + env.close(); + } + + void + testNFTOffers(FeatureBitset features) + { + testcase("NFT Offers"); + using namespace test::jtx; + + Env env{*this, features}; + + // Setup AMM + fund( + env, + gw, + {alice, bob, carol}, + {USD(10'000), EUR(10'000)}, + Fund::All); + AMM ammAlice(env, alice, USD(10'000), EUR(10'000)); + ammAlice.deposit(carol, 1'000); + ammAlice.deposit(bob, 1'000); + + auto const lpIssue = ammAlice.lptIssue(); + + // bob mints a nft + uint256 const nftID{token::getNextID(env, bob, 0u, tfTransferable)}; + env(token::mint(bob, 0), txflags(tfTransferable)); + env.close(); + + // bob creates a sell offer for lptoken + uint256 const sellOfferIndex = keylet::nftoffer(bob, env.seq(bob)).key; + env(token::createOffer(bob, nftID, STAmount{lpIssue, 10}), + txflags(tfSellNFToken)); + env.close(); + + // gateway freezes carol's USD + env(trust(gw, carol["USD"](0), tfSetFreeze)); + env.close(); + + // exercises one's ability to transfer NFT using lptoken when one of the + // assets is frozen + if (features[fixFrozenLPTokenTransfer]) + { + // with fixFrozenLPTokenTransfer, freezing USD will prevent buy/sell + // offers with lptokens from being created/accepted + + // carol fails to accept bob's offer with lptoken because carol's + // USD is frozen + env(token::acceptSellOffer(carol, sellOfferIndex), + ter(tecINSUFFICIENT_FUNDS)); + env.close(); + + // gateway unfreezes carol's USD + env(trust(gw, carol["USD"](1'000'000), tfClearFreeze)); + env.close(); + + // carol can now accept the offer and own the nft + env(token::acceptSellOffer(carol, sellOfferIndex)); + env.close(); + + // gateway freezes bobs's USD + env(trust(gw, bob["USD"](0), tfSetFreeze)); + env.close(); + + // bob fails to create a buy offer with lptoken for carol's nft + // since bob's USD is frozen + env(token::createOffer(bob, nftID, STAmount{lpIssue, 10}), + token::owner(carol), + ter(tecUNFUNDED_OFFER)); + env.close(); + + // gateway unfreezes bob's USD + env(trust(gw, bob["USD"](1'000'000), tfClearFreeze)); + env.close(); + + // bob can now create a buy offer + env(token::createOffer(bob, nftID, STAmount{lpIssue, 10}), + token::owner(carol)); + env.close(); + } + else + { + // without fixFrozenLPTokenTransfer, freezing USD will still allow + // buy/sell offers to be created/accepted with lptoken + + // carol can still accept bob's offer despite carol's USD is frozen + env(token::acceptSellOffer(carol, sellOfferIndex)); + env.close(); + + // gateway freezes bob's USD + env(trust(gw, bob["USD"](0), tfSetFreeze)); + env.close(); + + // bob creates a buy offer with lptoken despite bob's USD is frozen + uint256 const buyOfferIndex = + keylet::nftoffer(bob, env.seq(bob)).key; + env(token::createOffer(bob, nftID, STAmount{lpIssue, 10}), + token::owner(carol)); + env.close(); + + // carol accepts bob's offer + env(token::acceptBuyOffer(carol, buyOfferIndex)); + env.close(); + } + } + +public: + void + run() override + { + FeatureBitset const all{jtx::supported_amendments()}; + + for (auto const features : {all, all - fixFrozenLPTokenTransfer}) + { + testDirectStep(features); + testBookStep(features); + testOfferCreation(features); + testOfferCrossing(features); + testCheck(features); + testNFTOffers(features); + } + } +}; + +BEAST_DEFINE_TESTSUITE(LPTokenTransfer, app, ripple); +} // namespace test +} // namespace ripple diff --git a/src/xrpld/app/misc/detail/AMMUtils.cpp 
b/src/xrpld/app/misc/detail/AMMUtils.cpp index f5f6ae6612c..0b83afc6d39 100644 --- a/src/xrpld/app/misc/detail/AMMUtils.cpp +++ b/src/xrpld/app/misc/detail/AMMUtils.cpp @@ -116,13 +116,45 @@ ammLPHolds( AccountID const& lpAccount, beast::Journal const j) { - return accountHolds( - view, - lpAccount, - ammLPTCurrency(cur1, cur2), - ammAccount, - FreezeHandling::fhZERO_IF_FROZEN, - j); + // This function looks similar to `accountHolds`. However, it only checks if + // a LPToken holder has enough balance. On the other hand, `accountHolds` + // checks if the underlying assets of LPToken are frozen with the + // fixFrozenLPTokenTransfer amendment + + auto const currency = ammLPTCurrency(cur1, cur2); + STAmount amount; + + auto const sle = view.read(keylet::line(lpAccount, ammAccount, currency)); + if (!sle) + { + amount.clear(Issue{currency, ammAccount}); + JLOG(j.trace()) << "ammLPHolds: no SLE " + << " lpAccount=" << to_string(lpAccount) + << " amount=" << amount.getFullText(); + } + else if (isFrozen(view, lpAccount, currency, ammAccount)) + { + amount.clear(Issue{currency, ammAccount}); + JLOG(j.trace()) << "ammLPHolds: frozen currency " + << " lpAccount=" << to_string(lpAccount) + << " amount=" << amount.getFullText(); + } + else + { + amount = sle->getFieldAmount(sfBalance); + if (lpAccount > ammAccount) + { + // Put balance in account terms. + amount.negate(); + } + amount.setIssuer(ammAccount); + + JLOG(j.trace()) << "ammLPHolds:" + << " lpAccount=" << to_string(lpAccount) + << " amount=" << amount.getFullText(); + } + + return view.balanceHook(lpAccount, ammAccount, amount); } STAmount diff --git a/src/xrpld/app/paths/detail/DirectStep.cpp b/src/xrpld/app/paths/detail/DirectStep.cpp index ffd500009e7..46aa129ac71 100644 --- a/src/xrpld/app/paths/detail/DirectStep.cpp +++ b/src/xrpld/app/paths/detail/DirectStep.cpp @@ -204,7 +204,8 @@ class DirectStepI : public StepImp> logStringImpl(char const* name) const { std::ostringstream ostr; - ostr << name << ": " << "\nSrc: " << src_ << "\nDst: " << dst_; + ostr << name << ": " + << "\nSrc: " << src_ << "\nDst: " << dst_; return ostr.str(); } diff --git a/src/xrpld/app/paths/detail/StepChecks.h b/src/xrpld/app/paths/detail/StepChecks.h index 9cbc3ef0f99..d4fda2bfe62 100644 --- a/src/xrpld/app/paths/detail/StepChecks.h +++ b/src/xrpld/app/paths/detail/StepChecks.h @@ -21,6 +21,7 @@ #define RIPPLE_APP_PATHS_IMPL_STEP_CHECKS_H_INCLUDED #include +#include #include #include #include @@ -60,6 +61,26 @@ checkFreeze( } } + if (view.rules().enabled(fixFrozenLPTokenTransfer)) + { + if (auto const sleDst = view.read(keylet::account(dst)); + sleDst && sleDst->isFieldPresent(sfAMMID)) + { + auto const sleAmm = view.read(keylet::amm((*sleDst)[sfAMMID])); + if (!sleAmm) + return tecINTERNAL; // LCOV_EXCL_LINE + + if (isLPTokenFrozen( + view, + src, + (*sleAmm)[sfAsset].get(), + (*sleAmm)[sfAsset2].get())) + { + return terNO_LINE; + } + } + } + return tesSUCCESS; } diff --git a/src/xrpld/ledger/View.h b/src/xrpld/ledger/View.h index b964fc0ee76..aca3f9fa6d8 100644 --- a/src/xrpld/ledger/View.h +++ b/src/xrpld/ledger/View.h @@ -160,6 +160,13 @@ isDeepFrozen( Currency const& currency, AccountID const& issuer); +[[nodiscard]] bool +isLPTokenFrozen( + ReadView const& view, + AccountID const& account, + Issue const& asset, + Issue const& asset2); + // Returns the amount an account can spend without going into debt. // // <-- saAmount: amount of currency held by account. May be negative. 
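A hedged usage sketch (not part of the patch) of the helper declared above; `view`, `holder`, and the pool's two issues are assumed to be in scope, read from the AMM ledger entry as in the StepChecks change:

    // An LPToken becomes unusable for its holder as soon as either pool
    // asset is frozen for them, individually or globally.
    if (isLPTokenFrozen(view, holder, asset, asset2))
    {
        // The payment engine treats this the same as a missing line.
        return terNO_LINE;
    }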
diff --git a/src/xrpld/ledger/detail/View.cpp b/src/xrpld/ledger/detail/View.cpp index 1422a50a3ad..85abf7fc62c 100644 --- a/src/xrpld/ledger/detail/View.cpp +++ b/src/xrpld/ledger/detail/View.cpp @@ -293,6 +293,17 @@ isDeepFrozen( return sle->isFlag(lsfHighDeepFreeze) || sle->isFlag(lsfLowDeepFreeze); } +bool +isLPTokenFrozen( + ReadView const& view, + AccountID const& account, + Issue const& asset, + Issue const& asset2) +{ + return isFrozen(view, account, asset.currency, asset.account) || + isFrozen(view, account, asset2.currency, asset2.account); +} + STAmount accountHolds( ReadView const& view, @@ -323,6 +334,32 @@ accountHolds( { return false; } + + // when fixFrozenLPTokenTransfer is enabled, if currency is lptoken, + // we need to check if the associated assets have been frozen + if (view.rules().enabled(fixFrozenLPTokenTransfer)) + { + auto const sleIssuer = view.read(keylet::account(issuer)); + if (!sleIssuer) + { + return false; // LCOV_EXCL_LINE + } + else if (sleIssuer->isFieldPresent(sfAMMID)) + { + auto const sleAmm = + view.read(keylet::amm((*sleIssuer)[sfAMMID])); + + if (!sleAmm || + isLPTokenFrozen( + view, + account, + (*sleAmm)[sfAsset].get(), + (*sleAmm)[sfAsset2].get())) + { + return false; + } + } + } } return true; @@ -492,7 +529,8 @@ xrpLiquid( STAmount const amount = (balance < reserve) ? STAmount{0} : balance - reserve; - JLOG(j.trace()) << "accountHolds:" << " account=" << to_string(id) + JLOG(j.trace()) << "accountHolds:" + << " account=" << to_string(id) << " amount=" << amount.getFullText() << " fullBalance=" << fullBalance.getFullText() << " balance=" << balance.getFullText() From 02387fd227d9b7307ec417138b6c6af5b9d156f0 Mon Sep 17 00:00:00 2001 From: Bart Date: Thu, 6 Feb 2025 13:11:49 -0800 Subject: [PATCH 04/29] Updates Conan dependencies (#5256) This PR updates several Conan dependencies: * boost * date * libarchive * libmysqlclient * libpq * lz4 * onetbb * openssl * sqlite3 * zlib * zstd --- conanfile.py | 14 +++++++------- external/rocksdb/conanfile.py | 6 +++--- external/soci/conanfile.py | 8 ++++---- 3 files changed, 14 insertions(+), 14 deletions(-) diff --git a/conanfile.py b/conanfile.py index 14fc49a1946..d4513068fb5 100644 --- a/conanfile.py +++ b/conanfile.py @@ -24,14 +24,14 @@ class Xrpl(ConanFile): } requires = [ - 'date/3.0.1', + 'date/3.0.3', 'grpc/1.50.1', - 'libarchive/3.6.2', + 'libarchive/3.7.6', 'nudb/2.0.8', - 'openssl/1.1.1u', + 'openssl/1.1.1v', 'soci/4.0.3', 'xxhash/0.8.2', - 'zlib/1.2.13', + 'zlib/1.3.1', ] tool_requires = [ @@ -99,10 +99,10 @@ def configure(self): self.options['boost'].visibility = 'global' def requirements(self): - self.requires('boost/1.82.0', force=True) - self.requires('lz4/1.9.3', force=True) + self.requires('boost/1.83.0', force=True) + self.requires('lz4/1.10.0', force=True) self.requires('protobuf/3.21.9', force=True) - self.requires('sqlite3/3.42.0', force=True) + self.requires('sqlite3/3.47.0', force=True) if self.options.jemalloc: self.requires('jemalloc/5.3.0') if self.options.rocksdb: diff --git a/external/rocksdb/conanfile.py b/external/rocksdb/conanfile.py index 09425b9f863..1c7853d8140 100644 --- a/external/rocksdb/conanfile.py +++ b/external/rocksdb/conanfile.py @@ -89,13 +89,13 @@ def requirements(self): if self.options.with_snappy: self.requires("snappy/1.1.10") if self.options.with_lz4: - self.requires("lz4/1.9.4") + self.requires("lz4/1.10.0") if self.options.with_zlib: self.requires("zlib/[>=1.2.11 <2]") if self.options.with_zstd: - self.requires("zstd/1.5.5") + self.requires("zstd/1.5.6") 
if self.options.get_safe("with_tbb"): - self.requires("onetbb/2021.10.0") + self.requires("onetbb/2021.12.0") if self.options.with_jemalloc: self.requires("jemalloc/5.3.0") diff --git a/external/soci/conanfile.py b/external/soci/conanfile.py index 67c572d5ad8..7e611493d70 100644 --- a/external/soci/conanfile.py +++ b/external/soci/conanfile.py @@ -62,15 +62,15 @@ def configure(self): def requirements(self): if self.options.with_sqlite3: - self.requires("sqlite3/3.41.1") + self.requires("sqlite3/3.47.0") if self.options.with_odbc and self.settings.os != "Windows": self.requires("odbc/2.3.11") if self.options.with_mysql: - self.requires("libmysqlclient/8.0.31") + self.requires("libmysqlclient/8.1.0") if self.options.with_postgresql: - self.requires("libpq/14.7") + self.requires("libpq/15.5") if self.options.with_boost: - self.requires("boost/1.81.0") + self.requires("boost/1.83.0") @property def _minimum_compilers_version(self): From d9e4009e33c32aa5162ab7f6392faf51a5542e9b Mon Sep 17 00:00:00 2001 From: Mayukha Vadari Date: Fri, 7 Feb 2025 12:17:37 -0800 Subject: [PATCH 05/29] fix: issues in `simulate` RPC (#5265) Make `simulate` RPC easier to use: * Prevent the use of `seed`, `secret`, `seed_hex`, and `passphrase` fields (to avoid confusing with the signing methods). * Add autofilling of the `NetworkID` field. --- API-CHANGELOG.md | 2 +- src/test/jtx/JTx.h | 1 + src/test/jtx/impl/Env.cpp | 9 +- src/test/rpc/JSONRPC_test.cpp | 31 +++++ src/test/rpc/Simulate_test.cpp | 155 +++++++++++++++++++++-- src/xrpld/rpc/detail/TransactionSign.cpp | 7 + src/xrpld/rpc/handlers/Simulate.cpp | 16 +++ 7 files changed, 203 insertions(+), 18 deletions(-) diff --git a/API-CHANGELOG.md b/API-CHANGELOG.md index 9f99b4ab9a4..fda03c2d00a 100644 --- a/API-CHANGELOG.md +++ b/API-CHANGELOG.md @@ -92,7 +92,7 @@ As of 2025-01-28, version 2.4.0 is in development. You can use a pre-release ver - `ledger_entry`: `state` is added an alias for `ripple_state`. - `validators`: Added new field `validator_list_threshold` in response. - `simulate`: A new RPC that executes a [dry run of a transaction submission](https://github.com/XRPLF/XRPL-Standards/tree/master/XLS-0069d-simulate#2-rpc-simulate) -- Signing methods autofill fees better and properly handle transactions that don't have a base fee. +- Signing methods autofill fees better and properly handle transactions that don't have a base fee, and will also autofill the `NetworkID` field. 
## XRP Ledger server version 2.3.0 diff --git a/src/test/jtx/JTx.h b/src/test/jtx/JTx.h index a5a4a9eb1b9..81ba1b406a0 100644 --- a/src/test/jtx/JTx.h +++ b/src/test/jtx/JTx.h @@ -51,6 +51,7 @@ struct JTx bool fill_fee = true; bool fill_seq = true; bool fill_sig = true; + bool fill_netid = true; std::shared_ptr stx; std::function signer; diff --git a/src/test/jtx/impl/Env.cpp b/src/test/jtx/impl/Env.cpp index c87b1260244..43286ab7824 100644 --- a/src/test/jtx/impl/Env.cpp +++ b/src/test/jtx/impl/Env.cpp @@ -493,9 +493,12 @@ Env::autofill(JTx& jt) if (jt.fill_seq) jtx::fill_seq(jv, *current()); - uint32_t networkID = app().config().NETWORK_ID; - if (!jv.isMember(jss::NetworkID) && networkID > 1024) - jv[jss::NetworkID] = std::to_string(networkID); + if (jt.fill_netid) + { + uint32_t networkID = app().config().NETWORK_ID; + if (!jv.isMember(jss::NetworkID) && networkID > 1024) + jv[jss::NetworkID] = std::to_string(networkID); + } // Must come last try diff --git a/src/test/rpc/JSONRPC_test.cpp b/src/test/rpc/JSONRPC_test.cpp index 6841eb2af72..6e97301fc3e 100644 --- a/src/test/rpc/JSONRPC_test.cpp +++ b/src/test/rpc/JSONRPC_test.cpp @@ -2049,6 +2049,7 @@ class JSONRPC_test : public beast::unit_test::suite void testBadRpcCommand() { + testcase("bad RPC command"); test::jtx::Env env(*this); Json::Value const result{ env.rpc("bad_command", R"({"MakingThisUp": 0})")}; @@ -2061,6 +2062,7 @@ class JSONRPC_test : public beast::unit_test::suite void testAutoFillFees() { + testcase("autofill fees"); test::jtx::Env env(*this); auto ledger = env.current(); auto const& feeTrack = env.app().getFeeTrack(); @@ -2207,6 +2209,7 @@ class JSONRPC_test : public beast::unit_test::suite void testAutoFillEscalatedFees() { + testcase("autofill escalated fees"); using namespace test::jtx; Env env{*this, envconfig([](std::unique_ptr cfg) { cfg->loadFromString("[" SECTION_SIGNING_SUPPORT "]\ntrue"); @@ -2538,6 +2541,32 @@ class JSONRPC_test : public beast::unit_test::suite } } + void + testAutoFillNetworkID() + { + testcase("autofill NetworkID"); + using namespace test::jtx; + Env env{*this, envconfig([&](std::unique_ptr cfg) { + cfg->NETWORK_ID = 1025; + return cfg; + })}; + + { + Json::Value toSign; + toSign[jss::tx_json] = noop(env.master); + + BEAST_EXPECT(!toSign[jss::tx_json].isMember(jss::NetworkID)); + toSign[jss::secret] = "masterpassphrase"; + auto rpcResult = env.rpc("json", "sign", to_string(toSign)); + auto result = rpcResult[jss::result]; + + BEAST_EXPECT(!RPC::contains_error(result)); + BEAST_EXPECT( + result[jss::tx_json].isMember(jss::NetworkID) && + result[jss::tx_json][jss::NetworkID] == 1025); + } + } + // A function that can be called as though it would process a transaction. static void fakeProcessTransaction( @@ -2552,6 +2581,7 @@ class JSONRPC_test : public beast::unit_test::suite void testTransactionRPC() { + testcase("sign/submit RPCs"); using namespace std::chrono_literals; // Use jtx to set up a ledger so the tests will do the right thing. 
test::jtx::Account const a{"a"}; // rnUy2SHTrB9DubsPmkJZUXTf5FcNDGrYEA @@ -2678,6 +2708,7 @@ class JSONRPC_test : public beast::unit_test::suite testBadRpcCommand(); testAutoFillFees(); testAutoFillEscalatedFees(); + testAutoFillNetworkID(); testTransactionRPC(); } }; diff --git a/src/test/rpc/Simulate_test.cpp b/src/test/rpc/Simulate_test.cpp index 636f1d04cd0..656e5f0577e 100644 --- a/src/test/rpc/Simulate_test.cpp +++ b/src/test/rpc/Simulate_test.cpp @@ -93,13 +93,17 @@ class Simulate_test : public beast::unit_test::suite validate, bool testSerialized = true) { + env.close(); + Json::Value params; params[jss::tx_json] = tx; validate(env.rpc("json", "simulate", to_string(params)), tx); + params[jss::binary] = true; validate(env.rpc("json", "simulate", to_string(params)), tx); validate(env.rpc("simulate", to_string(tx)), tx); validate(env.rpc("simulate", to_string(tx), "binary"), tx); + if (testSerialized) { // This cannot be tested in the multisign autofill scenario @@ -119,6 +123,10 @@ class Simulate_test : public beast::unit_test::suite validate(env.rpc("simulate", tx_blob), tx); validate(env.rpc("simulate", tx_blob, "binary"), tx); } + + BEAST_EXPECTS( + env.current()->txCount() == 0, + std::to_string(env.current()->txCount())); } Json::Value @@ -235,6 +243,58 @@ class Simulate_test : public beast::unit_test::suite resp[jss::result][jss::error_message] == "Invalid field 'tx_json', not object."); } + { + // `seed` field included + Json::Value params = Json::objectValue; + params[jss::seed] = "doesnt_matter"; + Json::Value tx_json = Json::objectValue; + tx_json[jss::TransactionType] = jss::AccountSet; + tx_json[jss::Account] = env.master.human(); + params[jss::tx_json] = tx_json; + auto const resp = env.rpc("json", "simulate", to_string(params)); + BEAST_EXPECT( + resp[jss::result][jss::error_message] == + "Invalid field 'seed'."); + } + { + // `secret` field included + Json::Value params = Json::objectValue; + params[jss::secret] = "doesnt_matter"; + Json::Value tx_json = Json::objectValue; + tx_json[jss::TransactionType] = jss::AccountSet; + tx_json[jss::Account] = env.master.human(); + params[jss::tx_json] = tx_json; + auto const resp = env.rpc("json", "simulate", to_string(params)); + BEAST_EXPECT( + resp[jss::result][jss::error_message] == + "Invalid field 'secret'."); + } + { + // `seed_hex` field included + Json::Value params = Json::objectValue; + params[jss::seed_hex] = "doesnt_matter"; + Json::Value tx_json = Json::objectValue; + tx_json[jss::TransactionType] = jss::AccountSet; + tx_json[jss::Account] = env.master.human(); + params[jss::tx_json] = tx_json; + auto const resp = env.rpc("json", "simulate", to_string(params)); + BEAST_EXPECT( + resp[jss::result][jss::error_message] == + "Invalid field 'seed_hex'."); + } + { + // `passphrase` field included + Json::Value params = Json::objectValue; + params[jss::passphrase] = "doesnt_matter"; + Json::Value tx_json = Json::objectValue; + tx_json[jss::TransactionType] = jss::AccountSet; + tx_json[jss::Account] = env.master.human(); + params[jss::tx_json] = tx_json; + auto const resp = env.rpc("json", "simulate", to_string(params)); + BEAST_EXPECT( + resp[jss::result][jss::error_message] == + "Invalid field 'passphrase'."); + } { // Invalid transaction Json::Value params = Json::objectValue; @@ -412,7 +472,10 @@ class Simulate_test : public beast::unit_test::suite testcase("Successful transaction"); using namespace jtx; - Env env(*this); + Env env{*this, envconfig([&](std::unique_ptr cfg) { + cfg->NETWORK_ID = 0; + return cfg; + })}; 
static auto const newDomain = "123ABC"; { @@ -473,8 +536,6 @@ class Simulate_test : public beast::unit_test::suite // test without autofill testTx(env, tx, validateOutput); - - // TODO: check that the ledger wasn't affected } } @@ -523,8 +584,6 @@ class Simulate_test : public beast::unit_test::suite // test without autofill testTx(env, tx, testSimulation); - - // TODO: check that the ledger wasn't affected } } @@ -604,8 +663,6 @@ class Simulate_test : public beast::unit_test::suite // test without autofill testTx(env, tx, testSimulation); - - // TODO: check that the ledger wasn't affected } } @@ -625,6 +682,7 @@ class Simulate_test : public beast::unit_test::suite // set up valid multisign env(signers(alice, 1, {{becky, 1}, {carol, 1}})); + env.close(); { auto validateOutput = [&](Json::Value const& resp, @@ -662,7 +720,7 @@ class Simulate_test : public beast::unit_test::suite BEAST_EXPECT(finalFields[sfDomain] == newDomain); } } - BEAST_EXPECT(metadata[sfTransactionIndex.jsonName] == 1); + BEAST_EXPECT(metadata[sfTransactionIndex.jsonName] == 0); BEAST_EXPECT( metadata[sfTransactionResult.jsonName] == "tesSUCCESS"); } @@ -697,8 +755,6 @@ class Simulate_test : public beast::unit_test::suite // test without autofill testTx(env, tx, validateOutput); - - // TODO: check that the ledger wasn't affected } } @@ -754,8 +810,6 @@ class Simulate_test : public beast::unit_test::suite // test without autofill testTx(env, tx, testSimulation); - - // TODO: check that the ledger wasn't affected } } @@ -825,8 +879,6 @@ class Simulate_test : public beast::unit_test::suite // test without autofill testTx(env, tx, validateOutput); - - // TODO: check that the ledger wasn't affected } } @@ -948,6 +1000,80 @@ class Simulate_test : public beast::unit_test::suite BEAST_EXPECT(ownerCount(env, subject) == 0); } + void + testSuccessfulTransactionNetworkID() + { + testcase("Successful transaction with a custom network ID"); + + using namespace jtx; + Env env{*this, envconfig([&](std::unique_ptr cfg) { + cfg->NETWORK_ID = 1025; + return cfg; + })}; + static auto const newDomain = "123ABC"; + + { + auto validateOutput = [&](Json::Value const& resp, + Json::Value const& tx) { + auto result = resp[jss::result]; + checkBasicReturnValidity( + result, tx, 1, env.current()->fees().base); + + BEAST_EXPECT(result[jss::engine_result] == "tesSUCCESS"); + BEAST_EXPECT(result[jss::engine_result_code] == 0); + BEAST_EXPECT( + result[jss::engine_result_message] == + "The simulated transaction would have been applied."); + + if (BEAST_EXPECT( + result.isMember(jss::meta) || + result.isMember(jss::meta_blob))) + { + Json::Value const metadata = getJsonMetadata(result); + + if (BEAST_EXPECT( + metadata.isMember(sfAffectedNodes.jsonName))) + { + BEAST_EXPECT( + metadata[sfAffectedNodes.jsonName].size() == 1); + auto node = metadata[sfAffectedNodes.jsonName][0u]; + if (BEAST_EXPECT( + node.isMember(sfModifiedNode.jsonName))) + { + auto modifiedNode = node[sfModifiedNode]; + BEAST_EXPECT( + modifiedNode[sfLedgerEntryType] == + "AccountRoot"); + auto finalFields = modifiedNode[sfFinalFields]; + BEAST_EXPECT(finalFields[sfDomain] == newDomain); + } + } + BEAST_EXPECT(metadata[sfTransactionIndex.jsonName] == 0); + BEAST_EXPECT( + metadata[sfTransactionResult.jsonName] == "tesSUCCESS"); + } + }; + + Json::Value tx; + + tx[jss::Account] = env.master.human(); + tx[jss::TransactionType] = jss::AccountSet; + tx[sfDomain] = newDomain; + + // test with autofill + testTx(env, tx, validateOutput); + + tx[sfSigningPubKey] = ""; + tx[sfTxnSignature] = 
""; + tx[sfSequence] = 1; + tx[sfFee] = env.current()->fees().base.jsonClipped().asString(); + tx[sfNetworkID] = 1025; + + // test without autofill + testTx(env, tx, validateOutput); + } + } + public: void run() override @@ -961,6 +1087,7 @@ class Simulate_test : public beast::unit_test::suite testTransactionSigningFailure(); testMultisignedBadPubKey(); testDeleteExpiredCredentials(); + testSuccessfulTransactionNetworkID(); } }; diff --git a/src/xrpld/rpc/detail/TransactionSign.cpp b/src/xrpld/rpc/detail/TransactionSign.cpp index d8e4758ddd3..376a0ce24a5 100644 --- a/src/xrpld/rpc/detail/TransactionSign.cpp +++ b/src/xrpld/rpc/detail/TransactionSign.cpp @@ -467,6 +467,13 @@ transactionPreProcessImpl( if (!tx_json.isMember(jss::Flags)) tx_json[jss::Flags] = tfFullyCanonicalSig; + + if (!tx_json.isMember(jss::NetworkID)) + { + auto const networkId = app.config().NETWORK_ID; + if (networkId > 1024) + tx_json[jss::NetworkID] = to_string(networkId); + } } { diff --git a/src/xrpld/rpc/handlers/Simulate.cpp b/src/xrpld/rpc/handlers/Simulate.cpp index 538f6803f8c..7d391497f63 100644 --- a/src/xrpld/rpc/handlers/Simulate.cpp +++ b/src/xrpld/rpc/handlers/Simulate.cpp @@ -144,6 +144,13 @@ autofillTx(Json::Value& tx_json, RPC::JsonContext& context) tx_json[sfSequence.jsonName] = *seq; } + if (!tx_json.isMember(jss::NetworkID)) + { + auto const networkId = context.app.config().NETWORK_ID; + if (networkId > 1024) + tx_json[jss::NetworkID] = to_string(networkId); + } + return std::nullopt; } @@ -299,6 +306,15 @@ doSimulate(RPC::JsonContext& context) return RPC::invalid_field_error(jss::binary); } + for (auto const field : + {jss::secret, jss::seed, jss::seed_hex, jss::passphrase}) + { + if (context.params.isMember(field)) + { + return RPC::invalid_field_error(field); + } + } + // get JSON equivalent of transaction tx_json = getTxJsonFromParams(context.params); if (tx_json.isMember(jss::error)) From 0968cdf34085c8a14d5d30ab6b8757c2300a24ec Mon Sep 17 00:00:00 2001 From: Bronek Kozicki Date: Fri, 7 Feb 2025 23:11:29 +0000 Subject: [PATCH 06/29] fix: Do not allow creating Permissioned Domains if credentials are not enabled (#5275) If the permissioned domains amendment XLS-80 is enabled before credentials XLS-70, then the permissioned domain users will not be able to match any credentials. The changes here prevent the creation of any permissioned domain objects if credentials are not enabled. --- src/test/app/PermissionedDomains_test.cpp | 21 +++++++++++++++++-- .../app/tx/detail/PermissionedDomainSet.cpp | 4 +++- 2 files changed, 22 insertions(+), 3 deletions(-) diff --git a/src/test/app/PermissionedDomains_test.cpp b/src/test/app/PermissionedDomains_test.cpp index 5184c462dac..a80352cac5a 100644 --- a/src/test/app/PermissionedDomains_test.cpp +++ b/src/test/app/PermissionedDomains_test.cpp @@ -54,9 +54,10 @@ exceptionExpected(Env& env, Json::Value const& jv) class PermissionedDomains_test : public beast::unit_test::suite { - FeatureBitset withFeature_{ - supported_amendments() | featurePermissionedDomains}; FeatureBitset withoutFeature_{supported_amendments()}; + FeatureBitset withFeature_{ + supported_amendments() // + | featurePermissionedDomains | featureCredentials}; // Verify that each tx type can execute if the feature is enabled. 
void @@ -77,6 +78,21 @@ class PermissionedDomains_test : public beast::unit_test::suite env(pdomain::deleteTx(alice, domain)); } + // Verify that PD cannot be created or updated if credentials are disabled + void + testCredentialsDisabled() + { + auto amendments = supported_amendments(); + amendments.set(featurePermissionedDomains); + amendments.reset(featureCredentials); + testcase("Credentials disabled"); + Account const alice("alice"); + Env env(*this, amendments); + env.fund(XRP(1000), alice); + pdomain::Credentials credentials{{alice, "first credential"}}; + env(pdomain::setTx(alice, credentials), ter(temDISABLED)); + } + // Verify that each tx does not execute if feature is disabled void testDisabled() @@ -556,6 +572,7 @@ class PermissionedDomains_test : public beast::unit_test::suite run() override { testEnabled(); + testCredentialsDisabled(); testDisabled(); testSet(); testDelete(); diff --git a/src/xrpld/app/tx/detail/PermissionedDomainSet.cpp b/src/xrpld/app/tx/detail/PermissionedDomainSet.cpp index fcc5563b954..a5141523b3c 100644 --- a/src/xrpld/app/tx/detail/PermissionedDomainSet.cpp +++ b/src/xrpld/app/tx/detail/PermissionedDomainSet.cpp @@ -30,8 +30,10 @@ namespace ripple { NotTEC PermissionedDomainSet::preflight(PreflightContext const& ctx) { - if (!ctx.rules.enabled(featurePermissionedDomains)) + if (!ctx.rules.enabled(featurePermissionedDomains) || + !ctx.rules.enabled(featureCredentials)) return temDISABLED; + if (auto const ret = preflight1(ctx); !isTesSuccess(ret)) return ret; From 81034596a878d410598837534d1a0c44c3747206 Mon Sep 17 00:00:00 2001 From: Donovan Hide Date: Mon, 10 Feb 2025 17:08:36 +0000 Subject: [PATCH 07/29] fix: Omit superfluous setCurrentThreadName call in GRPCServer.cpp (#5280) --- src/xrpld/app/main/GRPCServer.cpp | 1 - 1 file changed, 1 deletion(-) diff --git a/src/xrpld/app/main/GRPCServer.cpp b/src/xrpld/app/main/GRPCServer.cpp index 4bcd95ceb82..7dc4d03b2c2 100644 --- a/src/xrpld/app/main/GRPCServer.cpp +++ b/src/xrpld/app/main/GRPCServer.cpp @@ -599,7 +599,6 @@ GRPCServer::start() if (running_ = impl_.start(); running_) { thread_ = std::thread([this]() { - beast::setCurrentThreadName("rippled : GRPCServer"); // Start the event loop and begin handling requests beast::setCurrentThreadName("rippled: grpc"); this->impl_.handleRpcs(); From fa5a85439f07f8a373e3aa83aa1ae9a32c5de38b Mon Sep 17 00:00:00 2001 From: Olek <115580134+oleks-rip@users.noreply.github.com> Date: Mon, 10 Feb 2025 15:33:37 -0500 Subject: [PATCH 08/29] fix: Amendment to add transaction flag checking functionality for Credentials (#5250) CredentialCreate / CredentialAccept / CredentialDelete transactions will check sfFlags field in preflight() when the amendment is enabled. --- include/xrpl/protocol/Feature.h | 2 +- include/xrpl/protocol/detail/features.macro | 2 ++ src/test/app/Credentials_test.cpp | 39 +++++++++++++++++++++ src/test/app/MultiSign_test.cpp | 27 ++++++++++++++ src/xrpld/app/tx/detail/Credentials.cpp | 21 +++++++++++ src/xrpld/app/tx/detail/SetSignerList.cpp | 9 +++++ 6 files changed, 99 insertions(+), 1 deletion(-) diff --git a/include/xrpl/protocol/Feature.h b/include/xrpl/protocol/Feature.h index c52f312cbfa..1c476df617f 100644 --- a/include/xrpl/protocol/Feature.h +++ b/include/xrpl/protocol/Feature.h @@ -80,7 +80,7 @@ namespace detail { // Feature.cpp. Because it's only used to reserve storage, and determine how // large to make the FeatureBitset, it MAY be larger. It MUST NOT be less than // the actual number of amendments. 
A LogicError on startup will verify this. -static constexpr std::size_t numFeatures = 87; +static constexpr std::size_t numFeatures = 88; /** Amendments that this server supports and the default voting behavior. Whether they are enabled depends on the Rules defined in the validated diff --git a/include/xrpl/protocol/detail/features.macro b/include/xrpl/protocol/detail/features.macro index f82a05a7c17..aa0782b1378 100644 --- a/include/xrpl/protocol/detail/features.macro +++ b/include/xrpl/protocol/detail/features.macro @@ -29,6 +29,8 @@ // If you add an amendment here, then do not forget to increment `numFeatures` // in include/xrpl/protocol/Feature.h. +// Check flags in Credential transactions +XRPL_FIX (InvalidTxFlags, Supported::yes, VoteBehavior::DefaultNo) XRPL_FIX (FrozenLPTokenTransfer, Supported::yes, VoteBehavior::DefaultNo) XRPL_FEATURE(DeepFreeze, Supported::yes, VoteBehavior::DefaultNo) XRPL_FEATURE(PermissionedDomains, Supported::no, VoteBehavior::DefaultNo) diff --git a/src/test/app/Credentials_test.cpp b/src/test/app/Credentials_test.cpp index e5d90d9766c..481850562fd 100644 --- a/src/test/app/Credentials_test.cpp +++ b/src/test/app/Credentials_test.cpp @@ -1058,6 +1058,43 @@ struct Credentials_test : public beast::unit_test::suite } } + void + testFlags(FeatureBitset features) + { + using namespace test::jtx; + + bool const enabled = features[fixInvalidTxFlags]; + testcase( + std::string("Test flag, fix ") + + (enabled ? "enabled" : "disabled")); + + const char credType[] = "abcde"; + Account const issuer{"issuer"}; + Account const subject{"subject"}; + + { + using namespace jtx; + Env env{*this, features}; + + env.fund(XRP(5000), subject, issuer); + env.close(); + + { + ter const expected( + enabled ? TER(temINVALID_FLAG) : TER(tesSUCCESS)); + env(credentials::create(subject, issuer, credType), + txflags(tfTransferable), + expected); + env(credentials::accept(subject, issuer, credType), + txflags(tfSellNFToken), + expected); + env(credentials::deleteCred(subject, subject, issuer, credType), + txflags(tfPassive), + expected); + } + } + } + void run() override { @@ -1069,6 +1106,8 @@ struct Credentials_test : public beast::unit_test::suite testAcceptFailed(all); testDeleteFailed(all); testFeatureFailed(all - featureCredentials); + testFlags(all - fixInvalidTxFlags); + testFlags(all); testRPC(); } }; diff --git a/src/test/app/MultiSign_test.cpp b/src/test/app/MultiSign_test.cpp index 77d85d9011b..9648bed886f 100644 --- a/src/test/app/MultiSign_test.cpp +++ b/src/test/app/MultiSign_test.cpp @@ -1672,6 +1672,29 @@ class MultiSign_test : public beast::unit_test::suite BEAST_EXPECT(env.seq(alice) == aliceSeq + 1); } + void + test_signerListSetFlags(FeatureBitset features) + { + using namespace test::jtx; + + Env env{*this, features}; + Account const alice{"alice"}; + + env.fund(XRP(1000), alice); + env.close(); + + bool const enabled = features[fixInvalidTxFlags]; + testcase( + std::string("SignerListSet flag, fix ") + + (enabled ? "enabled" : "disabled")); + + ter const expected(enabled ? 
TER(temINVALID_FLAG) : TER(tesSUCCESS)); + env(signers(alice, 2, {{bogie, 1}, {ghost, 1}}), + expected, + txflags(tfPassive)); + env.close(); + } + void testAll(FeatureBitset features) { @@ -1708,6 +1731,10 @@ class MultiSign_test : public beast::unit_test::suite testAll(all - featureMultiSignReserve - featureExpandedSignerList); testAll(all - featureExpandedSignerList); testAll(all); + + test_signerListSetFlags(all - fixInvalidTxFlags); + test_signerListSetFlags(all); + test_amendmentTransition(); } }; diff --git a/src/xrpld/app/tx/detail/Credentials.cpp b/src/xrpld/app/tx/detail/Credentials.cpp index 4da875f8d7c..ca80bc159e3 100644 --- a/src/xrpld/app/tx/detail/Credentials.cpp +++ b/src/xrpld/app/tx/detail/Credentials.cpp @@ -65,6 +65,13 @@ CredentialCreate::preflight(PreflightContext const& ctx) auto const& tx = ctx.tx; auto& j = ctx.j; + if (ctx.rules.enabled(fixInvalidTxFlags) && + (tx.getFlags() & tfUniversalMask)) + { + JLOG(ctx.j.debug()) << "CredentialCreate: invalid flags."; + return temINVALID_FLAG; + } + if (!tx[sfSubject]) { JLOG(j.trace()) << "Malformed transaction: Invalid Subject"; @@ -209,6 +216,13 @@ CredentialDelete::preflight(PreflightContext const& ctx) if (auto const ret = preflight1(ctx); !isTesSuccess(ret)) return ret; + if (ctx.rules.enabled(fixInvalidTxFlags) && + (ctx.tx.getFlags() & tfUniversalMask)) + { + JLOG(ctx.j.debug()) << "CredentialDelete: invalid flags."; + return temINVALID_FLAG; + } + auto const subject = ctx.tx[~sfSubject]; auto const issuer = ctx.tx[~sfIssuer]; @@ -289,6 +303,13 @@ CredentialAccept::preflight(PreflightContext const& ctx) if (auto const ret = preflight1(ctx); !isTesSuccess(ret)) return ret; + if (ctx.rules.enabled(fixInvalidTxFlags) && + (ctx.tx.getFlags() & tfUniversalMask)) + { + JLOG(ctx.j.debug()) << "CredentialAccept: invalid flags."; + return temINVALID_FLAG; + } + if (!ctx.tx[sfIssuer]) { JLOG(ctx.j.trace()) << "Malformed transaction: Issuer field zeroed."; diff --git a/src/xrpld/app/tx/detail/SetSignerList.cpp b/src/xrpld/app/tx/detail/SetSignerList.cpp index a74b0f7351c..173107e02ae 100644 --- a/src/xrpld/app/tx/detail/SetSignerList.cpp +++ b/src/xrpld/app/tx/detail/SetSignerList.cpp @@ -27,6 +27,8 @@ #include #include #include +#include + #include #include @@ -81,6 +83,13 @@ SetSignerList::preflight(PreflightContext const& ctx) if (auto const ret = preflight1(ctx); !isTesSuccess(ret)) return ret; + if (ctx.rules.enabled(fixInvalidTxFlags) && + (ctx.tx.getFlags() & tfUniversalMask)) + { + JLOG(ctx.j.debug()) << "SetSignerList: invalid flags."; + return temINVALID_FLAG; + } + auto const result = determineOperation(ctx.tx, ctx.flags, ctx.j); if (std::get<0>(result) != tesSUCCESS) From 3a55a64e1cf917229e3b4b8f0930d7d4d009994d Mon Sep 17 00:00:00 2001 From: Ed Hennis Date: Tue, 11 Feb 2025 15:50:51 -0500 Subject: [PATCH 09/29] docs: Add a summary of the git commit message rules (#5283) --- CONTRIBUTING.md | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index ca83017d275..cb3eb6f0481 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -107,6 +107,19 @@ Refer to ["How to Write a Git Commit Message"](https://cbea.ms/git-commit/) for general rules on writing a good commit message. +tl;dr +> 1. Separate subject from body with a blank line. +> 2. Limit the subject line to 50 characters. +> * [...]shoot for 50 characters, but consider 72 the hard limit. +> 3. Capitalize the subject line. +> 4. Do not end the subject line with a period. +> 5. 
Use the imperative mood in the subject line. +> * A properly formed Git commit subject line should always be able +> to complete the following sentence: "If applied, this commit will +> _your subject line here_". +> 6. Wrap the body at 72 characters. +> 7. Use the body to explain what and why vs. how. + In addition to those guidelines, please add one of the following prefixes to the subject line if appropriate. * `fix:` - The primary purpose is to fix an existing bug. From a079bac153f4b79ff18fe5a9b1405127a41305f9 Mon Sep 17 00:00:00 2001 From: Ed Hennis Date: Wed, 12 Feb 2025 08:44:03 -0500 Subject: [PATCH 10/29] chore: Rename missing-commits job, and combine nix job files (#5268) - Rename the job in missing-commits.yml from "check" to "up_to_date", because other jobs named "check" prevent merges, but this one should not prevent merges. How else are branches going to get caught up? - Move the job in instrumentation.yml to nix.yml, but keep it entirely independent. --- .github/workflows/instrumentation.yml | 103 -------------------------- .github/workflows/missing-commits.yml | 2 +- .github/workflows/nix.yml | 103 ++++++++++++++++++++++++-- 3 files changed, 97 insertions(+), 111 deletions(-) delete mode 100644 .github/workflows/instrumentation.yml diff --git a/.github/workflows/instrumentation.yml b/.github/workflows/instrumentation.yml deleted file mode 100644 index 6a1aef64350..00000000000 --- a/.github/workflows/instrumentation.yml +++ /dev/null @@ -1,103 +0,0 @@ -name: instrumentation -on: - pull_request: - push: - # If the branches list is ever changed, be sure to change it on all - # build/test jobs (nix, macos, windows, instrumentation) - branches: - # Always build the package branches - - develop - - release - - master - # Branches that opt-in to running - - 'ci/**' -concurrency: - group: ${{ github.workflow }}-${{ github.ref }} - cancel-in-progress: true - -jobs: - - # NOTE we are not using dependencies built inside nix because nix is lagging - # with compiler versions. 
Instrumentation requires clang version 16 or later - - instrumentation-build: - env: - CLANG_RELEASE: 16 - strategy: - fail-fast: false - runs-on: [self-hosted, heavy] - container: debian:bookworm - steps: - - name: install prerequisites - env: - DEBIAN_FRONTEND: noninteractive - run: | - apt-get update - apt-get install --yes --no-install-recommends \ - clang-${CLANG_RELEASE} clang++-${CLANG_RELEASE} \ - python3-pip python-is-python3 make cmake git wget - apt-get clean - update-alternatives --install \ - /usr/bin/clang clang /usr/bin/clang-${CLANG_RELEASE} 100 \ - --slave /usr/bin/clang++ clang++ /usr/bin/clang++-${CLANG_RELEASE} - update-alternatives --auto clang - pip install --no-cache --break-system-packages "conan<2" - - - name: checkout - uses: actions/checkout@v4 - - - name: prepare environment - run: | - mkdir ${GITHUB_WORKSPACE}/.build - echo "SOURCE_DIR=$GITHUB_WORKSPACE" >> $GITHUB_ENV - echo "BUILD_DIR=$GITHUB_WORKSPACE/.build" >> $GITHUB_ENV - echo "CC=/usr/bin/clang" >> $GITHUB_ENV - echo "CXX=/usr/bin/clang++" >> $GITHUB_ENV - - - name: configure Conan - run: | - conan profile new --detect default - conan profile update settings.compiler=clang default - conan profile update settings.compiler.version=${CLANG_RELEASE} default - conan profile update settings.compiler.libcxx=libstdc++11 default - conan profile update settings.compiler.cppstd=20 default - conan profile update options.rocksdb=False default - conan profile update \ - 'conf.tools.build:compiler_executables={"c": "/usr/bin/clang", "cpp": "/usr/bin/clang++"}' default - conan profile update 'env.CXXFLAGS="-DBOOST_ASIO_DISABLE_CONCEPTS"' default - conan profile update 'conf.tools.build:cxxflags+=["-DBOOST_ASIO_DISABLE_CONCEPTS"]' default - conan export external/snappy snappy/1.1.10@ - conan export external/soci soci/4.0.3@ - - - name: build dependencies - run: | - cd ${BUILD_DIR} - conan install ${SOURCE_DIR} \ - --output-folder ${BUILD_DIR} \ - --install-folder ${BUILD_DIR} \ - --build missing \ - --settings build_type=Debug - - - name: build with instrumentation - run: | - cd ${BUILD_DIR} - cmake -S ${SOURCE_DIR} -B ${BUILD_DIR} \ - -Dvoidstar=ON \ - -Dtests=ON \ - -Dxrpld=ON \ - -DCMAKE_BUILD_TYPE=Debug \ - -DSECP256K1_BUILD_BENCHMARK=OFF \ - -DSECP256K1_BUILD_TESTS=OFF \ - -DSECP256K1_BUILD_EXHAUSTIVE_TESTS=OFF \ - -DCMAKE_TOOLCHAIN_FILE=${BUILD_DIR}/build/generators/conan_toolchain.cmake - cmake --build . 
--parallel $(nproc) - - - name: verify instrumentation enabled - run: | - cd ${BUILD_DIR} - ./rippled --version | grep libvoidstar - - - name: run unit tests - run: | - cd ${BUILD_DIR} - ./rippled -u --unittest-jobs $(( $(nproc)/4 )) diff --git a/.github/workflows/missing-commits.yml b/.github/workflows/missing-commits.yml index cc6a7faa369..8715671f33f 100644 --- a/.github/workflows/missing-commits.yml +++ b/.github/workflows/missing-commits.yml @@ -9,7 +9,7 @@ on: - release jobs: - check: + up_to_date: runs-on: ubuntu-24.04 steps: - uses: actions/checkout@v4 diff --git a/.github/workflows/nix.yml b/.github/workflows/nix.yml index 9fc0387c3ae..c63adec56fb 100644 --- a/.github/workflows/nix.yml +++ b/.github/workflows/nix.yml @@ -3,7 +3,7 @@ on: pull_request: push: # If the branches list is ever changed, be sure to change it on all - # build/test jobs (nix, macos, windows, instrumentation) + # build/test jobs (nix, macos, windows) branches: # Always build the package branches - develop @@ -15,9 +15,9 @@ concurrency: group: ${{ github.workflow }}-${{ github.ref }} cancel-in-progress: true -# This workflow has two job matrixes. -# They can be considered phases because the second matrix ("test") -# depends on the first ("dependencies"). +# This workflow has multiple job matrixes. +# They can be considered phases because most of the matrices ("test", +# "coverage", "conan", ) depend on the first ("dependencies"). # # The first phase has a job in the matrix for each combination of # variables that affects dependency ABI: @@ -30,9 +30,12 @@ concurrency: # to hold the binaries if they are built locally. # We must use the "{upload,download}-artifact" actions instead. # -# The second phase has a job in the matrix for each test configuration. -# It installs dependency binaries from the cache, whichever was used, -# and builds and tests rippled. +# The remaining phases have a job in the matrix for each test +# configuration. They install dependency binaries from the cache, +# whichever was used, and build and test rippled. +# +# "instrumentation" is independent, but is included here because it also +# builds on linux in the same "on:" conditions. jobs: dependencies: @@ -293,3 +296,89 @@ jobs: -DCMAKE_BUILD_TYPE=${configuration} cmake --build . ./example | grep '^[[:digit:]]\+\.[[:digit:]]\+\.[[:digit:]]\+' + + # NOTE we are not using dependencies built above because it lags with + # compiler versions. 
Instrumentation requires clang version 16 or + # later + + instrumentation-build: + env: + CLANG_RELEASE: 16 + strategy: + fail-fast: false + runs-on: [self-hosted, heavy] + container: debian:bookworm + steps: + - name: install prerequisites + env: + DEBIAN_FRONTEND: noninteractive + run: | + apt-get update + apt-get install --yes --no-install-recommends \ + clang-${CLANG_RELEASE} clang++-${CLANG_RELEASE} \ + python3-pip python-is-python3 make cmake git wget + apt-get clean + update-alternatives --install \ + /usr/bin/clang clang /usr/bin/clang-${CLANG_RELEASE} 100 \ + --slave /usr/bin/clang++ clang++ /usr/bin/clang++-${CLANG_RELEASE} + update-alternatives --auto clang + pip install --no-cache --break-system-packages "conan<2" + + - name: checkout + uses: actions/checkout@v4 + + - name: prepare environment + run: | + mkdir ${GITHUB_WORKSPACE}/.build + echo "SOURCE_DIR=$GITHUB_WORKSPACE" >> $GITHUB_ENV + echo "BUILD_DIR=$GITHUB_WORKSPACE/.build" >> $GITHUB_ENV + echo "CC=/usr/bin/clang" >> $GITHUB_ENV + echo "CXX=/usr/bin/clang++" >> $GITHUB_ENV + + - name: configure Conan + run: | + conan profile new --detect default + conan profile update settings.compiler=clang default + conan profile update settings.compiler.version=${CLANG_RELEASE} default + conan profile update settings.compiler.libcxx=libstdc++11 default + conan profile update settings.compiler.cppstd=20 default + conan profile update options.rocksdb=False default + conan profile update \ + 'conf.tools.build:compiler_executables={"c": "/usr/bin/clang", "cpp": "/usr/bin/clang++"}' default + conan profile update 'env.CXXFLAGS="-DBOOST_ASIO_DISABLE_CONCEPTS"' default + conan profile update 'conf.tools.build:cxxflags+=["-DBOOST_ASIO_DISABLE_CONCEPTS"]' default + conan export external/snappy snappy/1.1.10@ + conan export external/soci soci/4.0.3@ + + - name: build dependencies + run: | + cd ${BUILD_DIR} + conan install ${SOURCE_DIR} \ + --output-folder ${BUILD_DIR} \ + --install-folder ${BUILD_DIR} \ + --build missing \ + --settings build_type=Debug + + - name: build with instrumentation + run: | + cd ${BUILD_DIR} + cmake -S ${SOURCE_DIR} -B ${BUILD_DIR} \ + -Dvoidstar=ON \ + -Dtests=ON \ + -Dxrpld=ON \ + -DCMAKE_BUILD_TYPE=Debug \ + -DSECP256K1_BUILD_BENCHMARK=OFF \ + -DSECP256K1_BUILD_TESTS=OFF \ + -DSECP256K1_BUILD_EXHAUSTIVE_TESTS=OFF \ + -DCMAKE_TOOLCHAIN_FILE=${BUILD_DIR}/build/generators/conan_toolchain.cmake + cmake --build . --parallel $(nproc) + + - name: verify instrumentation enabled + run: | + cd ${BUILD_DIR} + ./rippled --version | grep libvoidstar + + - name: run unit tests + run: | + cd ${BUILD_DIR} + ./rippled -u --unittest-jobs $(( $(nproc)/4 )) From b02b8d016c0f005c444735b0394ae00a688c938b Mon Sep 17 00:00:00 2001 From: code0xff <31336310+code0xff@users.noreply.github.com> Date: Thu, 13 Feb 2025 22:48:48 +0900 Subject: [PATCH 11/29] chore: Fix small typos in protocol files (#5279) --- include/xrpl/protocol/AccountID.h | 2 +- include/xrpl/protocol/Sign.h | 2 +- include/xrpl/protocol/TxFormats.h | 2 +- src/xrpld/app/main/Main.cpp | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/include/xrpl/protocol/AccountID.h b/include/xrpl/protocol/AccountID.h index 7edf8d388f7..2677dd76bce 100644 --- a/include/xrpl/protocol/AccountID.h +++ b/include/xrpl/protocol/AccountID.h @@ -114,7 +114,7 @@ operator<<(std::ostream& os, AccountID const& x) is expensive (it requires a SHA-256 operation) in most cases the overhead of the cache is worth the benefit. - @param count The number of entries the cache should accomodate. 
Zero will + @param count The number of entries the cache should accommodate. Zero will disable the cache, releasing any memory associated with it. @note The function will only initialize the cache the first time it is diff --git a/include/xrpl/protocol/Sign.h b/include/xrpl/protocol/Sign.h index 30fbb26244b..dcbe5dfd0db 100644 --- a/include/xrpl/protocol/Sign.h +++ b/include/xrpl/protocol/Sign.h @@ -75,7 +75,7 @@ buildMultiSigningData(STObject const& obj, AccountID const& signingID); The following methods support that optimization: 1. startMultiSigningData provides the large part which can be shared. - 2. finishMuiltiSigningData caps the passed in serializer with each + 2. finishMultiSigningData caps the passed in serializer with each signer's unique data. */ Serializer diff --git a/include/xrpl/protocol/TxFormats.h b/include/xrpl/protocol/TxFormats.h index 2f9121cecb4..7eb6fb72f7a 100644 --- a/include/xrpl/protocol/TxFormats.h +++ b/include/xrpl/protocol/TxFormats.h @@ -30,7 +30,7 @@ namespace ripple { @ingroup protocol */ -/** Transaction type identifieers +/** Transaction type identifiers Each ledger object requires a unique type identifier, which is stored within the object itself; this makes it possible to iterate the entire diff --git a/src/xrpld/app/main/Main.cpp b/src/xrpld/app/main/Main.cpp index 533cda75b55..6e92c2e83a7 100644 --- a/src/xrpld/app/main/Main.cpp +++ b/src/xrpld/app/main/Main.cpp @@ -56,7 +56,7 @@ #include #endif -// Do we know the plaform we're compiling on? If you're adding new platforms +// Do we know the platform we're compiling on? If you're adding new platforms // modify this check accordingly. #if !BOOST_OS_LINUX && !BOOST_OS_WINDOWS && !BOOST_OS_MACOS #error Supported platforms are: Linux, Windows and MacOS From e8e7888a23a5668c59dbafc426ef4e70602dc1b4 Mon Sep 17 00:00:00 2001 From: Elliot Lee Date: Thu, 13 Feb 2025 07:28:23 -0800 Subject: [PATCH 12/29] docs: ensure build_type and CMAKE_BUILD_TYPE match (#5274) --- BUILD.md | 34 ++++++++++++++++++++++++++++------ 1 file changed, 28 insertions(+), 6 deletions(-) diff --git a/BUILD.md b/BUILD.md index 31755c36919..310c538584d 100644 --- a/BUILD.md +++ b/BUILD.md @@ -222,13 +222,15 @@ It fixes some source files to add missing `#include`s. the `install-folder` or `-if` option to every `conan install` command in the next step. -2. Generate CMake files for every configuration you want to build. +2. Use conan to generate CMake files for every configuration you want to build: ``` conan install .. --output-folder . --build missing --settings build_type=Release conan install .. --output-folder . --build missing --settings build_type=Debug ``` + To build Debug, in the next step, be sure to set `-DCMAKE_BUILD_TYPE=Debug` + For a single-configuration generator, e.g. `Unix Makefiles` or `Ninja`, you only need to run this command once. For a multi-configuration generator, e.g. `Visual Studio`, you may want to @@ -258,13 +260,16 @@ It fixes some source files to add missing `#include`s. Single-config generators: + Pass the CMake variable [`CMAKE_BUILD_TYPE`][build_type] + and make sure it matches the one of the `build_type` settings + you chose in the previous step. + + For example, to build Debug, in the next command, replace "Release" with "Debug" + ``` cmake -DCMAKE_TOOLCHAIN_FILE:FILEPATH=build/generators/conan_toolchain.cmake -DCMAKE_BUILD_TYPE=Release -Dxrpld=ON -Dtests=ON .. 
``` - Pass the CMake variable [`CMAKE_BUILD_TYPE`][build_type] - and make sure it matches the `build_type` setting you chose in the previous - step. Multi-config generators: @@ -274,7 +279,7 @@ It fixes some source files to add missing `#include`s. **Note:** You can pass build options for `rippled` in this step. -4. Build `rippled`. +5. Build `rippled`. For a single-configuration generator, it will build whatever configuration you passed for `CMAKE_BUILD_TYPE`. For a multi-configuration generator, @@ -293,7 +298,7 @@ It fixes some source files to add missing `#include`s. cmake --build . --config Debug ``` -5. Test rippled. +6. Test rippled. Single-config generators: @@ -403,6 +408,23 @@ After any updates or changes to dependencies, you may need to do the following: 4. Re-run [conan install](#build-and-test). +### 'protobuf/port_def.inc' file not found + +If `cmake --build .` results in an error due to a missing a protobuf file, then you might have generated CMake files for a different `build_type` than the `CMAKE_BUILD_TYPE` you passed to conan. + +``` +/rippled/.build/pb-xrpl.libpb/xrpl/proto/ripple.pb.h:10:10: fatal error: 'google/protobuf/port_def.inc' file not found + 10 | #include + | ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +1 error generated. +``` + +For example, if you want to build Debug: + +1. For conan install, pass `--settings build_type=Debug` +2. For cmake, pass `-DCMAKE_BUILD_TYPE=Debug` + + ### no std::result_of If your compiler version is recent enough to have removed `std::result_of` as From 97e3dae6f4390cf7516ed1f797c1ba31b3e1aa4a Mon Sep 17 00:00:00 2001 From: Bart Date: Thu, 13 Feb 2025 11:54:01 -0500 Subject: [PATCH 13/29] fix: Replace charge() by fee_.update() in OnMessage functions (#5269) In PeerImpl.cpp, if the function is a message handler (onMessage) or called directly from a message handler, then it should use fee_, since when the handler returns (OnMessageEnd) then the charge function is called. If the function is not a message handler, such as a job queue item, it should remain charge. 
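To make the intent concrete, below is a minimal, self-contained C++ sketch of the
accumulate-then-charge pattern the handlers are moved toward. The `Fee`,
`chargeOnce`, and `onMessage` names are illustrative stand-ins, not the actual
rippled types; the real code records the fee with `fee_.update(...)` inside the
handler and the charge is applied when the handler returns (onMessageEnd).

```
#include <iostream>
#include <string>

// Simplified illustration only (not the rippled classes): a message handler
// records the heaviest fee in a local accumulator, and the peer is charged
// once when the handler ends, instead of calling charge() repeatedly from
// inside the handler.

struct Fee
{
    int cost = 0;
    std::string context = "baseline";

    // Keep the most expensive charge seen so far, remembering why.
    void
    update(int newCost, std::string const& why)
    {
        if (newCost > cost)
        {
            cost = newCost;
            context = why;
        }
    }
};

// Stand-in for the resource manager call made once per message.
void
chargeOnce(Fee const& fee)
{
    std::cout << "charging " << fee.cost << " for: " << fee.context << '\n';
}

void
onMessage(bool malformedEndpoint, bool uselessData)
{
    Fee fee;  // analogous to the per-message fee_ member
    if (malformedEndpoint)
        fee.update(30, "malformed endpoint");
    if (uselessData)
        fee.update(10, "useless data");
    // ... rest of the handler; no direct charge() calls here ...
    chargeOnce(fee);  // applied exactly once, as onMessageEnd() would
}

int
main()
{
    onMessage(true, false);   // billed once, for the malformed endpoint
    onMessage(false, false);  // only the baseline charge
}
```

In this sketch the accumulator simply keeps the most expensive charge seen;
whatever the exact accumulation rule, the point is that a handler which hits
several problems bills the peer once per message rather than once per call
site.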
--- include/xrpl/resource/Charge.h | 3 ++ src/libxrpl/resource/Charge.cpp | 6 ++++ src/xrpld/overlay/detail/PeerImp.cpp | 53 +++++++++++++++++----------- 3 files changed, 41 insertions(+), 21 deletions(-) diff --git a/include/xrpl/resource/Charge.h b/include/xrpl/resource/Charge.h index e5799710b2c..a75ad326242 100644 --- a/include/xrpl/resource/Charge.h +++ b/include/xrpl/resource/Charge.h @@ -57,6 +57,9 @@ class Charge std::strong_ordering operator<=>(Charge const&) const; + Charge + operator*(value_type m) const; + private: value_type m_cost; std::string m_label; diff --git a/src/libxrpl/resource/Charge.cpp b/src/libxrpl/resource/Charge.cpp index 66b5049dbd3..53df4e592dc 100644 --- a/src/libxrpl/resource/Charge.cpp +++ b/src/libxrpl/resource/Charge.cpp @@ -67,5 +67,11 @@ Charge::operator<=>(Charge const& c) const return m_cost <=> c.m_cost; } +Charge +Charge::operator*(value_type m) const +{ + return Charge(m_cost * m, m_label); +} + } // namespace Resource } // namespace ripple diff --git a/src/xrpld/overlay/detail/PeerImp.cpp b/src/xrpld/overlay/detail/PeerImp.cpp index fe88fcfd2cf..c3656c9445c 100644 --- a/src/xrpld/overlay/detail/PeerImp.cpp +++ b/src/xrpld/overlay/detail/PeerImp.cpp @@ -31,14 +31,11 @@ #include #include #include -#include #include #include #include #include #include -#include -// #include #include #include @@ -1111,7 +1108,7 @@ PeerImp::onMessage(std::shared_ptr const& m) // VFALCO NOTE I think we should drop the peer immediately if (!cluster()) { - fee_.fee = Resource::feeUselessData; + fee_.update(Resource::feeUselessData, "unknown cluster"); return; } @@ -1189,13 +1186,14 @@ PeerImp::onMessage(std::shared_ptr const& m) // implication for the protocol. if (m->endpoints_v2().size() >= 1024) { - charge(Resource::feeInvalidData, "endpoints too large"); + fee_.update(Resource::feeUselessData, "endpoints too large"); return; } std::vector endpoints; endpoints.reserve(m->endpoints_v2().size()); + auto malformed = 0; for (auto const& tm : m->endpoints_v2()) { auto result = beast::IP::Endpoint::from_string_checked(tm.endpoint()); @@ -1204,7 +1202,7 @@ PeerImp::onMessage(std::shared_ptr const& m) { JLOG(p_journal_.error()) << "failed to parse incoming endpoint: {" << tm.endpoint() << "}"; - charge(Resource::feeInvalidData, "endpoints malformed"); + malformed++; continue; } @@ -1220,6 +1218,15 @@ PeerImp::onMessage(std::shared_ptr const& m) endpoints.emplace_back(*result, tm.hops()); } + // Charge the peer for each malformed endpoint. As there still may be + // multiple valid endpoints we don't return early. 
+ if (malformed > 0) + { + fee_.update( + Resource::feeInvalidData * malformed, + std::to_string(malformed) + " malformed endpoints"); + } + if (!endpoints.empty()) overlay_.peerFinder().on_endpoints(slot_, endpoints); } @@ -1340,7 +1347,7 @@ void PeerImp::onMessage(std::shared_ptr const& m) { auto badData = [&](std::string const& msg) { - charge(Resource::feeInvalidData, "get_ledger " + msg); + fee_.update(Resource::feeInvalidData, "get_ledger " + msg); JLOG(p_journal_.warn()) << "TMGetLedger: " << msg; }; auto const itype{m->itype()}; @@ -1431,7 +1438,8 @@ PeerImp::onMessage(std::shared_ptr const& m) JLOG(p_journal_.trace()) << "onMessage, TMProofPathRequest"; if (!ledgerReplayEnabled_) { - charge(Resource::feeMalformedRequest, "proof_path_request disabled"); + fee_.update( + Resource::feeMalformedRequest, "proof_path_request disabled"); return; } @@ -1468,13 +1476,14 @@ PeerImp::onMessage(std::shared_ptr const& m) { if (!ledgerReplayEnabled_) { - charge(Resource::feeMalformedRequest, "proof_path_response disabled"); + fee_.update( + Resource::feeMalformedRequest, "proof_path_response disabled"); return; } if (!ledgerReplayMsgHandler_.processProofPathResponse(m)) { - charge(Resource::feeInvalidData, "proof_path_response"); + fee_.update(Resource::feeInvalidData, "proof_path_response"); } } @@ -1484,7 +1493,8 @@ PeerImp::onMessage(std::shared_ptr const& m) JLOG(p_journal_.trace()) << "onMessage, TMReplayDeltaRequest"; if (!ledgerReplayEnabled_) { - charge(Resource::feeMalformedRequest, "replay_delta_request disabled"); + fee_.update( + Resource::feeMalformedRequest, "replay_delta_request disabled"); return; } @@ -1521,13 +1531,14 @@ PeerImp::onMessage(std::shared_ptr const& m) { if (!ledgerReplayEnabled_) { - charge(Resource::feeMalformedRequest, "replay_delta_response disabled"); + fee_.update( + Resource::feeMalformedRequest, "replay_delta_response disabled"); return; } if (!ledgerReplayMsgHandler_.processReplayDeltaResponse(m)) { - charge(Resource::feeInvalidData, "replay_delta_response"); + fee_.update(Resource::feeInvalidData, "replay_delta_response"); } } @@ -2408,10 +2419,6 @@ PeerImp::onMessage(std::shared_ptr const& m) return; } - fee_.update( - Resource::feeModerateBurdenPeer, - " received a get object by hash request"); - protocol::TMGetObjectByHash reply; reply.set_query(false); @@ -2432,6 +2439,10 @@ PeerImp::onMessage(std::shared_ptr const& m) reply.set_ledgerhash(packet.ledgerhash()); } + fee_.update( + Resource::feeModerateBurdenPeer, + " received a get object by hash request"); + // This is a very minimal implementation for (int i = 0; i < packet.objects_size(); ++i) { @@ -2628,14 +2639,14 @@ PeerImp::onMessage(std::shared_ptr const& m) if (!m->has_validatorpubkey()) { - charge(Resource::feeInvalidData, "squelch no pubkey"); + fee_.update(Resource::feeInvalidData, "squelch no pubkey"); return; } auto validator = m->validatorpubkey(); auto const slice{makeSlice(validator)}; if (!publicKeyType(slice)) { - charge(Resource::feeInvalidData, "squelch bad pubkey"); + fee_.update(Resource::feeInvalidData, "squelch bad pubkey"); return; } PublicKey key(slice); @@ -2643,7 +2654,7 @@ PeerImp::onMessage(std::shared_ptr const& m) // Ignore non-validator squelch if (!app_.validators().listed(key)) { - charge(Resource::feeInvalidData, "squelch non-validator"); + fee_.update(Resource::feeInvalidData, "squelch non-validator"); JLOG(p_journal_.debug()) << "onMessage: TMSquelch discarding non-validator squelch " << slice; @@ -2663,7 +2674,7 @@ PeerImp::onMessage(std::shared_ptr const& m) if 
(!m->squelch()) squelch_.removeSquelch(key); else if (!squelch_.addSquelch(key, std::chrono::seconds{duration})) - charge(Resource::feeInvalidData, "squelch duration"); + fee_.update(Resource::feeInvalidData, "squelch duration"); JLOG(p_journal_.debug()) << "onMessage: TMSquelch " << slice << " " << id() << " " << duration; From 01fe9477f47b28d8e9d105d0d5936d86e97c9cfb Mon Sep 17 00:00:00 2001 From: Ed Hennis Date: Thu, 13 Feb 2025 17:32:37 -0500 Subject: [PATCH 14/29] refactor: Change recursive_mutex to mutex in DatabaseRotatingImp (#5276) Rewrites the code so that the lock is not held during the callback. Instead it locks twice, once before, and once after. This is safe due to the structure of the code, but is checked after the second lock. This allows mutex_ to be changed back to a regular mutex. --- Builds/levelization/results/ordering.txt | 1 + src/test/app/SHAMapStore_test.cpp | 126 ++++++++++++++++++ src/xrpld/app/misc/SHAMapStoreImp.cpp | 12 +- src/xrpld/nodestore/DatabaseRotating.h | 12 +- .../nodestore/detail/DatabaseRotatingImp.cpp | 32 +++-- .../nodestore/detail/DatabaseRotatingImp.h | 16 +-- 6 files changed, 172 insertions(+), 27 deletions(-) diff --git a/Builds/levelization/results/ordering.txt b/Builds/levelization/results/ordering.txt index acf8daafb79..681f76dd5db 100644 --- a/Builds/levelization/results/ordering.txt +++ b/Builds/levelization/results/ordering.txt @@ -19,6 +19,7 @@ test.app > xrpl.basics test.app > xrpld.app test.app > xrpld.core test.app > xrpld.ledger +test.app > xrpld.nodestore test.app > xrpld.overlay test.app > xrpld.rpc test.app > xrpl.json diff --git a/src/test/app/SHAMapStore_test.cpp b/src/test/app/SHAMapStore_test.cpp index 376cb4eb7ba..5fd3f79c9f5 100644 --- a/src/test/app/SHAMapStore_test.cpp +++ b/src/test/app/SHAMapStore_test.cpp @@ -20,9 +20,11 @@ #include #include #include +#include #include #include #include +#include #include namespace ripple { @@ -518,12 +520,136 @@ class SHAMapStore_test : public beast::unit_test::suite lastRotated = ledgerSeq - 1; } + std::unique_ptr + makeBackendRotating( + jtx::Env& env, + NodeStoreScheduler& scheduler, + std::string path) + { + Section section{ + env.app().config().section(ConfigSection::nodeDatabase())}; + boost::filesystem::path newPath; + + if (!BEAST_EXPECT(path.size())) + return {}; + newPath = path; + section.set("path", newPath.string()); + + auto backend{NodeStore::Manager::instance().make_Backend( + section, + megabytes(env.app().config().getValueFor( + SizedItem::burstSize, std::nullopt)), + scheduler, + env.app().logs().journal("NodeStoreTest"))}; + backend->open(); + return backend; + } + + void + testRotate() + { + // The only purpose of this test is to ensure that if something that + // should never happen happens, we don't get a deadlock. + testcase("rotate with lock contention"); + + using namespace jtx; + Env env(*this, envconfig(onlineDelete)); + + ///////////////////////////////////////////////////////////// + // Create the backend. 
Normally, SHAMapStoreImp handles all these + // details + auto nscfg = env.app().config().section(ConfigSection::nodeDatabase()); + + // Provide default values: + if (!nscfg.exists("cache_size")) + nscfg.set( + "cache_size", + std::to_string(env.app().config().getValueFor( + SizedItem::treeCacheSize, std::nullopt))); + + if (!nscfg.exists("cache_age")) + nscfg.set( + "cache_age", + std::to_string(env.app().config().getValueFor( + SizedItem::treeCacheAge, std::nullopt))); + + NodeStoreScheduler scheduler(env.app().getJobQueue()); + + std::string const writableDb = "write"; + std::string const archiveDb = "archive"; + auto writableBackend = makeBackendRotating(env, scheduler, writableDb); + auto archiveBackend = makeBackendRotating(env, scheduler, archiveDb); + + // Create NodeStore with two backends to allow online deletion of + // data + constexpr int readThreads = 4; + auto dbr = std::make_unique( + scheduler, + readThreads, + std::move(writableBackend), + std::move(archiveBackend), + nscfg, + env.app().logs().journal("NodeStoreTest")); + + ///////////////////////////////////////////////////////////// + // Check basic functionality + using namespace std::chrono_literals; + std::atomic threadNum = 0; + + { + auto newBackend = makeBackendRotating( + env, scheduler, std::to_string(++threadNum)); + + auto const cb = [&](std::string const& writableName, + std::string const& archiveName) { + BEAST_EXPECT(writableName == "1"); + BEAST_EXPECT(archiveName == "write"); + // Ensure that dbr functions can be called from within the + // callback + BEAST_EXPECT(dbr->getName() == "1"); + }; + + dbr->rotate(std::move(newBackend), cb); + } + BEAST_EXPECT(threadNum == 1); + BEAST_EXPECT(dbr->getName() == "1"); + + ///////////////////////////////////////////////////////////// + // Do something stupid. Try to re-enter rotate from inside the callback. 
+ { + auto const cb = [&](std::string const& writableName, + std::string const& archiveName) { + BEAST_EXPECT(writableName == "3"); + BEAST_EXPECT(archiveName == "2"); + // Ensure that dbr functions can be called from within the + // callback + BEAST_EXPECT(dbr->getName() == "3"); + }; + auto const cbReentrant = [&](std::string const& writableName, + std::string const& archiveName) { + BEAST_EXPECT(writableName == "2"); + BEAST_EXPECT(archiveName == "1"); + auto newBackend = makeBackendRotating( + env, scheduler, std::to_string(++threadNum)); + // Reminder: doing this is stupid and should never happen + dbr->rotate(std::move(newBackend), cb); + }; + auto newBackend = makeBackendRotating( + env, scheduler, std::to_string(++threadNum)); + dbr->rotate(std::move(newBackend), cbReentrant); + } + + BEAST_EXPECT(threadNum == 3); + BEAST_EXPECT(dbr->getName() == "3"); + } + void run() override { testClear(); testAutomatic(); testCanDelete(); + testRotate(); } }; diff --git a/src/xrpld/app/misc/SHAMapStoreImp.cpp b/src/xrpld/app/misc/SHAMapStoreImp.cpp index 3a530e0e410..e2e0e3b9c46 100644 --- a/src/xrpld/app/misc/SHAMapStoreImp.cpp +++ b/src/xrpld/app/misc/SHAMapStoreImp.cpp @@ -366,17 +366,17 @@ SHAMapStoreImp::run() lastRotated = validatedSeq; - dbRotating_->rotateWithLock( - [&](std::string const& writableBackendName) { + dbRotating_->rotate( + std::move(newBackend), + [&](std::string const& writableName, + std::string const& archiveName) { SavedState savedState; - savedState.writableDb = newBackend->getName(); - savedState.archiveDb = writableBackendName; + savedState.writableDb = writableName; + savedState.archiveDb = archiveName; savedState.lastRotated = lastRotated; state_db_.setState(savedState); clearCaches(validatedSeq); - - return std::move(newBackend); }); JLOG(journal_.warn()) << "finished rotation " << validatedSeq; diff --git a/src/xrpld/nodestore/DatabaseRotating.h b/src/xrpld/nodestore/DatabaseRotating.h index 10f575c4662..3e8c6a7d5f0 100644 --- a/src/xrpld/nodestore/DatabaseRotating.h +++ b/src/xrpld/nodestore/DatabaseRotating.h @@ -44,11 +44,17 @@ class DatabaseRotating : public Database /** Rotates the backends. - @param f A function executed before the rotation and under the same lock + @param newBackend New writable backend + @param f A function executed after the rotation outside of lock. The + values passed to f will be the new backend database names _after_ + rotation. */ virtual void - rotateWithLock(std::function( - std::string const& writableBackendName)> const& f) = 0; + rotate( + std::unique_ptr&& newBackend, + std::function const& f) = 0; }; } // namespace NodeStore diff --git a/src/xrpld/nodestore/detail/DatabaseRotatingImp.cpp b/src/xrpld/nodestore/detail/DatabaseRotatingImp.cpp index 58cc3599dc6..c7e6c8c349f 100644 --- a/src/xrpld/nodestore/detail/DatabaseRotatingImp.cpp +++ b/src/xrpld/nodestore/detail/DatabaseRotatingImp.cpp @@ -41,16 +41,32 @@ DatabaseRotatingImp::DatabaseRotatingImp( } void -DatabaseRotatingImp::rotateWithLock( - std::function( - std::string const& writableBackendName)> const& f) +DatabaseRotatingImp::rotate( + std::unique_ptr&& newBackend, + std::function const& f) { - std::lock_guard lock(mutex_); + // Pass these two names to the callback function + std::string const newWritableBackendName = newBackend->getName(); + std::string newArchiveBackendName; + // Hold on to current archive backend pointer until after the + // callback finishes. Only then will the archive directory be + // deleted. 
+ std::shared_ptr oldArchiveBackend; + { + std::lock_guard lock(mutex_); + + archiveBackend_->setDeletePath(); + oldArchiveBackend = std::move(archiveBackend_); + + archiveBackend_ = std::move(writableBackend_); + newArchiveBackendName = archiveBackend_->getName(); + + writableBackend_ = std::move(newBackend); + } - auto newBackend = f(writableBackend_->getName()); - archiveBackend_->setDeletePath(); - archiveBackend_ = std::move(writableBackend_); - writableBackend_ = std::move(newBackend); + f(newWritableBackendName, newArchiveBackendName); } std::string diff --git a/src/xrpld/nodestore/detail/DatabaseRotatingImp.h b/src/xrpld/nodestore/detail/DatabaseRotatingImp.h index 5183aa1e2e4..d9f114f5039 100644 --- a/src/xrpld/nodestore/detail/DatabaseRotatingImp.h +++ b/src/xrpld/nodestore/detail/DatabaseRotatingImp.h @@ -49,9 +49,11 @@ class DatabaseRotatingImp : public DatabaseRotating } void - rotateWithLock( - std::function( - std::string const& writableBackendName)> const& f) override; + rotate( + std::unique_ptr&& newBackend, + std::function const& f) override; std::string getName() const override; @@ -82,13 +84,7 @@ class DatabaseRotatingImp : public DatabaseRotating private: std::shared_ptr writableBackend_; std::shared_ptr archiveBackend_; - // This needs to be a recursive mutex because callbacks in `rotateWithLock` - // can call function that also lock the mutex. A current example of this is - // a callback from SHAMapStoreImp, which calls `clearCaches`. This - // `clearCaches` call eventually calls `fetchNodeObject` which tries to - // relock the mutex. It would be desirable to rewrite the code so the lock - // was not held during a callback. - mutable std::recursive_mutex mutex_; + mutable std::mutex mutex_; std::shared_ptr fetchNodeObject( From dc9e6c37fe8fe0d78228c1998786579ae8e0211e Mon Sep 17 00:00:00 2001 From: Darius Tumas Date: Fri, 14 Feb 2025 16:54:29 +0200 Subject: [PATCH 15/29] chore: Update XRPL Foundation public key (#5289) Following the XRPL Foundation UNL migration a new set of keys was generated. --- cfg/validators-example.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cfg/validators-example.txt b/cfg/validators-example.txt index 5b59e5c4fde..802cea4a391 100644 --- a/cfg/validators-example.txt +++ b/cfg/validators-example.txt @@ -60,7 +60,7 @@ https://vl.xrplf.org #vl.ripple.com ED2677ABFFD1B33AC6FBC3062B71F1E8397C1505E1C42C64D11AD1B28FF73F4734 # vl.xrplf.org -ED45D1840EE724BE327ABE9146503D5848EFD5F38B6D5FEDE71E80ACCE5E6E738B +ED42AEC58B701EEBB77356FFFEC26F83C1F0407263530F068C7C73D392C7E06FD1 # To use the test network (see https://xrpl.org/connect-your-rippled-to-the-xrp-test-net.html), # use the following configuration instead: From 7c9d652d9ba55814006e5f4ed3b4f3617813f958 Mon Sep 17 00:00:00 2001 From: Bart Date: Fri, 14 Feb 2025 11:12:19 -0500 Subject: [PATCH 16/29] Support canonical ledger entry names (#5271) This change enhances the filtering in the ledger, ledger_data, and account_objects methods by also supporting filtering by the canonical name of the LedgerEntryType using case-insensitive matching. 
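A minimal sketch of the matching rule described above (not the actual
`chooseLedgerEntryType` implementation, and only a three-entry sample of the
ledger entry table): a "type" filter is accepted if it equals the canonical
name ignoring case, or the RPC name exactly.

```
#include <algorithm>
#include <array>
#include <cctype>
#include <iostream>
#include <optional>
#include <string>
#include <utility>

// Case-insensitive comparison, standing in for boost::iequals.
bool
iequals(std::string const& a, std::string const& b)
{
    return a.size() == b.size() &&
        std::equal(a.begin(), a.end(), b.begin(), [](char x, char y) {
               return std::tolower(static_cast<unsigned char>(x)) ==
                   std::tolower(static_cast<unsigned char>(y));
           });
}

std::optional<std::string>
chooseType(std::string const& filter)
{
    // Illustrative subset of (canonical name, RPC name) pairs.
    static constexpr std::array<std::pair<char const*, char const*>, 3> types{
        {{"MPTokenIssuance", "mpt_issuance"},
         {"RippleState", "state"},
         {"Offer", "offer"}}};

    for (auto const& [canonical, rpcName] : types)
    {
        if (iequals(canonical, filter) || filter == rpcName)
            return std::string{canonical};
    }
    return std::nullopt;  // caller reports rpcINVALID_PARAMS
}

int
main()
{
    for (auto const& f :
         {"mptokenissuance", "MPTokenIssuance", "mpt_issuance", "MPT_Issuance"})
    {
        auto const match = chooseType(f);
        std::cout << f << " -> " << (match ? *match : "invalid") << '\n';
    }
    // The first three match; "MPT_Issuance" is rejected, since RPC names
    // remain case-sensitive while canonical names are matched ignoring case.
}
```

This mirrors the behavior exercised by the new RPCHelpers unit tests: canonical
names gain case-insensitive matching while the existing RPC-name matching stays
exact.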
--- API-CHANGELOG.md | 1 + src/test/rpc/RPCHelpers_test.cpp | 92 +++++++++++++++++++++++++++++ src/xrpld/rpc/detail/RPCHelpers.cpp | 24 ++++---- 3 files changed, 107 insertions(+), 10 deletions(-) create mode 100644 src/test/rpc/RPCHelpers_test.cpp diff --git a/API-CHANGELOG.md b/API-CHANGELOG.md index fda03c2d00a..0d5d8a8196b 100644 --- a/API-CHANGELOG.md +++ b/API-CHANGELOG.md @@ -90,6 +90,7 @@ As of 2025-01-28, version 2.4.0 is in development. You can use a pre-release ver ### Additions and bugfixes in 2.4.0 - `ledger_entry`: `state` is added an alias for `ripple_state`. +- `ledger_entry`: Enables case-insensitive filtering by canonical name in addition to case-sensitive filtering by RPC name. - `validators`: Added new field `validator_list_threshold` in response. - `simulate`: A new RPC that executes a [dry run of a transaction submission](https://github.com/XRPLF/XRPL-Standards/tree/master/XLS-0069d-simulate#2-rpc-simulate) - Signing methods autofill fees better and properly handle transactions that don't have a base fee, and will also autofill the `NetworkID` field. diff --git a/src/test/rpc/RPCHelpers_test.cpp b/src/test/rpc/RPCHelpers_test.cpp new file mode 100644 index 00000000000..11a0ef787d1 --- /dev/null +++ b/src/test/rpc/RPCHelpers_test.cpp @@ -0,0 +1,92 @@ +//------------------------------------------------------------------------------ +/* + This file is part of rippled: https://github.com/ripple/rippled + Copyright (c) 2012, 2013 Ripple Labs Inc. + Permission to use, copy, modify, and/or distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. +*/ +//============================================================================== + +#include +#include +#include + +namespace ripple { +namespace test { + +class RPCHelpers_test : public beast::unit_test::suite +{ +public: + void + testChooseLedgerEntryType() + { + testcase("ChooseLedgerEntryType"); + + // Test no type. + Json::Value tx = Json::objectValue; + auto result = RPC::chooseLedgerEntryType(tx); + BEAST_EXPECT(result.first == RPC::Status::OK); + BEAST_EXPECT(result.second == 0); + + // Test empty type. + tx[jss::type] = ""; + result = RPC::chooseLedgerEntryType(tx); + BEAST_EXPECT(result.first == RPC::Status{rpcINVALID_PARAMS}); + BEAST_EXPECT(result.second == 0); + + // Test type using canonical name in mixedcase. + tx[jss::type] = "MPTokenIssuance"; + result = RPC::chooseLedgerEntryType(tx); + BEAST_EXPECT(result.first == RPC::Status::OK); + BEAST_EXPECT(result.second == ltMPTOKEN_ISSUANCE); + + // Test type using canonical name in lowercase. + tx[jss::type] = "mptokenissuance"; + result = RPC::chooseLedgerEntryType(tx); + BEAST_EXPECT(result.first == RPC::Status::OK); + BEAST_EXPECT(result.second == ltMPTOKEN_ISSUANCE); + + // Test type using RPC name with exact match. 
+ tx[jss::type] = "mpt_issuance"; + result = RPC::chooseLedgerEntryType(tx); + BEAST_EXPECT(result.first == RPC::Status::OK); + BEAST_EXPECT(result.second == ltMPTOKEN_ISSUANCE); + + // Test type using RPC name with inexact match. + tx[jss::type] = "MPT_Issuance"; + result = RPC::chooseLedgerEntryType(tx); + BEAST_EXPECT(result.first == RPC::Status{rpcINVALID_PARAMS}); + BEAST_EXPECT(result.second == 0); + + // Test invalid type. + tx[jss::type] = 1234; + result = RPC::chooseLedgerEntryType(tx); + BEAST_EXPECT(result.first == RPC::Status{rpcINVALID_PARAMS}); + BEAST_EXPECT(result.second == 0); + + // Test unknown type. + tx[jss::type] = "unknown"; + result = RPC::chooseLedgerEntryType(tx); + BEAST_EXPECT(result.first == RPC::Status{rpcINVALID_PARAMS}); + BEAST_EXPECT(result.second == 0); + } + + void + run() override + { + testChooseLedgerEntryType(); + } +}; + +BEAST_DEFINE_TESTSUITE(RPCHelpers, app, ripple); + +} // namespace test +} // namespace ripple diff --git a/src/xrpld/rpc/detail/RPCHelpers.cpp b/src/xrpld/rpc/detail/RPCHelpers.cpp index e99f175cbe7..11c7a7698f3 100644 --- a/src/xrpld/rpc/detail/RPCHelpers.cpp +++ b/src/xrpld/rpc/detail/RPCHelpers.cpp @@ -35,8 +35,7 @@ #include #include - -#include +#include namespace ripple { namespace RPC { @@ -934,18 +933,19 @@ chooseLedgerEntryType(Json::Value const& params) std::pair result{RPC::Status::OK, ltANY}; if (params.isMember(jss::type)) { - static constexpr auto types = - std::to_array>({ + static constexpr auto types = std::to_array< + std::tuple>({ #pragma push_macro("LEDGER_ENTRY") #undef LEDGER_ENTRY -#define LEDGER_ENTRY(tag, value, name, rpcName, fields) {jss::rpcName, tag}, +#define LEDGER_ENTRY(tag, value, name, rpcName, fields) \ + {jss::name, jss::rpcName, tag}, #include #undef LEDGER_ENTRY #pragma pop_macro("LEDGER_ENTRY") - }); + }); auto const& p = params[jss::type]; if (!p.isString()) @@ -958,10 +958,14 @@ chooseLedgerEntryType(Json::Value const& params) return result; } + // Use the passed in parameter to find a ledger type based on matching + // against the canonical name (case-insensitive) or the RPC name + // (case-sensitive). auto const filter = p.asString(); - auto iter = std::find_if( - types.begin(), types.end(), [&filter](decltype(types.front())& t) { - return t.first == filter; + const auto iter = + std::ranges::find_if(types, [&filter](decltype(types.front())& t) { + return boost::iequals(std::get<0>(t), filter) || + std::get<1>(t) == filter; }); if (iter == types.end()) { @@ -973,7 +977,7 @@ chooseLedgerEntryType(Json::Value const& params) "type"); return result; } - result.second = iter->second; + result.second = std::get<2>(*iter); } return result; } From dd5e6559dda14f8a1684cf81d90b86920628a4df Mon Sep 17 00:00:00 2001 From: Ed Hennis Date: Fri, 14 Feb 2025 18:51:51 -0500 Subject: [PATCH 17/29] Reduce duplicate peer traffic for ledger data (#5126) - Drop duplicate outgoing TMGetLedger messages per peer - Allow a retry after 30s in case of peer or network congestion. - Addresses RIPD-1870 - (Changes levelization. That is not desirable, and will need to be fixed.) - Drop duplicate incoming TMGetLedger messages per peer - Allow a retry after 15s in case of peer or network congestion. - The requestCookie is ignored when computing the hash, thus increasing the chances of detecting duplicate messages. - With duplicate messages, keep track of the different requestCookies (or lack of cookie). 
When work is finally done for a given request, send the response to all the peers that are waiting on the request, sending one message per peer, including all the cookies and a "directResponse" flag indicating the data is intended for the sender, too. - Addresses RIPD-1871 - Drop duplicate incoming TMLedgerData messages - Addresses RIPD-1869 - Improve logging related to ledger acquisition - Class "CanProcess" to keep track of processing of distinct items --------- Co-authored-by: Valentin Balaschenko <13349202+vlntb@users.noreply.github.com> --- Builds/levelization/results/loops.txt | 2 +- include/xrpl/basics/CanProcess.h | 134 ++++++ include/xrpl/basics/base_uint.h | 7 + include/xrpl/proto/ripple.proto | 10 + include/xrpl/protocol/LedgerHeader.h | 2 + src/test/app/HashRouter_test.cpp | 28 ++ src/test/app/LedgerReplay_test.cpp | 5 + src/test/basics/base_uint_test.cpp | 5 + src/test/overlay/ProtocolVersion_test.cpp | 4 +- src/test/overlay/reduce_relay_test.cpp | 5 + src/xrpld/app/consensus/RCLConsensus.cpp | 3 +- src/xrpld/app/ledger/InboundLedger.h | 19 + src/xrpld/app/ledger/detail/InboundLedger.cpp | 21 +- .../app/ledger/detail/InboundLedgers.cpp | 134 ++++-- src/xrpld/app/ledger/detail/LedgerMaster.cpp | 5 +- .../app/ledger/detail/TimeoutCounter.cpp | 11 +- src/xrpld/app/ledger/detail/TimeoutCounter.h | 3 + src/xrpld/app/misc/HashRouter.cpp | 23 + src/xrpld/app/misc/HashRouter.h | 42 +- src/xrpld/app/misc/NetworkOPs.cpp | 84 ++-- src/xrpld/app/misc/NetworkOPs.h | 2 +- src/xrpld/overlay/Peer.h | 8 + src/xrpld/overlay/detail/PeerImp.cpp | 451 ++++++++++++++++-- src/xrpld/overlay/detail/PeerImp.h | 36 +- src/xrpld/overlay/detail/PeerSet.cpp | 42 +- src/xrpld/overlay/detail/ProtocolMessage.h | 66 +++ src/xrpld/overlay/detail/ProtocolVersion.cpp | 4 +- 27 files changed, 1013 insertions(+), 143 deletions(-) create mode 100644 include/xrpl/basics/CanProcess.h diff --git a/Builds/levelization/results/loops.txt b/Builds/levelization/results/loops.txt index 7c132f5429e..06ab5266c91 100644 --- a/Builds/levelization/results/loops.txt +++ b/Builds/levelization/results/loops.txt @@ -14,7 +14,7 @@ Loop: xrpld.app xrpld.net xrpld.app > xrpld.net Loop: xrpld.app xrpld.overlay - xrpld.overlay == xrpld.app + xrpld.overlay ~= xrpld.app Loop: xrpld.app xrpld.peerfinder xrpld.app > xrpld.peerfinder diff --git a/include/xrpl/basics/CanProcess.h b/include/xrpl/basics/CanProcess.h new file mode 100644 index 00000000000..3ee49d00877 --- /dev/null +++ b/include/xrpl/basics/CanProcess.h @@ -0,0 +1,134 @@ +//------------------------------------------------------------------------------ +/* + This file is part of rippled: https://github.com/ripple/rippled + Copyright (c) 2024 Ripple Labs Inc. + + Permission to use, copy, modify, and/or distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+*/ +//============================================================================== + +#ifndef RIPPLE_BASICS_CANPROCESS_H_INCLUDED +#define RIPPLE_BASICS_CANPROCESS_H_INCLUDED + +#include +#include +#include + +/** RAII class to check if an Item is already being processed on another thread, + * as indicated by it's presence in a Collection. + * + * If the Item is not in the Collection, it will be added under lock in the + * ctor, and removed under lock in the dtor. The object will be considered + * "usable" and evaluate to `true`. + * + * If the Item is in the Collection, no changes will be made to the collection, + * and the CanProcess object will be considered "unusable". + * + * It's up to the caller to decide what "usable" and "unusable" mean. (e.g. + * Process or skip a block of code, or set a flag.) + * + * The current use is to avoid lock contention that would be involved in + * processing something associated with the Item. + * + * Examples: + * + * void IncomingLedgers::acquireAsync(LedgerHash const& hash, ...) + * { + * if (CanProcess check{acquiresMutex_, pendingAcquires_, hash}) + * { + * acquire(hash, ...); + * } + * } + * + * bool + * NetworkOPsImp::recvValidation( + * std::shared_ptr const& val, + * std::string const& source) + * { + * CanProcess check( + * validationsMutex_, pendingValidations_, val->getLedgerHash()); + * BypassAccept bypassAccept = + * check ? BypassAccept::no : BypassAccept::yes; + * handleNewValidation(app_, val, source, bypassAccept, m_journal); + * } + * + */ +class CanProcess +{ +public: + template + CanProcess(Mutex& mtx, Collection& collection, Item const& item) + : cleanup_(insert(mtx, collection, item)) + { + } + + ~CanProcess() + { + if (cleanup_) + cleanup_(); + } + + explicit + operator bool() const + { + return static_cast(cleanup_); + } + +private: + template + std::function + doInsert(Mutex& mtx, Collection& collection, Item const& item) + { + std::unique_lock lock(mtx); + // TODO: Use structured binding once LLVM 16 is the minimum supported + // version. 
See also: https://github.com/llvm/llvm-project/issues/48582 + // https://github.com/llvm/llvm-project/commit/127bf44385424891eb04cff8e52d3f157fc2cb7c + auto const insertResult = collection.insert(item); + auto const it = insertResult.first; + if (!insertResult.second) + return {}; + if constexpr (useIterator) + return [&, it]() { + std::unique_lock lock(mtx); + collection.erase(it); + }; + else + return [&]() { + std::unique_lock lock(mtx); + collection.erase(item); + }; + } + + // Generic insert() function doesn't use iterators because they may get + // invalidated + template + std::function + insert(Mutex& mtx, Collection& collection, Item const& item) + { + return doInsert(mtx, collection, item); + } + + // Specialize insert() for std::set, which does not invalidate iterators for + // insert and erase + template + std::function + insert(Mutex& mtx, std::set& collection, Item const& item) + { + return doInsert(mtx, collection, item); + } + + // If set, then the item is "usable" + std::function cleanup_; +}; + +#endif diff --git a/include/xrpl/basics/base_uint.h b/include/xrpl/basics/base_uint.h index 05d83b3bb0a..a2c714f4be1 100644 --- a/include/xrpl/basics/base_uint.h +++ b/include/xrpl/basics/base_uint.h @@ -631,6 +631,13 @@ to_string(base_uint const& a) return strHex(a.cbegin(), a.cend()); } +template +inline std::string +to_short_string(base_uint const& a) +{ + return strHex(a.cbegin(), a.cend()).substr(0, 8) + "..."; +} + template inline std::ostream& operator<<(std::ostream& out, base_uint const& u) diff --git a/include/xrpl/proto/ripple.proto b/include/xrpl/proto/ripple.proto index a06bbd9a311..e121a39706c 100644 --- a/include/xrpl/proto/ripple.proto +++ b/include/xrpl/proto/ripple.proto @@ -321,8 +321,18 @@ message TMLedgerData required uint32 ledgerSeq = 2; required TMLedgerInfoType type = 3; repeated TMLedgerNode nodes = 4; + // If the peer supports "responseCookies", this field will + // never be populated. optional uint32 requestCookie = 5; optional TMReplyError error = 6; + // The old field is called "requestCookie", but this is + // a response, so this name makes more sense + repeated uint32 responseCookies = 7; + // If a TMGetLedger request was received without a "requestCookie", + // and the peer supports it, this flag will be set to true to + // indicate that the receiver should process the result in addition + // to forwarding it to its "responseCookies" peers. + optional bool directResponse = 8; } message TMPing diff --git a/include/xrpl/protocol/LedgerHeader.h b/include/xrpl/protocol/LedgerHeader.h index 0b35979971a..806e732593a 100644 --- a/include/xrpl/protocol/LedgerHeader.h +++ b/include/xrpl/protocol/LedgerHeader.h @@ -55,6 +55,8 @@ struct LedgerHeader // If validated is false, it means "not yet validated." // Once validated is true, it will never be set false at a later time. + // NOTE: If you are accessing this directly, you are probably doing it + // wrong. Use LedgerMaster::isValidated(). 
// VFALCO TODO Make this not mutable bool mutable validated = false; bool accepted = false; diff --git a/src/test/app/HashRouter_test.cpp b/src/test/app/HashRouter_test.cpp index 1234bc5b9cb..68e0d830657 100644 --- a/src/test/app/HashRouter_test.cpp +++ b/src/test/app/HashRouter_test.cpp @@ -242,6 +242,33 @@ class HashRouter_test : public beast::unit_test::suite BEAST_EXPECT(router.shouldProcess(key, peer, flags, 1s)); } + void + testProcessPeer() + { + using namespace std::chrono_literals; + TestStopwatch stopwatch; + HashRouter router(stopwatch, 5s); + uint256 const key(1); + HashRouter::PeerShortID peer1 = 1; + HashRouter::PeerShortID peer2 = 2; + auto const timeout = 2s; + + BEAST_EXPECT(router.shouldProcessForPeer(key, peer1, timeout)); + BEAST_EXPECT(!router.shouldProcessForPeer(key, peer1, timeout)); + ++stopwatch; + BEAST_EXPECT(!router.shouldProcessForPeer(key, peer1, timeout)); + BEAST_EXPECT(router.shouldProcessForPeer(key, peer2, timeout)); + BEAST_EXPECT(!router.shouldProcessForPeer(key, peer2, timeout)); + ++stopwatch; + BEAST_EXPECT(router.shouldProcessForPeer(key, peer1, timeout)); + BEAST_EXPECT(!router.shouldProcessForPeer(key, peer2, timeout)); + ++stopwatch; + BEAST_EXPECT(router.shouldProcessForPeer(key, peer2, timeout)); + ++stopwatch; + BEAST_EXPECT(router.shouldProcessForPeer(key, peer1, timeout)); + BEAST_EXPECT(!router.shouldProcessForPeer(key, peer2, timeout)); + } + public: void run() override @@ -252,6 +279,7 @@ class HashRouter_test : public beast::unit_test::suite testSetFlags(); testRelay(); testProcess(); + testProcessPeer(); } }; diff --git a/src/test/app/LedgerReplay_test.cpp b/src/test/app/LedgerReplay_test.cpp index 883aca7bced..d4911f82833 100644 --- a/src/test/app/LedgerReplay_test.cpp +++ b/src/test/app/LedgerReplay_test.cpp @@ -322,6 +322,11 @@ class TestPeer : public Peer { return false; } + std::set> + releaseRequestCookies(uint256 const& requestHash) override + { + return {}; + } bool ledgerReplayEnabled_; PublicKey nodePublicKey_; diff --git a/src/test/basics/base_uint_test.cpp b/src/test/basics/base_uint_test.cpp index 9f3194f4fbc..50411461e0d 100644 --- a/src/test/basics/base_uint_test.cpp +++ b/src/test/basics/base_uint_test.cpp @@ -151,6 +151,7 @@ struct base_uint_test : beast::unit_test::suite uset.insert(u); BEAST_EXPECT(raw.size() == u.size()); BEAST_EXPECT(to_string(u) == "0102030405060708090A0B0C"); + BEAST_EXPECT(to_short_string(u) == "01020304..."); BEAST_EXPECT(*u.data() == 1); BEAST_EXPECT(u.signum() == 1); BEAST_EXPECT(!!u); @@ -173,6 +174,7 @@ struct base_uint_test : beast::unit_test::suite test96 v{~u}; uset.insert(v); BEAST_EXPECT(to_string(v) == "FEFDFCFBFAF9F8F7F6F5F4F3"); + BEAST_EXPECT(to_short_string(v) == "FEFDFCFB..."); BEAST_EXPECT(*v.data() == 0xfe); BEAST_EXPECT(v.signum() == 1); BEAST_EXPECT(!!v); @@ -193,6 +195,7 @@ struct base_uint_test : beast::unit_test::suite test96 z{beast::zero}; uset.insert(z); BEAST_EXPECT(to_string(z) == "000000000000000000000000"); + BEAST_EXPECT(to_short_string(z) == "00000000..."); BEAST_EXPECT(*z.data() == 0); BEAST_EXPECT(*z.begin() == 0); BEAST_EXPECT(*std::prev(z.end(), 1) == 0); @@ -213,6 +216,7 @@ struct base_uint_test : beast::unit_test::suite BEAST_EXPECT(n == z); n--; BEAST_EXPECT(to_string(n) == "FFFFFFFFFFFFFFFFFFFFFFFF"); + BEAST_EXPECT(to_short_string(n) == "FFFFFFFF..."); n = beast::zero; BEAST_EXPECT(n == z); @@ -223,6 +227,7 @@ struct base_uint_test : beast::unit_test::suite test96 x{zm1 ^ zp1}; uset.insert(x); BEAST_EXPECTS(to_string(x) == "FFFFFFFFFFFFFFFFFFFFFFFE", 
to_string(x)); + BEAST_EXPECTS(to_short_string(x) == "FFFFFFFF...", to_short_string(x)); BEAST_EXPECT(uset.size() == 4); diff --git a/src/test/overlay/ProtocolVersion_test.cpp b/src/test/overlay/ProtocolVersion_test.cpp index dfc0ee70b8e..97469c59805 100644 --- a/src/test/overlay/ProtocolVersion_test.cpp +++ b/src/test/overlay/ProtocolVersion_test.cpp @@ -87,8 +87,8 @@ class ProtocolVersion_test : public beast::unit_test::suite negotiateProtocolVersion("XRPL/2.2") == make_protocol(2, 2)); BEAST_EXPECT( negotiateProtocolVersion( - "RTXP/1.2, XRPL/2.2, XRPL/2.3, XRPL/999.999") == - make_protocol(2, 2)); + "RTXP/1.2, XRPL/2.2, XRPL/2.3, XRPL/2.4, XRPL/999.999") == + make_protocol(2, 3)); BEAST_EXPECT( negotiateProtocolVersion("XRPL/999.999, WebSocket/1.0") == std::nullopt); diff --git a/src/test/overlay/reduce_relay_test.cpp b/src/test/overlay/reduce_relay_test.cpp index e0edae54897..e907f60b0e2 100644 --- a/src/test/overlay/reduce_relay_test.cpp +++ b/src/test/overlay/reduce_relay_test.cpp @@ -182,6 +182,11 @@ class PeerPartial : public Peer removeTxQueue(const uint256&) override { } + std::set> + releaseRequestCookies(uint256 const& requestHash) override + { + return {}; + } }; /** Manually advanced clock. */ diff --git a/src/xrpld/app/consensus/RCLConsensus.cpp b/src/xrpld/app/consensus/RCLConsensus.cpp index a746b30357d..47414cd20ab 100644 --- a/src/xrpld/app/consensus/RCLConsensus.cpp +++ b/src/xrpld/app/consensus/RCLConsensus.cpp @@ -1073,7 +1073,8 @@ void RCLConsensus::Adaptor::updateOperatingMode(std::size_t const positions) const { if (!positions && app_.getOPs().isFull()) - app_.getOPs().setMode(OperatingMode::CONNECTED); + app_.getOPs().setMode( + OperatingMode::CONNECTED, "updateOperatingMode: no positions"); } void diff --git a/src/xrpld/app/ledger/InboundLedger.h b/src/xrpld/app/ledger/InboundLedger.h index 13f603e79d0..ccd9aa0710f 100644 --- a/src/xrpld/app/ledger/InboundLedger.h +++ b/src/xrpld/app/ledger/InboundLedger.h @@ -196,6 +196,25 @@ class InboundLedger final : public TimeoutCounter, std::unique_ptr mPeerSet; }; +inline std::string +to_string(InboundLedger::Reason reason) +{ + using enum InboundLedger::Reason; + switch (reason) + { + case HISTORY: + return "HISTORY"; + case GENERIC: + return "GENERIC"; + case CONSENSUS: + return "CONSENSUS"; + default: + UNREACHABLE( + "ripple::to_string(InboundLedger::Reason) : unknown value"); + return "unknown"; + } +} + } // namespace ripple #endif diff --git a/src/xrpld/app/ledger/detail/InboundLedger.cpp b/src/xrpld/app/ledger/detail/InboundLedger.cpp index 32fdff76ab3..ca955d14ff3 100644 --- a/src/xrpld/app/ledger/detail/InboundLedger.cpp +++ b/src/xrpld/app/ledger/detail/InboundLedger.cpp @@ -392,7 +392,14 @@ InboundLedger::onTimer(bool wasProgress, ScopedLockType&) if (!wasProgress) { - checkLocal(); + if (checkLocal()) + { + // Done. 
Something else (probably consensus) built the ledger + // locally while waiting for data (or possibly before requesting) + XRPL_ASSERT(isDone(), "ripple::InboundLedger::onTimer : done"); + JLOG(journal_.info()) << "Finished while waiting " << hash_; + return; + } mByHash = true; @@ -502,15 +509,17 @@ InboundLedger::trigger(std::shared_ptr const& peer, TriggerReason reason) if (auto stream = journal_.debug()) { - stream << "Trigger acquiring ledger " << hash_; + std::stringstream ss; + ss << "Trigger acquiring ledger " << hash_; if (peer) - stream << " from " << peer; + ss << " from " << peer; if (complete_ || failed_) - stream << "complete=" << complete_ << " failed=" << failed_; + ss << " complete=" << complete_ << " failed=" << failed_; else - stream << "header=" << mHaveHeader << " tx=" << mHaveTransactions - << " as=" << mHaveState; + ss << " header=" << mHaveHeader << " tx=" << mHaveTransactions + << " as=" << mHaveState; + stream << ss.str(); } if (!mHaveHeader) diff --git a/src/xrpld/app/ledger/detail/InboundLedgers.cpp b/src/xrpld/app/ledger/detail/InboundLedgers.cpp index 99a26ce8f9f..a6699aa73f4 100644 --- a/src/xrpld/app/ledger/detail/InboundLedgers.cpp +++ b/src/xrpld/app/ledger/detail/InboundLedgers.cpp @@ -23,9 +23,9 @@ #include #include #include +#include #include #include -#include #include #include #include @@ -77,11 +77,85 @@ class InboundLedgersImp : public InboundLedgers hash.isNonZero(), "ripple::InboundLedgersImp::acquire::doAcquire : nonzero hash"); - // probably not the right rule - if (app_.getOPs().isNeedNetworkLedger() && - (reason != InboundLedger::Reason::GENERIC) && - (reason != InboundLedger::Reason::CONSENSUS)) + bool const needNetworkLedger = app_.getOPs().isNeedNetworkLedger(); + bool const shouldAcquire = [&]() { + if (!needNetworkLedger) + return true; + if (reason == InboundLedger::Reason::GENERIC) + return true; + if (reason == InboundLedger::Reason::CONSENSUS) + return true; + return false; + }(); + + std::stringstream ss; + ss << "InboundLedger::acquire: " + << "Request: " << to_string(hash) << ", " << seq + << " NeedNetworkLedger: " << (needNetworkLedger ? "yes" : "no") + << " Reason: " << to_string(reason) + << " Should acquire: " << (shouldAcquire ? "true." : "false."); + + /* Acquiring ledgers is somewhat expensive. It requires lots of + * computation and network communication. Avoid it when it's not + * appropriate. Every validation from a peer for a ledger that + * we do not have locally results in a call to this function: even + * if we are moments away from validating the same ledger. + */ + bool const shouldBroadcast = [&]() { + // If the node is not in "full" state, it needs to sync to + // the network, and doesn't have the necessary tx's and + // ledger entries to build the ledger. + bool const isFull = app_.getOPs().isFull(); + // If everything else is ok, don't try to acquire the ledger + // if the requested seq is in the near future relative to + // the validated ledger. If the requested ledger is between + // 1 and 19 inclusive ledgers ahead of the valid ledger this + // node has not built it yet, but it's possible/likely it + // has the tx's necessary to build it and get caught up. + // Plus it might not become validated. On the other hand, if + // it's more than 20 in the future, this node should request + // it so that it can jump ahead and get caught up. 
+ LedgerIndex const validSeq = + app_.getLedgerMaster().getValidLedgerIndex(); + constexpr std::size_t lagLeeway = 20; + bool const nearFuture = + (seq > validSeq) && (seq < validSeq + lagLeeway); + // If everything else is ok, don't try to acquire the ledger + // if the request is related to consensus. (Note that + // consensus calls usually pass a seq of 0, so nearFuture + // will be false other than on a brand new network.) + bool const consensus = + reason == InboundLedger::Reason::CONSENSUS; + ss << " Evaluating whether to broadcast requests to peers" + << ". full: " << (isFull ? "true" : "false") + << ". ledger sequence " << seq + << ". Valid sequence: " << validSeq + << ". Lag leeway: " << lagLeeway + << ". request for near future ledger: " + << (nearFuture ? "true" : "false") + << ". Consensus: " << (consensus ? "true" : "false"); + + // If the node is not synced, send requests. + if (!isFull) + return true; + // If the ledger is in the near future, do NOT send requests. + // This node is probably about to build it. + if (nearFuture) + return false; + // If the request is because of consensus, do NOT send requests. + // This node is probably about to build it. + if (consensus) + return false; + return true; + }(); + ss << ". Would broadcast to peers? " + << (shouldBroadcast ? "true." : "false."); + + if (!shouldAcquire) + { + JLOG(j_.debug()) << "Abort(rule): " << ss.str(); return {}; + } bool isNew = true; std::shared_ptr inbound; @@ -89,6 +163,7 @@ class InboundLedgersImp : public InboundLedgers ScopedLockType sl(mLock); if (stopping_) { + JLOG(j_.debug()) << "Abort(stopping): " << ss.str(); return {}; } @@ -112,23 +187,29 @@ class InboundLedgersImp : public InboundLedgers ++mCounter; } } + ss << " IsNew: " << (isNew ? "true" : "false"); if (inbound->isFailed()) + { + JLOG(j_.debug()) << "Abort(failed): " << ss.str(); return {}; + } if (!isNew) inbound->update(seq); if (!inbound->isComplete()) + { + JLOG(j_.debug()) << "InProgress: " << ss.str(); return {}; + } + JLOG(j_.debug()) << "Complete: " << ss.str(); return inbound->getLedger(); }; using namespace std::chrono_literals; - std::shared_ptr ledger = perf::measureDurationAndLog( + return perf::measureDurationAndLog( doAcquire, "InboundLedgersImp::acquire", 500ms, j_); - - return ledger; } void @@ -137,28 +218,25 @@ class InboundLedgersImp : public InboundLedgers std::uint32_t seq, InboundLedger::Reason reason) override { - std::unique_lock lock(acquiresMutex_); - try - { - if (pendingAcquires_.contains(hash)) - return; - pendingAcquires_.insert(hash); - scope_unlock unlock(lock); - acquire(hash, seq, reason); - } - catch (std::exception const& e) - { - JLOG(j_.warn()) - << "Exception thrown for acquiring new inbound ledger " << hash - << ": " << e.what(); - } - catch (...) + if (CanProcess const check{acquiresMutex_, pendingAcquires_, hash}) { - JLOG(j_.warn()) - << "Unknown exception thrown for acquiring new inbound ledger " - << hash; + try + { + acquire(hash, seq, reason); + } + catch (std::exception const& e) + { + JLOG(j_.warn()) + << "Exception thrown for acquiring new inbound ledger " + << hash << ": " << e.what(); + } + catch (...) 
+ { + JLOG(j_.warn()) << "Unknown exception thrown for acquiring new " + "inbound ledger " + << hash; + } } - pendingAcquires_.erase(hash); } std::shared_ptr diff --git a/src/xrpld/app/ledger/detail/LedgerMaster.cpp b/src/xrpld/app/ledger/detail/LedgerMaster.cpp index 6bc894da487..7875541e7bc 100644 --- a/src/xrpld/app/ledger/detail/LedgerMaster.cpp +++ b/src/xrpld/app/ledger/detail/LedgerMaster.cpp @@ -973,8 +973,9 @@ LedgerMaster::checkAccept(std::shared_ptr const& ledger) } JLOG(m_journal.info()) << "Advancing accepted ledger to " - << ledger->info().seq << " with >= " << minVal - << " validations"; + << ledger->info().seq << " (" + << to_short_string(ledger->info().hash) + << ") with >= " << minVal << " validations"; ledger->setValidated(); ledger->setFull(); diff --git a/src/xrpld/app/ledger/detail/TimeoutCounter.cpp b/src/xrpld/app/ledger/detail/TimeoutCounter.cpp index 35d8f1fffb1..343bbd83db4 100644 --- a/src/xrpld/app/ledger/detail/TimeoutCounter.cpp +++ b/src/xrpld/app/ledger/detail/TimeoutCounter.cpp @@ -33,7 +33,8 @@ TimeoutCounter::TimeoutCounter( QueueJobParameter&& jobParameter, beast::Journal journal) : app_(app) - , journal_(journal) + , sink_(journal, to_short_string(hash) + " ") + , journal_(sink_) , hash_(hash) , timeouts_(0) , complete_(false) @@ -53,6 +54,8 @@ TimeoutCounter::setTimer(ScopedLockType& sl) { if (isDone()) return; + JLOG(journal_.debug()) << "Setting timer for " << timerInterval_.count() + << "ms"; timer_.expires_after(timerInterval_); timer_.async_wait( [wptr = pmDowncast()](boost::system::error_code const& ec) { @@ -61,6 +64,12 @@ TimeoutCounter::setTimer(ScopedLockType& sl) if (auto ptr = wptr.lock()) { + JLOG(ptr->journal_.debug()) + << "timer: ec: " << ec << " (operation_aborted: " + << boost::asio::error::operation_aborted << " - " + << (ec == boost::asio::error::operation_aborted ? "aborted" + : "other") + << ")"; ScopedLockType sl(ptr->mtx_); ptr->queueJob(sl); } diff --git a/src/xrpld/app/ledger/detail/TimeoutCounter.h b/src/xrpld/app/ledger/detail/TimeoutCounter.h index 228e879d4de..a65208a938b 100644 --- a/src/xrpld/app/ledger/detail/TimeoutCounter.h +++ b/src/xrpld/app/ledger/detail/TimeoutCounter.h @@ -24,6 +24,8 @@ #include #include #include +#include + #include #include @@ -121,6 +123,7 @@ class TimeoutCounter // Used in this class for access to boost::asio::io_service and // ripple::Overlay. Used in subtypes for the kitchen sink. 
Application& app_; + beast::WrappedSink sink_; beast::Journal journal_; mutable std::recursive_mutex mtx_; diff --git a/src/xrpld/app/misc/HashRouter.cpp b/src/xrpld/app/misc/HashRouter.cpp index 58e811d4b8f..28d2449db5e 100644 --- a/src/xrpld/app/misc/HashRouter.cpp +++ b/src/xrpld/app/misc/HashRouter.cpp @@ -90,6 +90,20 @@ HashRouter::shouldProcess( return s.shouldProcess(suppressionMap_.clock().now(), tx_interval); } +bool +HashRouter::shouldProcessForPeer( + uint256 const& key, + PeerShortID peer, + std::chrono::seconds interval) +{ + std::lock_guard lock(mutex_); + + auto& entry = emplace(key).first; + + return entry.shouldProcessForPeer( + peer, suppressionMap_.clock().now(), interval); +} + int HashRouter::getFlags(uint256 const& key) { @@ -128,4 +142,13 @@ HashRouter::shouldRelay(uint256 const& key) return s.releasePeerSet(); } +auto +HashRouter::getPeers(uint256 const& key) -> std::set +{ + std::lock_guard lock(mutex_); + + auto& s = emplace(key).first; + return s.peekPeerSet(); +} + } // namespace ripple diff --git a/src/xrpld/app/misc/HashRouter.h b/src/xrpld/app/misc/HashRouter.h index e9d040fc8bf..403c7ce8603 100644 --- a/src/xrpld/app/misc/HashRouter.h +++ b/src/xrpld/app/misc/HashRouter.h @@ -92,6 +92,13 @@ class HashRouter return std::move(peers_); } + /** Return set of peers waiting for reply. Leaves list unchanged. */ + std::set const& + peekPeerSet() + { + return peers_; + } + /** Return seated relay time point if the message has been relayed */ std::optional relayed() const @@ -125,6 +132,21 @@ class HashRouter return true; } + bool + shouldProcessForPeer( + PeerShortID peer, + Stopwatch::time_point now, + std::chrono::seconds interval) + { + if (peerProcessed_.contains(peer) && + ((peerProcessed_[peer] + interval) > now)) + return false; + // Peer may already be in the list, but adding it again doesn't hurt + addPeer(peer); + peerProcessed_[peer] = now; + return true; + } + private: int flags_ = 0; std::set peers_; @@ -132,6 +154,7 @@ class HashRouter // than one flag needs to expire independently. std::optional relayed_; std::optional processed_; + std::map peerProcessed_; }; public: @@ -163,7 +186,7 @@ class HashRouter /** Add a suppression peer and get message's relay status. * Return pair: - * element 1: true if the peer is added. + * element 1: true if the key is added. * element 2: optional is seated to the relay time point or * is unseated if has not relayed yet. */ std::pair> @@ -180,6 +203,18 @@ class HashRouter int& flags, std::chrono::seconds tx_interval); + /** Determines whether the hashed item should be processed for the given + peer. Could be an incoming or outgoing message. + + Items filtered with this function should only be processed for the given + peer once. Unlike shouldProcess, it can be processed for other peers. + */ + bool + shouldProcessForPeer( + uint256 const& key, + PeerShortID peer, + std::chrono::seconds interval); + /** Set the flags on a hash. @return `true` if the flags were changed. `false` if unchanged. 
@@ -205,6 +240,11 @@ class HashRouter std::optional> shouldRelay(uint256 const& key); + /** Returns a copy of the set of peers in the Entry for the key + */ + std::set + getPeers(uint256 const& key); + private: // pair.second indicates whether the entry was created std::pair diff --git a/src/xrpld/app/misc/NetworkOPs.cpp b/src/xrpld/app/misc/NetworkOPs.cpp index 996a1fdf748..e526382df03 100644 --- a/src/xrpld/app/misc/NetworkOPs.cpp +++ b/src/xrpld/app/misc/NetworkOPs.cpp @@ -50,10 +50,10 @@ #include #include #include +#include #include #include #include -#include #include #include #include @@ -403,7 +403,7 @@ class NetworkOPsImp final : public NetworkOPs isFull() override; void - setMode(OperatingMode om) override; + setMode(OperatingMode om, const char* reason) override; bool isBlocked() override; @@ -874,7 +874,7 @@ NetworkOPsImp::strOperatingMode(bool const admin /* = false */) const inline void NetworkOPsImp::setStandAlone() { - setMode(OperatingMode::FULL); + setMode(OperatingMode::FULL, "setStandAlone"); } inline void @@ -1022,7 +1022,9 @@ NetworkOPsImp::processHeartbeatTimer() { if (mMode != OperatingMode::DISCONNECTED) { - setMode(OperatingMode::DISCONNECTED); + setMode( + OperatingMode::DISCONNECTED, + "Heartbeat: insufficient peers"); JLOG(m_journal.warn()) << "Node count (" << numPeers << ") has fallen " << "below required minimum (" << minPeerCount_ << ")."; @@ -1038,7 +1040,7 @@ NetworkOPsImp::processHeartbeatTimer() if (mMode == OperatingMode::DISCONNECTED) { - setMode(OperatingMode::CONNECTED); + setMode(OperatingMode::CONNECTED, "Heartbeat: sufficient peers"); JLOG(m_journal.info()) << "Node count (" << numPeers << ") is sufficient."; } @@ -1046,9 +1048,9 @@ NetworkOPsImp::processHeartbeatTimer() // Check if the last validated ledger forces a change between these // states. if (mMode == OperatingMode::SYNCING) - setMode(OperatingMode::SYNCING); + setMode(OperatingMode::SYNCING, "Heartbeat: check syncing"); else if (mMode == OperatingMode::CONNECTED) - setMode(OperatingMode::CONNECTED); + setMode(OperatingMode::CONNECTED, "Heartbeat: check connected"); } mConsensus.timerEntry(app_.timeKeeper().closeTime()); @@ -1614,7 +1616,7 @@ void NetworkOPsImp::setAmendmentBlocked() { amendmentBlocked_ = true; - setMode(OperatingMode::CONNECTED); + setMode(OperatingMode::CONNECTED, "setAmendmentBlocked"); } inline bool @@ -1645,7 +1647,7 @@ void NetworkOPsImp::setUNLBlocked() { unlBlocked_ = true; - setMode(OperatingMode::CONNECTED); + setMode(OperatingMode::CONNECTED, "setUNLBlocked"); } inline void @@ -1746,7 +1748,7 @@ NetworkOPsImp::checkLastClosedLedger( if ((mMode == OperatingMode::TRACKING) || (mMode == OperatingMode::FULL)) { - setMode(OperatingMode::CONNECTED); + setMode(OperatingMode::CONNECTED, "check LCL: not on consensus ledger"); } if (consensus) @@ -1833,8 +1835,9 @@ NetworkOPsImp::beginConsensus(uint256 const& networkClosed) // this shouldn't happen unless we jump ledgers if (mMode == OperatingMode::FULL) { - JLOG(m_journal.warn()) << "Don't have LCL, going to tracking"; - setMode(OperatingMode::TRACKING); + JLOG(m_journal.warn()) + << "beginConsensus Don't have LCL, going to tracking"; + setMode(OperatingMode::TRACKING, "beginConsensus: No LCL"); } return false; @@ -1944,7 +1947,7 @@ NetworkOPsImp::endConsensus() // validations we have for LCL. 
If the ledger is good enough, go to // TRACKING - TODO if (!needNetworkLedger_) - setMode(OperatingMode::TRACKING); + setMode(OperatingMode::TRACKING, "endConsensus: check tracking"); } if (((mMode == OperatingMode::CONNECTED) || @@ -1958,7 +1961,7 @@ NetworkOPsImp::endConsensus() if (app_.timeKeeper().now() < (current->info().parentCloseTime + 2 * current->info().closeTimeResolution)) { - setMode(OperatingMode::FULL); + setMode(OperatingMode::FULL, "endConsensus: check full"); } } @@ -1970,7 +1973,7 @@ NetworkOPsImp::consensusViewChange() { if ((mMode == OperatingMode::FULL) || (mMode == OperatingMode::TRACKING)) { - setMode(OperatingMode::CONNECTED); + setMode(OperatingMode::CONNECTED, "consensusViewChange"); } } @@ -2288,7 +2291,7 @@ NetworkOPsImp::pubPeerStatus(std::function const& func) } void -NetworkOPsImp::setMode(OperatingMode om) +NetworkOPsImp::setMode(OperatingMode om, const char* reason) { using namespace std::chrono_literals; if (om == OperatingMode::CONNECTED) @@ -2308,11 +2311,12 @@ NetworkOPsImp::setMode(OperatingMode om) if (mMode == om) return; + auto const sink = om < mMode ? m_journal.warn() : m_journal.info(); mMode = om; accounting_.mode(om); - JLOG(m_journal.info()) << "STATE->" << strOperatingMode(); + JLOG(sink) << "STATE->" << strOperatingMode() << " - " << reason; pubServer(); } @@ -2324,34 +2328,28 @@ NetworkOPsImp::recvValidation( JLOG(m_journal.trace()) << "recvValidation " << val->getLedgerHash() << " from " << source; - std::unique_lock lock(validationsMutex_); - BypassAccept bypassAccept = BypassAccept::no; - try - { - if (pendingValidations_.contains(val->getLedgerHash())) - bypassAccept = BypassAccept::yes; - else - pendingValidations_.insert(val->getLedgerHash()); - scope_unlock unlock(lock); - handleNewValidation(app_, val, source, bypassAccept, m_journal); - } - catch (std::exception const& e) { - JLOG(m_journal.warn()) - << "Exception thrown for handling new validation " - << val->getLedgerHash() << ": " << e.what(); - } - catch (...) - { - JLOG(m_journal.warn()) - << "Unknown exception thrown for handling new validation " - << val->getLedgerHash(); - } - if (bypassAccept == BypassAccept::no) - { - pendingValidations_.erase(val->getLedgerHash()); + CanProcess const check( + validationsMutex_, pendingValidations_, val->getLedgerHash()); + try + { + BypassAccept bypassAccept = + check ? BypassAccept::no : BypassAccept::yes; + handleNewValidation(app_, val, source, bypassAccept, m_journal); + } + catch (std::exception const& e) + { + JLOG(m_journal.warn()) + << "Exception thrown for handling new validation " + << val->getLedgerHash() << ": " << e.what(); + } + catch (...) 
+ { + JLOG(m_journal.warn()) + << "Unknown exception thrown for handling new validation " + << val->getLedgerHash(); + } } - lock.unlock(); pubValidation(val); diff --git a/src/xrpld/app/misc/NetworkOPs.h b/src/xrpld/app/misc/NetworkOPs.h index 166b9e9e11f..96969f4bcba 100644 --- a/src/xrpld/app/misc/NetworkOPs.h +++ b/src/xrpld/app/misc/NetworkOPs.h @@ -197,7 +197,7 @@ class NetworkOPs : public InfoSub::Source virtual bool isFull() = 0; virtual void - setMode(OperatingMode om) = 0; + setMode(OperatingMode om, const char* reason) = 0; virtual bool isBlocked() = 0; virtual bool diff --git a/src/xrpld/overlay/Peer.h b/src/xrpld/overlay/Peer.h index 2646b24a3ed..b53fcb21a96 100644 --- a/src/xrpld/overlay/Peer.h +++ b/src/xrpld/overlay/Peer.h @@ -36,6 +36,7 @@ enum class ProtocolFeature { ValidatorListPropagation, ValidatorList2Propagation, LedgerReplay, + LedgerDataCookies }; /** Represents a peer connection in the overlay. */ @@ -133,6 +134,13 @@ class Peer virtual bool txReduceRelayEnabled() const = 0; + + // + // Messages + // + + virtual std::set> + releaseRequestCookies(uint256 const& requestHash) = 0; }; } // namespace ripple diff --git a/src/xrpld/overlay/detail/PeerImp.cpp b/src/xrpld/overlay/detail/PeerImp.cpp index c3656c9445c..0fcff031116 100644 --- a/src/xrpld/overlay/detail/PeerImp.cpp +++ b/src/xrpld/overlay/detail/PeerImp.cpp @@ -30,6 +30,7 @@ #include #include #include +#include #include #include #include @@ -57,6 +58,9 @@ std::chrono::milliseconds constexpr peerHighLatency{300}; /** How often we PING the peer to check for latency and sendq probe */ std::chrono::seconds constexpr peerTimerInterval{60}; + +/** How often we process duplicate incoming TMGetLedger messages */ +std::chrono::seconds constexpr getledgerInterval{15}; } // namespace // TODO: Remove this exclusion once unit tests are added after the hotfix @@ -504,6 +508,8 @@ PeerImp::supportsFeature(ProtocolFeature f) const return protocol_ >= make_protocol(2, 2); case ProtocolFeature::LedgerReplay: return ledgerReplayEnabled_; + case ProtocolFeature::LedgerDataCookies: + return protocol_ >= make_protocol(2, 3); } return false; } @@ -1346,8 +1352,9 @@ PeerImp::handleTransaction( void PeerImp::onMessage(std::shared_ptr const& m) { - auto badData = [&](std::string const& msg) { - fee_.update(Resource::feeInvalidData, "get_ledger " + msg); + auto badData = [&](std::string const& msg, bool chargefee = true) { + if (chargefee) + fee_.update(Resource::feeInvalidData, "get_ledger " + msg); JLOG(p_journal_.warn()) << "TMGetLedger: " << msg; }; auto const itype{m->itype()}; @@ -1424,12 +1431,74 @@ PeerImp::onMessage(std::shared_ptr const& m) } } + // Drop duplicate requests from the same peer for at least + // `getLedgerInterval` seconds. + // Append a little junk to prevent the hash of an incoming messsage + // from matching the hash of the same outgoing message. + // `shouldProcessForPeer` does not distingish between incoming and + // outgoing, and some of the message relay logic checks the hash to see + // if the message has been relayed already. If the hashes are the same, + // a duplicate will be detected when sending the message is attempted, + // so it will fail. + auto const messageHash = sha512Half(*m, nullptr); + // Request cookies are not included in the hash. Track them here. 
+ auto const requestCookie = [&m]() -> std::optional { + if (m->has_requestcookie()) + return m->requestcookie(); + return std::nullopt; + }(); + auto const [inserted, pending] = [&] { + std::lock_guard lock{cookieLock_}; + auto& cookies = messageRequestCookies_[messageHash]; + bool const pending = !cookies.empty(); + return std::pair{cookies.emplace(requestCookie).second, pending}; + }(); + // Check if the request has been seen from this peer. + if (!app_.getHashRouter().shouldProcessForPeer( + messageHash, id_, getledgerInterval)) + { + // This request has already been seen from this peer. + // Has it been seen with this request cookie (or lack thereof)? + + if (inserted) + { + // This is a duplicate request, but with a new cookie. When a + // response is ready, one will be sent for each request cookie. + JLOG(p_journal_.debug()) + << "TMGetLedger: duplicate request with new request cookie: " + << requestCookie.value_or(0) + << ". Job pending: " << (pending ? "yes" : "no") << ": " + << messageHash; + if (pending) + { + // Don't bother queueing up a new job if other requests are + // already pending. This should limit entries in the job queue + // to one per peer per unique request. + JLOG(p_journal_.debug()) + << "TMGetLedger: Suppressing recvGetLedger job, since one " + "is pending: " + << messageHash; + return; + } + } + else + { + // Don't punish nodes that don't know any better + return badData( + "duplicate request: " + to_string(messageHash), + supportsFeature(ProtocolFeature::LedgerDataCookies)); + } + } + // Queue a job to process the request + JLOG(p_journal_.debug()) + << "TMGetLedger: Adding recvGetLedger job: " << messageHash; std::weak_ptr weak = shared_from_this(); - app_.getJobQueue().addJob(jtLEDGER_REQ, "recvGetLedger", [weak, m]() { - if (auto peer = weak.lock()) - peer->processLedgerRequest(m); - }); + app_.getJobQueue().addJob( + jtLEDGER_REQ, "recvGetLedger", [weak, m, messageHash]() { + if (auto peer = weak.lock()) + peer->processLedgerRequest(m, messageHash); + }); } void @@ -1545,8 +1614,9 @@ PeerImp::onMessage(std::shared_ptr const& m) void PeerImp::onMessage(std::shared_ptr const& m) { - auto badData = [&](std::string const& msg) { - fee_.update(Resource::feeInvalidData, msg); + auto badData = [&](std::string const& msg, bool charge = true) { + if (charge) + fee_.update(Resource::feeInvalidData, msg); JLOG(p_journal_.warn()) << "TMLedgerData: " << msg; }; @@ -1597,23 +1667,99 @@ PeerImp::onMessage(std::shared_ptr const& m) "Invalid Ledger/TXset nodes " + std::to_string(m->nodes_size())); } - // If there is a request cookie, attempt to relay the message - if (m->has_requestcookie()) + auto const messageHash = sha512Half(*m); + if (!app_.getHashRouter().addSuppressionPeer(messageHash, id_)) { - if (auto peer = overlay_.findPeerByShortID(m->requestcookie())) + // Don't punish nodes that don't know any better + return badData( + "Duplicate message: " + to_string(messageHash), + supportsFeature(ProtocolFeature::LedgerDataCookies)); + } + + bool const routed = m->has_directresponse() || m->responsecookies_size() || + m->has_requestcookie(); + + { + // Check if this message needs to be forwarded to one or more peers. + // Maximum of one of the relevant fields should be populated. 
+ XRPL_ASSERT( + !m->has_requestcookie() || !m->responsecookies_size(), + "ripple::PeerImp::onMessage(TMLedgerData) : valid cookie fields"); + + // Make a copy of the response cookies, then wipe the list so it can be + // forwarded cleanly + auto const responseCookies = m->responsecookies(); + m->clear_responsecookies(); + // Flag indicating if this response should be processed locally, + // possibly in addition to being forwarded. + bool const directResponse = + m->has_directresponse() && m->directresponse(); + m->clear_directresponse(); + + auto const relay = [this, m, &messageHash](auto const cookie) { + if (auto peer = overlay_.findPeerByShortID(cookie)) + { + XRPL_ASSERT( + !m->has_requestcookie() && !m->responsecookies_size(), + "ripple::PeerImp::onMessage(TMLedgerData) relay : no " + "cookies"); + if (peer->supportsFeature(ProtocolFeature::LedgerDataCookies)) + // Setting this flag is not _strictly_ necessary for peers + // that support it if there are no cookies included in the + // message, but it is more accurate. + m->set_directresponse(true); + else + m->clear_directresponse(); + peer->send( + std::make_shared(*m, protocol::mtLEDGER_DATA)); + } + else + JLOG(p_journal_.info()) + << "Unable to route TX/ledger data reply to peer [" + << cookie << "]: " << messageHash; + }; + // If there is a request cookie, attempt to relay the message + if (m->has_requestcookie()) { + XRPL_ASSERT( + responseCookies.empty(), + "ripple::PeerImp::onMessage(TMLedgerData) : no response " + "cookies"); m->clear_requestcookie(); - peer->send(std::make_shared(*m, protocol::mtLEDGER_DATA)); + relay(m->requestcookie()); + if (!directResponse && responseCookies.empty()) + return; } - else + // If there's a list of request cookies, attempt to relay the message to + // all of them. + if (responseCookies.size()) + { + for (auto const cookie : responseCookies) + relay(cookie); + if (!directResponse) + return; + } + } + + // Now that any forwarding is done check the base message (data only, no + // routing info for duplicates) + if (routed) + { + m->clear_directresponse(); + XRPL_ASSERT( + !m->has_requestcookie() && !m->responsecookies_size(), + "ripple::PeerImp::onMessage(TMLedgerData) : no cookies"); + auto const baseMessageHash = sha512Half(*m); + if (!app_.getHashRouter().addSuppressionPeer(baseMessageHash, id_)) { - JLOG(p_journal_.info()) << "Unable to route TX/ledger data reply"; + // Don't punish nodes that don't know any better + return badData( + "Duplicate message: " + to_string(baseMessageHash), + supportsFeature(ProtocolFeature::LedgerDataCookies)); } - return; } uint256 const ledgerHash{m->ledgerhash()}; - // Otherwise check if received data for a candidate transaction set if (m->type() == protocol::liTS_CANDIDATE) { @@ -2997,16 +3143,22 @@ PeerImp::checkValidation( // the TX tree with the specified root hash. 
// static std::shared_ptr -getPeerWithTree(OverlayImpl& ov, uint256 const& rootHash, PeerImp const* skip) +getPeerWithTree( + OverlayImpl& ov, + uint256 const& rootHash, + PeerImp const* skip, + std::function shouldProcessCallback) { std::shared_ptr ret; int retScore = 0; + XRPL_ASSERT( + shouldProcessCallback, "ripple::getPeerWithTree : callback provided"); ov.for_each([&](std::shared_ptr&& p) { if (p->hasTxSet(rootHash) && p.get() != skip) { auto score = p->getScore(true); - if (!ret || (score > retScore)) + if (!ret || (score > retScore && shouldProcessCallback(p->id()))) { ret = std::move(p); retScore = score; @@ -3025,16 +3177,19 @@ getPeerWithLedger( OverlayImpl& ov, uint256 const& ledgerHash, LedgerIndex ledger, - PeerImp const* skip) + PeerImp const* skip, + std::function shouldProcessCallback) { std::shared_ptr ret; int retScore = 0; + XRPL_ASSERT( + shouldProcessCallback, "ripple::getPeerWithLedger : callback provided"); ov.for_each([&](std::shared_ptr&& p) { if (p->hasLedger(ledgerHash, ledger) && p.get() != skip) { auto score = p->getScore(true); - if (!ret || (score > retScore)) + if (!ret || (score > retScore && shouldProcessCallback(p->id()))) { ret = std::move(p); retScore = score; @@ -3048,7 +3203,8 @@ getPeerWithLedger( void PeerImp::sendLedgerBase( std::shared_ptr const& ledger, - protocol::TMLedgerData& ledgerData) + protocol::TMLedgerData& ledgerData, + PeerCookieMap const& destinations) { JLOG(p_journal_.trace()) << "sendLedgerBase: Base data"; @@ -3080,15 +3236,102 @@ PeerImp::sendLedgerBase( } } - auto message{ - std::make_shared(ledgerData, protocol::mtLEDGER_DATA)}; - send(message); + sendToMultiple(ledgerData, destinations); +} + +void +PeerImp::sendToMultiple( + protocol::TMLedgerData& ledgerData, + PeerCookieMap const& destinations) +{ + bool foundSelf = false; + for (auto const& [peer, cookies] : destinations) + { + if (peer.get() == this) + foundSelf = true; + bool const multipleCookies = + peer->supportsFeature(ProtocolFeature::LedgerDataCookies); + std::vector sendCookies; + + bool directResponse = false; + if (!multipleCookies) + { + JLOG(p_journal_.debug()) + << "sendToMultiple: Sending " << cookies.size() + << " TMLedgerData messages to peer [" << peer->id() + << "]: " << sha512Half(ledgerData); + } + for (auto const& cookie : cookies) + { + // Unfortunately, need a separate Message object for every + // combination + if (cookie) + { + if (multipleCookies) + { + // Save this one for later to send a single message + sendCookies.emplace_back(*cookie); + continue; + } + + // Feature not supported, so send a single message with a + // single cookie + ledgerData.set_requestcookie(*cookie); + } + else + { + if (multipleCookies) + { + // Set this flag later on the single message + directResponse = true; + continue; + } + + ledgerData.clear_requestcookie(); + } + XRPL_ASSERT( + !multipleCookies, + "ripple::PeerImp::sendToMultiple : ledger data cookies " + "unsupported"); + auto message{ + std::make_shared(ledgerData, protocol::mtLEDGER_DATA)}; + peer->send(message); + } + if (multipleCookies) + { + // Send a single message with all the cookies and/or the direct + // response flag, so the receiver can farm out the single message to + // multiple peers and/or itself + XRPL_ASSERT( + sendCookies.size() || directResponse, + "ripple::PeerImp::sendToMultiple : valid response options"); + ledgerData.clear_requestcookie(); + ledgerData.clear_responsecookies(); + ledgerData.set_directresponse(directResponse); + for (auto const& cookie : sendCookies) + 
ledgerData.add_responsecookies(cookie); + auto message{ + std::make_shared(ledgerData, protocol::mtLEDGER_DATA)}; + peer->send(message); + + JLOG(p_journal_.debug()) + << "sendToMultiple: Sent 1 TMLedgerData message to peer [" + << peer->id() << "]: including " + << (directResponse ? "the direct response flag and " : "") + << sendCookies.size() << " response cookies. " + << ": " << sha512Half(ledgerData); + } + } + XRPL_ASSERT( + foundSelf, "ripple::PeerImp::sendToMultiple : current peer included"); } std::shared_ptr -PeerImp::getLedger(std::shared_ptr const& m) +PeerImp::getLedger( + std::shared_ptr const& m, + uint256 const& mHash) { - JLOG(p_journal_.trace()) << "getLedger: Ledger"; + JLOG(p_journal_.trace()) << "getLedger: Ledger " << mHash; std::shared_ptr ledger; @@ -3105,22 +3348,33 @@ PeerImp::getLedger(std::shared_ptr const& m) if (m->has_querytype() && !m->has_requestcookie()) { // Attempt to relay the request to a peer + // Note repeated messages will not relay to the same peer + // before `getLedgerInterval` seconds. This prevents one + // peer from getting flooded, and distributes the request + // load. If a request has been relayed to all eligible + // peers, then this message will not be relayed. if (auto const peer = getPeerWithLedger( overlay_, ledgerHash, m->has_ledgerseq() ? m->ledgerseq() : 0, - this)) + this, + [&](Peer::id_t id) { + return app_.getHashRouter().shouldProcessForPeer( + mHash, id, getledgerInterval); + })) { m->set_requestcookie(id()); peer->send( std::make_shared(*m, protocol::mtGET_LEDGER)); JLOG(p_journal_.debug()) - << "getLedger: Request relayed to peer"; + << "getLedger: Request relayed to peer [" << peer->id() + << "]: " << mHash; return ledger; } JLOG(p_journal_.trace()) - << "getLedger: Failed to find peer to relay request"; + << "getLedger: Don't have ledger with hash " << ledgerHash + << ": " << mHash; } } } @@ -3130,7 +3384,7 @@ PeerImp::getLedger(std::shared_ptr const& m) if (m->ledgerseq() < app_.getLedgerMaster().getEarliestFetch()) { JLOG(p_journal_.debug()) - << "getLedger: Early ledger sequence request"; + << "getLedger: Early ledger sequence request " << mHash; } else { @@ -3139,7 +3393,7 @@ PeerImp::getLedger(std::shared_ptr const& m) { JLOG(p_journal_.debug()) << "getLedger: Don't have ledger with sequence " - << m->ledgerseq(); + << m->ledgerseq() << ": " << mHash; } } } @@ -3162,29 +3416,33 @@ PeerImp::getLedger(std::shared_ptr const& m) Resource::feeMalformedRequest, "get_ledger ledgerSeq"); ledger.reset(); - JLOG(p_journal_.warn()) - << "getLedger: Invalid ledger sequence " << ledgerSeq; + JLOG(p_journal_.warn()) << "getLedger: Invalid ledger sequence " + << ledgerSeq << ": " << mHash; } } else if (ledgerSeq < app_.getLedgerMaster().getEarliestFetch()) { ledger.reset(); JLOG(p_journal_.debug()) - << "getLedger: Early ledger sequence request " << ledgerSeq; + << "getLedger: Early ledger sequence request " << ledgerSeq + << ": " << mHash; } } else { - JLOG(p_journal_.debug()) << "getLedger: Unable to find ledger"; + JLOG(p_journal_.debug()) + << "getLedger: Unable to find ledger " << mHash; } return ledger; } std::shared_ptr -PeerImp::getTxSet(std::shared_ptr const& m) const +PeerImp::getTxSet( + std::shared_ptr const& m, + uint256 const& mHash) const { - JLOG(p_journal_.trace()) << "getTxSet: TX set"; + JLOG(p_journal_.trace()) << "getTxSet: TX set " << mHash; uint256 const txSetHash{m->ledgerhash()}; std::shared_ptr shaMap{ @@ -3194,22 +3452,34 @@ PeerImp::getTxSet(std::shared_ptr const& m) const if (m->has_querytype() && 
!m->has_requestcookie()) { // Attempt to relay the request to a peer - if (auto const peer = getPeerWithTree(overlay_, txSetHash, this)) + // Note repeated messages will not relay to the same peer + // before `getLedgerInterval` seconds. This prevents one + // peer from getting flooded, and distributes the request + // load. If a request has been relayed to all eligible + // peers, then this message will not be relayed. + if (auto const peer = getPeerWithTree( + overlay_, txSetHash, this, [&](Peer::id_t id) { + return app_.getHashRouter().shouldProcessForPeer( + mHash, id, getledgerInterval); + })) { m->set_requestcookie(id()); peer->send( std::make_shared(*m, protocol::mtGET_LEDGER)); - JLOG(p_journal_.debug()) << "getTxSet: Request relayed"; + JLOG(p_journal_.debug()) + << "getTxSet: Request relayed to peer [" << peer->id() + << "]: " << mHash; } else { JLOG(p_journal_.debug()) - << "getTxSet: Failed to find relay peer"; + << "getTxSet: Failed to find relay peer: " << mHash; } } else { - JLOG(p_journal_.debug()) << "getTxSet: Failed to find TX set"; + JLOG(p_journal_.debug()) + << "getTxSet: Failed to find TX set " << mHash; } } @@ -3217,7 +3487,9 @@ PeerImp::getTxSet(std::shared_ptr const& m) const } void -PeerImp::processLedgerRequest(std::shared_ptr const& m) +PeerImp::processLedgerRequest( + std::shared_ptr const& m, + uint256 const& mHash) { // Do not resource charge a peer responding to a relay if (!m->has_requestcookie()) @@ -3231,9 +3503,74 @@ PeerImp::processLedgerRequest(std::shared_ptr const& m) bool fatLeaves{true}; auto const itype{m->itype()}; + auto getDestinations = [&] { + // If a ledger data message is generated, it's going to be sent to every + // peer that is waiting for it. + + PeerCookieMap result; + + std::size_t numCookies = 0; + { + // Don't do the work under this peer if this peer is not waiting for + // any replies + auto myCookies = releaseRequestCookies(mHash); + if (myCookies.empty()) + { + JLOG(p_journal_.debug()) << "TMGetLedger: peer is no longer " + "waiting for response to request: " + << mHash; + return result; + } + numCookies += myCookies.size(); + result[shared_from_this()] = myCookies; + } + + std::set const peers = + app_.getHashRouter().getPeers(mHash); + for (auto const peerID : peers) + { + // This loop does not need to be done under the HashRouter + // lock because findPeerByShortID and releaseRequestCookies + // are thread safe, and everything else is local + if (auto p = overlay_.findPeerByShortID(peerID)) + { + auto cookies = p->releaseRequestCookies(mHash); + numCookies += cookies.size(); + if (result.contains(p)) + { + // Unlikely, but if a request came in to this peer while + // iterating, add the items instead of copying / + // overwriting. + XRPL_ASSERT( + p.get() == this, + "ripple::PeerImp::processLedgerRequest : found self in " + "map"); + for (auto const& cookie : cookies) + result[p].emplace(cookie); + } + else if (cookies.size()) + result[p] = cookies; + } + } + + JLOG(p_journal_.debug()) + << "TMGetLedger: Processing request for " << result.size() + << " peers. Will send " << numCookies + << " messages if successful: " << mHash; + + return result; + }; + // Will only populate this if we're going to do work. + PeerCookieMap destinations; + if (itype == protocol::liTS_CANDIDATE) { - if (sharedMap = getTxSet(m); !sharedMap) + destinations = getDestinations(); + if (destinations.empty()) + // Nowhere to send the response! 
+ return; + + if (sharedMap = getTxSet(m, mHash); !sharedMap) return; map = sharedMap.get(); @@ -3241,8 +3578,6 @@ PeerImp::processLedgerRequest(std::shared_ptr const& m) ledgerData.set_ledgerseq(0); ledgerData.set_ledgerhash(m->ledgerhash()); ledgerData.set_type(protocol::liTS_CANDIDATE); - if (m->has_requestcookie()) - ledgerData.set_requestcookie(m->requestcookie()); // We'll already have most transactions fatLeaves = false; @@ -3261,7 +3596,12 @@ PeerImp::processLedgerRequest(std::shared_ptr const& m) return; } - if (ledger = getLedger(m); !ledger) + destinations = getDestinations(); + if (destinations.empty()) + // Nowhere to send the response! + return; + + if (ledger = getLedger(m, mHash); !ledger) return; // Fill out the reply @@ -3269,13 +3609,11 @@ PeerImp::processLedgerRequest(std::shared_ptr const& m) ledgerData.set_ledgerhash(ledgerHash.begin(), ledgerHash.size()); ledgerData.set_ledgerseq(ledger->info().seq); ledgerData.set_type(itype); - if (m->has_requestcookie()) - ledgerData.set_requestcookie(m->requestcookie()); switch (itype) { case protocol::liBASE: - sendLedgerBase(ledger, ledgerData); + sendLedgerBase(ledger, ledgerData, destinations); return; case protocol::liTX_NODE: @@ -3392,7 +3730,7 @@ PeerImp::processLedgerRequest(std::shared_ptr const& m) if (ledgerData.nodes_size() == 0) return; - send(std::make_shared(ledgerData, protocol::mtLEDGER_DATA)); + sendToMultiple(ledgerData, destinations); } int @@ -3450,6 +3788,19 @@ PeerImp::reduceRelayReady() return vpReduceRelayEnabled_ && reduceRelayReady_; } +std::set> +PeerImp::releaseRequestCookies(uint256 const& requestHash) +{ + std::set> result; + std::lock_guard lock(cookieLock_); + if (messageRequestCookies_.contains(requestHash)) + { + std::swap(result, messageRequestCookies_[requestHash]); + messageRequestCookies_.erase(requestHash); + } + return result; +}; + void PeerImp::Metrics::add_message(std::uint64_t bytes) { diff --git a/src/xrpld/overlay/detail/PeerImp.h b/src/xrpld/overlay/detail/PeerImp.h index 14591efbb18..7db2ecf5f0f 100644 --- a/src/xrpld/overlay/detail/PeerImp.h +++ b/src/xrpld/overlay/detail/PeerImp.h @@ -195,6 +195,15 @@ class PeerImp : public Peer, bool ledgerReplayEnabled_ = false; LedgerReplayMsgHandler ledgerReplayMsgHandler_; + // Track message requests and responses + // TODO: Use an expiring cache or something + using MessageCookieMap = + std::map>>; + using PeerCookieMap = + std::map, std::set>>; + std::mutex mutable cookieLock_; + MessageCookieMap messageRequestCookies_; + friend class OverlayImpl; class Metrics @@ -441,6 +450,13 @@ class PeerImp : public Peer, return txReduceRelayEnabled_; } + // + // Messages + // + + std::set> + releaseRequestCookies(uint256 const& requestHash) override; + private: void close(); @@ -639,16 +655,28 @@ class PeerImp : public Peer, void sendLedgerBase( std::shared_ptr const& ledger, - protocol::TMLedgerData& ledgerData); + protocol::TMLedgerData& ledgerData, + PeerCookieMap const& destinations); + + void + sendToMultiple( + protocol::TMLedgerData& ledgerData, + PeerCookieMap const& destinations); std::shared_ptr - getLedger(std::shared_ptr const& m); + getLedger( + std::shared_ptr const& m, + uint256 const& mHash); std::shared_ptr - getTxSet(std::shared_ptr const& m) const; + getTxSet( + std::shared_ptr const& m, + uint256 const& mHash) const; void - processLedgerRequest(std::shared_ptr const& m); + processLedgerRequest( + std::shared_ptr const& m, + uint256 const& mHash); }; //------------------------------------------------------------------------------ 
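
The request-cookie bookkeeping added to PeerImp above reduces to a small pattern: accumulate every cookie (or the no-cookie marker) seen for a given request hash, then drain the whole set under lock once a response is built, so each waiting requester gets exactly one reply. The following is a minimal, self-contained sketch of that pattern using plain standard-library types rather than the rippled classes; the names and the simplified map are hypothetical stand-ins, not the actual implementation.

    #include <cstdint>
    #include <iostream>
    #include <map>
    #include <mutex>
    #include <optional>
    #include <set>
    #include <utility>

    // Hypothetical stand-in for the PeerImp bookkeeping; not the rippled types.
    struct CookieTracker
    {
        using Cookies = std::set<std::optional<std::uint32_t>>;

        std::mutex mutex;
        std::map<std::uint64_t, Cookies> byRequest;

        // Record a request. Returns {newCookie, alreadyPending}, mirroring the
        // checks in onMessage(TMGetLedger): only the first arrival queues a job.
        std::pair<bool, bool>
        track(std::uint64_t requestHash, std::optional<std::uint32_t> cookie)
        {
            std::lock_guard lock(mutex);
            auto& cookies = byRequest[requestHash];
            bool const pending = !cookies.empty();
            return {cookies.insert(cookie).second, pending};
        }

        // Drain everything waiting on the request, as releaseRequestCookies does.
        Cookies
        release(std::uint64_t requestHash)
        {
            std::lock_guard lock(mutex);
            Cookies result;
            if (auto it = byRequest.find(requestHash); it != byRequest.end())
            {
                result.swap(it->second);
                byRequest.erase(it);
            }
            return result;
        }
    };

    int main()
    {
        CookieTracker tracker;
        tracker.track(42, std::nullopt);  // direct request (no cookie)
        tracker.track(42, 7);             // same request relayed for cookie 7
        std::cout << tracker.release(42).size() << '\n';  // 2: one reply per requester
        std::cout << tracker.release(42).size() << '\n';  // 0: already drained
    }

Using std::optional as the set element is what lets a direct (cookie-less) request and relayed requests share one entry, which mirrors how messageRequestCookies_ is used above.
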
diff --git a/src/xrpld/overlay/detail/PeerSet.cpp b/src/xrpld/overlay/detail/PeerSet.cpp index 909b20c3079..cb7b77db7fc 100644 --- a/src/xrpld/overlay/detail/PeerSet.cpp +++ b/src/xrpld/overlay/detail/PeerSet.cpp @@ -18,9 +18,11 @@ //============================================================================== #include +#include #include #include #include +#include namespace ripple { @@ -104,16 +106,52 @@ PeerSetImpl::sendRequest( std::shared_ptr const& peer) { auto packet = std::make_shared(message, type); + + auto const messageHash = [&]() { + auto const packetBuffer = + packet->getBuffer(compression::Compressed::Off); + return sha512Half(Slice(packetBuffer.data(), packetBuffer.size())); + }(); + + // Allow messages to be re-sent to the same peer after a delay + using namespace std::chrono_literals; + constexpr std::chrono::seconds interval = 30s; + if (peer) { - peer->send(packet); + if (app_.getHashRouter().shouldProcessForPeer( + messageHash, peer->id(), interval)) + { + JLOG(journal_.trace()) + << "Sending " << protocolMessageName(type) << " message to [" + << peer->id() << "]: " << messageHash; + peer->send(packet); + } + else + JLOG(journal_.debug()) + << "Suppressing sending duplicate " << protocolMessageName(type) + << " message to [" << peer->id() << "]: " << messageHash; return; } for (auto id : peers_) { if (auto p = app_.overlay().findPeerByShortID(id)) - p->send(packet); + { + if (app_.getHashRouter().shouldProcessForPeer( + messageHash, p->id(), interval)) + { + JLOG(journal_.trace()) + << "Sending " << protocolMessageName(type) + << " message to [" << p->id() << "]: " << messageHash; + p->send(packet); + } + else + JLOG(journal_.debug()) + << "Suppressing sending duplicate " + << protocolMessageName(type) << " message to [" << p->id() + << "]: " << messageHash; + } } } diff --git a/src/xrpld/overlay/detail/ProtocolMessage.h b/src/xrpld/overlay/detail/ProtocolMessage.h index 54f99eb73d0..86b630081a5 100644 --- a/src/xrpld/overlay/detail/ProtocolMessage.h +++ b/src/xrpld/overlay/detail/ProtocolMessage.h @@ -43,6 +43,12 @@ protocolMessageType(protocol::TMGetLedger const&) return protocol::mtGET_LEDGER; } +inline protocol::MessageType +protocolMessageType(protocol::TMLedgerData const&) +{ + return protocol::mtLEDGER_DATA; +} + inline protocol::MessageType protocolMessageType(protocol::TMReplayDeltaRequest const&) { @@ -486,4 +492,64 @@ invokeProtocolMessage( } // namespace ripple +namespace protocol { + +template +void +hash_append(Hasher& h, TMGetLedger const& msg) +{ + using beast::hash_append; + using namespace ripple; + hash_append(h, safe_cast(protocolMessageType(msg))); + hash_append(h, safe_cast(msg.itype())); + if (msg.has_ltype()) + hash_append(h, safe_cast(msg.ltype())); + + if (msg.has_ledgerhash()) + hash_append(h, msg.ledgerhash()); + + if (msg.has_ledgerseq()) + hash_append(h, msg.ledgerseq()); + + for (auto const& nodeId : msg.nodeids()) + hash_append(h, nodeId); + hash_append(h, msg.nodeids_size()); + + // Do NOT include the request cookie. It does not affect the content of the + // request, but only where to route the results. 
+ // if (msg.has_requestcookie()) + // hash_append(h, msg.requestcookie()); + + if (msg.has_querytype()) + hash_append(h, safe_cast(msg.querytype())); + + if (msg.has_querydepth()) + hash_append(h, msg.querydepth()); +} + +template +void +hash_append(Hasher& h, TMLedgerData const& msg) +{ + using beast::hash_append; + using namespace ripple; + hash_append(h, safe_cast(protocolMessageType(msg))); + hash_append(h, msg.ledgerhash()); + hash_append(h, msg.ledgerseq()); + hash_append(h, safe_cast(msg.type())); + for (auto const& node : msg.nodes()) + { + hash_append(h, node.nodedata()); + if (node.has_nodeid()) + hash_append(h, node.nodeid()); + } + hash_append(h, msg.nodes_size()); + if (msg.has_requestcookie()) + hash_append(h, msg.requestcookie()); + if (msg.has_error()) + hash_append(h, safe_cast(msg.error())); +} + +} // namespace protocol + #endif diff --git a/src/xrpld/overlay/detail/ProtocolVersion.cpp b/src/xrpld/overlay/detail/ProtocolVersion.cpp index 0fecb301f7f..ce6c1e6fa3d 100644 --- a/src/xrpld/overlay/detail/ProtocolVersion.cpp +++ b/src/xrpld/overlay/detail/ProtocolVersion.cpp @@ -37,7 +37,9 @@ namespace ripple { constexpr ProtocolVersion const supportedProtocolList[] { {2, 1}, - {2, 2} + {2, 2}, + // Adds TMLedgerData::responseCookies and directResponse + {2, 3} }; // clang-format on From db0fad6826d342f77f8f98716a4973ff22d3c377 Mon Sep 17 00:00:00 2001 From: Mark Travis Date: Fri, 14 Feb 2025 17:48:12 -0800 Subject: [PATCH 18/29] Log proposals and validations (#5291) Adds detailed log messages for each validation and proposal received from the network. --- include/xrpl/protocol/STValidation.h | 18 ++++++++++++++++++ src/test/csf/Peer.h | 6 ++++++ src/xrpld/app/consensus/RCLCxPeerPos.h | 6 ++++++ src/xrpld/app/misc/NetworkOPs.cpp | 15 +++++++++++++++ src/xrpld/consensus/Consensus.h | 1 + src/xrpld/consensus/ConsensusProposal.h | 13 +++++++++++++ 6 files changed, 59 insertions(+) diff --git a/include/xrpl/protocol/STValidation.h b/include/xrpl/protocol/STValidation.h index 32c60026fcd..4f3e18de326 100644 --- a/include/xrpl/protocol/STValidation.h +++ b/include/xrpl/protocol/STValidation.h @@ -30,6 +30,7 @@ #include #include #include +#include namespace ripple { @@ -141,6 +142,23 @@ class STValidation final : public STObject, public CountedObject Blob getSignature() const; + std::string + render() const + { + std::stringstream ss; + ss << "validation: " + << " ledger_hash: " << getLedgerHash() + << " consensus_hash: " << getConsensusHash() + << " sign_time: " << to_string(getSignTime()) + << " seen_time: " << to_string(getSeenTime()) + << " signer_public_key: " << getSignerPublic() + << " node_id: " << getNodeID() << " is_valid: " << isValid() + << " is_full: " << isFull() << " is_trusted: " << isTrusted() + << " signing_hash: " << getSigningHash() + << " base58: " << toBase58(TokenType::NodePublic, getSignerPublic()); + return ss.str(); + } + private: static SOTemplate const& validationFormat(); diff --git a/src/test/csf/Peer.h b/src/test/csf/Peer.h index 2f3b460e02f..e6bc7d24e08 100644 --- a/src/test/csf/Peer.h +++ b/src/test/csf/Peer.h @@ -77,6 +77,12 @@ struct Peer return proposal_.getJson(); } + std::string + render() const + { + return ""; + } + private: Proposal proposal_; }; diff --git a/src/xrpld/app/consensus/RCLCxPeerPos.h b/src/xrpld/app/consensus/RCLCxPeerPos.h index 4236e2ab128..b5d3d152cb2 100644 --- a/src/xrpld/app/consensus/RCLCxPeerPos.h +++ b/src/xrpld/app/consensus/RCLCxPeerPos.h @@ -97,6 +97,12 @@ class RCLCxPeerPos Json::Value getJson() const; + std::string 
+ render() const + { + return proposal_.render(); + } + private: PublicKey publicKey_; uint256 suppression_; diff --git a/src/xrpld/app/misc/NetworkOPs.cpp b/src/xrpld/app/misc/NetworkOPs.cpp index e526382df03..3800b359efa 100644 --- a/src/xrpld/app/misc/NetworkOPs.cpp +++ b/src/xrpld/app/misc/NetworkOPs.cpp @@ -2353,6 +2353,21 @@ NetworkOPsImp::recvValidation( pubValidation(val); + JLOG(m_journal.debug()) << [this, &val]() -> auto { + std::stringstream ss; + ss << "VALIDATION: " << val->render() << " master_key: "; + auto master = app_.validators().getTrustedKey(val->getSignerPublic()); + if (master) + { + ss << toBase58(TokenType::NodePublic, *master); + } + else + { + ss << "none"; + } + return ss.str(); + }(); + // We will always relay trusted validations; if configured, we will // also relay all untrusted validations. return app_.config().RELAY_UNTRUSTED_VALIDATIONS == 1 || val->isTrusted(); diff --git a/src/xrpld/consensus/Consensus.h b/src/xrpld/consensus/Consensus.h index daad520c77f..e340105737b 100644 --- a/src/xrpld/consensus/Consensus.h +++ b/src/xrpld/consensus/Consensus.h @@ -704,6 +704,7 @@ Consensus::peerProposal( NetClock::time_point const& now, PeerPosition_t const& newPeerPos) { + JLOG(j_.debug()) << "PROPOSAL " << newPeerPos.render(); auto const& peerID = newPeerPos.proposal().nodeID(); // Always need to store recent positions diff --git a/src/xrpld/consensus/ConsensusProposal.h b/src/xrpld/consensus/ConsensusProposal.h index c00bffe0237..18dcf60006b 100644 --- a/src/xrpld/consensus/ConsensusProposal.h +++ b/src/xrpld/consensus/ConsensusProposal.h @@ -26,6 +26,7 @@ #include #include #include +#include namespace ripple { /** Represents a proposed position taken during a round of consensus. @@ -194,6 +195,18 @@ class ConsensusProposal proposeSeq_ = seqLeave; } + std::string + render() const + { + std::stringstream ss; + ss << "proposal: previous_ledger: " << previousLedger_ + << " proposal_seq: " << proposeSeq_ << " position: " << position_ + << " close_time: " << to_string(closeTime_) + << " now: " << to_string(time_) << " is_bow_out:" << isBowOut() + << " node_id: " << nodeID_; + return ss.str(); + } + //! Get JSON representation for debugging Json::Value getJson() const From 466849efe8294f001103524443064bd3cd15d270 Mon Sep 17 00:00:00 2001 From: Bart Date: Fri, 14 Feb 2025 21:37:14 -0500 Subject: [PATCH 19/29] docs: Clarifies default port of hosts (#5290) The current comment in the example cfg file incorrectly mentions both "may" and "must". This change fixes this comment to clarify that the default port of hosts is 2459 and that specifying it is therefore optional. It further sets the default port to 2459 instead of the legacy 51235. --- cfg/rippled-example.cfg | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/cfg/rippled-example.cfg b/cfg/rippled-example.cfg index 6fabe980cc1..3586f26d937 100644 --- a/cfg/rippled-example.cfg +++ b/cfg/rippled-example.cfg @@ -410,9 +410,11 @@ # starter list is included in the code and used if no other hostnames are # available. # -# One address or domain name per line is allowed. A port may must be -# specified after adding a space to the address. The ordering of entries -# does not generally matter. +# One address or domain name per line is allowed. A port may be specified +# after adding a space to the address. If a port is not specified, the default +# port of 2459 will be used. Many servers still use the legacy port of 51235. +# To connect to such servers, you must specify the port number. 
The ordering +# of entries does not generally matter. # # The default list of entries is: # - r.ripple.com 51235 @@ -1423,7 +1425,7 @@ admin = 127.0.0.1 protocol = http [port_peer] -port = 51235 +port = 2459 ip = 0.0.0.0 # alternatively, to accept connections on IPv4 + IPv6, use: #ip = :: From 43e1d4440eda27e04735ce27ca25a646974956c8 Mon Sep 17 00:00:00 2001 From: Olek <115580134+oleks-rip@users.noreply.github.com> Date: Sat, 15 Feb 2025 10:08:25 -0500 Subject: [PATCH 20/29] fix: Switch Permissioned Domain to Supported::yes (#5287) Switch Permissioned Domain feature's supported flag from Supported::no to Supported::yes for it to be votable. --- include/xrpl/protocol/detail/features.macro | 2 +- src/test/app/PermissionedDomains_test.cpp | 3 ++- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/include/xrpl/protocol/detail/features.macro b/include/xrpl/protocol/detail/features.macro index aa0782b1378..7b120c0b8d2 100644 --- a/include/xrpl/protocol/detail/features.macro +++ b/include/xrpl/protocol/detail/features.macro @@ -33,7 +33,7 @@ XRPL_FIX (InvalidTxFlags, Supported::yes, VoteBehavior::DefaultNo) XRPL_FIX (FrozenLPTokenTransfer, Supported::yes, VoteBehavior::DefaultNo) XRPL_FEATURE(DeepFreeze, Supported::yes, VoteBehavior::DefaultNo) -XRPL_FEATURE(PermissionedDomains, Supported::no, VoteBehavior::DefaultNo) +XRPL_FEATURE(PermissionedDomains, Supported::yes, VoteBehavior::DefaultNo) XRPL_FEATURE(DynamicNFT, Supported::yes, VoteBehavior::DefaultNo) XRPL_FEATURE(Credentials, Supported::yes, VoteBehavior::DefaultNo) XRPL_FEATURE(AMMClawback, Supported::yes, VoteBehavior::DefaultNo) diff --git a/src/test/app/PermissionedDomains_test.cpp b/src/test/app/PermissionedDomains_test.cpp index a80352cac5a..7d78652b287 100644 --- a/src/test/app/PermissionedDomains_test.cpp +++ b/src/test/app/PermissionedDomains_test.cpp @@ -54,7 +54,8 @@ exceptionExpected(Env& env, Json::Value const& jv) class PermissionedDomains_test : public beast::unit_test::suite { - FeatureBitset withoutFeature_{supported_amendments()}; + FeatureBitset withoutFeature_{ + supported_amendments() - featurePermissionedDomains}; FeatureBitset withFeature_{ supported_amendments() // | featurePermissionedDomains | featureCredentials}; From 01fc8f2209be524848ea0937d4f47844b30df7a8 Mon Sep 17 00:00:00 2001 From: Michael Legleux Date: Tue, 18 Feb 2025 13:58:56 -0800 Subject: [PATCH 21/29] Set version to 2.4.0-rc1 --- src/libxrpl/protocol/BuildInfo.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/libxrpl/protocol/BuildInfo.cpp b/src/libxrpl/protocol/BuildInfo.cpp index 93a38d062ab..6384f4de8db 100644 --- a/src/libxrpl/protocol/BuildInfo.cpp +++ b/src/libxrpl/protocol/BuildInfo.cpp @@ -33,7 +33,7 @@ namespace BuildInfo { // and follow the format described at http://semver.org/ //------------------------------------------------------------------------------ // clang-format off -char const* const versionString = "2.4.0-b3" +char const* const versionString = "2.4.0-rc1" // clang-format on #if defined(DEBUG) || defined(SANITIZER) From 844646dc5002690257b2bc4aa4496b653e5f3a1a Mon Sep 17 00:00:00 2001 From: Bart Date: Wed, 19 Feb 2025 17:14:00 -0500 Subject: [PATCH 22/29] docs: Revert peer port to 51235 (#5299) Reverts the [port_peer] back to the legacy port 51235 rather than to the default port 2459, to avoid potentially inconveniencing existing operators. 
--- cfg/rippled-example.cfg | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/cfg/rippled-example.cfg b/cfg/rippled-example.cfg index 3586f26d937..ee9fdbd2744 100644 --- a/cfg/rippled-example.cfg +++ b/cfg/rippled-example.cfg @@ -1425,7 +1425,10 @@ admin = 127.0.0.1 protocol = http [port_peer] -port = 2459 +# Many servers still use the legacy port of 51235, so for backward-compatibility +# we maintain that port number here. However, for new servers we recommend +# changing this to the default port of 2459. +port = 51235 ip = 0.0.0.0 # alternatively, to accept connections on IPv4 + IPv6, use: #ip = :: From 159dfb5acb416310a76e5f8336abde4d934df757 Mon Sep 17 00:00:00 2001 From: Ed Hennis Date: Wed, 19 Feb 2025 18:52:08 -0500 Subject: [PATCH 23/29] Revert "Reduce duplicate peer traffic for ledger data (#5126)" (#5300) This reverts commit dd5e6559dda14f8a1684cf81d90b86920628a4df. It has introduced a regression causing slow close times and syncing issues. A fix will be attempted later. --- Builds/levelization/results/loops.txt | 2 +- include/xrpl/basics/CanProcess.h | 134 ------ include/xrpl/basics/base_uint.h | 7 - include/xrpl/proto/ripple.proto | 10 - include/xrpl/protocol/LedgerHeader.h | 2 - src/test/app/HashRouter_test.cpp | 28 -- src/test/app/LedgerReplay_test.cpp | 5 - src/test/basics/base_uint_test.cpp | 5 - src/test/overlay/ProtocolVersion_test.cpp | 4 +- src/test/overlay/reduce_relay_test.cpp | 5 - src/xrpld/app/consensus/RCLConsensus.cpp | 3 +- src/xrpld/app/ledger/InboundLedger.h | 19 - src/xrpld/app/ledger/detail/InboundLedger.cpp | 21 +- .../app/ledger/detail/InboundLedgers.cpp | 134 ++---- src/xrpld/app/ledger/detail/LedgerMaster.cpp | 5 +- .../app/ledger/detail/TimeoutCounter.cpp | 11 +- src/xrpld/app/ledger/detail/TimeoutCounter.h | 3 - src/xrpld/app/misc/HashRouter.cpp | 23 - src/xrpld/app/misc/HashRouter.h | 42 +- src/xrpld/app/misc/NetworkOPs.cpp | 84 ++-- src/xrpld/app/misc/NetworkOPs.h | 2 +- src/xrpld/overlay/Peer.h | 8 - src/xrpld/overlay/detail/PeerImp.cpp | 451 ++---------------- src/xrpld/overlay/detail/PeerImp.h | 36 +- src/xrpld/overlay/detail/PeerSet.cpp | 42 +- src/xrpld/overlay/detail/ProtocolMessage.h | 66 --- src/xrpld/overlay/detail/ProtocolVersion.cpp | 4 +- 27 files changed, 143 insertions(+), 1013 deletions(-) delete mode 100644 include/xrpl/basics/CanProcess.h diff --git a/Builds/levelization/results/loops.txt b/Builds/levelization/results/loops.txt index 06ab5266c91..7c132f5429e 100644 --- a/Builds/levelization/results/loops.txt +++ b/Builds/levelization/results/loops.txt @@ -14,7 +14,7 @@ Loop: xrpld.app xrpld.net xrpld.app > xrpld.net Loop: xrpld.app xrpld.overlay - xrpld.overlay ~= xrpld.app + xrpld.overlay == xrpld.app Loop: xrpld.app xrpld.peerfinder xrpld.app > xrpld.peerfinder diff --git a/include/xrpl/basics/CanProcess.h b/include/xrpl/basics/CanProcess.h deleted file mode 100644 index 3ee49d00877..00000000000 --- a/include/xrpl/basics/CanProcess.h +++ /dev/null @@ -1,134 +0,0 @@ -//------------------------------------------------------------------------------ -/* - This file is part of rippled: https://github.com/ripple/rippled - Copyright (c) 2024 Ripple Labs Inc. - - Permission to use, copy, modify, and/or distribute this software for any - purpose with or without fee is hereby granted, provided that the above - copyright notice and this permission notice appear in all copies. 
- - THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. -*/ -//============================================================================== - -#ifndef RIPPLE_BASICS_CANPROCESS_H_INCLUDED -#define RIPPLE_BASICS_CANPROCESS_H_INCLUDED - -#include -#include -#include - -/** RAII class to check if an Item is already being processed on another thread, - * as indicated by it's presence in a Collection. - * - * If the Item is not in the Collection, it will be added under lock in the - * ctor, and removed under lock in the dtor. The object will be considered - * "usable" and evaluate to `true`. - * - * If the Item is in the Collection, no changes will be made to the collection, - * and the CanProcess object will be considered "unusable". - * - * It's up to the caller to decide what "usable" and "unusable" mean. (e.g. - * Process or skip a block of code, or set a flag.) - * - * The current use is to avoid lock contention that would be involved in - * processing something associated with the Item. - * - * Examples: - * - * void IncomingLedgers::acquireAsync(LedgerHash const& hash, ...) - * { - * if (CanProcess check{acquiresMutex_, pendingAcquires_, hash}) - * { - * acquire(hash, ...); - * } - * } - * - * bool - * NetworkOPsImp::recvValidation( - * std::shared_ptr const& val, - * std::string const& source) - * { - * CanProcess check( - * validationsMutex_, pendingValidations_, val->getLedgerHash()); - * BypassAccept bypassAccept = - * check ? BypassAccept::no : BypassAccept::yes; - * handleNewValidation(app_, val, source, bypassAccept, m_journal); - * } - * - */ -class CanProcess -{ -public: - template - CanProcess(Mutex& mtx, Collection& collection, Item const& item) - : cleanup_(insert(mtx, collection, item)) - { - } - - ~CanProcess() - { - if (cleanup_) - cleanup_(); - } - - explicit - operator bool() const - { - return static_cast(cleanup_); - } - -private: - template - std::function - doInsert(Mutex& mtx, Collection& collection, Item const& item) - { - std::unique_lock lock(mtx); - // TODO: Use structured binding once LLVM 16 is the minimum supported - // version. 
See also: https://github.com/llvm/llvm-project/issues/48582 - // https://github.com/llvm/llvm-project/commit/127bf44385424891eb04cff8e52d3f157fc2cb7c - auto const insertResult = collection.insert(item); - auto const it = insertResult.first; - if (!insertResult.second) - return {}; - if constexpr (useIterator) - return [&, it]() { - std::unique_lock lock(mtx); - collection.erase(it); - }; - else - return [&]() { - std::unique_lock lock(mtx); - collection.erase(item); - }; - } - - // Generic insert() function doesn't use iterators because they may get - // invalidated - template - std::function - insert(Mutex& mtx, Collection& collection, Item const& item) - { - return doInsert(mtx, collection, item); - } - - // Specialize insert() for std::set, which does not invalidate iterators for - // insert and erase - template - std::function - insert(Mutex& mtx, std::set& collection, Item const& item) - { - return doInsert(mtx, collection, item); - } - - // If set, then the item is "usable" - std::function cleanup_; -}; - -#endif diff --git a/include/xrpl/basics/base_uint.h b/include/xrpl/basics/base_uint.h index a2c714f4be1..05d83b3bb0a 100644 --- a/include/xrpl/basics/base_uint.h +++ b/include/xrpl/basics/base_uint.h @@ -631,13 +631,6 @@ to_string(base_uint const& a) return strHex(a.cbegin(), a.cend()); } -template -inline std::string -to_short_string(base_uint const& a) -{ - return strHex(a.cbegin(), a.cend()).substr(0, 8) + "..."; -} - template inline std::ostream& operator<<(std::ostream& out, base_uint const& u) diff --git a/include/xrpl/proto/ripple.proto b/include/xrpl/proto/ripple.proto index e121a39706c..a06bbd9a311 100644 --- a/include/xrpl/proto/ripple.proto +++ b/include/xrpl/proto/ripple.proto @@ -321,18 +321,8 @@ message TMLedgerData required uint32 ledgerSeq = 2; required TMLedgerInfoType type = 3; repeated TMLedgerNode nodes = 4; - // If the peer supports "responseCookies", this field will - // never be populated. optional uint32 requestCookie = 5; optional TMReplyError error = 6; - // The old field is called "requestCookie", but this is - // a response, so this name makes more sense - repeated uint32 responseCookies = 7; - // If a TMGetLedger request was received without a "requestCookie", - // and the peer supports it, this flag will be set to true to - // indicate that the receiver should process the result in addition - // to forwarding it to its "responseCookies" peers. - optional bool directResponse = 8; } message TMPing diff --git a/include/xrpl/protocol/LedgerHeader.h b/include/xrpl/protocol/LedgerHeader.h index 806e732593a..0b35979971a 100644 --- a/include/xrpl/protocol/LedgerHeader.h +++ b/include/xrpl/protocol/LedgerHeader.h @@ -55,8 +55,6 @@ struct LedgerHeader // If validated is false, it means "not yet validated." // Once validated is true, it will never be set false at a later time. - // NOTE: If you are accessing this directly, you are probably doing it - // wrong. Use LedgerMaster::isValidated(). 
// VFALCO TODO Make this not mutable bool mutable validated = false; bool accepted = false; diff --git a/src/test/app/HashRouter_test.cpp b/src/test/app/HashRouter_test.cpp index 68e0d830657..1234bc5b9cb 100644 --- a/src/test/app/HashRouter_test.cpp +++ b/src/test/app/HashRouter_test.cpp @@ -242,33 +242,6 @@ class HashRouter_test : public beast::unit_test::suite BEAST_EXPECT(router.shouldProcess(key, peer, flags, 1s)); } - void - testProcessPeer() - { - using namespace std::chrono_literals; - TestStopwatch stopwatch; - HashRouter router(stopwatch, 5s); - uint256 const key(1); - HashRouter::PeerShortID peer1 = 1; - HashRouter::PeerShortID peer2 = 2; - auto const timeout = 2s; - - BEAST_EXPECT(router.shouldProcessForPeer(key, peer1, timeout)); - BEAST_EXPECT(!router.shouldProcessForPeer(key, peer1, timeout)); - ++stopwatch; - BEAST_EXPECT(!router.shouldProcessForPeer(key, peer1, timeout)); - BEAST_EXPECT(router.shouldProcessForPeer(key, peer2, timeout)); - BEAST_EXPECT(!router.shouldProcessForPeer(key, peer2, timeout)); - ++stopwatch; - BEAST_EXPECT(router.shouldProcessForPeer(key, peer1, timeout)); - BEAST_EXPECT(!router.shouldProcessForPeer(key, peer2, timeout)); - ++stopwatch; - BEAST_EXPECT(router.shouldProcessForPeer(key, peer2, timeout)); - ++stopwatch; - BEAST_EXPECT(router.shouldProcessForPeer(key, peer1, timeout)); - BEAST_EXPECT(!router.shouldProcessForPeer(key, peer2, timeout)); - } - public: void run() override @@ -279,7 +252,6 @@ class HashRouter_test : public beast::unit_test::suite testSetFlags(); testRelay(); testProcess(); - testProcessPeer(); } }; diff --git a/src/test/app/LedgerReplay_test.cpp b/src/test/app/LedgerReplay_test.cpp index d4911f82833..883aca7bced 100644 --- a/src/test/app/LedgerReplay_test.cpp +++ b/src/test/app/LedgerReplay_test.cpp @@ -322,11 +322,6 @@ class TestPeer : public Peer { return false; } - std::set> - releaseRequestCookies(uint256 const& requestHash) override - { - return {}; - } bool ledgerReplayEnabled_; PublicKey nodePublicKey_; diff --git a/src/test/basics/base_uint_test.cpp b/src/test/basics/base_uint_test.cpp index 50411461e0d..9f3194f4fbc 100644 --- a/src/test/basics/base_uint_test.cpp +++ b/src/test/basics/base_uint_test.cpp @@ -151,7 +151,6 @@ struct base_uint_test : beast::unit_test::suite uset.insert(u); BEAST_EXPECT(raw.size() == u.size()); BEAST_EXPECT(to_string(u) == "0102030405060708090A0B0C"); - BEAST_EXPECT(to_short_string(u) == "01020304..."); BEAST_EXPECT(*u.data() == 1); BEAST_EXPECT(u.signum() == 1); BEAST_EXPECT(!!u); @@ -174,7 +173,6 @@ struct base_uint_test : beast::unit_test::suite test96 v{~u}; uset.insert(v); BEAST_EXPECT(to_string(v) == "FEFDFCFBFAF9F8F7F6F5F4F3"); - BEAST_EXPECT(to_short_string(v) == "FEFDFCFB..."); BEAST_EXPECT(*v.data() == 0xfe); BEAST_EXPECT(v.signum() == 1); BEAST_EXPECT(!!v); @@ -195,7 +193,6 @@ struct base_uint_test : beast::unit_test::suite test96 z{beast::zero}; uset.insert(z); BEAST_EXPECT(to_string(z) == "000000000000000000000000"); - BEAST_EXPECT(to_short_string(z) == "00000000..."); BEAST_EXPECT(*z.data() == 0); BEAST_EXPECT(*z.begin() == 0); BEAST_EXPECT(*std::prev(z.end(), 1) == 0); @@ -216,7 +213,6 @@ struct base_uint_test : beast::unit_test::suite BEAST_EXPECT(n == z); n--; BEAST_EXPECT(to_string(n) == "FFFFFFFFFFFFFFFFFFFFFFFF"); - BEAST_EXPECT(to_short_string(n) == "FFFFFFFF..."); n = beast::zero; BEAST_EXPECT(n == z); @@ -227,7 +223,6 @@ struct base_uint_test : beast::unit_test::suite test96 x{zm1 ^ zp1}; uset.insert(x); BEAST_EXPECTS(to_string(x) == "FFFFFFFFFFFFFFFFFFFFFFFE", 
to_string(x)); - BEAST_EXPECTS(to_short_string(x) == "FFFFFFFF...", to_short_string(x)); BEAST_EXPECT(uset.size() == 4); diff --git a/src/test/overlay/ProtocolVersion_test.cpp b/src/test/overlay/ProtocolVersion_test.cpp index 97469c59805..dfc0ee70b8e 100644 --- a/src/test/overlay/ProtocolVersion_test.cpp +++ b/src/test/overlay/ProtocolVersion_test.cpp @@ -87,8 +87,8 @@ class ProtocolVersion_test : public beast::unit_test::suite negotiateProtocolVersion("XRPL/2.2") == make_protocol(2, 2)); BEAST_EXPECT( negotiateProtocolVersion( - "RTXP/1.2, XRPL/2.2, XRPL/2.3, XRPL/2.4, XRPL/999.999") == - make_protocol(2, 3)); + "RTXP/1.2, XRPL/2.2, XRPL/2.3, XRPL/999.999") == + make_protocol(2, 2)); BEAST_EXPECT( negotiateProtocolVersion("XRPL/999.999, WebSocket/1.0") == std::nullopt); diff --git a/src/test/overlay/reduce_relay_test.cpp b/src/test/overlay/reduce_relay_test.cpp index e907f60b0e2..e0edae54897 100644 --- a/src/test/overlay/reduce_relay_test.cpp +++ b/src/test/overlay/reduce_relay_test.cpp @@ -182,11 +182,6 @@ class PeerPartial : public Peer removeTxQueue(const uint256&) override { } - std::set> - releaseRequestCookies(uint256 const& requestHash) override - { - return {}; - } }; /** Manually advanced clock. */ diff --git a/src/xrpld/app/consensus/RCLConsensus.cpp b/src/xrpld/app/consensus/RCLConsensus.cpp index 47414cd20ab..a746b30357d 100644 --- a/src/xrpld/app/consensus/RCLConsensus.cpp +++ b/src/xrpld/app/consensus/RCLConsensus.cpp @@ -1073,8 +1073,7 @@ void RCLConsensus::Adaptor::updateOperatingMode(std::size_t const positions) const { if (!positions && app_.getOPs().isFull()) - app_.getOPs().setMode( - OperatingMode::CONNECTED, "updateOperatingMode: no positions"); + app_.getOPs().setMode(OperatingMode::CONNECTED); } void diff --git a/src/xrpld/app/ledger/InboundLedger.h b/src/xrpld/app/ledger/InboundLedger.h index ccd9aa0710f..13f603e79d0 100644 --- a/src/xrpld/app/ledger/InboundLedger.h +++ b/src/xrpld/app/ledger/InboundLedger.h @@ -196,25 +196,6 @@ class InboundLedger final : public TimeoutCounter, std::unique_ptr mPeerSet; }; -inline std::string -to_string(InboundLedger::Reason reason) -{ - using enum InboundLedger::Reason; - switch (reason) - { - case HISTORY: - return "HISTORY"; - case GENERIC: - return "GENERIC"; - case CONSENSUS: - return "CONSENSUS"; - default: - UNREACHABLE( - "ripple::to_string(InboundLedger::Reason) : unknown value"); - return "unknown"; - } -} - } // namespace ripple #endif diff --git a/src/xrpld/app/ledger/detail/InboundLedger.cpp b/src/xrpld/app/ledger/detail/InboundLedger.cpp index ca955d14ff3..32fdff76ab3 100644 --- a/src/xrpld/app/ledger/detail/InboundLedger.cpp +++ b/src/xrpld/app/ledger/detail/InboundLedger.cpp @@ -392,14 +392,7 @@ InboundLedger::onTimer(bool wasProgress, ScopedLockType&) if (!wasProgress) { - if (checkLocal()) - { - // Done. 
Something else (probably consensus) built the ledger - // locally while waiting for data (or possibly before requesting) - XRPL_ASSERT(isDone(), "ripple::InboundLedger::onTimer : done"); - JLOG(journal_.info()) << "Finished while waiting " << hash_; - return; - } + checkLocal(); mByHash = true; @@ -509,17 +502,15 @@ InboundLedger::trigger(std::shared_ptr const& peer, TriggerReason reason) if (auto stream = journal_.debug()) { - std::stringstream ss; - ss << "Trigger acquiring ledger " << hash_; + stream << "Trigger acquiring ledger " << hash_; if (peer) - ss << " from " << peer; + stream << " from " << peer; if (complete_ || failed_) - ss << " complete=" << complete_ << " failed=" << failed_; + stream << "complete=" << complete_ << " failed=" << failed_; else - ss << " header=" << mHaveHeader << " tx=" << mHaveTransactions - << " as=" << mHaveState; - stream << ss.str(); + stream << "header=" << mHaveHeader << " tx=" << mHaveTransactions + << " as=" << mHaveState; } if (!mHaveHeader) diff --git a/src/xrpld/app/ledger/detail/InboundLedgers.cpp b/src/xrpld/app/ledger/detail/InboundLedgers.cpp index a6699aa73f4..99a26ce8f9f 100644 --- a/src/xrpld/app/ledger/detail/InboundLedgers.cpp +++ b/src/xrpld/app/ledger/detail/InboundLedgers.cpp @@ -23,9 +23,9 @@ #include #include #include -#include #include #include +#include #include #include #include @@ -77,85 +77,11 @@ class InboundLedgersImp : public InboundLedgers hash.isNonZero(), "ripple::InboundLedgersImp::acquire::doAcquire : nonzero hash"); - bool const needNetworkLedger = app_.getOPs().isNeedNetworkLedger(); - bool const shouldAcquire = [&]() { - if (!needNetworkLedger) - return true; - if (reason == InboundLedger::Reason::GENERIC) - return true; - if (reason == InboundLedger::Reason::CONSENSUS) - return true; - return false; - }(); - - std::stringstream ss; - ss << "InboundLedger::acquire: " - << "Request: " << to_string(hash) << ", " << seq - << " NeedNetworkLedger: " << (needNetworkLedger ? "yes" : "no") - << " Reason: " << to_string(reason) - << " Should acquire: " << (shouldAcquire ? "true." : "false."); - - /* Acquiring ledgers is somewhat expensive. It requires lots of - * computation and network communication. Avoid it when it's not - * appropriate. Every validation from a peer for a ledger that - * we do not have locally results in a call to this function: even - * if we are moments away from validating the same ledger. - */ - bool const shouldBroadcast = [&]() { - // If the node is not in "full" state, it needs to sync to - // the network, and doesn't have the necessary tx's and - // ledger entries to build the ledger. - bool const isFull = app_.getOPs().isFull(); - // If everything else is ok, don't try to acquire the ledger - // if the requested seq is in the near future relative to - // the validated ledger. If the requested ledger is between - // 1 and 19 inclusive ledgers ahead of the valid ledger this - // node has not built it yet, but it's possible/likely it - // has the tx's necessary to build it and get caught up. - // Plus it might not become validated. On the other hand, if - // it's more than 20 in the future, this node should request - // it so that it can jump ahead and get caught up. - LedgerIndex const validSeq = - app_.getLedgerMaster().getValidLedgerIndex(); - constexpr std::size_t lagLeeway = 20; - bool const nearFuture = - (seq > validSeq) && (seq < validSeq + lagLeeway); - // If everything else is ok, don't try to acquire the ledger - // if the request is related to consensus. 
(Note that - // consensus calls usually pass a seq of 0, so nearFuture - // will be false other than on a brand new network.) - bool const consensus = - reason == InboundLedger::Reason::CONSENSUS; - ss << " Evaluating whether to broadcast requests to peers" - << ". full: " << (isFull ? "true" : "false") - << ". ledger sequence " << seq - << ". Valid sequence: " << validSeq - << ". Lag leeway: " << lagLeeway - << ". request for near future ledger: " - << (nearFuture ? "true" : "false") - << ". Consensus: " << (consensus ? "true" : "false"); - - // If the node is not synced, send requests. - if (!isFull) - return true; - // If the ledger is in the near future, do NOT send requests. - // This node is probably about to build it. - if (nearFuture) - return false; - // If the request is because of consensus, do NOT send requests. - // This node is probably about to build it. - if (consensus) - return false; - return true; - }(); - ss << ". Would broadcast to peers? " - << (shouldBroadcast ? "true." : "false."); - - if (!shouldAcquire) - { - JLOG(j_.debug()) << "Abort(rule): " << ss.str(); + // probably not the right rule + if (app_.getOPs().isNeedNetworkLedger() && + (reason != InboundLedger::Reason::GENERIC) && + (reason != InboundLedger::Reason::CONSENSUS)) return {}; - } bool isNew = true; std::shared_ptr inbound; @@ -163,7 +89,6 @@ class InboundLedgersImp : public InboundLedgers ScopedLockType sl(mLock); if (stopping_) { - JLOG(j_.debug()) << "Abort(stopping): " << ss.str(); return {}; } @@ -187,29 +112,23 @@ class InboundLedgersImp : public InboundLedgers ++mCounter; } } - ss << " IsNew: " << (isNew ? "true" : "false"); if (inbound->isFailed()) - { - JLOG(j_.debug()) << "Abort(failed): " << ss.str(); return {}; - } if (!isNew) inbound->update(seq); if (!inbound->isComplete()) - { - JLOG(j_.debug()) << "InProgress: " << ss.str(); return {}; - } - JLOG(j_.debug()) << "Complete: " << ss.str(); return inbound->getLedger(); }; using namespace std::chrono_literals; - return perf::measureDurationAndLog( + std::shared_ptr ledger = perf::measureDurationAndLog( doAcquire, "InboundLedgersImp::acquire", 500ms, j_); + + return ledger; } void @@ -218,25 +137,28 @@ class InboundLedgersImp : public InboundLedgers std::uint32_t seq, InboundLedger::Reason reason) override { - if (CanProcess const check{acquiresMutex_, pendingAcquires_, hash}) + std::unique_lock lock(acquiresMutex_); + try { - try - { - acquire(hash, seq, reason); - } - catch (std::exception const& e) - { - JLOG(j_.warn()) - << "Exception thrown for acquiring new inbound ledger " - << hash << ": " << e.what(); - } - catch (...) - { - JLOG(j_.warn()) << "Unknown exception thrown for acquiring new " - "inbound ledger " - << hash; - } + if (pendingAcquires_.contains(hash)) + return; + pendingAcquires_.insert(hash); + scope_unlock unlock(lock); + acquire(hash, seq, reason); + } + catch (std::exception const& e) + { + JLOG(j_.warn()) + << "Exception thrown for acquiring new inbound ledger " << hash + << ": " << e.what(); + } + catch (...) 
+ { + JLOG(j_.warn()) + << "Unknown exception thrown for acquiring new inbound ledger " + << hash; } + pendingAcquires_.erase(hash); } std::shared_ptr diff --git a/src/xrpld/app/ledger/detail/LedgerMaster.cpp b/src/xrpld/app/ledger/detail/LedgerMaster.cpp index 7875541e7bc..6bc894da487 100644 --- a/src/xrpld/app/ledger/detail/LedgerMaster.cpp +++ b/src/xrpld/app/ledger/detail/LedgerMaster.cpp @@ -973,9 +973,8 @@ LedgerMaster::checkAccept(std::shared_ptr const& ledger) } JLOG(m_journal.info()) << "Advancing accepted ledger to " - << ledger->info().seq << " (" - << to_short_string(ledger->info().hash) - << ") with >= " << minVal << " validations"; + << ledger->info().seq << " with >= " << minVal + << " validations"; ledger->setValidated(); ledger->setFull(); diff --git a/src/xrpld/app/ledger/detail/TimeoutCounter.cpp b/src/xrpld/app/ledger/detail/TimeoutCounter.cpp index 343bbd83db4..35d8f1fffb1 100644 --- a/src/xrpld/app/ledger/detail/TimeoutCounter.cpp +++ b/src/xrpld/app/ledger/detail/TimeoutCounter.cpp @@ -33,8 +33,7 @@ TimeoutCounter::TimeoutCounter( QueueJobParameter&& jobParameter, beast::Journal journal) : app_(app) - , sink_(journal, to_short_string(hash) + " ") - , journal_(sink_) + , journal_(journal) , hash_(hash) , timeouts_(0) , complete_(false) @@ -54,8 +53,6 @@ TimeoutCounter::setTimer(ScopedLockType& sl) { if (isDone()) return; - JLOG(journal_.debug()) << "Setting timer for " << timerInterval_.count() - << "ms"; timer_.expires_after(timerInterval_); timer_.async_wait( [wptr = pmDowncast()](boost::system::error_code const& ec) { @@ -64,12 +61,6 @@ TimeoutCounter::setTimer(ScopedLockType& sl) if (auto ptr = wptr.lock()) { - JLOG(ptr->journal_.debug()) - << "timer: ec: " << ec << " (operation_aborted: " - << boost::asio::error::operation_aborted << " - " - << (ec == boost::asio::error::operation_aborted ? "aborted" - : "other") - << ")"; ScopedLockType sl(ptr->mtx_); ptr->queueJob(sl); } diff --git a/src/xrpld/app/ledger/detail/TimeoutCounter.h b/src/xrpld/app/ledger/detail/TimeoutCounter.h index a65208a938b..228e879d4de 100644 --- a/src/xrpld/app/ledger/detail/TimeoutCounter.h +++ b/src/xrpld/app/ledger/detail/TimeoutCounter.h @@ -24,8 +24,6 @@ #include #include #include -#include - #include #include @@ -123,7 +121,6 @@ class TimeoutCounter // Used in this class for access to boost::asio::io_service and // ripple::Overlay. Used in subtypes for the kitchen sink. 
Application& app_; - beast::WrappedSink sink_; beast::Journal journal_; mutable std::recursive_mutex mtx_; diff --git a/src/xrpld/app/misc/HashRouter.cpp b/src/xrpld/app/misc/HashRouter.cpp index 28d2449db5e..58e811d4b8f 100644 --- a/src/xrpld/app/misc/HashRouter.cpp +++ b/src/xrpld/app/misc/HashRouter.cpp @@ -90,20 +90,6 @@ HashRouter::shouldProcess( return s.shouldProcess(suppressionMap_.clock().now(), tx_interval); } -bool -HashRouter::shouldProcessForPeer( - uint256 const& key, - PeerShortID peer, - std::chrono::seconds interval) -{ - std::lock_guard lock(mutex_); - - auto& entry = emplace(key).first; - - return entry.shouldProcessForPeer( - peer, suppressionMap_.clock().now(), interval); -} - int HashRouter::getFlags(uint256 const& key) { @@ -142,13 +128,4 @@ HashRouter::shouldRelay(uint256 const& key) return s.releasePeerSet(); } -auto -HashRouter::getPeers(uint256 const& key) -> std::set -{ - std::lock_guard lock(mutex_); - - auto& s = emplace(key).first; - return s.peekPeerSet(); -} - } // namespace ripple diff --git a/src/xrpld/app/misc/HashRouter.h b/src/xrpld/app/misc/HashRouter.h index 403c7ce8603..e9d040fc8bf 100644 --- a/src/xrpld/app/misc/HashRouter.h +++ b/src/xrpld/app/misc/HashRouter.h @@ -92,13 +92,6 @@ class HashRouter return std::move(peers_); } - /** Return set of peers waiting for reply. Leaves list unchanged. */ - std::set const& - peekPeerSet() - { - return peers_; - } - /** Return seated relay time point if the message has been relayed */ std::optional relayed() const @@ -132,21 +125,6 @@ class HashRouter return true; } - bool - shouldProcessForPeer( - PeerShortID peer, - Stopwatch::time_point now, - std::chrono::seconds interval) - { - if (peerProcessed_.contains(peer) && - ((peerProcessed_[peer] + interval) > now)) - return false; - // Peer may already be in the list, but adding it again doesn't hurt - addPeer(peer); - peerProcessed_[peer] = now; - return true; - } - private: int flags_ = 0; std::set peers_; @@ -154,7 +132,6 @@ class HashRouter // than one flag needs to expire independently. std::optional relayed_; std::optional processed_; - std::map peerProcessed_; }; public: @@ -186,7 +163,7 @@ class HashRouter /** Add a suppression peer and get message's relay status. * Return pair: - * element 1: true if the key is added. + * element 1: true if the peer is added. * element 2: optional is seated to the relay time point or * is unseated if has not relayed yet. */ std::pair> @@ -203,18 +180,6 @@ class HashRouter int& flags, std::chrono::seconds tx_interval); - /** Determines whether the hashed item should be processed for the given - peer. Could be an incoming or outgoing message. - - Items filtered with this function should only be processed for the given - peer once. Unlike shouldProcess, it can be processed for other peers. - */ - bool - shouldProcessForPeer( - uint256 const& key, - PeerShortID peer, - std::chrono::seconds interval); - /** Set the flags on a hash. @return `true` if the flags were changed. `false` if unchanged. 
@@ -240,11 +205,6 @@ class HashRouter std::optional> shouldRelay(uint256 const& key); - /** Returns a copy of the set of peers in the Entry for the key - */ - std::set - getPeers(uint256 const& key); - private: // pair.second indicates whether the entry was created std::pair diff --git a/src/xrpld/app/misc/NetworkOPs.cpp b/src/xrpld/app/misc/NetworkOPs.cpp index 3800b359efa..a5c8200ccbc 100644 --- a/src/xrpld/app/misc/NetworkOPs.cpp +++ b/src/xrpld/app/misc/NetworkOPs.cpp @@ -50,10 +50,10 @@ #include #include #include -#include #include #include #include +#include #include #include #include @@ -403,7 +403,7 @@ class NetworkOPsImp final : public NetworkOPs isFull() override; void - setMode(OperatingMode om, const char* reason) override; + setMode(OperatingMode om) override; bool isBlocked() override; @@ -874,7 +874,7 @@ NetworkOPsImp::strOperatingMode(bool const admin /* = false */) const inline void NetworkOPsImp::setStandAlone() { - setMode(OperatingMode::FULL, "setStandAlone"); + setMode(OperatingMode::FULL); } inline void @@ -1022,9 +1022,7 @@ NetworkOPsImp::processHeartbeatTimer() { if (mMode != OperatingMode::DISCONNECTED) { - setMode( - OperatingMode::DISCONNECTED, - "Heartbeat: insufficient peers"); + setMode(OperatingMode::DISCONNECTED); JLOG(m_journal.warn()) << "Node count (" << numPeers << ") has fallen " << "below required minimum (" << minPeerCount_ << ")."; @@ -1040,7 +1038,7 @@ NetworkOPsImp::processHeartbeatTimer() if (mMode == OperatingMode::DISCONNECTED) { - setMode(OperatingMode::CONNECTED, "Heartbeat: sufficient peers"); + setMode(OperatingMode::CONNECTED); JLOG(m_journal.info()) << "Node count (" << numPeers << ") is sufficient."; } @@ -1048,9 +1046,9 @@ NetworkOPsImp::processHeartbeatTimer() // Check if the last validated ledger forces a change between these // states. if (mMode == OperatingMode::SYNCING) - setMode(OperatingMode::SYNCING, "Heartbeat: check syncing"); + setMode(OperatingMode::SYNCING); else if (mMode == OperatingMode::CONNECTED) - setMode(OperatingMode::CONNECTED, "Heartbeat: check connected"); + setMode(OperatingMode::CONNECTED); } mConsensus.timerEntry(app_.timeKeeper().closeTime()); @@ -1616,7 +1614,7 @@ void NetworkOPsImp::setAmendmentBlocked() { amendmentBlocked_ = true; - setMode(OperatingMode::CONNECTED, "setAmendmentBlocked"); + setMode(OperatingMode::CONNECTED); } inline bool @@ -1647,7 +1645,7 @@ void NetworkOPsImp::setUNLBlocked() { unlBlocked_ = true; - setMode(OperatingMode::CONNECTED, "setUNLBlocked"); + setMode(OperatingMode::CONNECTED); } inline void @@ -1748,7 +1746,7 @@ NetworkOPsImp::checkLastClosedLedger( if ((mMode == OperatingMode::TRACKING) || (mMode == OperatingMode::FULL)) { - setMode(OperatingMode::CONNECTED, "check LCL: not on consensus ledger"); + setMode(OperatingMode::CONNECTED); } if (consensus) @@ -1835,9 +1833,8 @@ NetworkOPsImp::beginConsensus(uint256 const& networkClosed) // this shouldn't happen unless we jump ledgers if (mMode == OperatingMode::FULL) { - JLOG(m_journal.warn()) - << "beginConsensus Don't have LCL, going to tracking"; - setMode(OperatingMode::TRACKING, "beginConsensus: No LCL"); + JLOG(m_journal.warn()) << "Don't have LCL, going to tracking"; + setMode(OperatingMode::TRACKING); } return false; @@ -1947,7 +1944,7 @@ NetworkOPsImp::endConsensus() // validations we have for LCL. 
If the ledger is good enough, go to // TRACKING - TODO if (!needNetworkLedger_) - setMode(OperatingMode::TRACKING, "endConsensus: check tracking"); + setMode(OperatingMode::TRACKING); } if (((mMode == OperatingMode::CONNECTED) || @@ -1961,7 +1958,7 @@ NetworkOPsImp::endConsensus() if (app_.timeKeeper().now() < (current->info().parentCloseTime + 2 * current->info().closeTimeResolution)) { - setMode(OperatingMode::FULL, "endConsensus: check full"); + setMode(OperatingMode::FULL); } } @@ -1973,7 +1970,7 @@ NetworkOPsImp::consensusViewChange() { if ((mMode == OperatingMode::FULL) || (mMode == OperatingMode::TRACKING)) { - setMode(OperatingMode::CONNECTED, "consensusViewChange"); + setMode(OperatingMode::CONNECTED); } } @@ -2291,7 +2288,7 @@ NetworkOPsImp::pubPeerStatus(std::function const& func) } void -NetworkOPsImp::setMode(OperatingMode om, const char* reason) +NetworkOPsImp::setMode(OperatingMode om) { using namespace std::chrono_literals; if (om == OperatingMode::CONNECTED) @@ -2311,12 +2308,11 @@ NetworkOPsImp::setMode(OperatingMode om, const char* reason) if (mMode == om) return; - auto const sink = om < mMode ? m_journal.warn() : m_journal.info(); mMode = om; accounting_.mode(om); - JLOG(sink) << "STATE->" << strOperatingMode() << " - " << reason; + JLOG(m_journal.info()) << "STATE->" << strOperatingMode(); pubServer(); } @@ -2328,28 +2324,34 @@ NetworkOPsImp::recvValidation( JLOG(m_journal.trace()) << "recvValidation " << val->getLedgerHash() << " from " << source; + std::unique_lock lock(validationsMutex_); + BypassAccept bypassAccept = BypassAccept::no; + try { - CanProcess const check( - validationsMutex_, pendingValidations_, val->getLedgerHash()); - try - { - BypassAccept bypassAccept = - check ? BypassAccept::no : BypassAccept::yes; - handleNewValidation(app_, val, source, bypassAccept, m_journal); - } - catch (std::exception const& e) - { - JLOG(m_journal.warn()) - << "Exception thrown for handling new validation " - << val->getLedgerHash() << ": " << e.what(); - } - catch (...) - { - JLOG(m_journal.warn()) - << "Unknown exception thrown for handling new validation " - << val->getLedgerHash(); - } + if (pendingValidations_.contains(val->getLedgerHash())) + bypassAccept = BypassAccept::yes; + else + pendingValidations_.insert(val->getLedgerHash()); + scope_unlock unlock(lock); + handleNewValidation(app_, val, source, bypassAccept, m_journal); + } + catch (std::exception const& e) + { + JLOG(m_journal.warn()) + << "Exception thrown for handling new validation " + << val->getLedgerHash() << ": " << e.what(); + } + catch (...) 
+ { + JLOG(m_journal.warn()) + << "Unknown exception thrown for handling new validation " + << val->getLedgerHash(); + } + if (bypassAccept == BypassAccept::no) + { + pendingValidations_.erase(val->getLedgerHash()); } + lock.unlock(); pubValidation(val); diff --git a/src/xrpld/app/misc/NetworkOPs.h b/src/xrpld/app/misc/NetworkOPs.h index 96969f4bcba..166b9e9e11f 100644 --- a/src/xrpld/app/misc/NetworkOPs.h +++ b/src/xrpld/app/misc/NetworkOPs.h @@ -197,7 +197,7 @@ class NetworkOPs : public InfoSub::Source virtual bool isFull() = 0; virtual void - setMode(OperatingMode om, const char* reason) = 0; + setMode(OperatingMode om) = 0; virtual bool isBlocked() = 0; virtual bool diff --git a/src/xrpld/overlay/Peer.h b/src/xrpld/overlay/Peer.h index b53fcb21a96..2646b24a3ed 100644 --- a/src/xrpld/overlay/Peer.h +++ b/src/xrpld/overlay/Peer.h @@ -36,7 +36,6 @@ enum class ProtocolFeature { ValidatorListPropagation, ValidatorList2Propagation, LedgerReplay, - LedgerDataCookies }; /** Represents a peer connection in the overlay. */ @@ -134,13 +133,6 @@ class Peer virtual bool txReduceRelayEnabled() const = 0; - - // - // Messages - // - - virtual std::set> - releaseRequestCookies(uint256 const& requestHash) = 0; }; } // namespace ripple diff --git a/src/xrpld/overlay/detail/PeerImp.cpp b/src/xrpld/overlay/detail/PeerImp.cpp index 0fcff031116..c3656c9445c 100644 --- a/src/xrpld/overlay/detail/PeerImp.cpp +++ b/src/xrpld/overlay/detail/PeerImp.cpp @@ -30,7 +30,6 @@ #include #include #include -#include #include #include #include @@ -58,9 +57,6 @@ std::chrono::milliseconds constexpr peerHighLatency{300}; /** How often we PING the peer to check for latency and sendq probe */ std::chrono::seconds constexpr peerTimerInterval{60}; - -/** How often we process duplicate incoming TMGetLedger messages */ -std::chrono::seconds constexpr getledgerInterval{15}; } // namespace // TODO: Remove this exclusion once unit tests are added after the hotfix @@ -508,8 +504,6 @@ PeerImp::supportsFeature(ProtocolFeature f) const return protocol_ >= make_protocol(2, 2); case ProtocolFeature::LedgerReplay: return ledgerReplayEnabled_; - case ProtocolFeature::LedgerDataCookies: - return protocol_ >= make_protocol(2, 3); } return false; } @@ -1352,9 +1346,8 @@ PeerImp::handleTransaction( void PeerImp::onMessage(std::shared_ptr const& m) { - auto badData = [&](std::string const& msg, bool chargefee = true) { - if (chargefee) - fee_.update(Resource::feeInvalidData, "get_ledger " + msg); + auto badData = [&](std::string const& msg) { + fee_.update(Resource::feeInvalidData, "get_ledger " + msg); JLOG(p_journal_.warn()) << "TMGetLedger: " << msg; }; auto const itype{m->itype()}; @@ -1431,74 +1424,12 @@ PeerImp::onMessage(std::shared_ptr const& m) } } - // Drop duplicate requests from the same peer for at least - // `getLedgerInterval` seconds. - // Append a little junk to prevent the hash of an incoming messsage - // from matching the hash of the same outgoing message. - // `shouldProcessForPeer` does not distingish between incoming and - // outgoing, and some of the message relay logic checks the hash to see - // if the message has been relayed already. If the hashes are the same, - // a duplicate will be detected when sending the message is attempted, - // so it will fail. - auto const messageHash = sha512Half(*m, nullptr); - // Request cookies are not included in the hash. Track them here. 
- auto const requestCookie = [&m]() -> std::optional { - if (m->has_requestcookie()) - return m->requestcookie(); - return std::nullopt; - }(); - auto const [inserted, pending] = [&] { - std::lock_guard lock{cookieLock_}; - auto& cookies = messageRequestCookies_[messageHash]; - bool const pending = !cookies.empty(); - return std::pair{cookies.emplace(requestCookie).second, pending}; - }(); - // Check if the request has been seen from this peer. - if (!app_.getHashRouter().shouldProcessForPeer( - messageHash, id_, getledgerInterval)) - { - // This request has already been seen from this peer. - // Has it been seen with this request cookie (or lack thereof)? - - if (inserted) - { - // This is a duplicate request, but with a new cookie. When a - // response is ready, one will be sent for each request cookie. - JLOG(p_journal_.debug()) - << "TMGetLedger: duplicate request with new request cookie: " - << requestCookie.value_or(0) - << ". Job pending: " << (pending ? "yes" : "no") << ": " - << messageHash; - if (pending) - { - // Don't bother queueing up a new job if other requests are - // already pending. This should limit entries in the job queue - // to one per peer per unique request. - JLOG(p_journal_.debug()) - << "TMGetLedger: Suppressing recvGetLedger job, since one " - "is pending: " - << messageHash; - return; - } - } - else - { - // Don't punish nodes that don't know any better - return badData( - "duplicate request: " + to_string(messageHash), - supportsFeature(ProtocolFeature::LedgerDataCookies)); - } - } - // Queue a job to process the request - JLOG(p_journal_.debug()) - << "TMGetLedger: Adding recvGetLedger job: " << messageHash; std::weak_ptr weak = shared_from_this(); - app_.getJobQueue().addJob( - jtLEDGER_REQ, "recvGetLedger", [weak, m, messageHash]() { - if (auto peer = weak.lock()) - peer->processLedgerRequest(m, messageHash); - }); + app_.getJobQueue().addJob(jtLEDGER_REQ, "recvGetLedger", [weak, m]() { + if (auto peer = weak.lock()) + peer->processLedgerRequest(m); + }); } void @@ -1614,9 +1545,8 @@ PeerImp::onMessage(std::shared_ptr const& m) void PeerImp::onMessage(std::shared_ptr const& m) { - auto badData = [&](std::string const& msg, bool charge = true) { - if (charge) - fee_.update(Resource::feeInvalidData, msg); + auto badData = [&](std::string const& msg) { + fee_.update(Resource::feeInvalidData, msg); JLOG(p_journal_.warn()) << "TMLedgerData: " << msg; }; @@ -1667,99 +1597,23 @@ PeerImp::onMessage(std::shared_ptr const& m) "Invalid Ledger/TXset nodes " + std::to_string(m->nodes_size())); } - auto const messageHash = sha512Half(*m); - if (!app_.getHashRouter().addSuppressionPeer(messageHash, id_)) + // If there is a request cookie, attempt to relay the message + if (m->has_requestcookie()) { - // Don't punish nodes that don't know any better - return badData( - "Duplicate message: " + to_string(messageHash), - supportsFeature(ProtocolFeature::LedgerDataCookies)); - } - - bool const routed = m->has_directresponse() || m->responsecookies_size() || - m->has_requestcookie(); - - { - // Check if this message needs to be forwarded to one or more peers. - // Maximum of one of the relevant fields should be populated. 
- XRPL_ASSERT( - !m->has_requestcookie() || !m->responsecookies_size(), - "ripple::PeerImp::onMessage(TMLedgerData) : valid cookie fields"); - - // Make a copy of the response cookies, then wipe the list so it can be - // forwarded cleanly - auto const responseCookies = m->responsecookies(); - m->clear_responsecookies(); - // Flag indicating if this response should be processed locally, - // possibly in addition to being forwarded. - bool const directResponse = - m->has_directresponse() && m->directresponse(); - m->clear_directresponse(); - - auto const relay = [this, m, &messageHash](auto const cookie) { - if (auto peer = overlay_.findPeerByShortID(cookie)) - { - XRPL_ASSERT( - !m->has_requestcookie() && !m->responsecookies_size(), - "ripple::PeerImp::onMessage(TMLedgerData) relay : no " - "cookies"); - if (peer->supportsFeature(ProtocolFeature::LedgerDataCookies)) - // Setting this flag is not _strictly_ necessary for peers - // that support it if there are no cookies included in the - // message, but it is more accurate. - m->set_directresponse(true); - else - m->clear_directresponse(); - peer->send( - std::make_shared(*m, protocol::mtLEDGER_DATA)); - } - else - JLOG(p_journal_.info()) - << "Unable to route TX/ledger data reply to peer [" - << cookie << "]: " << messageHash; - }; - // If there is a request cookie, attempt to relay the message - if (m->has_requestcookie()) + if (auto peer = overlay_.findPeerByShortID(m->requestcookie())) { - XRPL_ASSERT( - responseCookies.empty(), - "ripple::PeerImp::onMessage(TMLedgerData) : no response " - "cookies"); m->clear_requestcookie(); - relay(m->requestcookie()); - if (!directResponse && responseCookies.empty()) - return; - } - // If there's a list of request cookies, attempt to relay the message to - // all of them. - if (responseCookies.size()) - { - for (auto const cookie : responseCookies) - relay(cookie); - if (!directResponse) - return; + peer->send(std::make_shared(*m, protocol::mtLEDGER_DATA)); } - } - - // Now that any forwarding is done check the base message (data only, no - // routing info for duplicates) - if (routed) - { - m->clear_directresponse(); - XRPL_ASSERT( - !m->has_requestcookie() && !m->responsecookies_size(), - "ripple::PeerImp::onMessage(TMLedgerData) : no cookies"); - auto const baseMessageHash = sha512Half(*m); - if (!app_.getHashRouter().addSuppressionPeer(baseMessageHash, id_)) + else { - // Don't punish nodes that don't know any better - return badData( - "Duplicate message: " + to_string(baseMessageHash), - supportsFeature(ProtocolFeature::LedgerDataCookies)); + JLOG(p_journal_.info()) << "Unable to route TX/ledger data reply"; } + return; } uint256 const ledgerHash{m->ledgerhash()}; + // Otherwise check if received data for a candidate transaction set if (m->type() == protocol::liTS_CANDIDATE) { @@ -3143,22 +2997,16 @@ PeerImp::checkValidation( // the TX tree with the specified root hash. 
// static std::shared_ptr -getPeerWithTree( - OverlayImpl& ov, - uint256 const& rootHash, - PeerImp const* skip, - std::function shouldProcessCallback) +getPeerWithTree(OverlayImpl& ov, uint256 const& rootHash, PeerImp const* skip) { std::shared_ptr ret; int retScore = 0; - XRPL_ASSERT( - shouldProcessCallback, "ripple::getPeerWithTree : callback provided"); ov.for_each([&](std::shared_ptr&& p) { if (p->hasTxSet(rootHash) && p.get() != skip) { auto score = p->getScore(true); - if (!ret || (score > retScore && shouldProcessCallback(p->id()))) + if (!ret || (score > retScore)) { ret = std::move(p); retScore = score; @@ -3177,19 +3025,16 @@ getPeerWithLedger( OverlayImpl& ov, uint256 const& ledgerHash, LedgerIndex ledger, - PeerImp const* skip, - std::function shouldProcessCallback) + PeerImp const* skip) { std::shared_ptr ret; int retScore = 0; - XRPL_ASSERT( - shouldProcessCallback, "ripple::getPeerWithLedger : callback provided"); ov.for_each([&](std::shared_ptr&& p) { if (p->hasLedger(ledgerHash, ledger) && p.get() != skip) { auto score = p->getScore(true); - if (!ret || (score > retScore && shouldProcessCallback(p->id()))) + if (!ret || (score > retScore)) { ret = std::move(p); retScore = score; @@ -3203,8 +3048,7 @@ getPeerWithLedger( void PeerImp::sendLedgerBase( std::shared_ptr const& ledger, - protocol::TMLedgerData& ledgerData, - PeerCookieMap const& destinations) + protocol::TMLedgerData& ledgerData) { JLOG(p_journal_.trace()) << "sendLedgerBase: Base data"; @@ -3236,102 +3080,15 @@ PeerImp::sendLedgerBase( } } - sendToMultiple(ledgerData, destinations); -} - -void -PeerImp::sendToMultiple( - protocol::TMLedgerData& ledgerData, - PeerCookieMap const& destinations) -{ - bool foundSelf = false; - for (auto const& [peer, cookies] : destinations) - { - if (peer.get() == this) - foundSelf = true; - bool const multipleCookies = - peer->supportsFeature(ProtocolFeature::LedgerDataCookies); - std::vector sendCookies; - - bool directResponse = false; - if (!multipleCookies) - { - JLOG(p_journal_.debug()) - << "sendToMultiple: Sending " << cookies.size() - << " TMLedgerData messages to peer [" << peer->id() - << "]: " << sha512Half(ledgerData); - } - for (auto const& cookie : cookies) - { - // Unfortunately, need a separate Message object for every - // combination - if (cookie) - { - if (multipleCookies) - { - // Save this one for later to send a single message - sendCookies.emplace_back(*cookie); - continue; - } - - // Feature not supported, so send a single message with a - // single cookie - ledgerData.set_requestcookie(*cookie); - } - else - { - if (multipleCookies) - { - // Set this flag later on the single message - directResponse = true; - continue; - } - - ledgerData.clear_requestcookie(); - } - XRPL_ASSERT( - !multipleCookies, - "ripple::PeerImp::sendToMultiple : ledger data cookies " - "unsupported"); - auto message{ - std::make_shared(ledgerData, protocol::mtLEDGER_DATA)}; - peer->send(message); - } - if (multipleCookies) - { - // Send a single message with all the cookies and/or the direct - // response flag, so the receiver can farm out the single message to - // multiple peers and/or itself - XRPL_ASSERT( - sendCookies.size() || directResponse, - "ripple::PeerImp::sendToMultiple : valid response options"); - ledgerData.clear_requestcookie(); - ledgerData.clear_responsecookies(); - ledgerData.set_directresponse(directResponse); - for (auto const& cookie : sendCookies) - ledgerData.add_responsecookies(cookie); - auto message{ - std::make_shared(ledgerData, 
protocol::mtLEDGER_DATA)}; - peer->send(message); - - JLOG(p_journal_.debug()) - << "sendToMultiple: Sent 1 TMLedgerData message to peer [" - << peer->id() << "]: including " - << (directResponse ? "the direct response flag and " : "") - << sendCookies.size() << " response cookies. " - << ": " << sha512Half(ledgerData); - } - } - XRPL_ASSERT( - foundSelf, "ripple::PeerImp::sendToMultiple : current peer included"); + auto message{ + std::make_shared(ledgerData, protocol::mtLEDGER_DATA)}; + send(message); } std::shared_ptr -PeerImp::getLedger( - std::shared_ptr const& m, - uint256 const& mHash) +PeerImp::getLedger(std::shared_ptr const& m) { - JLOG(p_journal_.trace()) << "getLedger: Ledger " << mHash; + JLOG(p_journal_.trace()) << "getLedger: Ledger"; std::shared_ptr ledger; @@ -3348,33 +3105,22 @@ PeerImp::getLedger( if (m->has_querytype() && !m->has_requestcookie()) { // Attempt to relay the request to a peer - // Note repeated messages will not relay to the same peer - // before `getLedgerInterval` seconds. This prevents one - // peer from getting flooded, and distributes the request - // load. If a request has been relayed to all eligible - // peers, then this message will not be relayed. if (auto const peer = getPeerWithLedger( overlay_, ledgerHash, m->has_ledgerseq() ? m->ledgerseq() : 0, - this, - [&](Peer::id_t id) { - return app_.getHashRouter().shouldProcessForPeer( - mHash, id, getledgerInterval); - })) + this)) { m->set_requestcookie(id()); peer->send( std::make_shared(*m, protocol::mtGET_LEDGER)); JLOG(p_journal_.debug()) - << "getLedger: Request relayed to peer [" << peer->id() - << "]: " << mHash; + << "getLedger: Request relayed to peer"; return ledger; } JLOG(p_journal_.trace()) - << "getLedger: Don't have ledger with hash " << ledgerHash - << ": " << mHash; + << "getLedger: Failed to find peer to relay request"; } } } @@ -3384,7 +3130,7 @@ PeerImp::getLedger( if (m->ledgerseq() < app_.getLedgerMaster().getEarliestFetch()) { JLOG(p_journal_.debug()) - << "getLedger: Early ledger sequence request " << mHash; + << "getLedger: Early ledger sequence request"; } else { @@ -3393,7 +3139,7 @@ PeerImp::getLedger( { JLOG(p_journal_.debug()) << "getLedger: Don't have ledger with sequence " - << m->ledgerseq() << ": " << mHash; + << m->ledgerseq(); } } } @@ -3416,33 +3162,29 @@ PeerImp::getLedger( Resource::feeMalformedRequest, "get_ledger ledgerSeq"); ledger.reset(); - JLOG(p_journal_.warn()) << "getLedger: Invalid ledger sequence " - << ledgerSeq << ": " << mHash; + JLOG(p_journal_.warn()) + << "getLedger: Invalid ledger sequence " << ledgerSeq; } } else if (ledgerSeq < app_.getLedgerMaster().getEarliestFetch()) { ledger.reset(); JLOG(p_journal_.debug()) - << "getLedger: Early ledger sequence request " << ledgerSeq - << ": " << mHash; + << "getLedger: Early ledger sequence request " << ledgerSeq; } } else { - JLOG(p_journal_.debug()) - << "getLedger: Unable to find ledger " << mHash; + JLOG(p_journal_.debug()) << "getLedger: Unable to find ledger"; } return ledger; } std::shared_ptr -PeerImp::getTxSet( - std::shared_ptr const& m, - uint256 const& mHash) const +PeerImp::getTxSet(std::shared_ptr const& m) const { - JLOG(p_journal_.trace()) << "getTxSet: TX set " << mHash; + JLOG(p_journal_.trace()) << "getTxSet: TX set"; uint256 const txSetHash{m->ledgerhash()}; std::shared_ptr shaMap{ @@ -3452,34 +3194,22 @@ PeerImp::getTxSet( if (m->has_querytype() && !m->has_requestcookie()) { // Attempt to relay the request to a peer - // Note repeated messages will not relay to the same peer - // 
before `getLedgerInterval` seconds. This prevents one - // peer from getting flooded, and distributes the request - // load. If a request has been relayed to all eligible - // peers, then this message will not be relayed. - if (auto const peer = getPeerWithTree( - overlay_, txSetHash, this, [&](Peer::id_t id) { - return app_.getHashRouter().shouldProcessForPeer( - mHash, id, getledgerInterval); - })) + if (auto const peer = getPeerWithTree(overlay_, txSetHash, this)) { m->set_requestcookie(id()); peer->send( std::make_shared(*m, protocol::mtGET_LEDGER)); - JLOG(p_journal_.debug()) - << "getTxSet: Request relayed to peer [" << peer->id() - << "]: " << mHash; + JLOG(p_journal_.debug()) << "getTxSet: Request relayed"; } else { JLOG(p_journal_.debug()) - << "getTxSet: Failed to find relay peer: " << mHash; + << "getTxSet: Failed to find relay peer"; } } else { - JLOG(p_journal_.debug()) - << "getTxSet: Failed to find TX set " << mHash; + JLOG(p_journal_.debug()) << "getTxSet: Failed to find TX set"; } } @@ -3487,9 +3217,7 @@ PeerImp::getTxSet( } void -PeerImp::processLedgerRequest( - std::shared_ptr const& m, - uint256 const& mHash) +PeerImp::processLedgerRequest(std::shared_ptr const& m) { // Do not resource charge a peer responding to a relay if (!m->has_requestcookie()) @@ -3503,74 +3231,9 @@ PeerImp::processLedgerRequest( bool fatLeaves{true}; auto const itype{m->itype()}; - auto getDestinations = [&] { - // If a ledger data message is generated, it's going to be sent to every - // peer that is waiting for it. - - PeerCookieMap result; - - std::size_t numCookies = 0; - { - // Don't do the work under this peer if this peer is not waiting for - // any replies - auto myCookies = releaseRequestCookies(mHash); - if (myCookies.empty()) - { - JLOG(p_journal_.debug()) << "TMGetLedger: peer is no longer " - "waiting for response to request: " - << mHash; - return result; - } - numCookies += myCookies.size(); - result[shared_from_this()] = myCookies; - } - - std::set const peers = - app_.getHashRouter().getPeers(mHash); - for (auto const peerID : peers) - { - // This loop does not need to be done under the HashRouter - // lock because findPeerByShortID and releaseRequestCookies - // are thread safe, and everything else is local - if (auto p = overlay_.findPeerByShortID(peerID)) - { - auto cookies = p->releaseRequestCookies(mHash); - numCookies += cookies.size(); - if (result.contains(p)) - { - // Unlikely, but if a request came in to this peer while - // iterating, add the items instead of copying / - // overwriting. - XRPL_ASSERT( - p.get() == this, - "ripple::PeerImp::processLedgerRequest : found self in " - "map"); - for (auto const& cookie : cookies) - result[p].emplace(cookie); - } - else if (cookies.size()) - result[p] = cookies; - } - } - - JLOG(p_journal_.debug()) - << "TMGetLedger: Processing request for " << result.size() - << " peers. Will send " << numCookies - << " messages if successful: " << mHash; - - return result; - }; - // Will only populate this if we're going to do work. - PeerCookieMap destinations; - if (itype == protocol::liTS_CANDIDATE) { - destinations = getDestinations(); - if (destinations.empty()) - // Nowhere to send the response! 
- return; - - if (sharedMap = getTxSet(m, mHash); !sharedMap) + if (sharedMap = getTxSet(m); !sharedMap) return; map = sharedMap.get(); @@ -3578,6 +3241,8 @@ PeerImp::processLedgerRequest( ledgerData.set_ledgerseq(0); ledgerData.set_ledgerhash(m->ledgerhash()); ledgerData.set_type(protocol::liTS_CANDIDATE); + if (m->has_requestcookie()) + ledgerData.set_requestcookie(m->requestcookie()); // We'll already have most transactions fatLeaves = false; @@ -3596,12 +3261,7 @@ PeerImp::processLedgerRequest( return; } - destinations = getDestinations(); - if (destinations.empty()) - // Nowhere to send the response! - return; - - if (ledger = getLedger(m, mHash); !ledger) + if (ledger = getLedger(m); !ledger) return; // Fill out the reply @@ -3609,11 +3269,13 @@ PeerImp::processLedgerRequest( ledgerData.set_ledgerhash(ledgerHash.begin(), ledgerHash.size()); ledgerData.set_ledgerseq(ledger->info().seq); ledgerData.set_type(itype); + if (m->has_requestcookie()) + ledgerData.set_requestcookie(m->requestcookie()); switch (itype) { case protocol::liBASE: - sendLedgerBase(ledger, ledgerData, destinations); + sendLedgerBase(ledger, ledgerData); return; case protocol::liTX_NODE: @@ -3730,7 +3392,7 @@ PeerImp::processLedgerRequest( if (ledgerData.nodes_size() == 0) return; - sendToMultiple(ledgerData, destinations); + send(std::make_shared(ledgerData, protocol::mtLEDGER_DATA)); } int @@ -3788,19 +3450,6 @@ PeerImp::reduceRelayReady() return vpReduceRelayEnabled_ && reduceRelayReady_; } -std::set> -PeerImp::releaseRequestCookies(uint256 const& requestHash) -{ - std::set> result; - std::lock_guard lock(cookieLock_); - if (messageRequestCookies_.contains(requestHash)) - { - std::swap(result, messageRequestCookies_[requestHash]); - messageRequestCookies_.erase(requestHash); - } - return result; -}; - void PeerImp::Metrics::add_message(std::uint64_t bytes) { diff --git a/src/xrpld/overlay/detail/PeerImp.h b/src/xrpld/overlay/detail/PeerImp.h index 7db2ecf5f0f..14591efbb18 100644 --- a/src/xrpld/overlay/detail/PeerImp.h +++ b/src/xrpld/overlay/detail/PeerImp.h @@ -195,15 +195,6 @@ class PeerImp : public Peer, bool ledgerReplayEnabled_ = false; LedgerReplayMsgHandler ledgerReplayMsgHandler_; - // Track message requests and responses - // TODO: Use an expiring cache or something - using MessageCookieMap = - std::map>>; - using PeerCookieMap = - std::map, std::set>>; - std::mutex mutable cookieLock_; - MessageCookieMap messageRequestCookies_; - friend class OverlayImpl; class Metrics @@ -450,13 +441,6 @@ class PeerImp : public Peer, return txReduceRelayEnabled_; } - // - // Messages - // - - std::set> - releaseRequestCookies(uint256 const& requestHash) override; - private: void close(); @@ -655,28 +639,16 @@ class PeerImp : public Peer, void sendLedgerBase( std::shared_ptr const& ledger, - protocol::TMLedgerData& ledgerData, - PeerCookieMap const& destinations); - - void - sendToMultiple( - protocol::TMLedgerData& ledgerData, - PeerCookieMap const& destinations); + protocol::TMLedgerData& ledgerData); std::shared_ptr - getLedger( - std::shared_ptr const& m, - uint256 const& mHash); + getLedger(std::shared_ptr const& m); std::shared_ptr - getTxSet( - std::shared_ptr const& m, - uint256 const& mHash) const; + getTxSet(std::shared_ptr const& m) const; void - processLedgerRequest( - std::shared_ptr const& m, - uint256 const& mHash); + processLedgerRequest(std::shared_ptr const& m); }; //------------------------------------------------------------------------------ diff --git a/src/xrpld/overlay/detail/PeerSet.cpp 
b/src/xrpld/overlay/detail/PeerSet.cpp index cb7b77db7fc..909b20c3079 100644 --- a/src/xrpld/overlay/detail/PeerSet.cpp +++ b/src/xrpld/overlay/detail/PeerSet.cpp @@ -18,11 +18,9 @@ //============================================================================== #include -#include #include #include #include -#include namespace ripple { @@ -106,52 +104,16 @@ PeerSetImpl::sendRequest( std::shared_ptr const& peer) { auto packet = std::make_shared(message, type); - - auto const messageHash = [&]() { - auto const packetBuffer = - packet->getBuffer(compression::Compressed::Off); - return sha512Half(Slice(packetBuffer.data(), packetBuffer.size())); - }(); - - // Allow messages to be re-sent to the same peer after a delay - using namespace std::chrono_literals; - constexpr std::chrono::seconds interval = 30s; - if (peer) { - if (app_.getHashRouter().shouldProcessForPeer( - messageHash, peer->id(), interval)) - { - JLOG(journal_.trace()) - << "Sending " << protocolMessageName(type) << " message to [" - << peer->id() << "]: " << messageHash; - peer->send(packet); - } - else - JLOG(journal_.debug()) - << "Suppressing sending duplicate " << protocolMessageName(type) - << " message to [" << peer->id() << "]: " << messageHash; + peer->send(packet); return; } for (auto id : peers_) { if (auto p = app_.overlay().findPeerByShortID(id)) - { - if (app_.getHashRouter().shouldProcessForPeer( - messageHash, p->id(), interval)) - { - JLOG(journal_.trace()) - << "Sending " << protocolMessageName(type) - << " message to [" << p->id() << "]: " << messageHash; - p->send(packet); - } - else - JLOG(journal_.debug()) - << "Suppressing sending duplicate " - << protocolMessageName(type) << " message to [" << p->id() - << "]: " << messageHash; - } + p->send(packet); } } diff --git a/src/xrpld/overlay/detail/ProtocolMessage.h b/src/xrpld/overlay/detail/ProtocolMessage.h index 86b630081a5..54f99eb73d0 100644 --- a/src/xrpld/overlay/detail/ProtocolMessage.h +++ b/src/xrpld/overlay/detail/ProtocolMessage.h @@ -43,12 +43,6 @@ protocolMessageType(protocol::TMGetLedger const&) return protocol::mtGET_LEDGER; } -inline protocol::MessageType -protocolMessageType(protocol::TMLedgerData const&) -{ - return protocol::mtLEDGER_DATA; -} - inline protocol::MessageType protocolMessageType(protocol::TMReplayDeltaRequest const&) { @@ -492,64 +486,4 @@ invokeProtocolMessage( } // namespace ripple -namespace protocol { - -template -void -hash_append(Hasher& h, TMGetLedger const& msg) -{ - using beast::hash_append; - using namespace ripple; - hash_append(h, safe_cast(protocolMessageType(msg))); - hash_append(h, safe_cast(msg.itype())); - if (msg.has_ltype()) - hash_append(h, safe_cast(msg.ltype())); - - if (msg.has_ledgerhash()) - hash_append(h, msg.ledgerhash()); - - if (msg.has_ledgerseq()) - hash_append(h, msg.ledgerseq()); - - for (auto const& nodeId : msg.nodeids()) - hash_append(h, nodeId); - hash_append(h, msg.nodeids_size()); - - // Do NOT include the request cookie. It does not affect the content of the - // request, but only where to route the results. 
- // if (msg.has_requestcookie()) - // hash_append(h, msg.requestcookie()); - - if (msg.has_querytype()) - hash_append(h, safe_cast(msg.querytype())); - - if (msg.has_querydepth()) - hash_append(h, msg.querydepth()); -} - -template -void -hash_append(Hasher& h, TMLedgerData const& msg) -{ - using beast::hash_append; - using namespace ripple; - hash_append(h, safe_cast(protocolMessageType(msg))); - hash_append(h, msg.ledgerhash()); - hash_append(h, msg.ledgerseq()); - hash_append(h, safe_cast(msg.type())); - for (auto const& node : msg.nodes()) - { - hash_append(h, node.nodedata()); - if (node.has_nodeid()) - hash_append(h, node.nodeid()); - } - hash_append(h, msg.nodes_size()); - if (msg.has_requestcookie()) - hash_append(h, msg.requestcookie()); - if (msg.has_error()) - hash_append(h, safe_cast(msg.error())); -} - -} // namespace protocol - #endif diff --git a/src/xrpld/overlay/detail/ProtocolVersion.cpp b/src/xrpld/overlay/detail/ProtocolVersion.cpp index ce6c1e6fa3d..0fecb301f7f 100644 --- a/src/xrpld/overlay/detail/ProtocolVersion.cpp +++ b/src/xrpld/overlay/detail/ProtocolVersion.cpp @@ -37,9 +37,7 @@ namespace ripple { constexpr ProtocolVersion const supportedProtocolList[] { {2, 1}, - {2, 2}, - // Adds TMLedgerData::responseCookies and directResponse - {2, 3} + {2, 2} }; // clang-format on From dce3e1efa6fc708fdac6e8d993e17c5c61a76f84 Mon Sep 17 00:00:00 2001 From: Ed Hennis Date: Thu, 20 Feb 2025 13:35:04 -0500 Subject: [PATCH 24/29] Add logging and improve counting of amendment votes from UNL (#5173) * Add logging for amendment voting decision process * When counting "received validations" to determine quorum, count the number of validators actually voting, not the total number of possible votes. --- src/xrpld/app/consensus/RCLConsensus.cpp | 2 +- src/xrpld/app/misc/AmendmentTable.h | 7 +- src/xrpld/app/misc/detail/AmendmentTable.cpp | 142 +++++++++++++++---- 3 files changed, 119 insertions(+), 32 deletions(-) diff --git a/src/xrpld/app/consensus/RCLConsensus.cpp b/src/xrpld/app/consensus/RCLConsensus.cpp index a746b30357d..387fe459c09 100644 --- a/src/xrpld/app/consensus/RCLConsensus.cpp +++ b/src/xrpld/app/consensus/RCLConsensus.cpp @@ -360,7 +360,7 @@ RCLConsensus::Adaptor::onClose( { feeVote_->doVoting(prevLedger, validations, initialSet); app_.getAmendmentTable().doVoting( - prevLedger, validations, initialSet); + prevLedger, validations, initialSet, j_); } } else if ( diff --git a/src/xrpld/app/misc/AmendmentTable.h b/src/xrpld/app/misc/AmendmentTable.h index 538d7299f3b..d6193adca2c 100644 --- a/src/xrpld/app/misc/AmendmentTable.h +++ b/src/xrpld/app/misc/AmendmentTable.h @@ -147,7 +147,8 @@ class AmendmentTable doVoting( std::shared_ptr const& lastClosedLedger, std::vector> const& parentValidations, - std::shared_ptr const& initialPosition) + std::shared_ptr const& initialPosition, + beast::Journal j) { // Ask implementation what to do auto actions = doVoting( @@ -174,6 +175,10 @@ class AmendmentTable Serializer s; amendTx.add(s); + JLOG(j.debug()) << "Amendments: Adding pseudo-transaction: " + << amendTx.getTransactionID() << ": " + << strHex(s.slice()) << ": " << amendTx; + initialPosition->addGiveItem( SHAMapNodeType::tnTRANSACTION_NM, make_shamapitem(amendTx.getTransactionID(), s.slice())); diff --git a/src/xrpld/app/misc/detail/AmendmentTable.cpp b/src/xrpld/app/misc/detail/AmendmentTable.cpp index 270b5daced2..d496fc4e5ee 100644 --- a/src/xrpld/app/misc/detail/AmendmentTable.cpp +++ b/src/xrpld/app/misc/detail/AmendmentTable.cpp @@ -25,7 +25,10 @@ #include #include 
#include + +#include #include +#include #include #include #include @@ -88,15 +91,17 @@ parseSection(Section const& section) class TrustedVotes { private: - static constexpr NetClock::time_point maxTimeout = - NetClock::time_point::max(); - // Associates each trusted validator with the last votes we saw from them // and an expiration for that record. struct UpvotesAndTimeout { std::vector upVotes; - NetClock::time_point timeout = maxTimeout; + /** An unseated timeout indicates that either + 1. No validations have ever been received + 2. The validator has not been heard from in long enough that the + timeout passed, and votes expired. + */ + std::optional timeout; }; hash_map recordedVotes_; @@ -130,7 +135,7 @@ class TrustedVotes else { // New validators have a starting position of no on everything. - // Add the entry with an empty vector and maxTimeout. + // Add the entry with an empty vector and unseated timeout. newRecordedVotes[trusted]; } } @@ -147,6 +152,7 @@ class TrustedVotes Rules const& rules, std::vector> const& valSet, NetClock::time_point const closeTime, + beast::Journal j, std::lock_guard const& lock) { // When we get an STValidation we save the upVotes it contains, but @@ -163,38 +169,86 @@ class TrustedVotes using namespace std::chrono_literals; static constexpr NetClock::duration expiresAfter = 24h; + auto const newTimeout = closeTime + expiresAfter; + // Walk all validations and replace previous votes from trusted // validators with these newest votes. for (auto const& val : valSet) { + auto const pkHuman = + toBase58(TokenType::NodePublic, val->getSignerPublic()); // If this validation comes from one of our trusted validators... if (auto const iter = recordedVotes_.find(val->getSignerPublic()); iter != recordedVotes_.end()) { - iter->second.timeout = closeTime + expiresAfter; + iter->second.timeout = newTimeout; if (val->isFieldPresent(sfAmendments)) { auto const& choices = val->getFieldV256(sfAmendments); iter->second.upVotes.assign(choices.begin(), choices.end()); + JLOG(j.debug()) + << "recordVotes: Validation from trusted " << pkHuman + << " has " << choices.size() << " amendment votes: " + << boost::algorithm::join( + iter->second.upVotes | + boost::adaptors::transformed( + to_string<256, void>), + ", "); + // TODO: Maybe transform using to_short_string once #5126 is + // merged + // + // iter->second.upVotes | + // boost::adaptors::transformed(to_short_string<256, void>) } else { // This validator does not upVote any amendments right now. iter->second.upVotes.clear(); + JLOG(j.debug()) << "recordVotes: Validation from trusted " + << pkHuman << " has no amendment votes."; } } + else + { + JLOG(j.debug()) + << "recordVotes: Ignoring validation from untrusted " + << pkHuman; + } } // Now remove any expired records from recordedVotes_. 
std::for_each( recordedVotes_.begin(), recordedVotes_.end(), - [&closeTime](decltype(recordedVotes_)::value_type& votes) { - if (closeTime > votes.second.timeout) + [&closeTime, newTimeout, &j]( + decltype(recordedVotes_)::value_type& votes) { + auto const pkHuman = + toBase58(TokenType::NodePublic, votes.first); + if (!votes.second.timeout) { - votes.second.timeout = maxTimeout; + assert(votes.second.upVotes.empty()); + JLOG(j.debug()) + << "recordVotes: Have not received any " + "amendment votes from " + << pkHuman << " since last timeout or startup"; + } + else if (closeTime > votes.second.timeout) + { + JLOG(j.debug()) + << "recordVotes: Timeout: Clearing votes from " + << pkHuman; + votes.second.timeout.reset(); votes.second.upVotes.clear(); } + else if (votes.second.timeout != newTimeout) + { + assert(votes.second.timeout < newTimeout); + using namespace std::chrono; + auto const age = duration_cast( + newTimeout - *votes.second.timeout); + JLOG(j.debug()) << "recordVotes: Using " << age.count() + << "min old cached votes from " << pkHuman; + } }); } @@ -205,14 +259,20 @@ class TrustedVotes getVotes(Rules const& rules, std::lock_guard const& lock) const { hash_map ret; + int available = 0; for (auto& validatorVotes : recordedVotes_) { + assert( + validatorVotes.second.timeout || + validatorVotes.second.upVotes.empty()); + if (validatorVotes.second.timeout) + ++available; for (uint256 const& amendment : validatorVotes.second.upVotes) { ret[amendment] += 1; } } - return {recordedVotes_.size(), ret}; + return {available, ret}; } }; @@ -789,13 +849,13 @@ AmendmentTableImpl::doVoting( std::lock_guard lock(mutex_); // Keep a record of the votes we received. - previousTrustedVotes_.recordVotes(rules, valSet, closeTime, lock); + previousTrustedVotes_.recordVotes(rules, valSet, closeTime, j_, lock); // Tally the most recent votes. auto vote = std::make_unique(rules, previousTrustedVotes_, lock); - JLOG(j_.debug()) << "Received " << vote->trustedValidations() - << " trusted validations, threshold is: " + JLOG(j_.debug()) << "Counted votes from " << vote->trustedValidations() + << " valid trusted validations, threshold is: " << vote->threshold(); // Map of amendments to the action to be taken for each one. 
The action is @@ -805,43 +865,65 @@ AmendmentTableImpl::doVoting( // process all amendments we know of for (auto const& entry : amendmentMap_) { - NetClock::time_point majorityTime = {}; + if (enabledAmendments.contains(entry.first)) + { + JLOG(j_.trace()) << entry.first << ": amendment already enabled"; + + continue; + } bool const hasValMajority = vote->passes(entry.first); - { + auto const majorityTime = [&]() -> std::optional { auto const it = majorityAmendments.find(entry.first); if (it != majorityAmendments.end()) - majorityTime = it->second; - } + return it->second; + return std::nullopt; + }(); - if (enabledAmendments.count(entry.first) != 0) - { - JLOG(j_.debug()) << entry.first << ": amendment already enabled"; - } - else if ( - hasValMajority && (majorityTime == NetClock::time_point{}) && + bool const hasLedgerMajority = majorityTime.has_value(); + + auto const logStr = [&entry, &vote]() { + std::stringstream ss; + ss << entry.first << " (" << entry.second.name << ") has " + << vote->votes(entry.first) << " votes"; + return ss.str(); + }(); + + if (hasValMajority && !hasLedgerMajority && entry.second.vote == AmendmentVote::up) { - // Ledger says no majority, validators say yes - JLOG(j_.debug()) << entry.first << ": amendment got majority"; + // Ledger says no majority, validators say yes, and voting yes + // locally + JLOG(j_.debug()) << logStr << ": amendment got majority"; actions[entry.first] = tfGotMajority; } - else if (!hasValMajority && (majorityTime != NetClock::time_point{})) + else if (!hasValMajority && hasLedgerMajority) { // Ledger says majority, validators say no - JLOG(j_.debug()) << entry.first << ": amendment lost majority"; + JLOG(j_.debug()) << logStr << ": amendment lost majority"; actions[entry.first] = tfLostMajority; } else if ( - (majorityTime != NetClock::time_point{}) && - ((majorityTime + majorityTime_) <= closeTime) && + hasLedgerMajority && + ((*majorityTime + majorityTime_) <= closeTime) && entry.second.vote == AmendmentVote::up) { // Ledger says majority held - JLOG(j_.debug()) << entry.first << ": amendment majority held"; + JLOG(j_.debug()) << logStr << ": amendment majority held"; actions[entry.first] = 0; } + // Logging only below this point + else if (hasValMajority && hasLedgerMajority) + { + JLOG(j_.debug()) + << logStr + << ": amendment holding majority, waiting to be enabled"; + } + else if (!hasValMajority) + { + JLOG(j_.debug()) << logStr << ": amendment does not have majority"; + } } // Stash for reporting From ab44cc31e2f8091a28fa440a4e088cca074945a7 Mon Sep 17 00:00:00 2001 From: Michael Legleux Date: Thu, 20 Feb 2025 09:23:27 -0800 Subject: [PATCH 25/29] Set version to 2.4.0-rc2 --- src/libxrpl/protocol/BuildInfo.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/libxrpl/protocol/BuildInfo.cpp b/src/libxrpl/protocol/BuildInfo.cpp index 6384f4de8db..96138873ded 100644 --- a/src/libxrpl/protocol/BuildInfo.cpp +++ b/src/libxrpl/protocol/BuildInfo.cpp @@ -33,7 +33,7 @@ namespace BuildInfo { // and follow the format described at http://semver.org/ //------------------------------------------------------------------------------ // clang-format off -char const* const versionString = "2.4.0-rc1" +char const* const versionString = "2.4.0-rc2" // clang-format on #if defined(DEBUG) || defined(SANITIZER) From 97457184675dbb22e3839c4970fdc76cbef7bf79 Mon Sep 17 00:00:00 2001 From: Bronek Kozicki Date: Tue, 25 Feb 2025 14:14:10 +0000 Subject: [PATCH 26/29] fix: Remove 'new parent hash' assert (#5313) This assert is known to 
occasionally trigger, without causing errors downstream. It is replaced with a log message. --- src/xrpld/app/misc/TxQ.h | 5 +---- src/xrpld/app/misc/detail/TxQ.cpp | 12 ++++++------ 2 files changed, 7 insertions(+), 10 deletions(-) diff --git a/src/xrpld/app/misc/TxQ.h b/src/xrpld/app/misc/TxQ.h index 877b92e99d9..db5af4509f2 100644 --- a/src/xrpld/app/misc/TxQ.h +++ b/src/xrpld/app/misc/TxQ.h @@ -785,13 +785,10 @@ class TxQ */ std::optional maxSize_; -#if !NDEBUG /** - parentHash_ checks that no unexpected ledger transitions - happen, and is only checked via debug asserts. + parentHash_ used for logging only */ LedgerHash parentHash_{beast::zero}; -#endif /** Most queue operations are done under the master lock, but use this mutex for the RPC "fee" command, which isn't. diff --git a/src/xrpld/app/misc/detail/TxQ.cpp b/src/xrpld/app/misc/detail/TxQ.cpp index a0721e031ef..6e7a2139081 100644 --- a/src/xrpld/app/misc/detail/TxQ.cpp +++ b/src/xrpld/app/misc/detail/TxQ.cpp @@ -1569,12 +1569,12 @@ TxQ::accept(Application& app, OpenView& view) // parent hash, so that transactions paying the same fee are // reordered. LedgerHash const& parentHash = view.info().parentHash; -#if !NDEBUG - auto const startingSize = byFee_.size(); - XRPL_ASSERT( - parentHash != parentHash_, "ripple::TxQ::accept : new parent hash"); - parentHash_ = parentHash; -#endif + if (parentHash == parentHash_) + JLOG(j_.warn()) << "Parent ledger hash unchanged from " << parentHash; + else + parentHash_ = parentHash; + + [[maybe_unused]] auto const startingSize = byFee_.size(); // byFee_ doesn't "own" the candidate objects inside it, so it's // perfectly safe to wipe it and start over, repopulating from // byAccount_. From 37d06bcce826fb174cb17eb544fb486817e3fd72 Mon Sep 17 00:00:00 2001 From: Bronek Kozicki Date: Tue, 25 Feb 2025 16:43:26 +0000 Subject: [PATCH 27/29] Fix Replace `assert` with `XRPL_ASSERT` (#5312) --- src/xrpld/app/misc/detail/AmendmentTable.cpp | 15 +++++++++++---- src/xrpld/overlay/detail/PeerImp.cpp | 2 +- src/xrpld/overlay/detail/PeerImp.h | 3 ++- 3 files changed, 14 insertions(+), 6 deletions(-) diff --git a/src/xrpld/app/misc/detail/AmendmentTable.cpp b/src/xrpld/app/misc/detail/AmendmentTable.cpp index d496fc4e5ee..d7a5ae82474 100644 --- a/src/xrpld/app/misc/detail/AmendmentTable.cpp +++ b/src/xrpld/app/misc/detail/AmendmentTable.cpp @@ -226,7 +226,10 @@ class TrustedVotes toBase58(TokenType::NodePublic, votes.first); if (!votes.second.timeout) { - assert(votes.second.upVotes.empty()); + XRPL_ASSERT( + votes.second.upVotes.empty(), + "ripple::TrustedVotes::recordVotes : received no " + "upvotes"); JLOG(j.debug()) << "recordVotes: Have not received any " "amendment votes from " @@ -242,7 +245,10 @@ class TrustedVotes } else if (votes.second.timeout != newTimeout) { - assert(votes.second.timeout < newTimeout); + XRPL_ASSERT( + votes.second.timeout < newTimeout, + "ripple::TrustedVotes::recordVotes : votes not " + "expired"); using namespace std::chrono; auto const age = duration_cast( newTimeout - *votes.second.timeout); @@ -262,9 +268,10 @@ class TrustedVotes int available = 0; for (auto& validatorVotes : recordedVotes_) { - assert( + XRPL_ASSERT( validatorVotes.second.timeout || - validatorVotes.second.upVotes.empty()); + validatorVotes.second.upVotes.empty(), + "ripple::TrustedVotes::getVotes : valid votes"); if (validatorVotes.second.timeout) ++available; for (uint256 const& amendment : validatorVotes.second.upVotes) diff --git a/src/xrpld/overlay/detail/PeerImp.cpp 
b/src/xrpld/overlay/detail/PeerImp.cpp
index c3656c9445c..8989f89fb3e 100644
--- a/src/xrpld/overlay/detail/PeerImp.cpp
+++ b/src/xrpld/overlay/detail/PeerImp.cpp
@@ -1245,7 +1245,7 @@ PeerImp::handleTransaction(
 {
     XRPL_ASSERT(
         eraseTxQueue != batch,
-        ("ripple::PeerImp::handleTransaction correct function params"));
+        ("ripple::PeerImp::handleTransaction : valid inputs"));
 
     if (tracking_.load() == Tracking::diverged)
         return;
diff --git a/src/xrpld/overlay/detail/PeerImp.h b/src/xrpld/overlay/detail/PeerImp.h
index 14591efbb18..23916db2175 100644
--- a/src/xrpld/overlay/detail/PeerImp.h
+++ b/src/xrpld/overlay/detail/PeerImp.h
@@ -154,7 +154,8 @@ class PeerImp : public Peer,
         update(Resource::Charge f, std::string const& add)
         {
             XRPL_ASSERT(
-                f >= fee, "ripple::PeerImp::ChargeWithContext fee increases");
+                f >= fee,
+                "ripple::PeerImp::ChargeWithContext::update : fee increases");
             fee = f;
             if (!context.empty())
             {

From cd7c62818bc47ee252714bc936f4c4a106016a24 Mon Sep 17 00:00:00 2001
From: Mark Travis
Date: Tue, 25 Feb 2025 17:00:50 -0800
Subject: [PATCH 28/29] fix: Acquire previously failed transaction set from network as new proposal arrives (#5318)

Reset the failure variable.
---
 src/xrpld/app/ledger/detail/TransactionAcquire.cpp | 1 +
 1 file changed, 1 insertion(+)

diff --git a/src/xrpld/app/ledger/detail/TransactionAcquire.cpp b/src/xrpld/app/ledger/detail/TransactionAcquire.cpp
index b3561875e96..fa4758ebcc5 100644
--- a/src/xrpld/app/ledger/detail/TransactionAcquire.cpp
+++ b/src/xrpld/app/ledger/detail/TransactionAcquire.cpp
@@ -262,6 +262,7 @@ TransactionAcquire::stillNeed()
 
     if (timeouts_ > NORM_TIMEOUTS)
         timeouts_ = NORM_TIMEOUTS;
+    failed_ = false;
 }
 
 }  // namespace ripple

From 0a1ca0600f65d6f37f0c8462e3b220cc76b2bbb6 Mon Sep 17 00:00:00 2001
From: Michael Legleux
Date: Wed, 26 Feb 2025 12:41:15 -0800
Subject: [PATCH 29/29] Set version to 2.4.0-rc3
---
 src/libxrpl/protocol/BuildInfo.cpp | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/libxrpl/protocol/BuildInfo.cpp b/src/libxrpl/protocol/BuildInfo.cpp
index 96138873ded..68337a598f3 100644
--- a/src/libxrpl/protocol/BuildInfo.cpp
+++ b/src/libxrpl/protocol/BuildInfo.cpp
@@ -33,7 +33,7 @@ namespace BuildInfo {
 // and follow the format described at http://semver.org/
 //------------------------------------------------------------------------------
 // clang-format off
-char const* const versionString = "2.4.0-rc2"
+char const* const versionString = "2.4.0-rc3"
 // clang-format on
 
 #if defined(DEBUG) || defined(SANITIZER)
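
The counting rule introduced by #5173 above is easier to see in isolation than in the diff. Below is a minimal, self-contained C++ sketch of the behaviour that commit message describes, not the rippled implementation: the type names (VoteRecord, Tally), the std::string validator keys, and the 80% cut-off are illustrative assumptions. Only the rule itself is taken from the patch: a validator counts toward the quorum denominator only while its cached votes are live, and cached votes expire 24 hours after they were last refreshed.

#include <algorithm>
#include <chrono>
#include <cstddef>
#include <map>
#include <optional>
#include <string>
#include <vector>

using AmendmentHash = std::string;
using Clock = std::chrono::system_clock;

// One trusted validator's cached votes. An unseated timeout means we have
// never heard from the validator, or its cached votes have expired.
struct VoteRecord
{
    std::vector<AmendmentHash> upVotes;
    std::optional<Clock::time_point> timeout;
};

struct Tally
{
    std::size_t available = 0;                   // validators actually voting
    std::map<AmendmentHash, std::size_t> votes;  // per-amendment vote counts
};

// Record the newest votes from validators heard this round, then expire any
// record that has not been refreshed within 24 hours.
void
recordAndExpire(
    std::map<std::string, VoteRecord>& recordedVotes,
    std::map<std::string, std::vector<AmendmentHash>> const& heardThisRound,
    Clock::time_point closeTime)
{
    using namespace std::chrono_literals;
    auto const newTimeout = closeTime + 24h;
    for (auto const& [validator, upVotes] : heardThisRound)
    {
        auto const it = recordedVotes.find(validator);
        if (it == recordedVotes.end())
            continue;  // not one of our trusted validators: ignored
        it->second.upVotes = upVotes;
        it->second.timeout = newTimeout;
    }
    for (auto& [validator, record] : recordedVotes)
    {
        if (record.timeout && closeTime > *record.timeout)
        {
            record.timeout.reset();  // expired: stops counting toward quorum
            record.upVotes.clear();
        }
    }
}

// Tally only the validators whose cached votes are still live; the threshold
// is then derived from that count, not from the full UNL size.
Tally
tally(std::map<std::string, VoteRecord> const& recordedVotes)
{
    Tally t;
    for (auto const& [validator, record] : recordedVotes)
    {
        if (!record.timeout)
            continue;  // never heard from, or expired
        ++t.available;
        for (auto const& amendment : record.upVotes)
            ++t.votes[amendment];
    }
    return t;
}

// Illustrative 80% cut-off over the validators actually voting.
std::size_t
threshold(Tally const& t)
{
    return std::max<std::size_t>(1, (t.available * 4) / 5);
}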
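
Similarly, the TxQ change in #5313 above replaces a debug-only assert with a warning when the parent ledger hash has not changed between calls to accept(). A minimal sketch of that control flow, with hypothetical stand-in names (TxQLike, plain std::string hashes) rather than the real types:

#include <iostream>
#include <string>

struct TxQLike
{
    std::string parentHash_;

    void
    accept(std::string const& parentHash)
    {
        // Previously a debug assert required parentHash != parentHash_ and
        // occasionally fired in practice; now the condition is only logged.
        if (parentHash == parentHash_)
            std::cerr << "Parent ledger hash unchanged from " << parentHash
                      << '\n';
        else
            parentHash_ = parentHash;

        // ... wipe and repopulate the fee-ordered queue as before ...
    }
};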