From eba5d19377cf0fb6f010e5c413e2079431bf4078 Mon Sep 17 00:00:00 2001 From: Mayukha Vadari Date: Fri, 12 Jul 2024 17:58:17 -0400 Subject: [PATCH 01/26] Improve error handling in some RPC commands --- Builds/CMake/RippledCore.cmake | 2 +- src/ripple/rpc/handlers/AccountChannels.cpp | 3 + .../rpc/handlers/AccountCurrenciesHandler.cpp | 24 +++++--- src/ripple/rpc/handlers/AccountInfo.cpp | 9 +++ src/ripple/rpc/handlers/AccountLines.cpp | 3 + src/ripple/rpc/handlers/AccountObjects.cpp | 17 ++++-- src/ripple/rpc/handlers/AccountOffers.cpp | 13 +++-- src/ripple/rpc/handlers/AccountTx.cpp | 9 ++- src/ripple/rpc/handlers/NoRippleCheck.cpp | 6 +- src/test/app/PayChan_test.cpp | 19 ++++++ src/test/rpc/AccountCurrencies_test.cpp | 47 ++++++++++++++- src/test/rpc/AccountInfo_test.cpp | 47 ++++++++++++++- ...inesRPC_test.cpp => AccountLines_test.cpp} | 23 +++++++- src/test/rpc/AccountObjects_test.cpp | 58 ++++++++++++++++++- src/test/rpc/AccountOffers_test.cpp | 33 ++++++++++- src/test/rpc/AccountTx_test.cpp | 26 ++++++++- src/test/rpc/NoRippleCheck_test.cpp | 25 +++++++- 17 files changed, 329 insertions(+), 35 deletions(-) rename src/test/rpc/{AccountLinesRPC_test.cpp => AccountLines_test.cpp} (98%) diff --git a/Builds/CMake/RippledCore.cmake b/Builds/CMake/RippledCore.cmake index 6b7b2aae683..44d7061d739 100644 --- a/Builds/CMake/RippledCore.cmake +++ b/Builds/CMake/RippledCore.cmake @@ -1119,7 +1119,7 @@ if (tests) #]===============================] src/test/rpc/AccountCurrencies_test.cpp src/test/rpc/AccountInfo_test.cpp - src/test/rpc/AccountLinesRPC_test.cpp + src/test/rpc/AccountLines_test.cpp src/test/rpc/AccountObjects_test.cpp src/test/rpc/AccountOffers_test.cpp src/test/rpc/AccountSet_test.cpp diff --git a/src/ripple/rpc/handlers/AccountChannels.cpp b/src/ripple/rpc/handlers/AccountChannels.cpp index ebd89b04418..da751d0dae2 100644 --- a/src/ripple/rpc/handlers/AccountChannels.cpp +++ b/src/ripple/rpc/handlers/AccountChannels.cpp @@ -71,6 +71,9 @@ doAccountChannels(RPC::JsonContext& context) if (!params.isMember(jss::account)) return RPC::missing_field_error(jss::account); + if (!params[jss::account].isString()) + return RPC::invalid_field_error(jss::account); + std::shared_ptr ledger; auto result = RPC::lookupLedger(ledger, context); if (!ledger) diff --git a/src/ripple/rpc/handlers/AccountCurrenciesHandler.cpp b/src/ripple/rpc/handlers/AccountCurrenciesHandler.cpp index 45dc8b545ca..0114e25c712 100644 --- a/src/ripple/rpc/handlers/AccountCurrenciesHandler.cpp +++ b/src/ripple/rpc/handlers/AccountCurrenciesHandler.cpp @@ -33,19 +33,29 @@ doAccountCurrencies(RPC::JsonContext& context) { auto& params = context.params; + if (!(params.isMember(jss::account) || params.isMember(jss::ident))) + return RPC::missing_field_error(jss::account); + + std::string strIdent; + if (params.isMember(jss::account)) + { + if (!params[jss::account].isString()) + return RPC::invalid_field_error(jss::account); + strIdent = params[jss::account].asString(); + } + else if (params.isMember(jss::ident)) + { + if (!params[jss::ident].isString()) + return RPC::invalid_field_error(jss::ident); + strIdent = params[jss::ident].asString(); + } + // Get the current ledger std::shared_ptr ledger; auto result = RPC::lookupLedger(ledger, context); if (!ledger) return result; - if (!(params.isMember(jss::account) || params.isMember(jss::ident))) - return RPC::missing_field_error(jss::account); - - std::string const strIdent( - params.isMember(jss::account) ? 
params[jss::account].asString() - : params[jss::ident].asString()); - // Get info on account. auto id = parseBase58(strIdent); if (!id) diff --git a/src/ripple/rpc/handlers/AccountInfo.cpp b/src/ripple/rpc/handlers/AccountInfo.cpp index bd2184f49a3..b4d31f6187c 100644 --- a/src/ripple/rpc/handlers/AccountInfo.cpp +++ b/src/ripple/rpc/handlers/AccountInfo.cpp @@ -23,6 +23,7 @@ #include #include #include +#include #include #include #include @@ -53,9 +54,17 @@ doAccountInfo(RPC::JsonContext& context) std::string strIdent; if (params.isMember(jss::account)) + { + if (!params[jss::account].isString()) + return RPC::invalid_field_error(jss::account); strIdent = params[jss::account].asString(); + } else if (params.isMember(jss::ident)) + { + if (!params[jss::ident].isString()) + return RPC::invalid_field_error(jss::ident); strIdent = params[jss::ident].asString(); + } else return RPC::missing_field_error(jss::account); diff --git a/src/ripple/rpc/handlers/AccountLines.cpp b/src/ripple/rpc/handlers/AccountLines.cpp index 3bfcd225b14..3f7e154690b 100644 --- a/src/ripple/rpc/handlers/AccountLines.cpp +++ b/src/ripple/rpc/handlers/AccountLines.cpp @@ -80,6 +80,9 @@ doAccountLines(RPC::JsonContext& context) if (!params.isMember(jss::account)) return RPC::missing_field_error(jss::account); + if (!params[jss::account].isString()) + return RPC::invalid_field_error(jss::account); + std::shared_ptr ledger; auto result = RPC::lookupLedger(ledger, context); if (!ledger) diff --git a/src/ripple/rpc/handlers/AccountObjects.cpp b/src/ripple/rpc/handlers/AccountObjects.cpp index 2531cd03115..f228272f640 100644 --- a/src/ripple/rpc/handlers/AccountObjects.cpp +++ b/src/ripple/rpc/handlers/AccountObjects.cpp @@ -54,17 +54,19 @@ doAccountNFTs(RPC::JsonContext& context) if (!params.isMember(jss::account)) return RPC::missing_field_error(jss::account); - std::shared_ptr ledger; - auto result = RPC::lookupLedger(ledger, context); - if (ledger == nullptr) - return result; + if (!params[jss::account].isString()) + return RPC::invalid_field_error(jss::account); auto id = parseBase58(params[jss::account].asString()); if (!id) { - RPC::inject_error(rpcACT_MALFORMED, result); - return result; + return rpcError(rpcACT_MALFORMED); } + + std::shared_ptr ledger; + auto result = RPC::lookupLedger(ledger, context); + if (ledger == nullptr) + return result; auto const accountID{id.value()}; if (!ledger->exists(keylet::account(accountID))) @@ -167,6 +169,9 @@ doAccountObjects(RPC::JsonContext& context) if (!params.isMember(jss::account)) return RPC::missing_field_error(jss::account); + if (!params[jss::account].isString()) + return RPC::invalid_field_error(jss::account); + std::shared_ptr ledger; auto result = RPC::lookupLedger(ledger, context); if (ledger == nullptr) diff --git a/src/ripple/rpc/handlers/AccountOffers.cpp b/src/ripple/rpc/handlers/AccountOffers.cpp index 867f888e241..2be50e16880 100644 --- a/src/ripple/rpc/handlers/AccountOffers.cpp +++ b/src/ripple/rpc/handlers/AccountOffers.cpp @@ -60,6 +60,9 @@ doAccountOffers(RPC::JsonContext& context) if (!params.isMember(jss::account)) return RPC::missing_field_error(jss::account); + if (!params[jss::account].isString()) + return RPC::invalid_field_error(jss::account); + std::shared_ptr ledger; auto result = RPC::lookupLedger(ledger, context); if (!ledger) @@ -84,7 +87,7 @@ doAccountOffers(RPC::JsonContext& context) return *err; if (limit == 0) - return rpcError(rpcINVALID_PARAMS); + return RPC::invalid_field_error(jss::limit); Json::Value& jsonOffers(result[jss::offers] = 
Json::arrayValue); std::vector> offers; @@ -101,13 +104,13 @@ doAccountOffers(RPC::JsonContext& context) std::stringstream marker(params[jss::marker].asString()); std::string value; if (!std::getline(marker, value, ',')) - return rpcError(rpcINVALID_PARAMS); + return RPC::invalid_field_error(jss::marker); if (!startAfter.parseHex(value)) - return rpcError(rpcINVALID_PARAMS); + return RPC::invalid_field_error(jss::marker); if (!std::getline(marker, value, ',')) - return rpcError(rpcINVALID_PARAMS); + return RPC::invalid_field_error(jss::marker); try { @@ -115,7 +118,7 @@ doAccountOffers(RPC::JsonContext& context) } catch (boost::bad_lexical_cast&) { - return rpcError(rpcINVALID_PARAMS); + return RPC::invalid_field_error(jss::marker); } // We then must check if the object pointed to by the marker is actually diff --git a/src/ripple/rpc/handlers/AccountTx.cpp b/src/ripple/rpc/handlers/AccountTx.cpp index 7fe7472721f..89d4f49a519 100644 --- a/src/ripple/rpc/handlers/AccountTx.cpp +++ b/src/ripple/rpc/handlers/AccountTx.cpp @@ -426,12 +426,12 @@ doAccountTxJson(RPC::JsonContext& context) if (context.apiVersion > 1u && params.isMember(jss::binary) && !params[jss::binary].isBool()) { - return rpcError(rpcINVALID_PARAMS); + return RPC::invalid_field_error(jss::binary); } if (context.apiVersion > 1u && params.isMember(jss::forward) && !params[jss::forward].isBool()) { - return rpcError(rpcINVALID_PARAMS); + return RPC::invalid_field_error(jss::forward); } args.limit = params.isMember(jss::limit) ? params[jss::limit].asUInt() : 0; @@ -440,7 +440,10 @@ doAccountTxJson(RPC::JsonContext& context) params.isMember(jss::forward) && params[jss::forward].asBool(); if (!params.isMember(jss::account)) - return rpcError(rpcINVALID_PARAMS); + return RPC::missing_field_error(jss::account); + + if (!params[jss::account].isString()) + return RPC::invalid_field_error(jss::account); auto const account = parseBase58(params[jss::account].asString()); diff --git a/src/ripple/rpc/handlers/NoRippleCheck.cpp b/src/ripple/rpc/handlers/NoRippleCheck.cpp index 91d69df96bd..6cb206e2530 100644 --- a/src/ripple/rpc/handlers/NoRippleCheck.cpp +++ b/src/ripple/rpc/handlers/NoRippleCheck.cpp @@ -66,6 +66,10 @@ doNoRippleCheck(RPC::JsonContext& context) if (!params.isMember("role")) return RPC::missing_field_error("role"); + + if (!params[jss::account].isString()) + return RPC::invalid_field_error(jss::account); + bool roleGateway = false; { std::string const role = params["role"].asString(); @@ -90,7 +94,7 @@ doNoRippleCheck(RPC::JsonContext& context) if (context.apiVersion > 1u && params.isMember(jss::transactions) && !params[jss::transactions].isBool()) { - return rpcError(rpcINVALID_PARAMS); + return RPC::invalid_field_error(jss::transactions); } std::shared_ptr ledger; diff --git a/src/test/app/PayChan_test.cpp b/src/test/app/PayChan_test.cpp index a479e43b170..4bd23a4edb8 100644 --- a/src/test/app/PayChan_test.cpp +++ b/src/test/app/PayChan_test.cpp @@ -873,6 +873,25 @@ struct PayChan_test : public beast::unit_test::suite auto const chan1Str = to_string(channel(alice, bob, env.seq(alice))); env(create(alice, bob, channelFunds, settleDelay, pk)); env.close(); + { + // test account non-string + auto testInvalidAccountParam = [&](auto const& param) { + Json::Value params; + params[jss::account] = param; + auto jrr = env.rpc( + "json", "account_channels", to_string(params))[jss::result]; + BEAST_EXPECT(jrr[jss::error] == "invalidParams"); + BEAST_EXPECT( + jrr[jss::error_message] == "Invalid field 'account'."); + }; + + 
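+            // (Illustrative aside: each call below supplies a non-string
+            // JSON value for `account`; the handler is expected to answer
+            // with roughly
+            //   {"error": "invalidParams",
+            //    "error_message": "Invalid field 'account'."}
+            // which is what the lambda above asserts. Other fields, such
+            // as `error_code` and `status`, may also be present.)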
testInvalidAccountParam(1); + testInvalidAccountParam(1.1); + testInvalidAccountParam(true); + testInvalidAccountParam(Json::Value(Json::nullValue)); + testInvalidAccountParam(Json::Value(Json::objectValue)); + testInvalidAccountParam(Json::Value(Json::arrayValue)); + } { auto const r = env.rpc("account_channels", alice.human(), bob.human()); diff --git a/src/test/rpc/AccountCurrencies_test.cpp b/src/test/rpc/AccountCurrencies_test.cpp index c3e46a3e66c..64b73743024 100644 --- a/src/test/rpc/AccountCurrencies_test.cpp +++ b/src/test/rpc/AccountCurrencies_test.cpp @@ -39,6 +39,7 @@ class AccountCurrencies_test : public beast::unit_test::suite { // invalid ledger (hash) Json::Value params; + params[jss::account] = Account{"bob"}.human(); params[jss::ledger_hash] = 1; auto const result = env.rpc( "json", @@ -56,6 +57,50 @@ class AccountCurrencies_test : public beast::unit_test::suite result[jss::error_message] == "Missing field 'account'."); } + { + // test account non-string + auto testInvalidAccountParam = [&](auto const& param) { + Json::Value params; + params[jss::account] = param; + auto jrr = env.rpc( + "json", + "account_currencies", + to_string(params))[jss::result]; + BEAST_EXPECT(jrr[jss::error] == "invalidParams"); + BEAST_EXPECT( + jrr[jss::error_message] == "Invalid field 'account'."); + }; + + testInvalidAccountParam(1); + testInvalidAccountParam(1.1); + testInvalidAccountParam(true); + testInvalidAccountParam(Json::Value(Json::nullValue)); + testInvalidAccountParam(Json::Value(Json::objectValue)); + testInvalidAccountParam(Json::Value(Json::arrayValue)); + } + + { + // test ident non-string + auto testInvalidIdentParam = [&](auto const& param) { + Json::Value params; + params[jss::ident] = param; + auto jrr = env.rpc( + "json", + "account_currencies", + to_string(params))[jss::result]; + BEAST_EXPECT(jrr[jss::error] == "invalidParams"); + BEAST_EXPECT( + jrr[jss::error_message] == "Invalid field 'ident'."); + }; + + testInvalidIdentParam(1); + testInvalidIdentParam(1.1); + testInvalidIdentParam(true); + testInvalidIdentParam(Json::Value(Json::nullValue)); + testInvalidIdentParam(Json::Value(Json::objectValue)); + testInvalidIdentParam(Json::Value(Json::arrayValue)); + } + { Json::Value params; params[jss::account] = @@ -198,6 +243,6 @@ class AccountCurrencies_test : public beast::unit_test::suite } }; -BEAST_DEFINE_TESTSUITE(AccountCurrencies, app, ripple); +BEAST_DEFINE_TESTSUITE(AccountCurrencies, rpc, ripple); } // namespace ripple diff --git a/src/test/rpc/AccountInfo_test.cpp b/src/test/rpc/AccountInfo_test.cpp index ff23a5e53c1..2d6832cee05 100644 --- a/src/test/rpc/AccountInfo_test.cpp +++ b/src/test/rpc/AccountInfo_test.cpp @@ -36,6 +36,7 @@ class AccountInfo_test : public beast::unit_test::suite void testErrors() { + testcase("Errors"); using namespace jtx; Env env(*this); { @@ -78,12 +79,53 @@ class AccountInfo_test : public beast::unit_test::suite BEAST_EXPECT( info[jss::result][jss::error_message] == "Account malformed."); } + { + // Cannot pass a non-string into the `account` param + + auto testInvalidAccountParam = [&](auto const& param) { + Json::Value params; + params[jss::account] = param; + auto jrr = env.rpc( + "json", "account_info", to_string(params))[jss::result]; + BEAST_EXPECT(jrr[jss::error] == "invalidParams"); + BEAST_EXPECT( + jrr[jss::error_message] == "Invalid field 'account'."); + }; + + testInvalidAccountParam(1); + testInvalidAccountParam(1.1); + testInvalidAccountParam(true); + testInvalidAccountParam(Json::Value(Json::nullValue)); + 
testInvalidAccountParam(Json::Value(Json::objectValue)); + testInvalidAccountParam(Json::Value(Json::arrayValue)); + } + { + // Cannot pass a non-string into the `ident` param + + auto testInvalidIdentParam = [&](auto const& param) { + Json::Value params; + params[jss::ident] = param; + auto jrr = env.rpc( + "json", "account_info", to_string(params))[jss::result]; + BEAST_EXPECT(jrr[jss::error] == "invalidParams"); + BEAST_EXPECT( + jrr[jss::error_message] == "Invalid field 'ident'."); + }; + + testInvalidIdentParam(1); + testInvalidIdentParam(1.1); + testInvalidIdentParam(true); + testInvalidIdentParam(Json::Value(Json::nullValue)); + testInvalidIdentParam(Json::Value(Json::objectValue)); + testInvalidIdentParam(Json::Value(Json::arrayValue)); + } } // Test the "signer_lists" argument in account_info. void testSignerLists() { + testcase("Signer lists"); using namespace jtx; Env env(*this); Account const alice{"alice"}; @@ -205,6 +247,7 @@ class AccountInfo_test : public beast::unit_test::suite void testSignerListsApiVersion2() { + testcase("Signer lists APIv2"); using namespace jtx; Env env{*this}; Account const alice{"alice"}; @@ -326,6 +369,7 @@ class AccountInfo_test : public beast::unit_test::suite void testSignerListsV2() { + testcase("Signer lists v2"); using namespace jtx; Env env(*this); Account const alice{"alice"}; @@ -515,6 +559,7 @@ class AccountInfo_test : public beast::unit_test::suite void testAccountFlags(FeatureBitset const& features) { + testcase("Account flags"); using namespace jtx; Env env(*this, features); @@ -652,7 +697,7 @@ class AccountInfo_test : public beast::unit_test::suite } }; -BEAST_DEFINE_TESTSUITE(AccountInfo, app, ripple); +BEAST_DEFINE_TESTSUITE(AccountInfo, rpc, ripple); } // namespace test } // namespace ripple diff --git a/src/test/rpc/AccountLinesRPC_test.cpp b/src/test/rpc/AccountLines_test.cpp similarity index 98% rename from src/test/rpc/AccountLinesRPC_test.cpp rename to src/test/rpc/AccountLines_test.cpp index 04688156d12..c02cc25e1a8 100644 --- a/src/test/rpc/AccountLinesRPC_test.cpp +++ b/src/test/rpc/AccountLines_test.cpp @@ -27,7 +27,7 @@ namespace ripple { namespace RPC { -class AccountLinesRPC_test : public beast::unit_test::suite +class AccountLines_test : public beast::unit_test::suite { public: void @@ -55,6 +55,25 @@ class AccountLinesRPC_test : public beast::unit_test::suite lines[jss::result][jss::error_message] == RPC::make_error(rpcACT_MALFORMED)[jss::error_message]); } + { + // test account non-string + auto testInvalidAccountParam = [&](auto const& param) { + Json::Value params; + params[jss::account] = param; + auto jrr = env.rpc( + "json", "account_lines", to_string(params))[jss::result]; + BEAST_EXPECT(jrr[jss::error] == "invalidParams"); + BEAST_EXPECT( + jrr[jss::error_message] == "Invalid field 'account'."); + }; + + testInvalidAccountParam(1); + testInvalidAccountParam(1.1); + testInvalidAccountParam(true); + testInvalidAccountParam(Json::Value(Json::nullValue)); + testInvalidAccountParam(Json::Value(Json::objectValue)); + testInvalidAccountParam(Json::Value(Json::arrayValue)); + } Account const alice{"alice"}; { // account_lines on an unfunded account. 
@@ -1474,7 +1493,7 @@ class AccountLinesRPC_test : public beast::unit_test::suite } }; -BEAST_DEFINE_TESTSUITE(AccountLinesRPC, app, ripple); +BEAST_DEFINE_TESTSUITE(AccountLines, rpc, ripple); } // namespace RPC } // namespace ripple diff --git a/src/test/rpc/AccountObjects_test.cpp b/src/test/rpc/AccountObjects_test.cpp index 17217f2c880..46b092c4a6d 100644 --- a/src/test/rpc/AccountObjects_test.cpp +++ b/src/test/rpc/AccountObjects_test.cpp @@ -123,8 +123,30 @@ class AccountObjects_test : public beast::unit_test::suite // test error on no account { - auto resp = env.rpc("json", "account_objects"); - BEAST_EXPECT(resp[jss::error_message] == "Syntax error."); + Json::Value params; + auto resp = env.rpc("json", "account_objects", to_string(params)); + BEAST_EXPECT( + resp[jss::result][jss::error_message] == + "Missing field 'account'."); + } + // test account non-string + { + auto testInvalidAccountParam = [&](auto const& param) { + Json::Value params; + params[jss::account] = param; + auto jrr = env.rpc( + "json", "account_objects", to_string(params))[jss::result]; + BEAST_EXPECT(jrr[jss::error] == "invalidParams"); + BEAST_EXPECT( + jrr[jss::error_message] == "Invalid field 'account'."); + }; + + testInvalidAccountParam(1); + testInvalidAccountParam(1.1); + testInvalidAccountParam(true); + testInvalidAccountParam(Json::Value(Json::nullValue)); + testInvalidAccountParam(Json::Value(Json::objectValue)); + testInvalidAccountParam(Json::Value(Json::arrayValue)); } // test error on malformed account string. { @@ -1032,6 +1054,35 @@ class AccountObjects_test : public beast::unit_test::suite BEAST_EXPECT(acct_objs_is_size(acct_objs(gw, jss::hashes), 0)); } + void + testAccountNFTs() + { + testcase("account_nfts"); + + using namespace jtx; + Env env(*this); + + // test validation + { + auto testInvalidAccountParam = [&](auto const& param) { + Json::Value params; + params[jss::account] = param; + auto jrr = env.rpc( + "json", "account_nfts", to_string(params))[jss::result]; + BEAST_EXPECT(jrr[jss::error] == "invalidParams"); + BEAST_EXPECT( + jrr[jss::error_message] == "Invalid field 'account'."); + }; + + testInvalidAccountParam(1); + testInvalidAccountParam(1.1); + testInvalidAccountParam(true); + testInvalidAccountParam(Json::Value(Json::nullValue)); + testInvalidAccountParam(Json::Value(Json::objectValue)); + testInvalidAccountParam(Json::Value(Json::arrayValue)); + } + } + void run() override { @@ -1039,10 +1090,11 @@ class AccountObjects_test : public beast::unit_test::suite testUnsteppedThenStepped(); testUnsteppedThenSteppedWithNFTs(); testObjectTypes(); + testAccountNFTs(); } }; -BEAST_DEFINE_TESTSUITE(AccountObjects, app, ripple); +BEAST_DEFINE_TESTSUITE(AccountObjects, rpc, ripple); } // namespace test } // namespace ripple diff --git a/src/test/rpc/AccountOffers_test.cpp b/src/test/rpc/AccountOffers_test.cpp index d94442ea8e5..635dd1a63b4 100644 --- a/src/test/rpc/AccountOffers_test.cpp +++ b/src/test/rpc/AccountOffers_test.cpp @@ -37,6 +37,8 @@ class AccountOffers_test : public beast::unit_test::suite void testNonAdminMinLimit() { + testcase("Non-Admin Min Limit"); + using namespace jtx; Env env{*this, envconfig(no_admin)}; Account const gw("G1"); @@ -81,6 +83,9 @@ class AccountOffers_test : public beast::unit_test::suite void testSequential(bool asAdmin) { + testcase( + std::string("Sequential - ") + (asAdmin ? "admin" : "non-admin")); + using namespace jtx; Env env{*this, asAdmin ? 
envconfig() : envconfig(no_admin)}; Account const gw("G1"); @@ -215,6 +220,8 @@ class AccountOffers_test : public beast::unit_test::suite void testBadInput() { + testcase("Bad input"); + using namespace jtx; Env env(*this); Account const gw("G1"); @@ -233,6 +240,26 @@ class AccountOffers_test : public beast::unit_test::suite BEAST_EXPECT(jrr[jss::error_message] == "Syntax error."); } + { + // test account non-string + auto testInvalidAccountParam = [&](auto const& param) { + Json::Value params; + params[jss::account] = param; + auto jrr = env.rpc( + "json", "account_offers", to_string(params))[jss::result]; + BEAST_EXPECT(jrr[jss::error] == "invalidParams"); + BEAST_EXPECT( + jrr[jss::error_message] == "Invalid field 'account'."); + }; + + testInvalidAccountParam(1); + testInvalidAccountParam(1.1); + testInvalidAccountParam(true); + testInvalidAccountParam(Json::Value(Json::nullValue)); + testInvalidAccountParam(Json::Value(Json::objectValue)); + testInvalidAccountParam(Json::Value(Json::arrayValue)); + } + { // empty string account Json::Value jvParams; @@ -282,7 +309,9 @@ class AccountOffers_test : public beast::unit_test::suite jvParams.toStyledString())[jss::result]; BEAST_EXPECT(jrr[jss::error] == "invalidParams"); BEAST_EXPECT(jrr[jss::status] == "error"); - BEAST_EXPECT(jrr[jss::error_message] == "Invalid parameters."); + BEAST_EXPECTS( + jrr[jss::error_message] == "Invalid field 'marker'.", + jrr.toStyledString()); } { @@ -326,7 +355,7 @@ class AccountOffers_test : public beast::unit_test::suite } }; -BEAST_DEFINE_TESTSUITE(AccountOffers, app, ripple); +BEAST_DEFINE_TESTSUITE(AccountOffers, rpc, ripple); } // namespace test } // namespace ripple diff --git a/src/test/rpc/AccountTx_test.cpp b/src/test/rpc/AccountTx_test.cpp index 6601996925e..2cec59d7198 100644 --- a/src/test/rpc/AccountTx_test.cpp +++ b/src/test/rpc/AccountTx_test.cpp @@ -109,6 +109,7 @@ class AccountTx_test : public beast::unit_test::suite void testParameters(unsigned int apiVersion) { + testcase("Parameters APIv" + std::to_string(apiVersion)); using namespace test::jtx; Env env(*this); @@ -353,6 +354,25 @@ class AccountTx_test : public beast::unit_test::suite env.rpc("json", "account_tx", to_string(p)), rpcLGR_IDX_MALFORMED)); } + // test account non-string + { + auto testInvalidAccountParam = [&](auto const& param) { + Json::Value params; + params[jss::account] = param; + auto jrr = env.rpc( + "json", "account_tx", to_string(params))[jss::result]; + BEAST_EXPECT(jrr[jss::error] == "invalidParams"); + BEAST_EXPECT( + jrr[jss::error_message] == "Invalid field 'account'."); + }; + + testInvalidAccountParam(1); + testInvalidAccountParam(1.1); + testInvalidAccountParam(true); + testInvalidAccountParam(Json::Value(Json::nullValue)); + testInvalidAccountParam(Json::Value(Json::objectValue)); + testInvalidAccountParam(Json::Value(Json::arrayValue)); + } // test binary and forward for bool/non bool values { Json::Value p{jParms}; @@ -388,6 +408,8 @@ class AccountTx_test : public beast::unit_test::suite void testContents() { + testcase("Contents"); + // Get results for all transaction types that can be associated // with an account. Start by generating all transaction types. using namespace test::jtx; @@ -600,6 +622,8 @@ class AccountTx_test : public beast::unit_test::suite void testAccountDelete() { + testcase("AccountDelete"); + // Verify that if an account is resurrected then the account_tx RPC // command still recovers all transactions on that account before // and after resurrection. 
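        // (Outline of the scenario exercised below: fund the account,
        // submit transactions, delete it via AccountDelete, fund it again
        // to "resurrect" it, then expect account_tx to report transactions
        // from both incarnations.)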
@@ -740,7 +764,7 @@ class AccountTx_test : public beast::unit_test::suite testAccountDelete(); } }; -BEAST_DEFINE_TESTSUITE(AccountTx, app, ripple); +BEAST_DEFINE_TESTSUITE(AccountTx, rpc, ripple); } // namespace test } // namespace ripple diff --git a/src/test/rpc/NoRippleCheck_test.cpp b/src/test/rpc/NoRippleCheck_test.cpp index 3d34f55c90d..9858adf8466 100644 --- a/src/test/rpc/NoRippleCheck_test.cpp +++ b/src/test/rpc/NoRippleCheck_test.cpp @@ -64,6 +64,27 @@ class NoRippleCheck_test : public beast::unit_test::suite BEAST_EXPECT(result[jss::error_message] == "Missing field 'role'."); } + // test account non-string + { + auto testInvalidAccountParam = [&](auto const& param) { + Json::Value params; + params[jss::account] = param; + params[jss::role] = "user"; + auto jrr = env.rpc( + "json", "noripple_check", to_string(params))[jss::result]; + BEAST_EXPECT(jrr[jss::error] == "invalidParams"); + BEAST_EXPECT( + jrr[jss::error_message] == "Invalid field 'account'."); + }; + + testInvalidAccountParam(1); + testInvalidAccountParam(1.1); + testInvalidAccountParam(true); + testInvalidAccountParam(Json::Value(Json::nullValue)); + testInvalidAccountParam(Json::Value(Json::objectValue)); + testInvalidAccountParam(Json::Value(Json::arrayValue)); + } + { // invalid role field Json::Value params; params[jss::account] = alice.human(); @@ -369,12 +390,12 @@ class NoRippleCheckLimits_test : public beast::unit_test::suite } }; -BEAST_DEFINE_TESTSUITE(NoRippleCheck, app, ripple); +BEAST_DEFINE_TESTSUITE(NoRippleCheck, rpc, ripple); // These tests that deal with limit amounts are slow because of the // offer/account setup, so making them manual -- the additional coverage // provided by them is minimal -BEAST_DEFINE_TESTSUITE_MANUAL_PRIO(NoRippleCheckLimits, app, ripple, 1); +BEAST_DEFINE_TESTSUITE_MANUAL_PRIO(NoRippleCheckLimits, rpc, ripple, 1); } // namespace ripple From c157816017ae869c4fb6ca3e2bcac97e5a33ef65 Mon Sep 17 00:00:00 2001 From: John Freeman Date: Mon, 22 Jul 2024 15:47:01 -0400 Subject: [PATCH 02/26] Use error codes throughout fast Base58 implementation --- src/ripple/protocol/impl/b58_utils.h | 24 +++++++------- src/ripple/protocol/impl/token_errors.h | 1 + src/ripple/protocol/impl/tokens.cpp | 26 ++++++++++++--- src/test/basics/base58_test.cpp | 42 +++++++++++++++++++++++-- 4 files changed, 75 insertions(+), 18 deletions(-) diff --git a/src/ripple/protocol/impl/b58_utils.h b/src/ripple/protocol/impl/b58_utils.h index c3bb0c03750..1e7519f0eb0 100644 --- a/src/ripple/protocol/impl/b58_utils.h +++ b/src/ripple/protocol/impl/b58_utils.h @@ -21,6 +21,7 @@ #define RIPPLE_PROTOCOL_B58_UTILS_H_INCLUDED #include +#include #include #include @@ -71,12 +72,12 @@ carrying_add(std::uint64_t a, std::uint64_t b) // (i.e a[0] is the 2^0 coefficient, a[n] is the 2^(64*n) coefficient) // panics if overflows (this is a specialized adder for b58 decoding. // it should never overflow). 
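// (Worked example of the little-endian limb layout described above:
// with a = {2^64 - 1, 0} and b = 1, the addition carries out of a[0],
// leaving a = {0, 1}, i.e. the value 2^64.)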
-inline void +[[nodiscard]] inline TokenCodecErrc inplace_bigint_add(std::span a, std::uint64_t b) { if (a.size() <= 1) { - ripple::LogicError("Input span too small for inplace_bigint_add"); + return TokenCodecErrc::inputTooSmall; } std::uint64_t carry; @@ -86,28 +87,29 @@ inplace_bigint_add(std::span a, std::uint64_t b) { if (!carry) { - return; + return TokenCodecErrc::success; } std::tie(v, carry) = carrying_add(v, 1); } if (carry) { - LogicError("Overflow in inplace_bigint_add"); + return TokenCodecErrc::overflowAdd; } + return TokenCodecErrc::success; } -inline void +[[nodiscard]] inline TokenCodecErrc inplace_bigint_mul(std::span a, std::uint64_t b) { if (a.empty()) { - LogicError("Empty span passed to inplace_bigint_mul"); + return TokenCodecErrc::inputTooSmall; } auto const last_index = a.size() - 1; if (a[last_index] != 0) { - LogicError("Non-zero element in inplace_bigint_mul last index"); + return TokenCodecErrc::inputTooLarge; } std::uint64_t carry = 0; @@ -116,7 +118,9 @@ inplace_bigint_mul(std::span a, std::uint64_t b) std::tie(coeff, carry) = carrying_mul(coeff, b, carry); } a[last_index] = carry; + return TokenCodecErrc::success; } + // divide a "big uint" value inplace and return the mod // numerator is stored so smallest coefficients come first [[nodiscard]] inline std::uint64_t @@ -166,11 +170,7 @@ inplace_bigint_div_rem(std::span numerator, std::uint64_t divisor) b58_10_to_b58_be(std::uint64_t input) { constexpr std::uint64_t B_58_10 = 430804206899405824; // 58^10; - if (input >= B_58_10) - { - LogicError("Input to b58_10_to_b58_be equals or exceeds 58^10."); - } - + assert(input < B_58_10); constexpr std::size_t resultSize = 10; std::array result{}; int i = 0; diff --git a/src/ripple/protocol/impl/token_errors.h b/src/ripple/protocol/impl/token_errors.h index 59b09974149..23a46bd1c5e 100644 --- a/src/ripple/protocol/impl/token_errors.h +++ b/src/ripple/protocol/impl/token_errors.h @@ -32,6 +32,7 @@ enum class TokenCodecErrc { mismatchedTokenType, mismatchedChecksum, invalidEncodingChar, + overflowAdd, unknown, }; } diff --git a/src/ripple/protocol/impl/tokens.cpp b/src/ripple/protocol/impl/tokens.cpp index 8445eec38ca..a166ac733cd 100644 --- a/src/ripple/protocol/impl/tokens.cpp +++ b/src/ripple/protocol/impl/tokens.cpp @@ -467,6 +467,11 @@ b256_to_b58_be(std::span input, std::span out) { continue; } + static constexpr std::uint64_t B_58_10 = 430804206899405824; // 58^10; + if (base_58_10_coeff[i] >= B_58_10) + { + return Unexpected(TokenCodecErrc::inputTooLarge); + } std::array const b58_be = ripple::b58_fast::detail::b58_10_to_b58_be(base_58_10_coeff[i]); std::size_t to_skip = 0; @@ -565,10 +570,23 @@ b58_to_b256_be(std::string_view input, std::span out) for (int i = 1; i < num_b_58_10_coeffs; ++i) { std::uint64_t const c = b_58_10_coeff[i]; - ripple::b58_fast::detail::inplace_bigint_mul( - std::span(&result[0], cur_result_size + 1), B_58_10); - ripple::b58_fast::detail::inplace_bigint_add( - std::span(&result[0], cur_result_size + 1), c); + + { + auto code = ripple::b58_fast::detail::inplace_bigint_mul( + std::span(&result[0], cur_result_size + 1), B_58_10); + if (code != TokenCodecErrc::success) + { + return Unexpected(code); + } + } + { + auto code = ripple::b58_fast::detail::inplace_bigint_add( + std::span(&result[0], cur_result_size + 1), c); + if (code != TokenCodecErrc::success) + { + return Unexpected(code); + } + } if (result[cur_result_size] != 0) { cur_result_size += 1; diff --git a/src/test/basics/base58_test.cpp b/src/test/basics/base58_test.cpp index 
6f3d495d7a9..8b79d2729d7 100644 --- a/src/test/basics/base58_test.cpp +++ b/src/test/basics/base58_test.cpp @@ -177,6 +177,7 @@ class base58_test : public beast::unit_test::suite constexpr std::size_t iters = 100000; auto eng = randEngine(); std::uniform_int_distribution dist; + std::uniform_int_distribution dist1(1); for (int i = 0; i < iters; ++i) { std::uint64_t const d = dist(eng); @@ -209,12 +210,31 @@ class base58_test : public beast::unit_test::suite auto const refAdd = boostBigInt + d; - b58_fast::detail::inplace_bigint_add( + auto const result = b58_fast::detail::inplace_bigint_add( std::span(bigInt.data(), bigInt.size()), d); + BEAST_EXPECT(result == TokenCodecErrc::success); auto const foundAdd = multiprecision_utils::toBoostMP(bigInt); BEAST_EXPECT(refAdd == foundAdd); } for (int i = 0; i < iters; ++i) + { + std::uint64_t const d = dist1(eng); + // Force overflow + std::vector bigInt( + 5, std::numeric_limits::max()); + + auto const boostBigInt = multiprecision_utils::toBoostMP( + std::span(bigInt.data(), bigInt.size())); + + auto const refAdd = boostBigInt + d; + + auto const result = b58_fast::detail::inplace_bigint_add( + std::span(bigInt.data(), bigInt.size()), d); + BEAST_EXPECT(result == TokenCodecErrc::overflowAdd); + auto const foundAdd = multiprecision_utils::toBoostMP(bigInt); + BEAST_EXPECT(refAdd != foundAdd); + } + for (int i = 0; i < iters; ++i) { std::uint64_t const d = dist(eng); auto bigInt = multiprecision_utils::randomBigInt(/* minSize */ 2); @@ -226,11 +246,29 @@ class base58_test : public beast::unit_test::suite auto const refMul = boostBigInt * d; - b58_fast::detail::inplace_bigint_mul( + auto const result = b58_fast::detail::inplace_bigint_mul( std::span(bigInt.data(), bigInt.size()), d); + BEAST_EXPECT(result == TokenCodecErrc::success); auto const foundMul = multiprecision_utils::toBoostMP(bigInt); BEAST_EXPECT(refMul == foundMul); } + for (int i = 0; i < iters; ++i) + { + std::uint64_t const d = dist1(eng); + // Force overflow + std::vector bigInt( + 5, std::numeric_limits::max()); + auto const boostBigInt = multiprecision_utils::toBoostMP( + std::span(bigInt.data(), bigInt.size())); + + auto const refMul = boostBigInt * d; + + auto const result = b58_fast::detail::inplace_bigint_mul( + std::span(bigInt.data(), bigInt.size()), d); + BEAST_EXPECT(result == TokenCodecErrc::inputTooLarge); + auto const foundMul = multiprecision_utils::toBoostMP(bigInt); + BEAST_EXPECT(refMul != foundMul); + } } void From e6ef0fc26cb8d4db25075eaa1fe21fcc7f984751 Mon Sep 17 00:00:00 2001 From: Ed Hennis Date: Mon, 22 Jul 2024 18:08:16 -0400 Subject: [PATCH 03/26] Set version to 2.2.1 --- src/ripple/protocol/impl/BuildInfo.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/ripple/protocol/impl/BuildInfo.cpp b/src/ripple/protocol/impl/BuildInfo.cpp index 8a83011e906..e6359cd3a52 100644 --- a/src/ripple/protocol/impl/BuildInfo.cpp +++ b/src/ripple/protocol/impl/BuildInfo.cpp @@ -33,7 +33,7 @@ namespace BuildInfo { // and follow the format described at http://semver.org/ //------------------------------------------------------------------------------ // clang-format off -char const* const versionString = "2.2.0" +char const* const versionString = "2.2.1" // clang-format on #if defined(DEBUG) || defined(SANITIZER) From a39720e94a3fb5be07d0b526c840f2e607db42cb Mon Sep 17 00:00:00 2001 From: Mayukha Vadari Date: Tue, 30 Jul 2024 11:18:25 -0400 Subject: [PATCH 04/26] fix: change error for invalid `feature` param in `feature` RPC (#5063) * Returns an "Invalid 
parameters" error if the `feature` parameter is provided and is not a string. --- src/test/rpc/Feature_test.cpp | 25 ++++++++++++++++++++++--- src/xrpld/rpc/handlers/Feature1.cpp | 9 +++++++++ 2 files changed, 31 insertions(+), 3 deletions(-) diff --git a/src/test/rpc/Feature_test.cpp b/src/test/rpc/Feature_test.cpp index 488255542f2..12d4b27745c 100644 --- a/src/test/rpc/Feature_test.cpp +++ b/src/test/rpc/Feature_test.cpp @@ -229,9 +229,28 @@ class Feature_test : public beast::unit_test::suite using namespace test::jtx; Env env{*this}; - auto jrr = env.rpc("feature", "AllTheThings")[jss::result]; - BEAST_EXPECT(jrr[jss::error] == "badFeature"); - BEAST_EXPECT(jrr[jss::error_message] == "Feature unknown or invalid."); + auto testInvalidParam = [&](auto const& param) { + Json::Value params; + params[jss::feature] = param; + auto jrr = + env.rpc("json", "feature", to_string(params))[jss::result]; + BEAST_EXPECT(jrr[jss::error] == "invalidParams"); + BEAST_EXPECT(jrr[jss::error_message] == "Invalid parameters."); + }; + + testInvalidParam(1); + testInvalidParam(1.1); + testInvalidParam(true); + testInvalidParam(Json::Value(Json::nullValue)); + testInvalidParam(Json::Value(Json::objectValue)); + testInvalidParam(Json::Value(Json::arrayValue)); + + { + auto jrr = env.rpc("feature", "AllTheThings")[jss::result]; + BEAST_EXPECT(jrr[jss::error] == "badFeature"); + BEAST_EXPECT( + jrr[jss::error_message] == "Feature unknown or invalid."); + } } void diff --git a/src/xrpld/rpc/handlers/Feature1.cpp b/src/xrpld/rpc/handlers/Feature1.cpp index d4499f120ef..c06756ca00a 100644 --- a/src/xrpld/rpc/handlers/Feature1.cpp +++ b/src/xrpld/rpc/handlers/Feature1.cpp @@ -38,6 +38,15 @@ doFeature(RPC::JsonContext& context) if (context.app.config().reporting()) return rpcError(rpcREPORTING_UNSUPPORTED); + if (context.params.isMember(jss::feature)) + { + // ensure that the `feature` param is a string + if (!context.params[jss::feature].isString()) + { + return rpcError(rpcINVALID_PARAMS); + } + } + bool const isAdmin = context.role == Role::ADMIN; // Get majority amendment status majorityAmendments_t majorities; From b9b75ddcf5bd5dfda98c34480326e4304694a731 Mon Sep 17 00:00:00 2001 From: Scott Schurr Date: Tue, 30 Jul 2024 09:47:04 -0700 Subject: [PATCH 05/26] Remove unused constants from resource/Fees.h (#4856) --- include/xrpl/resource/Fees.h | 41 ++++++++++++++++-------------------- 1 file changed, 18 insertions(+), 23 deletions(-) diff --git a/include/xrpl/resource/Fees.h b/include/xrpl/resource/Fees.h index d3750ec8282..1eb1a9bd725 100644 --- a/include/xrpl/resource/Fees.h +++ b/include/xrpl/resource/Fees.h @@ -25,43 +25,38 @@ namespace ripple { namespace Resource { +// clang-format off /** Schedule of fees charged for imposing load on the server. 
 */
/** @{ */
-extern Charge const
-    feeInvalidRequest;  // A request that we can immediately tell is invalid
+extern Charge const feeInvalidRequest;    // A request that we can immediately
+                                          // tell is invalid
 extern Charge const feeRequestNoReply;    // A request that we cannot satisfy
-extern Charge const feeInvalidSignature;  // An object whose signature we had to
-                                          // check and it failed
+extern Charge const feeInvalidSignature;  // An object whose signature we had
+                                          // to check and it failed
 extern Charge const feeUnwantedData;      // Data we have no use for
-extern Charge const feeBadData;  // Data we have to verify before rejecting
+extern Charge const feeBadData;           // Data we have to verify before
+                                          // rejecting

 // RPC loads
-extern Charge const
-    feeInvalidRPC;  // An RPC request that we can immediately tell is invalid.
-extern Charge const feeReferenceRPC;  // A default "reference" unspecified load
-extern Charge const feeExceptionRPC;  // An RPC load that causes an exception
-extern Charge const feeLightRPC;  // A normal RPC command
-extern Charge const feeLowBurdenRPC;  // A slightly burdensome RPC load
-extern Charge const feeMediumBurdenRPC;  // A somewhat burdensome RPC load
-extern Charge const feeHighBurdenRPC;  // A very burdensome RPC load
-extern Charge const feePathFindUpdate;  // An update to an existing PF request
+extern Charge const feeInvalidRPC;        // An RPC request that we can
+                                          // immediately tell is invalid.
+extern Charge const feeReferenceRPC;      // A default "reference" unspecified
+                                          // load
+extern Charge const feeExceptionRPC;      // RPC load that causes an exception
+extern Charge const feeMediumBurdenRPC;   // A somewhat burdensome RPC load
+extern Charge const feeHighBurdenRPC;     // A very burdensome RPC load

 // Peer loads
 extern Charge const feeLightPeer;         // Requires no reply
-extern Charge const feeLowBurdenPeer;   // Quick/cheap, slight reply
 extern Charge const feeMediumBurdenPeer;  // Requires some work
 extern Charge const feeHighBurdenPeer;    // Extensive work

-// Good things
-extern Charge const
-    feeNewTrustedNote;  // A new transaction/validation/proposal we trust
-extern Charge const feeNewValidTx;  // A new, valid transaction
-extern Charge const feeSatisfiedRequest;  // Data we requested
-
 // Administrative
-extern Charge const feeWarning;  // The cost of receiving a warning
-extern Charge const feeDrop;  // The cost of being dropped for excess load
+extern Charge const feeWarning;           // The cost of receiving a warning
+extern Charge const feeDrop;              // The cost of being dropped for
+                                          // excess load
 /** @} */
+// clang-format on

 }  // namespace Resource
 }  // namespace ripple

From f5a349558e0854078917b731ad1cba316ae71650 Mon Sep 17 00:00:00 2001
From: Ed Hennis
Date: Tue, 30 Jul 2024 20:19:03 -0400
Subject: [PATCH 06/26] docs: Document the process for merging pull requests
 (#5010)

---
 CONTRIBUTING.md | 220 ++++++++++++++++++++++++++++++++++++++++++++++--
 1 file changed, 214 insertions(+), 6 deletions(-)

diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 5355878fa79..ceca1eaa6fc 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -73,27 +73,235 @@ The source must be formatted according to the style guide below.

 Header includes must be [levelized](./Builds/levelization).

+Changes should usually be squashed down into a single commit.
+Some larger or more complicated change sets make more sense,
+and are easier to review if organized into multiple logical commits.
+Either way, all commits should fit the following criteria:
+* Changes should be presented in a single commit or a logical
+  sequence of commits.
+  Specifically, chronological commits that simply
+  reflect the history of how the author implemented
+  the change, "warts and all", are not useful to
+  reviewers.
+* Every commit should have a [good message](#good-commit-messages)
+  to explain specific aspects of the change.
+* Every commit should be signed.
+* Every commit should be well-formed (builds successfully,
+  unit tests passing), as this helps to resolve merge
+  conflicts, and makes it easier to use `git bisect`
+  to find bugs.
+
+### Good commit messages
+
+Refer to
+["How to Write a Git Commit Message"](https://cbea.ms/git-commit/)
+for general rules on writing a good commit message.
+
+In addition to those guidelines, please add one of the following
+prefixes to the subject line if appropriate.
+* `fix:` - The primary purpose is to fix an existing bug.
+* `perf:` - The primary purpose is performance improvements.
+* `refactor:` - The changes refactor code without affecting
+  functionality.
+* `test:` - The changes _only_ affect unit tests.
+* `docs:` - The changes _only_ affect documentation. This can
+  include code comments in addition to `.md` files like this one.
+* `build:` - The changes _only_ affect the build process,
+  including CMake and/or Conan settings.
+* `chore:` - Other tasks that don't affect the binary, but don't fit
+  any of the other cases, e.g. formatting, git settings, updating
+  Github Actions jobs.
+
+Whenever possible, when updating commits after the PR is open, please
+add the PR number to the end of the subject line. e.g. `test: Add
+unit tests for Feature X (#1234)`.

 ## Pull requests

 In general, pull requests use `develop` as the base branch.
-  (Hotfixes are an exception.)
+(Hotfixes are an exception.)

-Changes to pull requests must be added as new commits.
-Once code reviewers have started looking at your code, please avoid
-force-pushing a branch in a pull request.
+If your changes are not quite ready, but you want to make them easily
+available for preliminary examination or review, you can create a "Draft"
+pull request.
+While a pull request is marked as a "Draft", you can rebase or reorganize the
+commits in the pull request as desired.
+
+Github pull requests are created as "Ready" by default, or you can mark
+a "Draft" pull request as "Ready".
+Once a pull request is marked as "Ready",
+any changes must be added as new commits. Do not
+force-push to a branch in a pull request under review.
+(This includes rebasing your branch onto the updated base branch.
+Use a merge operation instead, or hit the "Update branch" button
+at the bottom of the Github PR page.)
 This preserves the ability for reviewers to filter changes since their last
 review.

-A pull request must obtain **approvals from at least two reviewers** before it
-can be considered for merge by a Maintainer.
+A pull request must obtain **approvals from at least two reviewers**
+before it can be considered for merge by a Maintainer.
 Maintainers retain discretion to require more approvals if they feel the
 credibility of the existing approvals is insufficient.

 Pull requests must be merged by [squash-and-merge][2]
 to preserve a linear history for the `develop` branch.

+### When and how to merge pull requests
+
+#### "Passed"
+
+A pull request should only have the "Passed" label added when it
+meets a few criteria:
+
+1. It must have two approving reviews [as described
+   above](#pull-requests).
+   (Exception: PRs that are deemed "trivial"
+   only need one approval.)
+2. All CI checks must be complete and passing. (One-off failures may
+   be acceptable if they are related to a known issue.)
+3. The PR must have a [good commit message](#good-commit-messages).
+   * If the PR started with a good commit message, and it doesn't
+     need to be updated, the author can indicate that in a comment.
+   * Any contributor, preferably the author, can leave a comment
+     suggesting a commit message.
+   * If the author squashes and rebases the code in preparation for
+     merge, they should also ensure the commit message(s) are updated.
+4. The PR branch must be up to date with the base branch (usually
+   `develop`). This is usually accomplished by merging the base branch
+   into the feature branch, but if the other criteria are met, the
+   changes can be squashed and rebased on top of the base branch.
+5. Finally, and most importantly, the author of the PR must
+   positively indicate that the PR is ready to merge. That can be
+   accomplished by adding the "Passed" label if their role allows,
+   or by leaving a comment to the effect that the PR is ready to
+   merge.
+
+Once the "Passed" label is added, a maintainer may merge the PR at
+any time, so don't use it lightly.
+
+#### Instructions for maintainers
+
+The maintainer should double-check that the PR has met all the
+necessary criteria, and can request additional information from the
+owner or additional reviews, and can always remove the
+"Passed" label if appropriate. The maintainer has final say on
+whether a PR gets merged, and is encouraged to communicate any
+issues or concerns to other maintainers.
+
+##### Most pull requests: "Squash and merge"
+
+Most pull requests don't need special handling, and can simply be
+merged using the "Squash and merge" button on the Github UI. Update
+the suggested commit message if necessary.
+
+##### Slightly more complicated pull requests
+
+Some pull requests need to be pushed to `develop` as more than one
+commit. There are multiple ways to accomplish this. If the author
+describes a process, and it is reasonable, follow it. Otherwise, do
+a fast-forward-only merge (`--ff-only`) on the command line and push.
+
+Either way, check that:
+* The commits are based on the current tip of `develop`.
+* The commits are clean: No merge commits (except when reverse
+  merging), no "[FOLD]" or "fixup!" messages.
+* All commits are signed. If the commits are not signed by the author, use
+  `git commit --amend -S` to sign them yourself.
+* At least one (but preferably all) of the commits has the PR number
+  in the commit message.
+
+**Never use the "Create a merge commit" or "Rebase and merge"
+  functions!**
+
+##### Releases, release candidates, and betas
+
+All releases, including release candidates and betas, are handled
+differently from typical PRs. Most importantly, never use
+the Github UI to merge a release.
+
+1. There are two possible conditions that the `develop` branch will
+   be in when preparing a release.
+   1. Ready or almost ready to go: There may be one or two PRs that
+      need to be merged, but otherwise, the only change needed is to
+      update the version number in `BuildInfo.cpp`. In this case,
+      merge those PRs as appropriate, updating the second one, and
+      waiting for CI to finish in between. Then update
+      `BuildInfo.cpp`.
+   2. Several pending PRs: In this case, do not use the Github UI,
+      because the delays waiting for CI in between each merge will be
+      unnecessarily onerous.
Instead, create a working branch (e.g. + `develop-next`) based off of `develop`. Squash the changes + from each PR onto the branch, one commit each (unless + more are needed), being sure to sign each commit and update + the commit message to include the PR number. You may be able + to use a fast-forward merge for the first PR. The workflow may + look something like: +``` +git fetch upstream +git checkout upstream/develop +git checkout -b develop-next +# Use -S on the ff-only merge if prbranch1 isn't signed. +# Or do another branch first. +git merge --ff-only user1/prbranch1 +git merge --squash user2/prbranch2 +git commit -S +git merge --squash user3/prbranch3 +git commit -S +[...] +git push --set-upstream origin develop-next + +``` +2. Create the Pull Request with `release` as the base branch. If any + of the included PRs are still open, + [use closing keywords](https://docs.github.com/articles/closing-issues-using-keywords) + in the description to ensure they are closed when the code is + released. e.g. "Closes #1234" +3. Instead of the default template, reuse and update the message from + the previous release. Include the following verbiage somewhere in + the description: +``` +The base branch is release. All releases (including betas) go in +release. This PR will be merged with --ff-only (not squashed or +rebased, and not using the GitHub UI) to both release and develop. +``` +4. Sign-offs for the three platforms usually occur offline, but at + least one approval will be needed on the PR. +5. Once everything is ready to go, open a terminal, and do the + fast-forward merges manually. Do not push any branches until you + verify that all of them update correctly. +``` +git fetch upstream +git checkout -b upstream--develop -t upstream/develop || git checkout upstream--develop +git reset --hard upstream/develop +# develop-next must be signed already! +git merge --ff-only origin/develop-next +git checkout -b upstream--release -t upstream/release || git checkout upstream--release +git reset --hard upstream/release +git merge --ff-only origin/develop-next +# Only do these 3 steps if pushing a release. No betas or RCs +git checkout -b upstream--master -t upstream/master || git checkout upstream--master +git reset --hard upstream/master +git merge --ff-only origin/develop-next +# Check that all of the branches are updated +git log -1 --oneline +# The output should look like: +# 02ec8b7962 (HEAD -> upstream--master, origin/develop-next, upstream--release, upstream--develop, develop-next) Set version to 2.2.0-rc1 +# Note that all of the upstream--develop/release/master are on this commit. +# (Master will be missing for betas, etc.) +# Just to be safe, do a dry run first: +git push --dry-run upstream-push HEAD:develop +git push --dry-run upstream-push HEAD:release +# git push --dry-run upstream-push HEAD:master +# Now push +git push upstream-push HEAD:develop +git push upstream-push HEAD:release +# git push upstream-push HEAD:master +# Don't forget to tag the release, too. +git tag +git push upstream-push +``` +6. Finally +[create a new release on Github](https://github.com/XRPLF/rippled/releases). 
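+Before pushing, it can also help to spot-check the "signed commits"
+and "PR number" criteria over the exact range being merged (a
+suggested check, not a required step; `%G?` prints a one-letter
+signature status, `G` for a good signature, `N` for none):
```
# Abbreviated hash, signature status, and subject of each commit
# that is about to be pushed; any commit marked N still needs -S.
git log --pretty="format:%h %G? %s" upstream/develop..HEAD
```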
+ # Style guide From e5aa605742befe585309cb380796fa6d56bb6f04 Mon Sep 17 00:00:00 2001 From: Ed Hennis Date: Wed, 31 Jul 2024 17:14:04 -0400 Subject: [PATCH 07/26] Set version to 2.3.0-b2 --- src/libxrpl/protocol/BuildInfo.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/libxrpl/protocol/BuildInfo.cpp b/src/libxrpl/protocol/BuildInfo.cpp index b0a7bcc9ed7..3a05f512455 100644 --- a/src/libxrpl/protocol/BuildInfo.cpp +++ b/src/libxrpl/protocol/BuildInfo.cpp @@ -33,7 +33,7 @@ namespace BuildInfo { // and follow the format described at http://semver.org/ //------------------------------------------------------------------------------ // clang-format off -char const* const versionString = "2.3.0-b1" +char const* const versionString = "2.3.0-b2" // clang-format on #if defined(DEBUG) || defined(SANITIZER) From ffc343a2bc274ee81db9a3cef050e4fd674507a6 Mon Sep 17 00:00:00 2001 From: Bronek Kozicki Date: Fri, 2 Aug 2024 21:58:05 +0100 Subject: [PATCH 08/26] Fix crash inside `OverlayImpl` loops over `ids_` (#5071) --- src/test/overlay/tx_reduce_relay_test.cpp | 3 +++ src/xrpld/overlay/detail/OverlayImpl.cpp | 8 ++++++-- 2 files changed, 9 insertions(+), 2 deletions(-) diff --git a/src/test/overlay/tx_reduce_relay_test.cpp b/src/test/overlay/tx_reduce_relay_test.cpp index 074976e8586..3065bcbb685 100644 --- a/src/test/overlay/tx_reduce_relay_test.cpp +++ b/src/test/overlay/tx_reduce_relay_test.cpp @@ -189,7 +189,10 @@ class tx_reduce_relay_test : public beast::unit_test::suite consumer, std::move(stream_ptr), overlay); + BEAST_EXPECT( + overlay.findPeerByPublicKey(key) == std::shared_ptr{}); overlay.add_active(peer); + BEAST_EXPECT(overlay.findPeerByPublicKey(key) == peer); peers.emplace_back(peer); // overlay stores week ptr to PeerImp lid_ += 2; rid_ += 2; diff --git a/src/xrpld/overlay/detail/OverlayImpl.cpp b/src/xrpld/overlay/detail/OverlayImpl.cpp index f033ad4e9f9..1978a2617aa 100644 --- a/src/xrpld/overlay/detail/OverlayImpl.cpp +++ b/src/xrpld/overlay/detail/OverlayImpl.cpp @@ -1163,9 +1163,11 @@ OverlayImpl::getActivePeers( disabled = enabledInSkip = 0; ret.reserve(ids_.size()); + // NOTE The purpose of p is to delay the destruction of PeerImp + std::shared_ptr p; for (auto& [id, w] : ids_) { - if (auto p = w.lock()) + if (p = w.lock(); p != nullptr) { bool const reduceRelayEnabled = p->txReduceRelayEnabled(); // tx reduced relay feature disabled @@ -1205,9 +1207,11 @@ std::shared_ptr OverlayImpl::findPeerByPublicKey(PublicKey const& pubKey) { std::lock_guard lock(mutex_); + // NOTE The purpose of peer is to delay the destruction of PeerImp + std::shared_ptr peer; for (auto const& e : ids_) { - if (auto peer = e.second.lock()) + if (peer = e.second.lock(); peer != nullptr) { if (peer->getNodePublic() == pubKey) return peer; From eedfec015eec9e13d0f96da0db08d7cc38805f8b Mon Sep 17 00:00:00 2001 From: Bronek Kozicki Date: Fri, 2 Aug 2024 22:25:44 +0100 Subject: [PATCH 09/26] Update gcovr EXCLUDE (#5084) --- cmake/RippledCov.cmake | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmake/RippledCov.cmake b/cmake/RippledCov.cmake index ce7536e8eeb..3c48bb1c145 100644 --- a/cmake/RippledCov.cmake +++ b/cmake/RippledCov.cmake @@ -33,6 +33,6 @@ setup_target_for_coverage_gcovr( FORMAT ${coverage_format} EXECUTABLE rippled EXECUTABLE_ARGS --unittest$<$:=${coverage_test}> --unittest-jobs ${coverage_test_parallelism} --quiet --unittest-log - EXCLUDE "src/test" "${CMAKE_BINARY_DIR}/proto_gen" "${CMAKE_BINARY_DIR}/proto_gen_grpc" + EXCLUDE "src/test" 
"include/xrpl/beast/test" "include/xrpl/beast/unit_test" "${CMAKE_BINARY_DIR}/pb-xrpl.libpb" DEPENDENCIES rippled ) From 7d27b1119092e3195238a1897d6012c3372e6c53 Mon Sep 17 00:00:00 2001 From: John Freeman Date: Fri, 2 Aug 2024 19:03:05 -0500 Subject: [PATCH 10/26] Remove shards (#5066) --- Builds/levelization/results/loops.txt | 10 +- Builds/levelization/results/ordering.txt | 11 +- cfg/rippled-example.cfg | 47 +- cfg/rippled-reporting.cfg | 47 +- cmake/RippledCore.cmake | 1 - include/xrpl/basics/ThreadSafetyAnalysis.h | 63 - include/xrpl/proto/ripple.proto | 65 - include/xrpl/protocol/HashPrefix.h | 3 - include/xrpl/protocol/SystemParameters.h | 3 - include/xrpl/protocol/jss.h | 12 +- src/test/net/DatabaseDownloader_test.cpp | 315 --- src/test/nodestore/DatabaseShard_test.cpp | 1894 -------------- src/test/nodestore/Database_test.cpp | 31 - src/test/rpc/NodeToShardRPC_test.cpp | 414 --- src/test/rpc/RPCCall_test.cpp | 294 --- src/test/rpc/ShardArchiveHandler_test.cpp | 705 ------ src/test/shamap/common.h | 13 +- src/xrpld/app/consensus/RCLConsensus.cpp | 1 - src/xrpld/app/ledger/InboundLedger.h | 1 - src/xrpld/app/ledger/LedgerMaster.h | 3 - src/xrpld/app/ledger/detail/InboundLedger.cpp | 52 +- .../app/ledger/detail/InboundLedgers.cpp | 25 +- src/xrpld/app/ledger/detail/LedgerMaster.cpp | 101 +- src/xrpld/app/main/Application.cpp | 194 +- src/xrpld/app/main/Application.h | 10 - src/xrpld/app/main/DBInit.h | 120 - src/xrpld/app/main/Main.cpp | 6 - src/xrpld/app/misc/NetworkOPs.cpp | 6 +- src/xrpld/app/misc/SHAMapStoreImp.cpp | 25 +- src/xrpld/app/rdb/Download.h | 79 - src/xrpld/app/rdb/README.md | 33 +- src/xrpld/app/rdb/ShardArchive.h | 78 - src/xrpld/app/rdb/State.h | 1 - src/xrpld/app/rdb/UnitaryShard.h | 155 -- .../rdb/backend/detail/{detail => }/Node.cpp | 97 +- src/xrpld/app/rdb/backend/detail/Node.h | 26 +- .../rdb/backend/detail/PostgresDatabase.cpp | 1 - .../app/rdb/backend/detail/SQLiteDatabase.cpp | 893 +------ src/xrpld/app/rdb/backend/detail/Shard.h | 90 - .../app/rdb/backend/detail/detail/Shard.cpp | 147 -- src/xrpld/app/rdb/detail/Download.cpp | 152 -- .../app/rdb/detail/RelationalDatabase.cpp | 1 - src/xrpld/app/rdb/detail/ShardArchive.cpp | 68 - src/xrpld/app/rdb/detail/UnitaryShard.cpp | 320 --- src/xrpld/core/Config.h | 1 - src/xrpld/core/ConfigSections.h | 6 - src/xrpld/core/Job.h | 1 - src/xrpld/core/JobTypes.h | 1 - src/xrpld/net/DatabaseBody.h | 179 -- src/xrpld/net/DatabaseDownloader.h | 76 - src/xrpld/net/HTTPDownloader.h | 130 - src/xrpld/net/HTTPStream.h | 165 -- src/xrpld/net/ShardDownloader.md | 311 --- src/xrpld/net/detail/DatabaseBody.ipp | 231 -- src/xrpld/net/detail/DatabaseDownloader.cpp | 92 - src/xrpld/net/detail/HTTPDownloader.cpp | 340 --- src/xrpld/net/detail/HTTPStream.cpp | 203 -- src/xrpld/net/detail/RPCCall.cpp | 42 - src/xrpld/net/uml/interrupt_sequence.pu | 233 -- src/xrpld/net/uml/states.pu | 69 - src/xrpld/nodestore/Database.h | 96 +- src/xrpld/nodestore/DatabaseShard.h | 298 --- src/xrpld/nodestore/DeterministicShard.md | 162 -- src/xrpld/nodestore/Manager.h | 1 - src/xrpld/nodestore/README.md | 194 -- src/xrpld/nodestore/ShardInfo.h | 122 - src/xrpld/nodestore/ShardPool.md | 43 - src/xrpld/nodestore/ShardSizeTuning.md | 213 -- src/xrpld/nodestore/Types.h | 9 - src/xrpld/nodestore/backend/NuDBFactory.cpp | 23 +- src/xrpld/nodestore/detail/Database.cpp | 121 - .../nodestore/detail/DatabaseNodeImp.cpp | 1 - src/xrpld/nodestore/detail/DatabaseNodeImp.h | 6 - .../nodestore/detail/DatabaseRotatingImp.cpp | 12 - 
.../nodestore/detail/DatabaseRotatingImp.h | 3 - .../nodestore/detail/DatabaseShardImp.cpp | 2253 ----------------- src/xrpld/nodestore/detail/DatabaseShardImp.h | 429 ---- .../nodestore/detail/DeterministicShard.cpp | 216 -- .../nodestore/detail/DeterministicShard.h | 174 -- src/xrpld/nodestore/detail/Shard.cpp | 1272 ---------- src/xrpld/nodestore/detail/Shard.h | 432 ---- src/xrpld/nodestore/detail/ShardInfo.cpp | 136 - src/xrpld/nodestore/detail/TaskQueue.cpp | 76 - src/xrpld/nodestore/detail/TaskQueue.h | 64 - src/xrpld/overlay/Overlay.h | 9 - src/xrpld/overlay/Peer.h | 3 - src/xrpld/overlay/detail/Message.cpp | 4 - src/xrpld/overlay/detail/OverlayImpl.cpp | 109 - src/xrpld/overlay/detail/OverlayImpl.h | 16 - src/xrpld/overlay/detail/PeerImp.cpp | 385 +-- src/xrpld/overlay/detail/PeerImp.h | 17 - src/xrpld/overlay/detail/ProtocolMessage.h | 24 - src/xrpld/overlay/detail/TrafficCount.cpp | 6 - src/xrpld/overlay/detail/TrafficCount.h | 2 - src/xrpld/perflog/detail/PerfLogImp.cpp | 6 +- src/xrpld/rpc/ShardArchiveHandler.h | 176 -- src/xrpld/rpc/ShardVerificationScheduler.h | 84 - src/xrpld/rpc/detail/Handler.cpp | 3 - src/xrpld/rpc/detail/ShardArchiveHandler.cpp | 585 ----- .../rpc/detail/ShardVerificationScheduler.cpp | 68 - src/xrpld/rpc/handlers/CrawlShards.cpp | 73 - src/xrpld/rpc/handlers/DownloadShard.cpp | 176 -- src/xrpld/rpc/handlers/GetCounts.cpp | 30 +- src/xrpld/rpc/handlers/Handlers.h | 6 - src/xrpld/rpc/handlers/NodeToShard.cpp | 86 - src/xrpld/rpc/handlers/Tx.cpp | 1 - src/xrpld/shamap/Family.h | 19 +- src/xrpld/shamap/NodeFamily.h | 12 +- src/xrpld/shamap/ShardFamily.h | 125 - src/xrpld/shamap/detail/SHAMap.cpp | 5 +- src/xrpld/shamap/detail/SHAMapSync.cpp | 13 +- src/xrpld/shamap/detail/ShardFamily.cpp | 198 -- 112 files changed, 115 insertions(+), 17175 deletions(-) delete mode 100644 include/xrpl/basics/ThreadSafetyAnalysis.h delete mode 100644 src/test/net/DatabaseDownloader_test.cpp delete mode 100644 src/test/nodestore/DatabaseShard_test.cpp delete mode 100644 src/test/rpc/NodeToShardRPC_test.cpp delete mode 100644 src/test/rpc/ShardArchiveHandler_test.cpp delete mode 100644 src/xrpld/app/rdb/Download.h delete mode 100644 src/xrpld/app/rdb/ShardArchive.h delete mode 100644 src/xrpld/app/rdb/UnitaryShard.h rename src/xrpld/app/rdb/backend/detail/{detail => }/Node.cpp (93%) delete mode 100644 src/xrpld/app/rdb/backend/detail/Shard.h delete mode 100644 src/xrpld/app/rdb/backend/detail/detail/Shard.cpp delete mode 100644 src/xrpld/app/rdb/detail/Download.cpp delete mode 100644 src/xrpld/app/rdb/detail/ShardArchive.cpp delete mode 100644 src/xrpld/app/rdb/detail/UnitaryShard.cpp delete mode 100644 src/xrpld/net/DatabaseBody.h delete mode 100644 src/xrpld/net/DatabaseDownloader.h delete mode 100644 src/xrpld/net/HTTPDownloader.h delete mode 100644 src/xrpld/net/HTTPStream.h delete mode 100644 src/xrpld/net/ShardDownloader.md delete mode 100644 src/xrpld/net/detail/DatabaseBody.ipp delete mode 100644 src/xrpld/net/detail/DatabaseDownloader.cpp delete mode 100644 src/xrpld/net/detail/HTTPDownloader.cpp delete mode 100644 src/xrpld/net/detail/HTTPStream.cpp delete mode 100644 src/xrpld/net/uml/interrupt_sequence.pu delete mode 100644 src/xrpld/net/uml/states.pu delete mode 100644 src/xrpld/nodestore/DatabaseShard.h delete mode 100644 src/xrpld/nodestore/DeterministicShard.md delete mode 100644 src/xrpld/nodestore/ShardInfo.h delete mode 100644 src/xrpld/nodestore/ShardPool.md delete mode 100644 src/xrpld/nodestore/ShardSizeTuning.md delete mode 100644 
src/xrpld/nodestore/detail/DatabaseShardImp.cpp delete mode 100644 src/xrpld/nodestore/detail/DatabaseShardImp.h delete mode 100644 src/xrpld/nodestore/detail/DeterministicShard.cpp delete mode 100644 src/xrpld/nodestore/detail/DeterministicShard.h delete mode 100644 src/xrpld/nodestore/detail/Shard.cpp delete mode 100644 src/xrpld/nodestore/detail/Shard.h delete mode 100644 src/xrpld/nodestore/detail/ShardInfo.cpp delete mode 100644 src/xrpld/nodestore/detail/TaskQueue.cpp delete mode 100644 src/xrpld/nodestore/detail/TaskQueue.h delete mode 100644 src/xrpld/rpc/ShardArchiveHandler.h delete mode 100644 src/xrpld/rpc/ShardVerificationScheduler.h delete mode 100644 src/xrpld/rpc/detail/ShardArchiveHandler.cpp delete mode 100644 src/xrpld/rpc/detail/ShardVerificationScheduler.cpp delete mode 100644 src/xrpld/rpc/handlers/CrawlShards.cpp delete mode 100644 src/xrpld/rpc/handlers/DownloadShard.cpp delete mode 100644 src/xrpld/rpc/handlers/NodeToShard.cpp delete mode 100644 src/xrpld/shamap/ShardFamily.h delete mode 100644 src/xrpld/shamap/detail/ShardFamily.cpp diff --git a/Builds/levelization/results/loops.txt b/Builds/levelization/results/loops.txt index ee7e6fd3bc6..f703a3a9d5d 100644 --- a/Builds/levelization/results/loops.txt +++ b/Builds/levelization/results/loops.txt @@ -16,11 +16,8 @@ Loop: xrpld.app xrpld.ledger Loop: xrpld.app xrpld.net xrpld.app > xrpld.net -Loop: xrpld.app xrpld.nodestore - xrpld.app > xrpld.nodestore - Loop: xrpld.app xrpld.overlay - xrpld.overlay ~= xrpld.app + xrpld.overlay == xrpld.app Loop: xrpld.app xrpld.peerfinder xrpld.app > xrpld.peerfinder @@ -38,10 +35,7 @@ Loop: xrpld.core xrpld.perflog xrpld.perflog ~= xrpld.core Loop: xrpld.net xrpld.rpc - xrpld.rpc > xrpld.net - -Loop: xrpld.nodestore xrpld.overlay - xrpld.overlay ~= xrpld.nodestore + xrpld.rpc ~= xrpld.net Loop: xrpld.overlay xrpld.rpc xrpld.rpc ~= xrpld.overlay diff --git a/Builds/levelization/results/ordering.txt b/Builds/levelization/results/ordering.txt index 87f9b03a54e..2856d783a1f 100644 --- a/Builds/levelization/results/ordering.txt +++ b/Builds/levelization/results/ordering.txt @@ -74,19 +74,13 @@ test.ledger > xrpld.app test.ledger > xrpld.core test.ledger > xrpld.ledger test.ledger > xrpl.protocol -test.net > test.jtx -test.net > test.toplevel -test.net > test.unit_test -test.net > xrpld.net test.nodestore > test.jtx test.nodestore > test.toplevel test.nodestore > test.unit_test test.nodestore > xrpl.basics -test.nodestore > xrpld.app test.nodestore > xrpld.core test.nodestore > xrpld.nodestore test.nodestore > xrpld.unity -test.nodestore > xrpl.protocol test.overlay > test.jtx test.overlay > test.unit_test test.overlay > xrpl.basics @@ -109,13 +103,11 @@ test.resource > test.unit_test test.resource > xrpl.basics test.resource > xrpl.resource test.rpc > test.jtx -test.rpc > test.nodestore test.rpc > test.toplevel test.rpc > xrpl.basics test.rpc > xrpld.app test.rpc > xrpld.core test.rpc > xrpld.net -test.rpc > xrpld.nodestore test.rpc > xrpld.overlay test.rpc > xrpld.rpc test.rpc > xrpl.json @@ -150,6 +142,7 @@ xrpld.app > test.unit_test xrpld.app > xrpl.basics xrpld.app > xrpld.conditions xrpld.app > xrpld.consensus +xrpld.app > xrpld.nodestore xrpld.app > xrpld.perflog xrpld.app > xrpl.json xrpld.app > xrpl.protocol @@ -186,14 +179,12 @@ xrpld.peerfinder > xrpl.basics xrpld.peerfinder > xrpld.core xrpld.peerfinder > xrpl.protocol xrpld.perflog > xrpl.basics -xrpld.perflog > xrpld.nodestore xrpld.perflog > xrpl.json xrpld.perflog > xrpl.protocol xrpld.rpc > xrpl.basics xrpld.rpc > 
xrpld.core xrpld.rpc > xrpld.ledger xrpld.rpc > xrpld.nodestore -xrpld.rpc > xrpld.shamap xrpld.rpc > xrpl.json xrpld.rpc > xrpl.protocol xrpld.rpc > xrpl.resource diff --git a/cfg/rippled-example.cfg b/cfg/rippled-example.cfg index 2ba2afa727d..b283900d013 100644 --- a/cfg/rippled-example.cfg +++ b/cfg/rippled-example.cfg @@ -1094,7 +1094,7 @@ # default value for the unspecified parameter. # # Note: the cache will not be created if online_delete -# is specified, or if shards are used. +# is specified. # # fast_load Boolean. If set, load the last persisted ledger # from disk upon process start before syncing to @@ -1107,10 +1107,6 @@ # earliest_seq The default is 32570 to match the XRP ledger # network's earliest allowed sequence. Alternate # networks may set this value. Minimum value of 1. -# If a [shard_db] section is defined, and this -# value is present either [node_db] or [shard_db], -# it must be defined with the same value in both -# sections. # # online_delete Minimum value of 256. Enable automatic purging # of older ledger information. Maintain at least this @@ -1192,32 +1188,6 @@ # your rippled.cfg file. # Partial pathnames are relative to the location of the rippled executable. # -# [shard_db] Settings for the Shard Database (optional) -# -# Format (without spaces): -# One or more lines of case-insensitive key / value pairs: -# '=' -# ... -# -# Example: -# path=db/shards/nudb -# -# Required keys: -# path Location to store the database -# -# Optional keys: -# max_historical_shards -# The maximum number of historical shards -# to store. -# -# [historical_shard_paths] Additional storage paths for the Shard Database (optional) -# -# Format (without spaces): -# One or more lines, each expressing a full path for storing historical shards: -# /mnt/disk1 -# /mnt/disk2 -# ... -# # [sqlite] Tuning settings for the SQLite databases (optional) # # Format (without spaces): @@ -1674,21 +1644,6 @@ path=/var/lib/rippled/db/nudb online_delete=512 advisory_delete=0 -# This is the persistent datastore for shards. It is important for the health -# of the ripple network that rippled operators shard as much as practical. -# NuDB requires SSD storage. Helpful information can be found at -# https://xrpl.org/history-sharding.html -#[shard_db] -#path=/var/lib/rippled/db/shards/nudb -#max_historical_shards=50 -# -# This optional section can be configured with a list -# of paths to use for storing historical shards. Each -# path must correspond to a unique filesystem. -#[historical_shard_paths] -#/path/1 -#/path/2 - [database_path] /var/lib/rippled/db diff --git a/cfg/rippled-reporting.cfg b/cfg/rippled-reporting.cfg index 290bcc5418a..9776ef5ee45 100644 --- a/cfg/rippled-reporting.cfg +++ b/cfg/rippled-reporting.cfg @@ -1039,17 +1039,13 @@ # default value for the unspecified parameter. # # Note: the cache will not be created if online_delete -# is specified, or if shards are used. +# is specified. # # Optional keys for NuDB or RocksDB: # # earliest_seq The default is 32570 to match the XRP ledger # network's earliest allowed sequence. Alternate # networks may set this value. Minimum value of 1. -# If a [shard_db] section is defined, and this -# value is present either [node_db] or [shard_db], -# it must be defined with the same value in both -# sections. # # online_delete Minimum value of 256. Enable automatic purging # of older ledger information. Maintain at least this @@ -1135,32 +1131,6 @@ # your rippled.cfg file. # Partial pathnames are relative to the location of the rippled executable. 
# -# [shard_db] Settings for the Shard Database (optional) -# -# Format (without spaces): -# One or more lines of case-insensitive key / value pairs: -# '=' -# ... -# -# Example: -# path=db/shards/nudb -# -# Required keys: -# path Location to store the database -# -# Optional keys: -# max_historical_shards -# The maximum number of historical shards -# to store. -# -# [historical_shard_paths] Additional storage paths for the Shard Database (optional) -# -# Format (without spaces): -# One or more lines, each expressing a full path for storing historical shards: -# /mnt/disk1 -# /mnt/disk2 -# ... -# # [sqlite] Tuning settings for the SQLite databases (optional) # # Format (without spaces): @@ -1616,21 +1586,6 @@ path=/var/lib/rippled-reporting/db/nudb # online_delete=512 # advisory_delete=0 -# This is the persistent datastore for shards. It is important for the health -# of the ripple network that rippled operators shard as much as practical. -# NuDB requires SSD storage. Helpful information can be found at -# https://xrpl.org/history-sharding.html -#[shard_db] -#path=/var/lib/rippled/db/shards/nudb -#max_historical_shards=50 -# -# This optional section can be configured with a list -# of paths to use for storing historical shards. Each -# path must correspond to a unique filesystem. -#[historical_shard_paths] -#/path/1 -#/path/2 - [database_path] /var/lib/rippled-reporting/db diff --git a/cmake/RippledCore.cmake b/cmake/RippledCore.cmake index 6a0060f7b32..18a424c484b 100644 --- a/cmake/RippledCore.cmake +++ b/cmake/RippledCore.cmake @@ -142,7 +142,6 @@ if(xrpld) set_source_files_properties( # these two seem to produce conflicts in beast teardown template methods src/test/rpc/ValidatorRPC_test.cpp - src/test/rpc/ShardArchiveHandler_test.cpp src/test/ledger/Invariants_test.cpp PROPERTIES SKIP_UNITY_BUILD_INCLUSION TRUE) endif() diff --git a/include/xrpl/basics/ThreadSafetyAnalysis.h b/include/xrpl/basics/ThreadSafetyAnalysis.h deleted file mode 100644 index b1889d5b4c6..00000000000 --- a/include/xrpl/basics/ThreadSafetyAnalysis.h +++ /dev/null @@ -1,63 +0,0 @@ -#ifndef RIPPLE_BASICS_THREAD_SAFTY_ANALYSIS_H_INCLUDED -#define RIPPLE_BASICS_THREAD_SAFTY_ANALYSIS_H_INCLUDED - -#ifdef RIPPLE_ENABLE_THREAD_SAFETY_ANNOTATIONS -#define THREAD_ANNOTATION_ATTRIBUTE__(x) __attribute__((x)) -#else -#define THREAD_ANNOTATION_ATTRIBUTE__(x) // no-op -#endif - -#define CAPABILITY(x) THREAD_ANNOTATION_ATTRIBUTE__(capability(x)) - -#define SCOPED_CAPABILITY THREAD_ANNOTATION_ATTRIBUTE__(scoped_lockable) - -#define GUARDED_BY(x) THREAD_ANNOTATION_ATTRIBUTE__(guarded_by(x)) - -#define PT_GUARDED_BY(x) THREAD_ANNOTATION_ATTRIBUTE__(pt_guarded_by(x)) - -#define ACQUIRED_BEFORE(...) \ - THREAD_ANNOTATION_ATTRIBUTE__(acquired_before(__VA_ARGS__)) - -#define ACQUIRED_AFTER(...) \ - THREAD_ANNOTATION_ATTRIBUTE__(acquired_after(__VA_ARGS__)) - -#define REQUIRES(...) \ - THREAD_ANNOTATION_ATTRIBUTE__(requires_capability(__VA_ARGS__)) - -#define REQUIRES_SHARED(...) \ - THREAD_ANNOTATION_ATTRIBUTE__(requires_shared_capability(__VA_ARGS__)) - -#define ACQUIRE(...) \ - THREAD_ANNOTATION_ATTRIBUTE__(acquire_capability(__VA_ARGS__)) - -#define ACQUIRE_SHARED(...) \ - THREAD_ANNOTATION_ATTRIBUTE__(acquire_shared_capability(__VA_ARGS__)) - -#define RELEASE(...) \ - THREAD_ANNOTATION_ATTRIBUTE__(release_capability(__VA_ARGS__)) - -#define RELEASE_SHARED(...) \ - THREAD_ANNOTATION_ATTRIBUTE__(release_shared_capability(__VA_ARGS__)) - -#define RELEASE_GENERIC(...) 
\ - THREAD_ANNOTATION_ATTRIBUTE__(release_generic_capability(__VA_ARGS__)) - -#define TRY_ACQUIRE(...) \ - THREAD_ANNOTATION_ATTRIBUTE__(try_acquire_capability(__VA_ARGS__)) - -#define TRY_ACQUIRE_SHARED(...) \ - THREAD_ANNOTATION_ATTRIBUTE__(try_acquire_shared_capability(__VA_ARGS__)) - -#define EXCLUDES(...) THREAD_ANNOTATION_ATTRIBUTE__(locks_excluded(__VA_ARGS__)) - -#define ASSERT_CAPABILITY(x) THREAD_ANNOTATION_ATTRIBUTE__(assert_capability(x)) - -#define ASSERT_SHARED_CAPABILITY(x) \ - THREAD_ANNOTATION_ATTRIBUTE__(assert_shared_capability(x)) - -#define RETURN_CAPABILITY(x) THREAD_ANNOTATION_ATTRIBUTE__(lock_returned(x)) - -#define NO_THREAD_SAFETY_ANALYSIS \ - THREAD_ANNOTATION_ATTRIBUTE__(no_thread_safety_analysis) - -#endif diff --git a/include/xrpl/proto/ripple.proto b/include/xrpl/proto/ripple.proto index 74cbfe8f6cb..a06bbd9a311 100644 --- a/include/xrpl/proto/ripple.proto +++ b/include/xrpl/proto/ripple.proto @@ -18,10 +18,6 @@ enum MessageType mtHAVE_SET = 35; mtVALIDATION = 41; mtGET_OBJECTS = 42; - mtGET_SHARD_INFO = 50; - mtSHARD_INFO = 51; - mtGET_PEER_SHARD_INFO = 52; - mtPEER_SHARD_INFO = 53; mtVALIDATORLIST = 54; mtSQUELCH = 55; mtVALIDATORLISTCOLLECTION = 56; @@ -29,8 +25,6 @@ enum MessageType mtPROOF_PATH_RESPONSE = 58; mtREPLAY_DELTA_REQ = 59; mtREPLAY_DELTA_RESPONSE = 60; - mtGET_PEER_SHARD_INFO_V2 = 61; - mtPEER_SHARD_INFO_V2 = 62; mtHAVE_TRANSACTIONS = 63; mtTRANSACTIONS = 64; } @@ -89,71 +83,12 @@ message TMLink required bytes nodePubKey = 1 [deprecated=true]; // node public key } -// Request info on shards held -message TMGetPeerShardInfo -{ - required uint32 hops = 1 [deprecated=true]; // number of hops to travel - optional bool lastLink = 2 [deprecated=true]; // true if last link in the peer chain - repeated TMLink peerChain = 3 [deprecated=true]; // public keys used to route messages -} - -// Info about shards held -message TMPeerShardInfo -{ - required string shardIndexes = 1 [deprecated=true]; // rangeSet of shard indexes - optional bytes nodePubKey = 2 [deprecated=true]; // node public key - optional string endpoint = 3 [deprecated=true]; // ipv6 or ipv4 address - optional bool lastLink = 4 [deprecated=true]; // true if last link in the peer chain - repeated TMLink peerChain = 5 [deprecated=true]; // public keys used to route messages -} - // Peer public key message TMPublicKey { required bytes publicKey = 1; } -// Request peer shard information -message TMGetPeerShardInfoV2 -{ - // Peer public keys used to route messages - repeated TMPublicKey peerChain = 1; - - // Remaining times to relay - required uint32 relays = 2; -} - -// Peer shard information -message TMPeerShardInfoV2 -{ - message TMIncomplete - { - required uint32 shardIndex = 1; - required uint32 state = 2; - - // State completion percent, 1 - 100 - optional uint32 progress = 3; - } - - // Message creation time - required uint32 timestamp = 1; - - // Incomplete shards being acquired or verified - repeated TMIncomplete incomplete = 2; - - // Verified immutable shards (RangeSet) - optional string finalized = 3; - - // Public key of node that authored the shard info - required bytes publicKey = 4; - - // Digital signature of node that authored the shard info - required bytes signature = 5; - - // Peer public keys used to route messages - repeated TMPublicKey peerChain = 6; -} - // A transaction can have only one input and one output. // If you want to send an amount that is greater than any single address of yours // you must first combine coins from one address to another. 
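
A note on the ThreadSafetyAnalysis.h deletion above: the removed macros were thin wrappers over Clang's attribute-based thread-safety analysis (-Wthread-safety). As a minimal sketch of the usage pattern the header supported — the Counter class and its members are hypothetical illustrations, not code from this repository, and only two of the removed macros are reproduced, without their no-op fallback path:

    // Compile with: clang++ -std=c++17 -Wthread-safety -c counter.cpp
    // GUARDED_BY/REQUIRES expand to the same Clang attributes the
    // deleted header wrapped.
    #define GUARDED_BY(x) __attribute__((guarded_by(x)))
    #define REQUIRES(...) __attribute__((requires_capability(__VA_ARGS__)))

    #include <mutex>

    class Counter
    {
        std::mutex mutex_;
        int value_ GUARDED_BY(mutex_) = 0;  // reads/writes require mutex_

        void
        bumpLocked() REQUIRES(mutex_)  // callers must already hold mutex_
        {
            ++value_;
        }

    public:
        void
        bump()
        {
            std::lock_guard<std::mutex> lock(mutex_);
            bumpLocked();  // OK: the analysis sees mutex_ held here
        }
    };

Calling bumpLocked() without holding mutex_ is diagnosed at compile time rather than at runtime. Note that for std::mutex and std::lock_guard themselves to participate fully, the standard library must carry the annotations too (libc++ exposes them behind -D_LIBCPP_ENABLE_THREAD_SAFETY_ANNOTATIONS).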
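
Also worth making explicit, since patch 08 above only hints at it in its two NOTE comments: hoisting the std::shared_ptr out of each loop matters because weak_ptr::lock() can hand the loop the last remaining reference to a PeerImp. With the old loop-scoped pointer, that final reference died at the end of the same iteration, so a destructor that unregisters the peer would mutate ids_ while the range-for iterator still pointed at the dying element. Below is a simplified model of that failure mode under stated assumptions; Registry and Widget are hypothetical stand-ins, not the rippled classes:

    #include <map>
    #include <memory>
    #include <mutex>

    struct Registry;

    struct Widget
    {
        Registry& reg;
        int id;

        Widget(Registry& r, int i) : reg(r), id(i)
        {
        }

        ~Widget();  // defined below: unregisters itself from reg
    };

    struct Registry
    {
        // Assumed recursive, so the re-entrant lock in remove() is safe.
        std::recursive_mutex mutex;
        std::map<int, std::weak_ptr<Widget>> ids;

        void
        remove(int key)
        {
            std::lock_guard lock(mutex);
            ids.erase(key);  // mutates the map a visitor may be iterating
        }

        void
        visitAll()
        {
            std::lock_guard lock(mutex);

            // Hoisted, as in the patch: a Widget whose last owner is p is
            // released only by the next assignment to p (or after the
            // loop), i.e. after the iterator has moved past its element.
            std::shared_ptr<Widget> p;
            for (auto& [id, w] : ids)
            {
                if (p = w.lock(); p != nullptr)
                {
                    // ... use *p ...
                }
                // With a loop-scoped `if (auto p = w.lock())`, p could be
                // the last owner right here, and ~Widget would erase the
                // element the loop iterator still points to.
            }
        }
    };

    Widget::~Widget()
    {
        reg.remove(id);
    }

In this model the element visited in iteration N is released no earlier than the lock() call of iteration N+1, which is exactly the destruction delay the NOTE comments in the patch describe.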
diff --git a/include/xrpl/protocol/HashPrefix.h b/include/xrpl/protocol/HashPrefix.h index 0979756b6e1..bc9c23d1910 100644 --- a/include/xrpl/protocol/HashPrefix.h +++ b/include/xrpl/protocol/HashPrefix.h @@ -84,9 +84,6 @@ enum class HashPrefix : std::uint32_t { /** Payment Channel Claim */ paymentChannelClaim = detail::make_hash_prefix('C', 'L', 'M'), - - /** shard info for signing */ - shardInfo = detail::make_hash_prefix('S', 'H', 'D'), }; template diff --git a/include/xrpl/protocol/SystemParameters.h b/include/xrpl/protocol/SystemParameters.h index c99944193ae..7531a0d5fb9 100644 --- a/include/xrpl/protocol/SystemParameters.h +++ b/include/xrpl/protocol/SystemParameters.h @@ -72,9 +72,6 @@ static constexpr std::uint32_t XRP_LEDGER_EARLIEST_SEQ{32570u}; * used in asserts and tests. */ static constexpr std::uint32_t XRP_LEDGER_EARLIEST_FEES{562177u}; -/** The number of ledgers in a shard */ -static constexpr std::uint32_t DEFAULT_LEDGERS_PER_SHARD{16384u}; - /** The minimum amount of support an amendment should have. @note This value is used by legacy code and will become obsolete diff --git a/include/xrpl/protocol/jss.h b/include/xrpl/protocol/jss.h index a46e15f39ef..84628da286f 100644 --- a/include/xrpl/protocol/jss.h +++ b/include/xrpl/protocol/jss.h @@ -89,7 +89,6 @@ JSS(EscrowFinish); // transaction type. JSS(Fee); // in/out: TransactionSign; field. JSS(FeeSettings); // ledger type. JSS(Flags); // in/out: TransactionSign; field. -JSS(incomplete_shards); // out: OverlayImpl, PeerImp JSS(Invalid); // JSS(LastLedgerSequence); // in: TransactionSign; field JSS(LastUpdateTime); // field. @@ -260,7 +259,6 @@ JSS(code); // out: errors JSS(command); // in: RPCHandler JSS(complete); // out: NetworkOPs, InboundLedger JSS(complete_ledgers); // out: NetworkOPs, PeerImp -JSS(complete_shards); // out: OverlayImpl, PeerImp JSS(consensus); // out: NetworkOPs, LedgerConsensus JSS(converge_time); // out: NetworkOPs JSS(converge_time_s); // out: NetworkOPs @@ -270,8 +268,6 @@ JSS(counters); // in/out: retrieve counters JSS(ctid); // in/out: Tx RPC JSS(currency_a); // out: BookChanges JSS(currency_b); // out: BookChanges -JSS(currentShard); // out: NodeToShardStatus -JSS(currentShardIndex); // out: NodeToShardStatus JSS(currency); // in: paths/PathRequest, STAmount // out: STPathSet, STAmount, // AccountLines @@ -344,8 +340,6 @@ JSS(fetch_pack); // out: NetworkOPs JSS(FIELDS); // out: RPC server_definitions // matches definitions.json format JSS(first); // out: rpc/Version -JSS(firstSequence); // out: NodeToShardStatus -JSS(firstShardIndex); // out: NodeToShardStatus JSS(finished); JSS(fix_txns); // in: LedgerCleaner JSS(flags); // out: AccountOffers, @@ -376,7 +370,7 @@ JSS(ident); // in: AccountCurrencies, AccountInfo, JSS(ignore_default); // in: AccountLines JSS(inLedger); // out: tx/Transaction JSS(inbound); // out: PeerImp -JSS(index); // in: LedgerEntry, DownloadShard +JSS(index); // in: LedgerEntry // out: STLedgerEntry, // LedgerEntry, TxHistory, LedgerData JSS(info); // out: ServerInfo, ConsensusInfo, FetchInfo @@ -406,8 +400,6 @@ JSS(key); // out JSS(key_type); // in/out: WalletPropose, TransactionSign JSS(latency); // out: PeerImp JSS(last); // out: RPCVersion -JSS(lastSequence); // out: NodeToShardStatus -JSS(lastShardIndex); // out: NodeToShardStatus JSS(last_close); // out: NetworkOPs JSS(last_refresh_time); // out: ValidatorSite JSS(last_refresh_status); // out: ValidatorSite @@ -631,7 +623,6 @@ JSS(server_status); // out: NetworkOPs JSS(server_version); // out: NetworkOPs 
JSS(settle_delay); // out: AccountChannels JSS(severity); // in: LogLevel -JSS(shards); // in/out: GetCounts, DownloadShard JSS(signature); // out: NetworkOPs, ChannelAuthorize JSS(signature_verified); // out: ChannelVerify JSS(signing_key); // out: NetworkOPs @@ -655,7 +646,6 @@ JSS(state_now); // in: Subscribe JSS(status); // error JSS(stop); // in: LedgerCleaner JSS(stop_history_tx_only); // in: Unsubscribe, stop history tx stream -JSS(storedSeqs); // out: NodeToShardStatus JSS(streams); // in: Subscribe, Unsubscribe JSS(strict); // in: AccountCurrencies, AccountInfo JSS(sub_index); // in: LedgerEntry diff --git a/src/test/net/DatabaseDownloader_test.cpp b/src/test/net/DatabaseDownloader_test.cpp deleted file mode 100644 index 99e98dd3d01..00000000000 --- a/src/test/net/DatabaseDownloader_test.cpp +++ /dev/null @@ -1,315 +0,0 @@ -//------------------------------------------------------------------------------ -/* - This file is part of rippled: https://github.com/ripple/rippled - Copyright 2019 Ripple Labs Inc. - - Permission to use, copy, modify, and/or distribute this software for any - purpose with or without fee is hereby granted, provided that the above - copyright notice and this permission notice appear in all copies. - - THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. -*/ -//============================================================================== - -#include -#include -#include -#include -#include -#include -#include - -namespace ripple { -namespace test { - -#define REPORT_FAILURE(D) reportFailure(D, __FILE__, __LINE__) - -class DatabaseDownloader_test : public beast::unit_test::suite -{ - std::shared_ptr - createServer(jtx::Env& env, bool ssl = true) - { - std::vector list; - list.push_back(TrustedPublisherServer::randomValidator()); - return make_TrustedPublisherServer( - env.app().getIOService(), - list, - env.timeKeeper().now() + std::chrono::seconds{3600}, - // No future VLs - {}, - ssl); - } - - struct DownloadCompleter - { - std::mutex m; - std::condition_variable cv; - bool called = false; - boost::filesystem::path dest; - - void - operator()(boost::filesystem::path dst) - { - std::unique_lock lk(m); - called = true; - dest = std::move(dst); - cv.notify_one(); - }; - - bool - waitComplete() - { - std::unique_lock lk(m); - - auto stat = cv.wait_for( - lk, std::chrono::seconds(10), [this] { return called; }); - - called = false; - return stat; - }; - }; - DownloadCompleter cb; - - struct Downloader - { - test::StreamSink sink_; - beast::Journal journal_; - std::shared_ptr ptr_; - - Downloader(jtx::Env& env) - : journal_{sink_} - , ptr_{make_DatabaseDownloader( - env.app().getIOService(), - env.app().config(), - journal_)} - { - } - - ~Downloader() - { - ptr_->stop(); - } - - DatabaseDownloader* - operator->() - { - return ptr_.get(); - } - - DatabaseDownloader const* - operator->() const - { - return ptr_.get(); - } - }; - - void - reportFailure(Downloader const& dl, char const* file, int line) - { - std::stringstream ss; - ss << "Failed. LOGS:\n" - << dl.sink_.messages().str() - << "\nDownloadCompleter failure." 
- "\nDatabaseDownloader session active? " - << std::boolalpha << dl->sessionIsActive() - << "\nDatabaseDownloader is stopping? " << std::boolalpha - << dl->isStopping(); - - fail(ss.str(), file, line); - } - - void - testDownload(bool verify) - { - testcase << std::string("Basic download - SSL ") + - (verify ? "Verify" : "No Verify"); - - using namespace jtx; - - ripple::test::detail::FileDirGuard cert{ - *this, "_cert", "ca.pem", TrustedPublisherServer::ca_cert()}; - - Env env{*this, envconfig([&cert, &verify](std::unique_ptr cfg) { - if ((cfg->SSL_VERIFY = verify)) // yes, this is assignment - cfg->SSL_VERIFY_FILE = cert.file().string(); - return cfg; - })}; - - Downloader dl{env}; - - // create a TrustedPublisherServer as a simple HTTP - // server to request from. Use the /textfile endpoint - // to get a simple text file sent as response. - auto server = createServer(env); - log << "Downloading DB from " << server->local_endpoint() << std::endl; - - ripple::test::detail::FileDirGuard const data{ - *this, "downloads", "data", "", false, false}; - // initiate the download and wait for the callback - // to be invoked - auto stat = dl->download( - server->local_endpoint().address().to_string(), - std::to_string(server->local_endpoint().port()), - "/textfile", - 11, - data.file(), - std::function{std::ref(cb)}); - if (!BEAST_EXPECT(stat)) - { - REPORT_FAILURE(dl); - return; - } - if (!BEAST_EXPECT(cb.waitComplete())) - { - REPORT_FAILURE(dl); - return; - } - BEAST_EXPECT(cb.dest == data.file()); - if (!BEAST_EXPECT(boost::filesystem::exists(data.file()))) - return; - BEAST_EXPECT(boost::filesystem::file_size(data.file()) > 0); - } - - void - testFailures() - { - testcase("Error conditions"); - - using namespace jtx; - - Env env{*this}; - - { - // bad hostname - boost::system::error_code ec; - boost::asio::ip::tcp::resolver resolver{env.app().getIOService()}; - auto const results = resolver.resolve("badhostname", "443", ec); - // we require an error in resolving this name in order - // for this test to pass. Some networks might have DNS hijacking - // that prevent NXDOMAIN, in which case the failure is not - // possible, so we skip the test. 
- if (ec) - { - Downloader dl{env}; - ripple::test::detail::FileDirGuard const datafile{ - *this, "downloads", "data", "", false, false}; - BEAST_EXPECT(dl->download( - "badhostname", - "443", - "", - 11, - datafile.file(), - std::function{ - std::ref(cb)})); - if (!BEAST_EXPECT(cb.waitComplete())) - { - REPORT_FAILURE(dl); - } - BEAST_EXPECT(!boost::filesystem::exists(datafile.file())); - BEAST_EXPECTS( - dl.sink_.messages().str().find("async_resolve") != - std::string::npos, - dl.sink_.messages().str()); - } - } - { - // can't connect - Downloader dl{env}; - ripple::test::detail::FileDirGuard const datafile{ - *this, "downloads", "data", "", false, false}; - auto server = createServer(env); - auto host = server->local_endpoint().address().to_string(); - auto port = std::to_string(server->local_endpoint().port()); - log << "Downloading DB from " << server->local_endpoint() - << std::endl; - server->stop(); - BEAST_EXPECT(dl->download( - host, - port, - "", - 11, - datafile.file(), - std::function{std::ref(cb)})); - if (!BEAST_EXPECT(cb.waitComplete())) - { - REPORT_FAILURE(dl); - } - BEAST_EXPECT(!boost::filesystem::exists(datafile.file())); - BEAST_EXPECTS( - dl.sink_.messages().str().find("async_connect") != - std::string::npos, - dl.sink_.messages().str()); - } - { - // not ssl (failed handlshake) - Downloader dl{env}; - ripple::test::detail::FileDirGuard const datafile{ - *this, "downloads", "data", "", false, false}; - auto server = createServer(env, false); - log << "Downloading DB from " << server->local_endpoint() - << std::endl; - BEAST_EXPECT(dl->download( - server->local_endpoint().address().to_string(), - std::to_string(server->local_endpoint().port()), - "", - 11, - datafile.file(), - std::function{std::ref(cb)})); - if (!BEAST_EXPECT(cb.waitComplete())) - { - REPORT_FAILURE(dl); - } - BEAST_EXPECT(!boost::filesystem::exists(datafile.file())); - BEAST_EXPECTS( - dl.sink_.messages().str().find("async_handshake") != - std::string::npos, - dl.sink_.messages().str()); - } - { - // huge file (content length) - Downloader dl{env}; - ripple::test::detail::FileDirGuard const datafile{ - *this, "downloads", "data", "", false, false}; - auto server = createServer(env); - log << "Downloading DB from " << server->local_endpoint() - << std::endl; - BEAST_EXPECT(dl->download( - server->local_endpoint().address().to_string(), - std::to_string(server->local_endpoint().port()), - "/textfile/huge", - 11, - datafile.file(), - std::function{std::ref(cb)})); - if (!BEAST_EXPECT(cb.waitComplete())) - { - REPORT_FAILURE(dl); - } - BEAST_EXPECT(!boost::filesystem::exists(datafile.file())); - BEAST_EXPECTS( - dl.sink_.messages().str().find("Insufficient disk space") != - std::string::npos, - dl.sink_.messages().str()); - } - } - -public: - void - run() override - { - testDownload(true); - testDownload(false); - testFailures(); - } -}; - -#undef REPORT_FAILURE - -BEAST_DEFINE_TESTSUITE(DatabaseDownloader, net, ripple); -} // namespace test -} // namespace ripple diff --git a/src/test/nodestore/DatabaseShard_test.cpp b/src/test/nodestore/DatabaseShard_test.cpp deleted file mode 100644 index e185c43d157..00000000000 --- a/src/test/nodestore/DatabaseShard_test.cpp +++ /dev/null @@ -1,1894 +0,0 @@ -//------------------------------------------------------------------------------ -/* - This file is part of rippled: https://github.com/ripple/rippled - Copyright (c) 2020 Ripple Labs Inc. 
- - Permission to use, copy, modify, and/or distribute this software for any - purpose with or without fee is hereby granted, provided that the above - copyright notice and this permission notice appear in all copies. - - THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. -*/ -//============================================================================== - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include -#include -#include - -namespace ripple { -namespace NodeStore { - -/** std::uniform_int_distribution is platform dependent. - * Unit test for deterministic shards is the following: it generates - * predictable accounts and transactions, packs them into ledgers - * and makes the shard. The hash of this shard should be equal to the - * given value. On different platforms (precisely, Linux and Mac) - * hashes of the resulting shard was different. It was unvestigated - * that the problem is in the class std::uniform_int_distribution - * which generates different pseudorandom sequences on different - * platforms, but we need predictable sequence. - */ -template -struct uniformIntDistribution -{ - using resultType = IntType; - - const resultType A, B; - - struct paramType - { - const resultType A, B; - - paramType(resultType aa, resultType bb) : A(aa), B(bb) - { - } - }; - - explicit uniformIntDistribution( - const resultType a = 0, - const resultType b = std::numeric_limits::max()) - : A(a), B(b) - { - } - - explicit uniformIntDistribution(const paramType& params) - : A(params.A), B(params.B) - { - } - - template - resultType - operator()(Generator& g) const - { - return rnd(g, A, B); - } - - template - resultType - operator()(Generator& g, const paramType& params) const - { - return rnd(g, params.A, params.B); - } - - resultType - a() const - { - return A; - } - - resultType - b() const - { - return B; - } - - resultType - min() const - { - return A; - } - - resultType - max() const - { - return B; - } - -private: - template - resultType - rnd(Generator& g, const resultType a, const resultType b) const - { - static_assert( - std::is_convertible:: - value, - "Ups..."); - static_assert( - Generator::min() == 0, "If non-zero we have handle the offset"); - const resultType range = b - a + 1; - assert(Generator::max() >= range); // Just for safety - const resultType rejectLim = g.max() % range; - resultType n; - do - n = g(); - while (n <= rejectLim); - return (n % range) + a; - } -}; - -template -Integral -randInt(Engine& engine, Integral min, Integral max) -{ - assert(max > min); - - // This should have no state and constructing it should - // be very cheap. If that turns out not to be the case - // it could be hand-optimized. 
- return uniformIntDistribution(min, max)(engine); -} - -template -Integral -randInt(Engine& engine, Integral max) -{ - return randInt(engine, Integral(0), max); -} - -// Tests DatabaseShard class -// -class DatabaseShard_test : public TestBase -{ - static constexpr std::uint32_t maxSizeGb = 10; - static constexpr std::uint32_t maxHistoricalShards = 100; - static constexpr std::uint32_t ledgersPerShard = 256; - static constexpr std::uint32_t earliestSeq = ledgersPerShard + 1; - static constexpr std::uint32_t dataSizeMax = 4; - static constexpr std::uint32_t iniAmount = 1000000; - static constexpr std::uint32_t nTestShards = 4; - static constexpr std::chrono::seconds shardStoreTimeout = - std::chrono::seconds(60); - test::SuiteJournal journal_; - beast::temp_dir defNodeDir; - - struct TestData - { - /* ring used to generate pseudo-random sequence */ - beast::xor_shift_engine rng_; - /* number of shards to generate */ - int numShards_; - /* vector of accounts used to send test transactions */ - std::vector accounts_; - /* nAccounts_[i] is the number of these accounts existed before i-th - * ledger */ - std::vector nAccounts_; - /* payAccounts_[i][j] = {from, to} is the pair which consists of two - * number of accounts: source and destinations, which participate in - * j-th payment on i-th ledger */ - std::vector>> payAccounts_; - /* xrpAmount_[i] is the amount for all payments on i-th ledger */ - std::vector xrpAmount_; - /* ledgers_[i] is the i-th ledger which contains the above described - * accounts and payments */ - std::vector> ledgers_; - - TestData( - std::uint64_t const seedValue, - int dataSize = dataSizeMax, - int numShards = 1) - : rng_(seedValue), numShards_(numShards) - { - std::uint32_t n = 0; - std::uint32_t nLedgers = ledgersPerShard * numShards; - - nAccounts_.reserve(nLedgers); - payAccounts_.reserve(nLedgers); - xrpAmount_.reserve(nLedgers); - - for (std::uint32_t i = 0; i < nLedgers; ++i) - { - int p; - if (n >= 2) - p = randInt(rng_, 2 * dataSize); - else - p = 0; - - std::vector> pay; - pay.reserve(p); - - for (int j = 0; j < p; ++j) - { - int from, to; - do - { - from = randInt(rng_, n - 1); - to = randInt(rng_, n - 1); - } while (from == to); - - pay.push_back(std::make_pair(from, to)); - } - - n += !randInt(rng_, nLedgers / dataSize); - - if (n > accounts_.size()) - { - char str[9]; - for (int j = 0; j < 8; ++j) - str[j] = 'a' + randInt(rng_, 'z' - 'a'); - str[8] = 0; - accounts_.emplace_back(str); - } - - nAccounts_.push_back(n); - payAccounts_.push_back(std::move(pay)); - xrpAmount_.push_back(randInt(rng_, 90) + 10); - } - } - - bool - isNewAccounts(int seq) - { - return nAccounts_[seq] > (seq ? 
nAccounts_[seq - 1] : 0); - } - - void - makeLedgerData(test::jtx::Env& env_, std::uint32_t seq) - { - using namespace test::jtx; - - // The local fee may go up, especially in the online delete tests - while (env_.app().getFeeTrack().lowerLocalFee()) - std::this_thread::sleep_for(std::chrono::milliseconds(1)); - - if (isNewAccounts(seq)) - env_.fund(XRP(iniAmount), accounts_[nAccounts_[seq] - 1]); - - for (std::uint32_t i = 0; i < payAccounts_[seq].size(); ++i) - { - env_( - pay(accounts_[payAccounts_[seq][i].first], - accounts_[payAccounts_[seq][i].second], - XRP(xrpAmount_[seq]))); - } - } - - bool - makeLedgers(test::jtx::Env& env_, std::uint32_t startIndex = 0) - { - if (startIndex == 0) - { - for (std::uint32_t i = 3; i <= ledgersPerShard; ++i) - { - if (!env_.close()) - return false; - std::shared_ptr ledger = - env_.app().getLedgerMaster().getClosedLedger(); - if (ledger->info().seq != i) - return false; - } - } - - for (std::uint32_t i = 0; i < ledgersPerShard * numShards_; ++i) - { - auto const index = i + (startIndex * ledgersPerShard); - - makeLedgerData(env_, i); - if (!env_.close()) - return false; - std::shared_ptr ledger = - env_.app().getLedgerMaster().getClosedLedger(); - if (ledger->info().seq != index + ledgersPerShard + 1) - return false; - ledgers_.push_back(ledger); - } - - return true; - } - }; - - void - testLedgerData( - TestData& data, - std::shared_ptr ledger, - std::uint32_t seq) - { - using namespace test::jtx; - - auto rootCount{0}; - auto accCount{0}; - auto sothCount{0}; - for (auto const& sles : ledger->sles) - { - if (sles->getType() == ltACCOUNT_ROOT) - { - int sq = sles->getFieldU32(sfSequence); - int reqsq = -1; - const auto id = sles->getAccountID(sfAccount); - - for (int i = 0; i < data.accounts_.size(); ++i) - { - if (id == data.accounts_[i].id()) - { - reqsq = ledgersPerShard + 1; - for (int j = 0; j <= seq; ++j) - if (data.nAccounts_[j] > i + 1 || - (data.nAccounts_[j] == i + 1 && - !data.isNewAccounts(j))) - { - for (int k = 0; k < data.payAccounts_[j].size(); - ++k) - if (data.payAccounts_[j][k].first == i) - reqsq++; - } - else - reqsq++; - ++accCount; - break; - } - } - if (reqsq == -1) - { - reqsq = data.nAccounts_[seq] + 1; - ++rootCount; - } - BEAST_EXPECT(sq == reqsq); - } - else - ++sothCount; - } - BEAST_EXPECT(rootCount == 1); - BEAST_EXPECT(accCount == data.nAccounts_[seq]); - BEAST_EXPECT(sothCount == 3); - - auto iniCount{0}; - auto setCount{0}; - auto payCount{0}; - auto tothCount{0}; - for (auto const& tx : ledger->txs) - { - if (tx.first->getTxnType() == ttPAYMENT) - { - std::int64_t xrpAmount = - tx.first->getFieldAmount(sfAmount).xrp().decimalXRP(); - if (xrpAmount == iniAmount) - ++iniCount; - else - { - ++payCount; - BEAST_EXPECT(xrpAmount == data.xrpAmount_[seq]); - } - } - else if (tx.first->getTxnType() == ttACCOUNT_SET) - ++setCount; - else - ++tothCount; - } - int newacc = data.isNewAccounts(seq) ? 
1 : 0; - BEAST_EXPECT(iniCount == newacc); - BEAST_EXPECT(setCount == newacc); - BEAST_EXPECT(payCount == data.payAccounts_[seq].size()); - BEAST_EXPECT(tothCount == !seq); - } - - bool - saveLedger( - Database& db, - Ledger const& ledger, - std::shared_ptr const& next = {}) - { - // Store header - { - Serializer s(sizeof(std::uint32_t) + sizeof(LedgerInfo)); - s.add32(HashPrefix::ledgerMaster); - addRaw(ledger.info(), s); - db.store( - hotLEDGER, - std::move(s.modData()), - ledger.info().hash, - ledger.info().seq); - } - - // Store the state map - auto visitAcc = [&](SHAMapTreeNode const& node) { - Serializer s; - node.serializeWithPrefix(s); - db.store( - node.getType() == SHAMapNodeType::tnINNER ? hotUNKNOWN - : hotACCOUNT_NODE, - std::move(s.modData()), - node.getHash().as_uint256(), - ledger.info().seq); - return true; - }; - - if (ledger.stateMap().getHash().isNonZero()) - { - if (!ledger.stateMap().isValid()) - return false; - if (next && next->info().parentHash == ledger.info().hash) - { - auto have = next->stateMap().snapShot(false); - ledger.stateMap().snapShot(false)->visitDifferences( - &(*have), visitAcc); - } - else - ledger.stateMap().snapShot(false)->visitNodes(visitAcc); - } - - // Store the transaction map - auto visitTx = [&](SHAMapTreeNode& node) { - Serializer s; - node.serializeWithPrefix(s); - db.store( - node.getType() == SHAMapNodeType::tnINNER ? hotUNKNOWN - : hotTRANSACTION_NODE, - std::move(s.modData()), - node.getHash().as_uint256(), - ledger.info().seq); - return true; - }; - - if (ledger.info().txHash.isNonZero()) - { - if (!ledger.txMap().isValid()) - return false; - ledger.txMap().snapShot(false)->visitNodes(visitTx); - } - - return true; - } - - void - checkLedger(TestData& data, DatabaseShard& db, Ledger const& ledger) - { - auto fetched = db.fetchLedger(ledger.info().hash, ledger.info().seq); - if (!BEAST_EXPECT(fetched)) - return; - - testLedgerData(data, fetched, ledger.info().seq - ledgersPerShard - 1); - - // verify the metadata/header info by serializing to json - BEAST_EXPECT( - getJson(LedgerFill{ - ledger, nullptr, LedgerFill::full | LedgerFill::expand}) == - getJson(LedgerFill{ - *fetched, nullptr, LedgerFill::full | LedgerFill::expand})); - - BEAST_EXPECT( - getJson(LedgerFill{ - ledger, nullptr, LedgerFill::full | LedgerFill::binary}) == - getJson(LedgerFill{ - *fetched, nullptr, LedgerFill::full | LedgerFill::binary})); - - // walk shamap and validate each node - auto fcompAcc = [&](SHAMapTreeNode& node) -> bool { - Serializer s; - node.serializeWithPrefix(s); - auto nSrc{NodeObject::createObject( - node.getType() == SHAMapNodeType::tnINNER ? hotUNKNOWN - : hotACCOUNT_NODE, - std::move(s.modData()), - node.getHash().as_uint256())}; - if (!BEAST_EXPECT(nSrc)) - return false; - - auto nDst = db.fetchNodeObject( - node.getHash().as_uint256(), ledger.info().seq); - if (!BEAST_EXPECT(nDst)) - return false; - - BEAST_EXPECT(isSame(nSrc, nDst)); - - return true; - }; - if (ledger.stateMap().getHash().isNonZero()) - ledger.stateMap().snapShot(false)->visitNodes(fcompAcc); - - auto fcompTx = [&](SHAMapTreeNode& node) -> bool { - Serializer s; - node.serializeWithPrefix(s); - auto nSrc{NodeObject::createObject( - node.getType() == SHAMapNodeType::tnINNER ? 
hotUNKNOWN - : hotTRANSACTION_NODE, - std::move(s.modData()), - node.getHash().as_uint256())}; - if (!BEAST_EXPECT(nSrc)) - return false; - - auto nDst = db.fetchNodeObject( - node.getHash().as_uint256(), ledger.info().seq); - if (!BEAST_EXPECT(nDst)) - return false; - - BEAST_EXPECT(isSame(nSrc, nDst)); - - return true; - }; - if (ledger.info().txHash.isNonZero()) - ledger.txMap().snapShot(false)->visitNodes(fcompTx); - } - - std::string - bitmask2Rangeset(std::uint64_t bitmask) - { - std::string set; - if (!bitmask) - return set; - bool empty = true; - - for (std::uint32_t i = 0; i < 64 && bitmask; i++) - { - if (bitmask & (1ll << i)) - { - if (!empty) - set += ","; - set += std::to_string(i); - empty = false; - } - } - - RangeSet rs; - BEAST_EXPECT(from_string(rs, set)); - return ripple::to_string(rs); - } - - std::unique_ptr - testConfig( - std::string const& shardDir, - std::string const& nodeDir = std::string()) - { - using namespace test::jtx; - - return envconfig([&](std::unique_ptr cfg) { - // Shard store configuration - cfg->overwrite(ConfigSection::shardDatabase(), "path", shardDir); - cfg->overwrite( - ConfigSection::shardDatabase(), - "max_historical_shards", - std::to_string(maxHistoricalShards)); - cfg->overwrite( - ConfigSection::shardDatabase(), - "ledgers_per_shard", - std::to_string(ledgersPerShard)); - cfg->overwrite( - ConfigSection::shardDatabase(), - "earliest_seq", - std::to_string(earliestSeq)); - - // Node store configuration - cfg->overwrite( - ConfigSection::nodeDatabase(), - "path", - nodeDir.empty() ? defNodeDir.path() : nodeDir); - cfg->overwrite( - ConfigSection::nodeDatabase(), - "ledgers_per_shard", - std::to_string(ledgersPerShard)); - cfg->overwrite( - ConfigSection::nodeDatabase(), - "earliest_seq", - std::to_string(earliestSeq)); - return cfg; - }); - } - - std::optional - waitShard( - DatabaseShard& shardStore, - std::uint32_t shardIndex, - std::chrono::seconds timeout = shardStoreTimeout) - { - auto const end{std::chrono::system_clock::now() + timeout}; - while (shardStore.getNumTasks() || - !boost::icl::contains( - shardStore.getShardInfo()->finalized(), shardIndex)) - { - if (!BEAST_EXPECT(std::chrono::system_clock::now() < end)) - return std::nullopt; - std::this_thread::sleep_for(std::chrono::milliseconds(100)); - } - - return shardIndex; - } - - std::optional - createShard( - TestData& data, - DatabaseShard& shardStore, - int maxShardIndex = 1, - int shardOffset = 0) - { - int shardIndex{-1}; - - for (std::uint32_t i = 0; i < ledgersPerShard; ++i) - { - auto const ledgerSeq{shardStore.prepareLedger( - (maxShardIndex + 1) * ledgersPerShard)}; - if (!BEAST_EXPECT(ledgerSeq != std::nullopt)) - return std::nullopt; - - shardIndex = shardStore.seqToShardIndex(*ledgerSeq); - - int const arrInd = *ledgerSeq - (ledgersPerShard * shardOffset) - - ledgersPerShard - 1; - BEAST_EXPECT( - arrInd >= 0 && arrInd < maxShardIndex * ledgersPerShard); - BEAST_EXPECT(saveLedger(shardStore, *data.ledgers_[arrInd])); - if (arrInd % ledgersPerShard == (ledgersPerShard - 1)) - { - uint256 const finalKey_{0}; - Serializer s; - s.add32(Shard::version); - s.add32(shardStore.firstLedgerSeq(shardIndex)); - s.add32(shardStore.lastLedgerSeq(shardIndex)); - s.addRaw(data.ledgers_[arrInd]->info().hash.data(), 256 / 8); - shardStore.store( - hotUNKNOWN, std::move(s.modData()), finalKey_, *ledgerSeq); - } - shardStore.setStored(data.ledgers_[arrInd]); - } - - return waitShard(shardStore, shardIndex); - } - - void - testStandalone() - { - testcase("Standalone"); - - using 
namespace test::jtx; - - beast::temp_dir shardDir; - DummyScheduler scheduler; - { - Env env{*this, testConfig(shardDir.path())}; - std::unique_ptr shardStore{ - make_ShardStore(env.app(), scheduler, 2, journal_)}; - - BEAST_EXPECT(shardStore); - BEAST_EXPECT(shardStore->init()); - BEAST_EXPECT(shardStore->ledgersPerShard() == ledgersPerShard); - BEAST_EXPECT(shardStore->seqToShardIndex(ledgersPerShard + 1) == 1); - BEAST_EXPECT(shardStore->seqToShardIndex(2 * ledgersPerShard) == 1); - BEAST_EXPECT( - shardStore->seqToShardIndex(2 * ledgersPerShard + 1) == 2); - BEAST_EXPECT( - shardStore->earliestShardIndex() == - (earliestSeq - 1) / ledgersPerShard); - BEAST_EXPECT(shardStore->firstLedgerSeq(1) == ledgersPerShard + 1); - BEAST_EXPECT(shardStore->lastLedgerSeq(1) == 2 * ledgersPerShard); - BEAST_EXPECT(shardStore->getRootDir().string() == shardDir.path()); - } - - { - Env env{*this, testConfig(shardDir.path())}; - std::unique_ptr shardStore{ - make_ShardStore(env.app(), scheduler, 2, journal_)}; - - env.app().config().overwrite( - ConfigSection::shardDatabase(), "ledgers_per_shard", "512"); - BEAST_EXPECT(!shardStore->init()); - } - - Env env{*this, testConfig(shardDir.path())}; - std::unique_ptr shardStore{ - make_ShardStore(env.app(), scheduler, 2, journal_)}; - - env.app().config().overwrite( - ConfigSection::shardDatabase(), - "earliest_seq", - std::to_string(std::numeric_limits::max())); - BEAST_EXPECT(!shardStore->init()); - } - - void - testCreateShard(std::uint64_t const seedValue) - { - testcase("Create shard"); - - using namespace test::jtx; - - beast::temp_dir shardDir; - Env env{*this, testConfig(shardDir.path())}; - DatabaseShard* db = env.app().getShardStore(); - BEAST_EXPECT(db); - - TestData data(seedValue); - if (!BEAST_EXPECT(data.makeLedgers(env))) - return; - - if (!createShard(data, *db, 1)) - return; - - for (std::uint32_t i = 0; i < ledgersPerShard; ++i) - checkLedger(data, *db, *data.ledgers_[i]); - } - - void - testReopenDatabase(std::uint64_t const seedValue) - { - testcase("Reopen shard store"); - - using namespace test::jtx; - - beast::temp_dir shardDir; - { - Env env{*this, testConfig(shardDir.path())}; - DatabaseShard* db = env.app().getShardStore(); - BEAST_EXPECT(db); - - TestData data(seedValue, 4, 2); - if (!BEAST_EXPECT(data.makeLedgers(env))) - return; - - for (auto i = 0; i < 2; ++i) - { - if (!createShard(data, *db, 2)) - return; - } - } - { - Env env{*this, testConfig(shardDir.path())}; - DatabaseShard* db = env.app().getShardStore(); - BEAST_EXPECT(db); - - TestData data(seedValue, 4, 2); - if (!BEAST_EXPECT(data.makeLedgers(env))) - return; - - for (std::uint32_t i = 1; i <= 2; ++i) - waitShard(*db, i); - - for (std::uint32_t i = 0; i < 2 * ledgersPerShard; ++i) - checkLedger(data, *db, *data.ledgers_[i]); - } - } - - void - testGetFinalShards(std::uint64_t const seedValue) - { - testcase("Get final shards"); - - using namespace test::jtx; - - beast::temp_dir shardDir; - Env env{*this, testConfig(shardDir.path())}; - DatabaseShard* db = env.app().getShardStore(); - BEAST_EXPECT(db); - - TestData data(seedValue, 2, nTestShards); - if (!BEAST_EXPECT(data.makeLedgers(env))) - return; - - BEAST_EXPECT(db->getShardInfo()->finalized().empty()); - - for (auto i = 0; i < nTestShards; ++i) - { - auto const shardIndex{createShard(data, *db, nTestShards)}; - if (!BEAST_EXPECT( - shardIndex && *shardIndex >= 1 && - *shardIndex <= nTestShards)) - { - return; - } - - BEAST_EXPECT(boost::icl::contains( - db->getShardInfo()->finalized(), *shardIndex)); - } - } - - 
void - testPrepareShards(std::uint64_t const seedValue) - { - testcase("Prepare shards"); - - using namespace test::jtx; - - beast::temp_dir shardDir; - Env env{*this, testConfig(shardDir.path())}; - DatabaseShard* db = env.app().getShardStore(); - BEAST_EXPECT(db); - - TestData data(seedValue, 1, nTestShards); - if (!BEAST_EXPECT(data.makeLedgers(env))) - return; - - BEAST_EXPECT(db->getPreShards() == ""); - BEAST_EXPECT(!db->prepareShards({})); - - std::uint64_t bitMask = 0; - for (std::uint32_t i = 0; i < nTestShards * 2; ++i) - { - std::uint32_t const shardIndex{ - randInt(data.rng_, nTestShards - 1) + 1}; - if (bitMask & (1ll << shardIndex)) - { - db->removePreShard(shardIndex); - bitMask &= ~(1ll << shardIndex); - } - else - { - BEAST_EXPECT(db->prepareShards({shardIndex})); - bitMask |= 1ll << shardIndex; - } - BEAST_EXPECT(db->getPreShards() == bitmask2Rangeset(bitMask)); - } - - // test illegal cases - // adding shards with too large number - BEAST_EXPECT(!db->prepareShards({0})); - BEAST_EXPECT(db->getPreShards() == bitmask2Rangeset(bitMask)); - BEAST_EXPECT(!db->prepareShards({nTestShards + 1})); - BEAST_EXPECT(db->getPreShards() == bitmask2Rangeset(bitMask)); - BEAST_EXPECT(!db->prepareShards({nTestShards + 2})); - BEAST_EXPECT(db->getPreShards() == bitmask2Rangeset(bitMask)); - - // create shards which are not prepared for import - BEAST_EXPECT(db->getShardInfo()->finalized().empty()); - - std::uint64_t bitMask2 = 0; - for (auto i = 0; i < nTestShards; ++i) - { - auto const shardIndex{createShard(data, *db, nTestShards)}; - if (!BEAST_EXPECT( - shardIndex && *shardIndex >= 1 && - *shardIndex <= nTestShards)) - { - return; - } - - BEAST_EXPECT(boost::icl::contains( - db->getShardInfo()->finalized(), *shardIndex)); - - bitMask2 |= 1ll << *shardIndex; - BEAST_EXPECT((bitMask & bitMask2) == 0); - if ((bitMask | bitMask2) == ((1ll << nTestShards) - 1) << 1) - break; - } - - // try to create another shard - BEAST_EXPECT( - db->prepareLedger((nTestShards + 1) * ledgersPerShard) == - std::nullopt); - } - - void - testImportShard(std::uint64_t const seedValue) - { - testcase("Import shard"); - - using namespace test::jtx; - - beast::temp_dir importDir; - TestData data(seedValue, 2); - - { - Env env{*this, testConfig(importDir.path())}; - DatabaseShard* db = env.app().getShardStore(); - BEAST_EXPECT(db); - - if (!BEAST_EXPECT(data.makeLedgers(env))) - return; - - if (!createShard(data, *db, 1)) - return; - - for (std::uint32_t i = 0; i < ledgersPerShard; ++i) - checkLedger(data, *db, *data.ledgers_[i]); - - data.ledgers_.clear(); - } - - boost::filesystem::path importPath(importDir.path()); - importPath /= "1"; - - { - beast::temp_dir shardDir; - Env env{*this, testConfig(shardDir.path())}; - DatabaseShard* db = env.app().getShardStore(); - BEAST_EXPECT(db); - - if (!BEAST_EXPECT(data.makeLedgers(env))) - return; - - BEAST_EXPECT(!db->importShard(1, importPath / "not_exist")); - BEAST_EXPECT(db->prepareShards({1})); - BEAST_EXPECT(db->getPreShards() == "1"); - - using namespace boost::filesystem; - remove_all(importPath / LgrDBName); - remove_all(importPath / TxDBName); - - if (!BEAST_EXPECT(db->importShard(1, importPath))) - return; - - BEAST_EXPECT(db->getPreShards() == ""); - - auto n = waitShard(*db, 1); - if (!BEAST_EXPECT(n && *n == 1)) - return; - - for (std::uint32_t i = 0; i < ledgersPerShard; ++i) - checkLedger(data, *db, *data.ledgers_[i]); - } - } - - void - testCorruptedDatabase(std::uint64_t const seedValue) - { - testcase("Corrupted shard store"); - - using namespace 
test::jtx; - - beast::temp_dir shardDir; - { - TestData data(seedValue, 4, 2); - { - Env env{*this, testConfig(shardDir.path())}; - DatabaseShard* db = env.app().getShardStore(); - BEAST_EXPECT(db); - - if (!BEAST_EXPECT(data.makeLedgers(env))) - return; - - for (auto i = 0; i < 2; ++i) - { - if (!BEAST_EXPECT(createShard(data, *db, 2))) - return; - } - } - - boost::filesystem::path path = shardDir.path(); - path /= std::string("2"); - path /= "nudb.dat"; - - FILE* f = fopen(path.string().c_str(), "r+b"); - if (!BEAST_EXPECT(f)) - return; - char buf[256]; - beast::rngfill(buf, sizeof(buf), data.rng_); - BEAST_EXPECT(fwrite(buf, 1, 256, f) == 256); - fclose(f); - } - - Env env{*this, testConfig(shardDir.path())}; - DatabaseShard* db = env.app().getShardStore(); - BEAST_EXPECT(db); - - TestData data(seedValue, 4, 2); - if (!BEAST_EXPECT(data.makeLedgers(env))) - return; - - for (std::uint32_t shardIndex = 1; shardIndex <= 1; ++shardIndex) - waitShard(*db, shardIndex); - - BEAST_EXPECT(boost::icl::contains(db->getShardInfo()->finalized(), 1)); - - for (std::uint32_t i = 0; i < 1 * ledgersPerShard; ++i) - checkLedger(data, *db, *data.ledgers_[i]); - } - - void - testIllegalFinalKey(std::uint64_t const seedValue) - { - testcase("Illegal finalKey"); - - using namespace test::jtx; - - for (int i = 0; i < 5; ++i) - { - beast::temp_dir shardDir; - { - Env env{*this, testConfig(shardDir.path())}; - DatabaseShard* db = env.app().getShardStore(); - BEAST_EXPECT(db); - - TestData data(seedValue + i, 2); - if (!BEAST_EXPECT(data.makeLedgers(env))) - return; - - int shardIndex{-1}; - for (std::uint32_t j = 0; j < ledgersPerShard; ++j) - { - auto const ledgerSeq{ - db->prepareLedger(2 * ledgersPerShard)}; - if (!BEAST_EXPECT(ledgerSeq != std::nullopt)) - return; - - shardIndex = db->seqToShardIndex(*ledgerSeq); - int arrInd = *ledgerSeq - ledgersPerShard - 1; - BEAST_EXPECT(arrInd >= 0 && arrInd < ledgersPerShard); - BEAST_EXPECT(saveLedger(*db, *data.ledgers_[arrInd])); - if (arrInd % ledgersPerShard == (ledgersPerShard - 1)) - { - uint256 const finalKey_{0}; - Serializer s; - s.add32(Shard::version + (i == 0)); - s.add32(db->firstLedgerSeq(shardIndex) + (i == 1)); - s.add32(db->lastLedgerSeq(shardIndex) - (i == 3)); - s.addRaw( - data.ledgers_[arrInd - (i == 4)] - ->info() - .hash.data(), - 256 / 8); - db->store( - hotUNKNOWN, - std::move(s.modData()), - finalKey_, - *ledgerSeq); - } - db->setStored(data.ledgers_[arrInd]); - } - - if (i == 2) - { - waitShard(*db, shardIndex); - BEAST_EXPECT(boost::icl::contains( - db->getShardInfo()->finalized(), 1)); - } - else - { - boost::filesystem::path path(shardDir.path()); - path /= "1"; - boost::system::error_code ec; - auto start = std::chrono::system_clock::now(); - auto end = start + shardStoreTimeout; - while (std::chrono::system_clock::now() < end && - boost::filesystem::exists(path, ec)) - { - std::this_thread::yield(); - } - - BEAST_EXPECT(db->getShardInfo()->finalized().empty()); - } - } - - { - Env env{*this, testConfig(shardDir.path())}; - DatabaseShard* db = env.app().getShardStore(); - BEAST_EXPECT(db); - - TestData data(seedValue + i, 2); - if (!BEAST_EXPECT(data.makeLedgers(env))) - return; - - if (i == 2) - { - waitShard(*db, 1); - BEAST_EXPECT(boost::icl::contains( - db->getShardInfo()->finalized(), 1)); - - for (std::uint32_t j = 0; j < ledgersPerShard; ++j) - checkLedger(data, *db, *data.ledgers_[j]); - } - else - BEAST_EXPECT(db->getShardInfo()->finalized().empty()); - } - } - } - - std::string - ripemd160File(std::string filename) - { - 
using beast::hash_append; - std::ifstream input(filename, std::ios::in | std::ios::binary); - char buf[4096]; - ripemd160_hasher h; - - while (input.read(buf, 4096), input.gcount() > 0) - hash_append(h, buf, input.gcount()); - - auto const binResult = static_cast(h); - const auto charDigest = binResult.data(); - std::string result; - boost::algorithm::hex( - charDigest, - charDigest + sizeof(binResult), - std::back_inserter(result)); - - return result; - } - - void - testDeterministicShard(std::uint64_t const seedValue) - { - testcase("Deterministic shards"); - - using namespace test::jtx; - - for (int i = 0; i < 2; i++) - { - beast::temp_dir shardDir; - { - Env env{*this, testConfig(shardDir.path())}; - DatabaseShard* db = env.app().getShardStore(); - BEAST_EXPECT(db); - - TestData data(seedValue, 4); - if (!BEAST_EXPECT(data.makeLedgers(env))) - return; - - if (!BEAST_EXPECT(createShard(data, *db) != std::nullopt)) - return; - } - - boost::filesystem::path path(shardDir.path()); - path /= "1"; - - auto static const ripemd160Key = - ripemd160File((path / "nudb.key").string()); - auto static const ripemd160Dat = - ripemd160File((path / "nudb.dat").string()); - - { - Env env{*this, testConfig(shardDir.path())}; - DatabaseShard* db = env.app().getShardStore(); - BEAST_EXPECT(db); - - TestData data(seedValue, 4); - if (!BEAST_EXPECT(data.makeLedgers(env))) - return; - - if (!BEAST_EXPECT(waitShard(*db, 1) != std::nullopt)) - return; - - for (std::uint32_t j = 0; j < ledgersPerShard; ++j) - checkLedger(data, *db, *data.ledgers_[j]); - } - - BEAST_EXPECT( - ripemd160File((path / "nudb.key").string()) == ripemd160Key); - BEAST_EXPECT( - ripemd160File((path / "nudb.dat").string()) == ripemd160Dat); - } - } - - void - testImportNodeStore(std::uint64_t const seedValue) - { - testcase("Import node store"); - - using namespace test::jtx; - - beast::temp_dir shardDir; - { - beast::temp_dir nodeDir; - Env env{*this, testConfig(shardDir.path(), nodeDir.path())}; - DatabaseShard* db = env.app().getShardStore(); - Database& ndb = env.app().getNodeStore(); - BEAST_EXPECT(db); - - TestData data(seedValue, 4, 2); - if (!BEAST_EXPECT(data.makeLedgers(env))) - return; - - for (std::uint32_t i = 0; i < 2 * ledgersPerShard; ++i) - BEAST_EXPECT(saveLedger(ndb, *data.ledgers_[i])); - - BEAST_EXPECT(db->getShardInfo()->finalized().empty()); - db->importDatabase(ndb); - for (std::uint32_t i = 1; i <= 2; ++i) - waitShard(*db, i); - - auto const finalShards{db->getShardInfo()->finalized()}; - for (std::uint32_t shardIndex : {1, 2}) - BEAST_EXPECT(boost::icl::contains(finalShards, shardIndex)); - } - { - Env env{*this, testConfig(shardDir.path())}; - DatabaseShard* db = env.app().getShardStore(); - BEAST_EXPECT(db); - - TestData data(seedValue, 4, 2); - if (!BEAST_EXPECT(data.makeLedgers(env))) - return; - - for (std::uint32_t i = 1; i <= 2; ++i) - waitShard(*db, i); - - auto const finalShards{db->getShardInfo()->finalized()}; - for (std::uint32_t shardIndex : {1, 2}) - BEAST_EXPECT(boost::icl::contains(finalShards, shardIndex)); - - for (std::uint32_t i = 0; i < 2 * ledgersPerShard; ++i) - checkLedger(data, *db, *data.ledgers_[i]); - } - } - - void - testImportWithOnlineDelete(std::uint64_t const seedValue) - { - testcase("Import node store with online delete"); - - using namespace test::jtx; - using test::CaptureLogs; - - beast::temp_dir shardDir; - beast::temp_dir nodeDir; - std::string capturedLogs; - - { - auto c = testConfig(shardDir.path(), nodeDir.path()); - auto& section = 
c->section(ConfigSection::nodeDatabase()); - section.set("online_delete", "550"); - section.set("advisory_delete", "1"); - - // Adjust the log level to capture relevant output - c->section(SECTION_RPC_STARTUP) - .append( - "{ \"command\": \"log_level\", \"severity\": \"trace\" " - "}"); - - std::unique_ptr<Logs> logs(new CaptureLogs(&capturedLogs)); - Env env{*this, std::move(c), std::move(logs)}; - - DatabaseShard* db = env.app().getShardStore(); - Database& ndb = env.app().getNodeStore(); - BEAST_EXPECT(db); - - auto& store = env.app().getSHAMapStore(); - - // Allow online delete to delete the startup ledgers - // so that it will take some time for the import to - // catch up to the point of the next rotation - store.setCanDelete(10); - - // Create some ledgers for the shard store to import - auto const shardCount = 5; - TestData data(seedValue, 4, shardCount); - if (!BEAST_EXPECT(data.makeLedgers(env))) - return; - - store.rendezvous(); - auto const lastRotated = store.getLastRotated(); - BEAST_EXPECT(lastRotated >= 553 && lastRotated < 1103); - - // Start the import - db->importDatabase(ndb); - - while (!db->getDatabaseImportSequence()) - { - // Wait until the import starts - std::this_thread::sleep_for(std::chrono::milliseconds(1)); - } - - // Enable unimpeded online deletion now that the import has started - store.setCanDelete(std::numeric_limits<std::uint32_t>::max()); - - auto pauseVerifier = std::thread([lastRotated, &store, db, this] { - // The import should still be running when this thread starts - BEAST_EXPECT(db->getDatabaseImportSequence()); - auto rotationProgress = lastRotated; - while (auto const ledgerSeq = db->getDatabaseImportSequence()) - { - // Make sure database rotations don't interfere - // with the import - - auto const last = store.getLastRotated(); - if (last != rotationProgress) - { - // A rotation occurred during shard import.
Not - // necessarily an error - - BEAST_EXPECT( - !ledgerSeq || ledgerSeq >= rotationProgress); - rotationProgress = last; - } - } - }); - - auto join = [&pauseVerifier]() { - if (pauseVerifier.joinable()) - pauseVerifier.join(); - }; - - // Create more ledgers to trigger online deletion - data = TestData(seedValue * 2); - if (!BEAST_EXPECT(data.makeLedgers(env, shardCount))) - { - join(); - return; - } - - join(); - BEAST_EXPECT(store.getLastRotated() != lastRotated); - } - - // Database rotation should have been postponed at some - // point during the import - auto const expectedLogMessage = - "rotation would interfere with ShardStore import"; - BEAST_EXPECT( - capturedLogs.find(expectedLogMessage) != std::string::npos); - } - - void - testImportWithHistoricalPaths(std::uint64_t const seedValue) - { - testcase("Import with historical paths"); - - using namespace test::jtx; - - // Test importing with multiple historical paths - { - beast::temp_dir shardDir; - std::array historicalDirs; - std::array historicalPaths; - - std::transform( - historicalDirs.begin(), - historicalDirs.end(), - historicalPaths.begin(), - [](const beast::temp_dir& dir) { return dir.path(); }); - - beast::temp_dir nodeDir; - auto c = testConfig(shardDir.path(), nodeDir.path()); - - auto& historyPaths = c->section(SECTION_HISTORICAL_SHARD_PATHS); - historyPaths.append( - {historicalPaths[0].string(), - historicalPaths[1].string(), - historicalPaths[2].string(), - historicalPaths[3].string()}); - - Env env{*this, std::move(c)}; - DatabaseShard* db = env.app().getShardStore(); - Database& ndb = env.app().getNodeStore(); - BEAST_EXPECT(db); - - auto const shardCount = 4; - - TestData data(seedValue, 4, shardCount); - if (!BEAST_EXPECT(data.makeLedgers(env))) - return; - - for (std::uint32_t i = 0; i < shardCount * ledgersPerShard; ++i) - BEAST_EXPECT(saveLedger(ndb, *data.ledgers_[i])); - - BEAST_EXPECT(db->getShardInfo()->finalized().empty()); - - db->importDatabase(ndb); - for (std::uint32_t i = 1; i <= shardCount; ++i) - waitShard(*db, i); - - auto const final{db->getShardInfo()->finalized()}; - for (std::uint32_t shardIndex : {1, 2, 3, 4}) - BEAST_EXPECT(boost::icl::contains(final, shardIndex)); - - auto const mainPathCount = std::distance( - boost::filesystem::directory_iterator(shardDir.path()), - boost::filesystem::directory_iterator()); - - // Only the two most recent shards - // should be stored at the main path - BEAST_EXPECT(mainPathCount == 2); - - auto const historicalPathCount = std::accumulate( - historicalPaths.begin(), - historicalPaths.end(), - 0, - [](int const sum, boost::filesystem::path const& path) { - return sum + - std::distance( - boost::filesystem::directory_iterator(path), - boost::filesystem::directory_iterator()); - }); - - // All historical shards should be stored - // at historical paths - BEAST_EXPECT(historicalPathCount == shardCount - 2); - } - - // Test importing with a single historical path - { - beast::temp_dir shardDir; - beast::temp_dir historicalDir; - beast::temp_dir nodeDir; - - auto c = testConfig(shardDir.path(), nodeDir.path()); - - auto& historyPaths = c->section(SECTION_HISTORICAL_SHARD_PATHS); - historyPaths.append({historicalDir.path()}); - - Env env{*this, std::move(c)}; - DatabaseShard* db = env.app().getShardStore(); - Database& ndb = env.app().getNodeStore(); - BEAST_EXPECT(db); - - auto const shardCount = 4; - - TestData data(seedValue * 2, 4, shardCount); - if (!BEAST_EXPECT(data.makeLedgers(env))) - return; - - for (std::uint32_t i = 0; i < shardCount * 
ledgersPerShard; ++i) - BEAST_EXPECT(saveLedger(ndb, *data.ledgers_[i])); - - BEAST_EXPECT(db->getShardInfo()->finalized().empty()); - - db->importDatabase(ndb); - for (std::uint32_t i = 1; i <= shardCount; ++i) - waitShard(*db, i); - - auto const finalShards{db->getShardInfo()->finalized()}; - for (std::uint32_t shardIndex : {1, 2, 3, 4}) - BEAST_EXPECT(boost::icl::contains(finalShards, shardIndex)); - - auto const mainPathCount = std::distance( - boost::filesystem::directory_iterator(shardDir.path()), - boost::filesystem::directory_iterator()); - - // Only the two most recent shards - // should be stored at the main path - BEAST_EXPECT(mainPathCount == 2); - - auto const historicalPathCount = std::distance( - boost::filesystem::directory_iterator(historicalDir.path()), - boost::filesystem::directory_iterator()); - - // All historical shards should be stored - // at historical paths - BEAST_EXPECT(historicalPathCount == shardCount - 2); - } - } - - void - testPrepareWithHistoricalPaths(std::uint64_t const seedValue) - { - testcase("Prepare with historical paths"); - - using namespace test::jtx; - - // Create the primary shard directory - beast::temp_dir primaryDir; - auto config{testConfig(primaryDir.path())}; - - // Create four historical directories - std::array historicalDirs; - { - auto& paths{config->section(SECTION_HISTORICAL_SHARD_PATHS)}; - for (auto const& dir : historicalDirs) - paths.append(dir.path()); - } - - Env env{*this, std::move(config)}; - - // Create some shards - std::uint32_t constexpr numShards{4}; - TestData data(seedValue, 4, numShards); - if (!BEAST_EXPECT(data.makeLedgers(env))) - return; - - auto shardStore{env.app().getShardStore()}; - BEAST_EXPECT(shardStore); - - for (auto i = 0; i < numShards; ++i) - { - auto const shardIndex{createShard(data, *shardStore, numShards)}; - if (!BEAST_EXPECT( - shardIndex && *shardIndex >= 1 && *shardIndex <= numShards)) - { - return; - } - } - - { - // Confirm finalized shards are in the shard store - auto const finalized{shardStore->getShardInfo()->finalized()}; - BEAST_EXPECT(boost::icl::length(finalized) == numShards); - BEAST_EXPECT(boost::icl::first(finalized) == 1); - BEAST_EXPECT(boost::icl::last(finalized) == numShards); - } - - using namespace boost::filesystem; - auto const dirContains = [](beast::temp_dir const& dir, - std::uint32_t shardIndex) { - boost::filesystem::path const path(std::to_string(shardIndex)); - for (auto const& it : directory_iterator(dir.path())) - if (boost::filesystem::path(it).stem() == path) - return true; - return false; - }; - auto const historicalDirsContains = [&](std::uint32_t shardIndex) { - for (auto const& dir : historicalDirs) - if (dirContains(dir, shardIndex)) - return true; - return false; - }; - - // Confirm two most recent shards are in the primary shard directory - for (auto const shardIndex : {numShards - 1, numShards}) - { - BEAST_EXPECT(dirContains(primaryDir, shardIndex)); - BEAST_EXPECT(!historicalDirsContains(shardIndex)); - } - - // Confirm remaining shards are in the historical shard directories - for (auto shardIndex = 1; shardIndex < numShards - 1; ++shardIndex) - { - BEAST_EXPECT(!dirContains(primaryDir, shardIndex)); - BEAST_EXPECT(historicalDirsContains(shardIndex)); - } - - // Create some more shards to exercise recent shard rotation - data = TestData(seedValue * 2, 4, numShards); - if (!BEAST_EXPECT(data.makeLedgers(env, numShards))) - return; - - for (auto i = 0; i < numShards; ++i) - { - auto const shardIndex{ - createShard(data, *shardStore, numShards * 2, 
numShards)}; - if (!BEAST_EXPECT( - shardIndex && *shardIndex >= numShards + 1 && - *shardIndex <= numShards * 2)) - { - return; - } - } - - { - // Confirm finalized shards are in the shard store - auto const finalized{shardStore->getShardInfo()->finalized()}; - BEAST_EXPECT(boost::icl::length(finalized) == numShards * 2); - BEAST_EXPECT(boost::icl::first(finalized) == 1); - BEAST_EXPECT(boost::icl::last(finalized) == numShards * 2); - } - - // Confirm two most recent shards are in the primary shard directory - for (auto const shardIndex : {numShards * 2 - 1, numShards * 2}) - { - BEAST_EXPECT(dirContains(primaryDir, shardIndex)); - BEAST_EXPECT(!historicalDirsContains(shardIndex)); - } - - // Confirm remaining shards are in the historical shard directories - for (auto shardIndex = 1; shardIndex < numShards * 2 - 1; ++shardIndex) - { - BEAST_EXPECT(!dirContains(primaryDir, shardIndex)); - BEAST_EXPECT(historicalDirsContains(shardIndex)); - } - } - - void - testOpenShardManagement(std::uint64_t const seedValue) - { - testcase("Open shard management"); - - using namespace test::jtx; - - beast::temp_dir shardDir; - Env env{*this, testConfig(shardDir.path())}; - - auto shardStore{env.app().getShardStore()}; - BEAST_EXPECT(shardStore); - - // Create one shard more than the open final limit - auto const openFinalLimit{env.app().config().getValueFor( - SizedItem::openFinalLimit, std::nullopt)}; - auto const numShards{openFinalLimit + 1}; - - TestData data(seedValue, 2, numShards); - if (!BEAST_EXPECT(data.makeLedgers(env))) - return; - - BEAST_EXPECT(shardStore->getShardInfo()->finalized().empty()); - - int oldestShardIndex{-1}; - for (auto i = 0; i < numShards; ++i) - { - auto shardIndex{createShard(data, *shardStore, numShards)}; - if (!BEAST_EXPECT( - shardIndex && *shardIndex >= 1 && *shardIndex <= numShards)) - { - return; - } - - BEAST_EXPECT(boost::icl::contains( - shardStore->getShardInfo()->finalized(), *shardIndex)); - - if (oldestShardIndex == -1) - oldestShardIndex = *shardIndex; - } - - // The number of open shards exceeds the open limit by one. - // A sweep will close enough shards to be within the limit. 
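// The fetch below exercises on-demand reopening: the index arithmetic
// maps a ledger sequence back to data.ledgers_, whose element 0 holds
// sequence ledgersPerShard + 1, so a ledger's array slot is
// seq - ledgersPerShard - 1.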
- shardStore->sweep(); - - // Read from the closed shard and automatically open it - auto const ledgerSeq{shardStore->lastLedgerSeq(oldestShardIndex)}; - auto const index{ledgerSeq - ledgersPerShard - 1}; - BEAST_EXPECT(shardStore->fetchNodeObject( - data.ledgers_[index]->info().hash, ledgerSeq)); - } - - void - testShardInfo(std::uint64_t const seedValue) - { - testcase("Shard info"); - - using namespace test::jtx; - beast::temp_dir shardDir; - Env env{*this, testConfig(shardDir.path())}; - - auto shardStore{env.app().getShardStore()}; - BEAST_EXPECT(shardStore); - - // Check shard store is empty - { - auto const shardInfo{shardStore->getShardInfo()}; - BEAST_EXPECT( - shardInfo->msgTimestamp().time_since_epoch().count() == 0); - BEAST_EXPECT(shardInfo->finalizedToString().empty()); - BEAST_EXPECT(shardInfo->finalized().empty()); - BEAST_EXPECT(shardInfo->incompleteToString().empty()); - BEAST_EXPECT(shardInfo->incomplete().empty()); - } - - // Create an incomplete shard with index 1 - TestData data(seedValue, dataSizeMax, 2); - if (!BEAST_EXPECT(data.makeLedgers(env))) - return; - if (!BEAST_EXPECT(shardStore->prepareLedger(2 * ledgersPerShard))) - return; - - // Check shard is incomplete - { - auto const shardInfo{shardStore->getShardInfo()}; - BEAST_EXPECT(shardInfo->finalizedToString().empty()); - BEAST_EXPECT(shardInfo->finalized().empty()); - BEAST_EXPECT(shardInfo->incompleteToString() == "1:0"); - BEAST_EXPECT( - shardInfo->incomplete().find(1) != - shardInfo->incomplete().end()); - } - - // Finalize the shard - { - auto shardIndex{createShard(data, *shardStore)}; - if (!BEAST_EXPECT(shardIndex && *shardIndex == 1)) - return; - } - - // Check shard is finalized - { - auto const shardInfo{shardStore->getShardInfo()}; - BEAST_EXPECT(shardInfo->finalizedToString() == "1"); - BEAST_EXPECT(boost::icl::contains(shardInfo->finalized(), 1)); - BEAST_EXPECT(shardInfo->incompleteToString().empty()); - BEAST_EXPECT(shardInfo->incomplete().empty()); - BEAST_EXPECT(!shardInfo->update(1, ShardState::finalized, 0)); - BEAST_EXPECT(shardInfo->setFinalizedFromString("2")); - BEAST_EXPECT(shardInfo->finalizedToString() == "2"); - BEAST_EXPECT(boost::icl::contains(shardInfo->finalized(), 2)); - } - - // Create an incomplete shard with index 2 - if (!BEAST_EXPECT(shardStore->prepareLedger(3 * ledgersPerShard))) - return; - - // Store 10 percent of the ledgers - for (std::uint32_t i = 0; i < (ledgersPerShard / 10); ++i) - { - auto const ledgerSeq{ - shardStore->prepareLedger(3 * ledgersPerShard)}; - if (!BEAST_EXPECT(ledgerSeq != std::nullopt)) - return; - - auto const arrInd{*ledgerSeq - ledgersPerShard - 1}; - if (!BEAST_EXPECT(saveLedger(*shardStore, *data.ledgers_[arrInd]))) - return; - - shardStore->setStored(data.ledgers_[arrInd]); - } - - auto const shardInfo{shardStore->getShardInfo()}; - BEAST_EXPECT(shardInfo->incompleteToString() == "2:10"); - BEAST_EXPECT( - shardInfo->incomplete().find(2) != shardInfo->incomplete().end()); - - auto const timeStamp{env.app().timeKeeper().now()}; - shardInfo->setMsgTimestamp(timeStamp); - BEAST_EXPECT(timeStamp == shardInfo->msgTimestamp()); - - // Check message - auto const msg{shardInfo->makeMessage(env.app())}; - Serializer s; - s.add32(HashPrefix::shardInfo); - - BEAST_EXPECT(msg.timestamp() != 0); - s.add32(msg.timestamp()); - - // Verify incomplete shard - { - BEAST_EXPECT(msg.incomplete_size() == 1); - - auto const& incomplete{msg.incomplete(0)}; - BEAST_EXPECT(incomplete.shardindex() == 2); - s.add32(incomplete.shardindex()); - - BEAST_EXPECT( - 
static_cast(incomplete.state()) == - ShardState::acquire); - s.add32(incomplete.state()); - - BEAST_EXPECT(incomplete.has_progress()); - BEAST_EXPECT(incomplete.progress() == 10); - s.add32(incomplete.progress()); - } - - // Verify finalized shard - BEAST_EXPECT(msg.has_finalized()); - BEAST_EXPECT(msg.finalized() == "1"); - s.addRaw(msg.finalized().data(), msg.finalized().size()); - - // Verify public key - auto slice{makeSlice(msg.publickey())}; - BEAST_EXPECT(publicKeyType(slice)); - - // Verify signature - BEAST_EXPECT(verify( - PublicKey(slice), s.slice(), makeSlice(msg.signature()), false)); - - BEAST_EXPECT(msg.peerchain_size() == 0); - } - - void - testSQLiteDatabase(std::uint64_t const seedValue) - { - testcase("SQLite Database"); - - using namespace test::jtx; - - beast::temp_dir shardDir; - Env env{*this, testConfig(shardDir.path())}; - - auto shardStore{env.app().getShardStore()}; - BEAST_EXPECT(shardStore); - - auto const shardCount = 3; - TestData data(seedValue, 3, shardCount); - if (!BEAST_EXPECT(data.makeLedgers(env))) - return; - - BEAST_EXPECT(shardStore->getShardInfo()->finalized().empty()); - BEAST_EXPECT(shardStore->getShardInfo()->incompleteToString().empty()); - - auto rdb = - dynamic_cast(&env.app().getRelationalDatabase()); - - BEAST_EXPECT(rdb); - - for (std::uint32_t i = 0; i < shardCount; ++i) - { - // Populate the shard store - - auto n = createShard(data, *shardStore, shardCount); - if (!BEAST_EXPECT(n && *n >= 1 && *n <= shardCount)) - return; - } - - // Close these databases to force the SQLiteDatabase - // to use the shard databases and lookup tables. - rdb->closeLedgerDB(); - rdb->closeTransactionDB(); - - // Lambda for comparing Ledger objects - auto infoCmp = [](auto const& a, auto const& b) { - return a.hash == b.hash && a.txHash == b.txHash && - a.accountHash == b.accountHash && - a.parentHash == b.parentHash && a.drops == b.drops && - a.accepted == b.accepted && a.closeFlags == b.closeFlags && - a.closeTimeResolution == b.closeTimeResolution && - a.closeTime == b.closeTime; - }; - - for (auto const& ledger : data.ledgers_) - { - // Compare each test ledger to the data retrieved - // from the SQLiteDatabase class - - if (shardStore->seqToShardIndex(ledger->seq()) < - shardStore->earliestShardIndex() || - ledger->info().seq < shardStore->earliestLedgerSeq()) - continue; - - auto info = rdb->getLedgerInfoByHash(ledger->info().hash); - - BEAST_EXPECT(info); - BEAST_EXPECT(infoCmp(*info, ledger->info())); - - for (auto const& transaction : ledger->txs) - { - // Compare each test transaction to the data - // retrieved from the SQLiteDatabase class - - error_code_i error{rpcSUCCESS}; - - auto reference = rdb->getTransaction( - transaction.first->getTransactionID(), {}, error); - - BEAST_EXPECT(error == rpcSUCCESS); - if (!BEAST_EXPECT(reference.index() == 0)) - continue; - - auto txn = std::get<0>(reference).first->getSTransaction(); - - BEAST_EXPECT( - transaction.first->getFullText() == txn->getFullText()); - } - } - - // Create additional ledgers to test a pathway in - // 'ripple::saveLedgerMeta' wherein fetching the - // accepted ledger fails - data = TestData(seedValue * 2, 4, 1); - if (!BEAST_EXPECT(data.makeLedgers(env, shardCount))) - return; - } - -public: - DatabaseShard_test() : journal_("DatabaseShard_test", *this) - { - } - - void - run() override - { - auto seedValue = [] { - static std::uint64_t seedValue = 41; - seedValue += 10; - return seedValue; - }; - - testStandalone(); - testCreateShard(seedValue()); - testReopenDatabase(seedValue()); 
- testGetFinalShards(seedValue()); - testPrepareShards(seedValue()); - testImportShard(seedValue()); - testCorruptedDatabase(seedValue()); - testIllegalFinalKey(seedValue()); - testDeterministicShard(seedValue()); - testImportNodeStore(seedValue()); - testImportWithOnlineDelete(seedValue()); - testImportWithHistoricalPaths(seedValue()); - testPrepareWithHistoricalPaths(seedValue()); - testOpenShardManagement(seedValue()); - testShardInfo(seedValue()); - testSQLiteDatabase(seedValue()); - } -}; - -BEAST_DEFINE_TESTSUITE_MANUAL(DatabaseShard, NodeStore, ripple); - -} // namespace NodeStore -} // namespace ripple diff --git a/src/test/nodestore/Database_test.cpp b/src/test/nodestore/Database_test.cpp index 6774857dfc6..d866247da89 100644 --- a/src/test/nodestore/Database_test.cpp +++ b/src/test/nodestore/Database_test.cpp @@ -616,37 +616,6 @@ class Database_test : public TestBase std::strcmp(e.what(), "earliest_seq set more than once") == 0); } - - // Verify default ledgers per shard - { - std::unique_ptr db = - Manager::instance().make_Database( - megabytes(4), scheduler, 2, nodeParams, journal_); - BEAST_EXPECT( - db->ledgersPerShard() == DEFAULT_LEDGERS_PER_SHARD); - } - - // Set an invalid ledgers per shard - try - { - nodeParams.set("ledgers_per_shard", "100"); - std::unique_ptr db = - Manager::instance().make_Database( - megabytes(4), scheduler, 2, nodeParams, journal_); - } - catch (std::runtime_error const& e) - { - BEAST_EXPECT( - std::strcmp(e.what(), "Invalid ledgers_per_shard") == 0); - } - - // Set a valid ledgers per shard - nodeParams.set("ledgers_per_shard", "256"); - std::unique_ptr db = Manager::instance().make_Database( - megabytes(4), scheduler, 2, nodeParams, journal_); - - // Verify database uses the ledgers per shard - BEAST_EXPECT(db->ledgersPerShard() == 256); } } diff --git a/src/test/rpc/NodeToShardRPC_test.cpp b/src/test/rpc/NodeToShardRPC_test.cpp deleted file mode 100644 index ec1ff367c1e..00000000000 --- a/src/test/rpc/NodeToShardRPC_test.cpp +++ /dev/null @@ -1,414 +0,0 @@ -//------------------------------------------------------------------------------ -/* - This file is part of rippled: https://github.com/ripple/rippled - Copyright (c) 2021 Ripple Labs Inc. - - Permission to use, copy, modify, and/or distribute this software for any - purpose with or without fee is hereby granted, provided that the above - copyright notice and this permission notice appear in all copies. - - THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
-*/ -//============================================================================== - -#include -#include -#include -#include -#include -#include -#include - -namespace ripple { -namespace test { - -class NodeToShardRPC_test : public beast::unit_test::suite -{ - bool - importCompleted( - NodeStore::DatabaseShard* shardStore, - std::uint8_t const numberOfShards, - Json::Value const& result) - { - auto const info = shardStore->getShardInfo(); - - // Assume completed if the import isn't running - auto const completed = - result[jss::error_message] == "Database import not running"; - - if (completed) - { - BEAST_EXPECT( - info->incomplete().size() + info->finalized().size() == - numberOfShards); - } - - return completed; - } - -public: - void - testDisabled() - { - testcase("Disabled"); - - beast::temp_dir tempDir; - - jtx::Env env = [&] { - auto c = jtx::envconfig(); - auto& sectionNode = c->section(ConfigSection::nodeDatabase()); - sectionNode.set("earliest_seq", "257"); - sectionNode.set("ledgers_per_shard", "256"); - c->setupControl(true, true, true); - - return jtx::Env(*this, std::move(c)); - }(); - - std::uint8_t const numberOfShards = 10; - - // Create some ledgers so that we can initiate a - // shard store database import. - for (int i = 0; i < 256 * (numberOfShards + 1); ++i) - { - env.close(); - } - - { - auto shardStore = env.app().getShardStore(); - if (!BEAST_EXPECT(!shardStore)) - return; - } - - { - // Try the node_to_shard status RPC command. Should fail. - - Json::Value jvParams; - jvParams[jss::action] = "status"; - - auto const result = env.rpc( - "json", "node_to_shard", to_string(jvParams))[jss::result]; - - BEAST_EXPECT(result[jss::error_code] == rpcNOT_ENABLED); - } - - { - // Try to start a shard store import via the RPC - // interface. Should fail. - - Json::Value jvParams; - jvParams[jss::action] = "start"; - - auto const result = env.rpc( - "json", "node_to_shard", to_string(jvParams))[jss::result]; - - BEAST_EXPECT(result[jss::error_code] == rpcNOT_ENABLED); - } - - { - // Try the node_to_shard status RPC command. Should fail. - - Json::Value jvParams; - jvParams[jss::action] = "status"; - - auto const result = env.rpc( - "json", "node_to_shard", to_string(jvParams))[jss::result]; - - BEAST_EXPECT(result[jss::error_code] == rpcNOT_ENABLED); - } - } - - void - testStart() - { - testcase("Start"); - - beast::temp_dir tempDir; - - jtx::Env env = [&] { - auto c = jtx::envconfig(); - auto& section = c->section(ConfigSection::shardDatabase()); - section.set("path", tempDir.path()); - section.set("max_historical_shards", "20"); - section.set("ledgers_per_shard", "256"); - section.set("earliest_seq", "257"); - auto& sectionNode = c->section(ConfigSection::nodeDatabase()); - sectionNode.set("earliest_seq", "257"); - sectionNode.set("ledgers_per_shard", "256"); - c->setupControl(true, true, true); - - return jtx::Env(*this, std::move(c)); - }(); - - std::uint8_t const numberOfShards = 10; - - // Create some ledgers so that we can initiate a - // shard store database import. - for (int i = 0; i < env.app().getShardStore()->ledgersPerShard() * - (numberOfShards + 1); - ++i) - { - env.close(); - } - - auto shardStore = env.app().getShardStore(); - if (!BEAST_EXPECT(shardStore)) - return; - - { - // Initiate a shard store import via the RPC - // interface. 
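// The "start" action corresponds to this JSON-RPC body (the exact
// shape is pinned down by the RPCCall tests further below):
//   { "method": "node_to_shard", "params": [{ "action": "start" }] }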
- - Json::Value jvParams; - jvParams[jss::action] = "start"; - - auto const result = env.rpc( - "json", "node_to_shard", to_string(jvParams))[jss::result]; - - BEAST_EXPECT( - result[jss::message] == "Database import initiated..."); - } - - while (!shardStore->getDatabaseImportSequence()) - { - // Wait until the import starts - std::this_thread::sleep_for(std::chrono::milliseconds(1)); - } - - { - // Verify that the import is in progress with - // the node_to_shard status RPC command - - Json::Value jvParams; - jvParams[jss::action] = "status"; - - auto const result = env.rpc( - "json", "node_to_shard", to_string(jvParams))[jss::result]; - - BEAST_EXPECT( - result[jss::status] == "success" || - importCompleted(shardStore, numberOfShards, result)); - - std::chrono::seconds const maxWait{180}; - - { - auto const start = std::chrono::system_clock::now(); - while (true) - { - // Verify that the status object accurately - // reflects import progress. - - auto const completeShards = - shardStore->getShardInfo()->finalized(); - - if (!completeShards.empty()) - { - auto const result = env.rpc( - "json", - "node_to_shard", - to_string(jvParams))[jss::result]; - - if (!importCompleted( - shardStore, numberOfShards, result)) - { - BEAST_EXPECT(result[jss::firstShardIndex] == 1); - BEAST_EXPECT(result[jss::lastShardIndex] == 10); - } - } - - if (boost::icl::contains(completeShards, 1)) - { - auto const result = env.rpc( - "json", - "node_to_shard", - to_string(jvParams))[jss::result]; - - BEAST_EXPECT( - result[jss::currentShardIndex] >= 1 || - importCompleted( - shardStore, numberOfShards, result)); - - break; - } - - if (std::this_thread::sleep_for( - std::chrono::milliseconds{100}); - std::chrono::system_clock::now() - start > maxWait) - { - BEAST_EXPECTS( - false, - "Import timeout: could just be a slow machine."); - break; - } - } - } - - { - // Wait for the import to complete - auto const start = std::chrono::system_clock::now(); - while (!boost::icl::contains( - shardStore->getShardInfo()->finalized(), 10)) - { - if (std::this_thread::sleep_for( - std::chrono::milliseconds{100}); - std::chrono::system_clock::now() - start > maxWait) - { - BEAST_EXPECT(importCompleted( - shardStore, numberOfShards, result)); - break; - } - } - } - } - } - - void - testStop() - { - testcase("Stop"); - - beast::temp_dir tempDir; - - jtx::Env env = [&] { - auto c = jtx::envconfig(); - auto& section = c->section(ConfigSection::shardDatabase()); - section.set("path", tempDir.path()); - section.set("max_historical_shards", "20"); - section.set("ledgers_per_shard", "256"); - section.set("earliest_seq", "257"); - auto& sectionNode = c->section(ConfigSection::nodeDatabase()); - sectionNode.set("earliest_seq", "257"); - sectionNode.set("ledgers_per_shard", "256"); - c->setupControl(true, true, true); - - return jtx::Env( - *this, std::move(c), nullptr, beast::severities::kDisabled); - }(); - - std::uint8_t const numberOfShards = 10; - - // Create some ledgers so that we can initiate a - // shard store database import. - for (int i = 0; i < env.app().getShardStore()->ledgersPerShard() * - (numberOfShards + 1); - ++i) - { - env.close(); - } - - auto shardStore = env.app().getShardStore(); - if (!BEAST_EXPECT(shardStore)) - return; - - { - // Initiate a shard store import via the RPC - // interface. 
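// Same startup sequence as testStart; once at least one shard has
// finalized, a "stop" action is issued and status is polled until
// the handler reports "Database import not running".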
- - Json::Value jvParams; - jvParams[jss::action] = "start"; - - auto const result = env.rpc( - "json", "node_to_shard", to_string(jvParams))[jss::result]; - - BEAST_EXPECT( - result[jss::message] == "Database import initiated..."); - } - - { - // Verify that the import is in progress with - // the node_to_shard status RPC command - - Json::Value jvParams; - jvParams[jss::action] = "status"; - - auto const result = env.rpc( - "json", "node_to_shard", to_string(jvParams))[jss::result]; - - BEAST_EXPECT( - result[jss::status] == "success" || - importCompleted(shardStore, numberOfShards, result)); - - std::chrono::seconds const maxWait{30}; - auto const start = std::chrono::system_clock::now(); - - while (shardStore->getShardInfo()->finalized().empty()) - { - // Wait for at least one shard to complete - - if (std::this_thread::sleep_for(std::chrono::milliseconds{100}); - std::chrono::system_clock::now() - start > maxWait) - { - BEAST_EXPECTS( - false, "Import timeout: could just be a slow machine."); - break; - } - } - } - - { - Json::Value jvParams; - jvParams[jss::action] = "stop"; - - auto const result = env.rpc( - "json", "node_to_shard", to_string(jvParams))[jss::result]; - - BEAST_EXPECT( - result[jss::message] == "Database import halt initiated..." || - importCompleted(shardStore, numberOfShards, result)); - } - - std::chrono::seconds const maxWait{30}; - auto const start = std::chrono::system_clock::now(); - - while (true) - { - // Wait until we can verify that the import has - // stopped - - Json::Value jvParams; - jvParams[jss::action] = "status"; - - auto const result = env.rpc( - "json", "node_to_shard", to_string(jvParams))[jss::result]; - - // When the import has stopped, polling the - // status returns an error - if (result.isMember(jss::error)) - { - if (BEAST_EXPECT(result.isMember(jss::error_message))) - { - BEAST_EXPECT( - result[jss::error_message] == - "Database import not running"); - } - - break; - } - - if (std::this_thread::sleep_for(std::chrono::milliseconds{100}); - std::chrono::system_clock::now() - start > maxWait) - { - BEAST_EXPECTS( - false, "Import timeout: could just be a slow machine."); - break; - } - } - } - - void - run() override - { - testDisabled(); - testStart(); - testStop(); - } -}; - -BEAST_DEFINE_TESTSUITE_MANUAL(NodeToShardRPC, rpc, ripple); -} // namespace test -} // namespace ripple diff --git a/src/test/rpc/RPCCall_test.cpp b/src/test/rpc/RPCCall_test.cpp index f3aaf468a9e..5f13c9799a1 100644 --- a/src/test/rpc/RPCCall_test.cpp +++ b/src/test/rpc/RPCCall_test.cpp @@ -2545,231 +2545,6 @@ static RPCCallTestData const rpcCallTestArray[] = { ] })"}, - // download_shard - // -------------------------------------------------------------- - {"download_shard: minimal.", - __LINE__, - { - "download_shard", - "20", - "url_NotValidated", - }, - RPCCallTestData::no_exception, - R"({ - "method" : "download_shard", - "params" : [ - { - "api_version" : %API_VER%, - "shards" : [ - { - "index" : 20, - "url" : "url_NotValidated" - } - ] - } - ] - })"}, - {"download_shard:", - __LINE__, - { - "download_shard", - "20", - "url_NotValidated", - }, - RPCCallTestData::no_exception, - R"({ - "method" : "download_shard", - "params" : [ - { - "api_version" : %API_VER%, - "shards" : [ - { - "index" : 20, - "url" : "url_NotValidated" - } - ] - } - ] - })"}, - {"download_shard: many shards.", - __LINE__, - { - "download_shard", - "200000000", - "url_NotValidated0", - "199999999", - "url_NotValidated1", - "199999998", - "url_NotValidated2", - "199999997", - 
"url_NotValidated3", - }, - RPCCallTestData::no_exception, - R"({ - "method" : "download_shard", - "params" : [ - { - "api_version" : %API_VER%, - "shards" : [ - { - "index" : 200000000, - "url" : "url_NotValidated0" - }, - { - "index" : 199999999, - "url" : "url_NotValidated1" - }, - { - "index" : 199999998, - "url" : "url_NotValidated2" - }, - { - "index" : 199999997, - "url" : "url_NotValidated3" - } - ] - } - ] - })"}, - {"download_shard: many shards.", - __LINE__, - { - "download_shard", - "2000000", - "url_NotValidated0", - "2000001", - "url_NotValidated1", - "2000002", - "url_NotValidated2", - "2000003", - "url_NotValidated3", - "2000004", - "url_NotValidated4", - }, - RPCCallTestData::no_exception, - R"({ - "method" : "download_shard", - "params" : [ - { - "api_version" : %API_VER%, - "shards" : [ - { - "index" : 2000000, - "url" : "url_NotValidated0" - }, - { - "index" : 2000001, - "url" : "url_NotValidated1" - }, - { - "index" : 2000002, - "url" : "url_NotValidated2" - }, - { - "index" : 2000003, - "url" : "url_NotValidated3" - }, - { - "index" : 2000004, - "url" : "url_NotValidated4" - } - ] - } - ] - })"}, - {"download_shard: too few arguments.", - __LINE__, - {"download_shard", "20"}, - RPCCallTestData::no_exception, - R"({ - "method" : "download_shard", - "params" : [ - { - "error" : "badSyntax", - "error_code" : 1, - "error_message" : "Syntax error." - } - ] - })"}, - {// Note: this should return an error but not throw. - "download_shard: novalidate too few arguments.", - __LINE__, - {"download_shard", "novalidate", "20"}, - RPCCallTestData::bad_cast, - R"()"}, - {"download_shard: novalidate at end.", - __LINE__, - { - "download_shard", - "20", - "url_NotValidated", - "novalidate", - }, - RPCCallTestData::no_exception, - R"({ - "method" : "download_shard", - "params" : [ - { - "api_version" : %API_VER%, - "shards" : [ - { - "index" : 20, - "url" : "url_NotValidated" - } - ] - } - ] - })"}, - {"download_shard: novalidate in middle.", - __LINE__, - { - "download_shard", - "20", - "url_NotValidated20", - "novalidate", - "200", - "url_NotValidated200", - }, - RPCCallTestData::no_exception, - R"({ - "method" : "download_shard", - "params" : [ - { - "error" : "invalidParams", - "error_code" : 31, - "error_message" : "Invalid parameters." - } - ] - })"}, - {// Note: this should return an error but not throw. 
- "download_shard: arguments swapped.", - __LINE__, - { - "download_shard", - "url_NotValidated", - "20", - }, - RPCCallTestData::bad_cast, - R"()"}, - {"download_shard: index too small.", - __LINE__, - { - "download_shard", - "-1", - "url_NotValidated", - }, - RPCCallTestData::bad_cast, - R"()"}, - {"download_shard: index too big.", - __LINE__, - { - "download_shard", - "4294967296", - "url_NotValidated", - }, - RPCCallTestData::bad_cast, - R"()"}, - // feature // --------------------------------------------------------------------- {"feature: minimal.", @@ -4232,75 +4007,6 @@ static RPCCallTestData const rpcCallTestArray[] = { ] })"}, - // node_to_shard - // ------------------------------------------------------------------- - {"node_to_shard: status.", - __LINE__, - {"node_to_shard", "status"}, - RPCCallTestData::no_exception, - R"({ - "method" : "node_to_shard", - "params" : [ - { - "api_version" : %API_VER%, - "action" : "status" - } - ] - })"}, - {"node_to_shard: start.", - __LINE__, - {"node_to_shard", "start"}, - RPCCallTestData::no_exception, - R"({ - "method" : "node_to_shard", - "params" : [ - { - "api_version" : %API_VER%, - "action" : "start" - } - ] - })"}, - {"node_to_shard: stop.", - __LINE__, - {"node_to_shard", "stop"}, - RPCCallTestData::no_exception, - R"({ - "method" : "node_to_shard", - "params" : [ - { - "api_version" : %API_VER%, - "action" : "stop" - } - ] - })"}, - {"node_to_shard: too many arguments.", - __LINE__, - {"node_to_shard", "start", "stop"}, - RPCCallTestData::no_exception, - R"({ - "method" : "node_to_shard", - "params" : [ - { - "error" : "badSyntax", - "error_code" : 1, - "error_message" : "Syntax error." - } - ] - })"}, - {"node_to_shard: invalid argument.", - __LINE__, - {"node_to_shard", "invalid"}, - RPCCallTestData::no_exception, - R"({ - "method" : "node_to_shard", - "params" : [ - { - "api_version" : %API_VER%, - "action" : "invalid" - } - ] - })"}, - // owner_info // ------------------------------------------------------------------ {"owner_info: minimal.", diff --git a/src/test/rpc/ShardArchiveHandler_test.cpp b/src/test/rpc/ShardArchiveHandler_test.cpp deleted file mode 100644 index 82f12fe49a9..00000000000 --- a/src/test/rpc/ShardArchiveHandler_test.cpp +++ /dev/null @@ -1,705 +0,0 @@ -//------------------------------------------------------------------------------ -/* - This file is part of rippled: https://github.com/ripple/rippled - Copyright (c) 2020 Ripple Labs Inc. - - Permission to use, copy, modify, and/or distribute this software for any - purpose with or without fee is hereby granted, provided that the above - copyright notice and this permission notice appear in all copies. - - THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
-*/ -//============================================================================== - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -namespace ripple { -namespace test { - -class ShardArchiveHandler_test : public beast::unit_test::suite -{ - using Downloads = std::vector>; - - std::shared_ptr - createServer(jtx::Env& env, bool ssl = true) - { - std::vector list; - list.push_back(TrustedPublisherServer::randomValidator()); - return make_TrustedPublisherServer( - env.app().getIOService(), - list, - env.timeKeeper().now() + std::chrono::seconds{3600}, - // No future VLs - {}, - ssl); - } - -public: - // Test the shard downloading module by queueing - // a download and verifying the contents of the - // state database. - void - testSingleDownloadAndStateDB() - { - testcase("testSingleDownloadAndStateDB"); - - beast::temp_dir tempDir; - - auto c = jtx::envconfig(); - auto& section = c->section(ConfigSection::shardDatabase()); - section.set("path", tempDir.path()); - section.set("max_historical_shards", "20"); - c->setupControl(true, true, true); - - jtx::Env env(*this, std::move(c)); - auto handler = env.app().getShardArchiveHandler(); - BEAST_EXPECT(handler); - BEAST_EXPECT(dynamic_cast(handler) == nullptr); - - std::string const rawUrl = "https://foo:443/1.tar.lz4"; - parsedURL url; - - parseUrl(url, rawUrl); - handler->add(1, {url, rawUrl}); - - { - std::lock_guard lock(handler->m_); - std::uint64_t rowCount = 0; - - readArchiveDB( - *handler->sqlDB_, [&](std::string const& url, int state) { - BEAST_EXPECT(state == 1); - BEAST_EXPECT(url == rawUrl); - ++rowCount; - }); - - BEAST_EXPECT(rowCount == 1); - } - - handler->release(); - } - - // Test the shard downloading module by queueing - // three downloads and verifying the contents of - // the state database. - void - testDownloadsAndStateDB() - { - testcase("testDownloadsAndStateDB"); - - beast::temp_dir tempDir; - - auto c = jtx::envconfig(); - auto& section = c->section(ConfigSection::shardDatabase()); - section.set("path", tempDir.path()); - section.set("max_historical_shards", "20"); - c->setupControl(true, true, true); - - jtx::Env env(*this, std::move(c)); - auto handler = env.app().getShardArchiveHandler(); - BEAST_EXPECT(handler); - BEAST_EXPECT(dynamic_cast(handler) == nullptr); - - Downloads const dl = { - {1, "https://foo:443/1.tar.lz4"}, - {2, "https://foo:443/2.tar.lz4"}, - {3, "https://foo:443/3.tar.lz4"}}; - - for (auto const& entry : dl) - { - parsedURL url; - parseUrl(url, entry.second); - handler->add(entry.first, {url, entry.second}); - } - - { - std::lock_guard lock(handler->m_); - std::uint64_t pos = 0; - - readArchiveDB( - *handler->sqlDB_, [&](std::string const& url, int state) { - BEAST_EXPECT(state == dl[pos].first); - BEAST_EXPECT(url == dl[pos].second); - ++pos; - }); - - BEAST_EXPECT(pos == dl.size()); - } - - handler->release(); - } - - // Test the shard downloading module by initiating - // and completing ten downloads and verifying the - // contents of the filesystem and the handler's - // archives. 
- void - testDownloadsAndFileSystem() - { - testcase("testDownloadsAndFileSystem"); - - beast::temp_dir tempDir; - - auto c = jtx::envconfig(); - { - auto& section{c->section(ConfigSection::shardDatabase())}; - section.set("path", tempDir.path()); - section.set("max_historical_shards", "20"); - section.set("ledgers_per_shard", "256"); - section.set("earliest_seq", "257"); - } - { - auto& section{c->section(ConfigSection::nodeDatabase())}; - section.set("ledgers_per_shard", "256"); - section.set("earliest_seq", "257"); - } - c->setupControl(true, true, true); - - jtx::Env env( - *this, std::move(c), nullptr, beast::severities::kDisabled); - - std::uint8_t const numberOfDownloads = 10; - - // Create some ledgers so that the ShardArchiveHandler - // can verify the last ledger hash for the shard - // downloads. - for (int i = 0; i < env.app().getShardStore()->ledgersPerShard() * - (numberOfDownloads + 1); - ++i) - { - env.close(); - } - - auto handler = env.app().getShardArchiveHandler(); - BEAST_EXPECT(handler); - BEAST_EXPECT(dynamic_cast(handler) == nullptr); - - auto server = createServer(env); - auto host = server->local_endpoint().address().to_string(); - auto port = std::to_string(server->local_endpoint().port()); - server->stop(); - - Downloads const dl = [count = numberOfDownloads, &host, &port] { - Downloads ret; - - for (int i = 1; i <= count; ++i) - { - ret.push_back( - {i, - (boost::format("https://%s:%d/%d.tar.lz4") % host % port % - i) - .str()}); - } - - return ret; - }(); - - for (auto const& entry : dl) - { - parsedURL url; - parseUrl(url, entry.second); - handler->add(entry.first, {url, entry.second}); - } - - BEAST_EXPECT(handler->start()); - - auto stateDir = - RPC::ShardArchiveHandler::getDownloadDirectory(env.app().config()); - - std::unique_lock lock(handler->m_); - - BEAST_EXPECT( - boost::filesystem::exists(stateDir) || handler->archives_.empty()); - - using namespace std::chrono_literals; - auto waitMax = 60s; - - while (!handler->archives_.empty()) - { - lock.unlock(); - std::this_thread::sleep_for(1s); - - if (waitMax -= 1s; waitMax <= 0s) - { - BEAST_EXPECT(false); - break; - } - - lock.lock(); - } - - BEAST_EXPECT(!boost::filesystem::exists(stateDir)); - } - - // Test the shard downloading module by initiating - // and completing ten downloads and verifying the - // contents of the filesystem and the handler's - // archives. Then restart the application and ensure - // that the handler is created and started automatically. - void - testDownloadsAndRestart() - { - testcase("testDownloadsAndRestart"); - - beast::temp_dir tempDir; - - { - auto c = jtx::envconfig(); - { - auto& section{c->section(ConfigSection::shardDatabase())}; - section.set("path", tempDir.path()); - section.set("max_historical_shards", "20"); - section.set("ledgers_per_shard", "256"); - section.set("earliest_seq", "257"); - } - { - auto& section{c->section(ConfigSection::nodeDatabase())}; - section.set("ledgers_per_shard", "256"); - section.set("earliest_seq", "257"); - } - c->setupControl(true, true, true); - - jtx::Env env( - *this, std::move(c), nullptr, beast::severities::kDisabled); - - std::uint8_t const numberOfDownloads = 10; - - // Create some ledgers so that the ShardArchiveHandler - // can verify the last ledger hash for the shard - // downloads. 
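// With ledgers_per_shard set to 256 above, closing
// ledgersPerShard * (numberOfDownloads + 1) ledgers completes all
// ten shards being downloaded, so each queued archive has a
// last-ledger hash available for verification.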
- for (int i = 0; i < env.app().getShardStore()->ledgersPerShard() * - (numberOfDownloads + 1); - ++i) - { - env.close(); - } - - auto handler = env.app().getShardArchiveHandler(); - BEAST_EXPECT(handler); - BEAST_EXPECT( - dynamic_cast(handler) == nullptr); - - auto server = createServer(env); - auto host = server->local_endpoint().address().to_string(); - auto port = std::to_string(server->local_endpoint().port()); - server->stop(); - - Downloads const dl = [count = numberOfDownloads, &host, &port] { - Downloads ret; - - for (int i = 1; i <= count; ++i) - { - ret.push_back( - {i, - (boost::format("https://%s:%d/%d.tar.lz4") % host % - port % i) - .str()}); - } - - return ret; - }(); - - for (auto const& entry : dl) - { - parsedURL url; - parseUrl(url, entry.second); - handler->add(entry.first, {url, entry.second}); - } - - auto stateDir = RPC::ShardArchiveHandler::getDownloadDirectory( - env.app().config()); - - boost::filesystem::copy_file( - stateDir / stateDBName, - boost::filesystem::path(tempDir.path()) / stateDBName); - - BEAST_EXPECT(handler->start()); - - std::unique_lock lock(handler->m_); - - BEAST_EXPECT( - boost::filesystem::exists(stateDir) || - handler->archives_.empty()); - - using namespace std::chrono_literals; - auto waitMax = 60s; - - while (!handler->archives_.empty()) - { - lock.unlock(); - std::this_thread::sleep_for(1s); - - if (waitMax -= 1s; waitMax <= 0s) - { - BEAST_EXPECT(false); - break; - } - - lock.lock(); - } - - BEAST_EXPECT(!boost::filesystem::exists(stateDir)); - - boost::filesystem::create_directory(stateDir); - - boost::filesystem::copy_file( - boost::filesystem::path(tempDir.path()) / stateDBName, - stateDir / stateDBName); - } - - auto c = jtx::envconfig(); - { - auto& section{c->section(ConfigSection::shardDatabase())}; - section.set("path", tempDir.path()); - section.set("max_historical_shards", "20"); - section.set("shard_verification_retry_interval", "1"); - section.set("shard_verification_max_attempts", "10000"); - section.set("ledgers_per_shard", "256"); - section.set("earliest_seq", "257"); - } - { - auto& section{c->section(ConfigSection::nodeDatabase())}; - section.set("ledgers_per_shard", "256"); - section.set("earliest_seq", "257"); - } - c->setupControl(true, true, true); - - jtx::Env env( - *this, std::move(c), nullptr, beast::severities::kDisabled); - std::uint8_t const numberOfDownloads = 10; - - // Create some ledgers so that the ShardArchiveHandler - // can verify the last ledger hash for the shard - // downloads. 
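// The restarted environment picks the handler back up from the state
// database restored above; shard_verification_retry_interval and
// shard_verification_max_attempts keep it retrying until the ledgers
// recreated below are available for last-ledger hash verification.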
- for (int i = 0; i < env.app().getShardStore()->ledgersPerShard() * - (numberOfDownloads + 1); - ++i) - { - env.close(); - } - - auto handler = env.app().getShardArchiveHandler(); - BEAST_EXPECT(dynamic_cast(handler) != nullptr); - - auto stateDir = - RPC::ShardArchiveHandler::getDownloadDirectory(env.app().config()); - - std::unique_lock lock(handler->m_); - - BEAST_EXPECT( - boost::filesystem::exists(stateDir) || handler->archives_.empty()); - - using namespace std::chrono_literals; - auto waitMax = 60s; - - while (!handler->archives_.empty()) - { - lock.unlock(); - std::this_thread::sleep_for(1s); - - if (waitMax -= 1s; waitMax <= 0s) - { - BEAST_EXPECT(false); - break; - } - - lock.lock(); - } - - BEAST_EXPECT(!boost::filesystem::exists(stateDir)); - } - - // Ensure that downloads fail when the shard - // database cannot store any more shards - void - testShardCountFailure() - { - testcase("testShardCountFailure"); - std::string capturedLogs; - - { - beast::temp_dir tempDir; - - auto c = jtx::envconfig(); - { - auto& section{c->section(ConfigSection::shardDatabase())}; - section.set("path", tempDir.path()); - section.set("max_historical_shards", "1"); - section.set("ledgers_per_shard", "256"); - section.set("earliest_seq", "257"); - } - { - auto& section{c->section(ConfigSection::nodeDatabase())}; - section.set("ledgers_per_shard", "256"); - section.set("earliest_seq", "257"); - } - c->setupControl(true, true, true); - - std::unique_ptr logs(new CaptureLogs(&capturedLogs)); - jtx::Env env(*this, std::move(c), std::move(logs)); - - std::uint8_t const numberOfDownloads = 10; - - // Create some ledgers so that the ShardArchiveHandler - // can verify the last ledger hash for the shard - // downloads. - for (int i = 0; i < env.app().getShardStore()->ledgersPerShard() * - (numberOfDownloads + 1); - ++i) - { - env.close(); - } - - auto handler = env.app().getShardArchiveHandler(); - BEAST_EXPECT(handler); - BEAST_EXPECT( - dynamic_cast(handler) == nullptr); - - auto server = createServer(env); - auto host = server->local_endpoint().address().to_string(); - auto port = std::to_string(server->local_endpoint().port()); - server->stop(); - - Downloads const dl = [count = numberOfDownloads, &host, &port] { - Downloads ret; - - for (int i = 1; i <= count; ++i) - { - ret.push_back( - {i, - (boost::format("https://%s:%d/%d.tar.lz4") % host % - port % i) - .str()}); - } - - return ret; - }(); - - for (auto const& entry : dl) - { - parsedURL url; - parseUrl(url, entry.second); - handler->add(entry.first, {url, entry.second}); - } - - BEAST_EXPECT(!handler->start()); - auto stateDir = RPC::ShardArchiveHandler::getDownloadDirectory( - env.app().config()); - - handler->release(); - BEAST_EXPECT(!boost::filesystem::exists(stateDir)); - } - - auto const expectedErrorMessage = - "shards 1, 2, 3, 4, 5, 6, 7, 8, 9, 10 maximum number of historical " - "shards reached"; - BEAST_EXPECT( - capturedLogs.find(expectedErrorMessage) != std::string::npos); - - { - beast::temp_dir tempDir; - - auto c = jtx::envconfig(); - { - auto& section{c->section(ConfigSection::shardDatabase())}; - section.set("path", tempDir.path()); - section.set("max_historical_shards", "0"); - section.set("ledgers_per_shard", "256"); - section.set("earliest_seq", "257"); - } - { - auto& section{c->section(ConfigSection::nodeDatabase())}; - section.set("ledgers_per_shard", "256"); - section.set("earliest_seq", "257"); - } - c->setupControl(true, true, true); - - std::unique_ptr logs(new CaptureLogs(&capturedLogs)); - jtx::Env env(*this, 
std::move(c), std::move(logs)); - - std::uint8_t const numberOfDownloads = 1; - - // Create some ledgers so that the ShardArchiveHandler - // can verify the last ledger hash for the shard - // downloads. - for (int i = 0; i < env.app().getShardStore()->ledgersPerShard() * - ((numberOfDownloads * 3) + 1); - ++i) - { - env.close(); - } - - auto handler = env.app().getShardArchiveHandler(); - BEAST_EXPECT(handler); - BEAST_EXPECT( - dynamic_cast(handler) == nullptr); - - auto server = createServer(env); - auto host = server->local_endpoint().address().to_string(); - auto port = std::to_string(server->local_endpoint().port()); - server->stop(); - - Downloads const dl = [count = numberOfDownloads, &host, &port] { - Downloads ret; - - for (int i = 1; i <= count; ++i) - { - ret.push_back( - {i, - (boost::format("https://%s:%d/%d.tar.lz4") % host % - port % i) - .str()}); - } - - return ret; - }(); - - for (auto const& entry : dl) - { - parsedURL url; - parseUrl(url, entry.second); - handler->add(entry.first, {url, entry.second}); - } - - BEAST_EXPECT(!handler->start()); - auto stateDir = RPC::ShardArchiveHandler::getDownloadDirectory( - env.app().config()); - - handler->release(); - BEAST_EXPECT(!boost::filesystem::exists(stateDir)); - } - - auto const expectedErrorMessage2 = - "shard 1 maximum number of historical shards reached"; - BEAST_EXPECT( - capturedLogs.find(expectedErrorMessage2) != std::string::npos); - } - - // Ensure that downloads fail when the shard - // database has already stored one of the - // queued shards - void - testRedundantShardFailure() - { - testcase("testRedundantShardFailure"); - std::string capturedLogs; - - { - beast::temp_dir tempDir; - - auto c = jtx::envconfig(); - { - auto& section{c->section(ConfigSection::shardDatabase())}; - section.set("path", tempDir.path()); - section.set("max_historical_shards", "1"); - section.set("ledgers_per_shard", "256"); - section.set("earliest_seq", "257"); - } - { - auto& section{c->section(ConfigSection::nodeDatabase())}; - section.set("ledgers_per_shard", "256"); - section.set("earliest_seq", "257"); - } - c->setupControl(true, true, true); - - std::unique_ptr logs(new CaptureLogs(&capturedLogs)); - jtx::Env env( - *this, - std::move(c), - std::move(logs), - beast::severities::kDebug); - - std::uint8_t const numberOfDownloads = 10; - - // Create some ledgers so that the ShardArchiveHandler - // can verify the last ledger hash for the shard - // downloads. 
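// prepareShards({1}) below marks shard 1 as queued with the shard
// store before the handler adds its downloads, so queueing the same
// shard again is rejected with "shard 1 is already queued for import".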
- for (int i = 0; i < env.app().getShardStore()->ledgersPerShard() * - (numberOfDownloads + 1); - ++i) - { - env.close(); - } - - BEAST_EXPECT(env.app().getShardStore()->prepareShards({1})); - - auto handler = env.app().getShardArchiveHandler(); - BEAST_EXPECT(handler); - BEAST_EXPECT( - dynamic_cast(handler) == nullptr); - - auto server = createServer(env); - auto host = server->local_endpoint().address().to_string(); - auto port = std::to_string(server->local_endpoint().port()); - server->stop(); - - Downloads const dl = [count = numberOfDownloads, &host, &port] { - Downloads ret; - - for (int i = 1; i <= count; ++i) - { - ret.push_back( - {i, - (boost::format("https://%s:%d/%d.tar.lz4") % host % - port % i) - .str()}); - } - - return ret; - }(); - - for (auto const& entry : dl) - { - parsedURL url; - parseUrl(url, entry.second); - handler->add(entry.first, {url, entry.second}); - } - - BEAST_EXPECT(!handler->start()); - auto stateDir = RPC::ShardArchiveHandler::getDownloadDirectory( - env.app().config()); - - handler->release(); - BEAST_EXPECT(!boost::filesystem::exists(stateDir)); - } - - auto const expectedErrorMessage = - "shard 1 is already queued for import"; - BEAST_EXPECT( - capturedLogs.find(expectedErrorMessage) != std::string::npos); - } - - void - run() override - { - testSingleDownloadAndStateDB(); - testDownloadsAndStateDB(); - testDownloadsAndFileSystem(); - testDownloadsAndRestart(); - testShardCountFailure(); - testRedundantShardFailure(); - } -}; - -BEAST_DEFINE_TESTSUITE_PRIO(ShardArchiveHandler, app, ripple, 3); - -} // namespace test -} // namespace ripple diff --git a/src/test/shamap/common.h b/src/test/shamap/common.h index 2280c77d4a1..db5a2c40acf 100644 --- a/src/test/shamap/common.h +++ b/src/test/shamap/common.h @@ -20,7 +20,6 @@ #ifndef RIPPLE_SHAMAP_TESTS_COMMON_H_INCLUDED #define RIPPLE_SHAMAP_TESTS_COMMON_H_INCLUDED -#include #include #include #include @@ -81,12 +80,14 @@ class TestNodeFamily : public Family return j_; } - std::shared_ptr getFullBelowCache(std::uint32_t) override + std::shared_ptr + getFullBelowCache() override { return fbCache_; } - std::shared_ptr getTreeNodeCache(std::uint32_t) override + std::shared_ptr + getTreeNodeCache() override { return tnCache_; } @@ -98,12 +99,6 @@ class TestNodeFamily : public Family tnCache_->sweep(); } - bool - isShardBacked() const override - { - return true; - } - void missingNodeAcquireBySeq(std::uint32_t refNum, uint256 const& nodeHash) override diff --git a/src/xrpld/app/consensus/RCLConsensus.cpp b/src/xrpld/app/consensus/RCLConsensus.cpp index 7b061e10b31..69fb371578a 100644 --- a/src/xrpld/app/consensus/RCLConsensus.cpp +++ b/src/xrpld/app/consensus/RCLConsensus.cpp @@ -35,7 +35,6 @@ #include #include #include -#include #include #include #include diff --git a/src/xrpld/app/ledger/InboundLedger.h b/src/xrpld/app/ledger/InboundLedger.h index 62b6925d59c..13f603e79d0 100644 --- a/src/xrpld/app/ledger/InboundLedger.h +++ b/src/xrpld/app/ledger/InboundLedger.h @@ -42,7 +42,6 @@ class InboundLedger final : public TimeoutCounter, // These are the reasons we might acquire a ledger enum class Reason { HISTORY, // Acquiring past ledger - SHARD, // Acquiring for shard GENERIC, // Generic other reasons CONSENSUS // We believe the consensus round requires this ledger }; diff --git a/src/xrpld/app/ledger/LedgerMaster.h b/src/xrpld/app/ledger/LedgerMaster.h index 7921773e3f0..5149424e285 100644 --- a/src/xrpld/app/ledger/LedgerMaster.h +++ b/src/xrpld/app/ledger/LedgerMaster.h @@ -357,9 +357,6 @@ class LedgerMaster : 
public AbstractFetchPackContainer // The last ledger we handled fetching history std::shared_ptr mHistLedger; - // The last ledger we handled fetching for a shard - std::shared_ptr mShardLedger; - // Fully validated ledger, whether or not we have the ledger resident. std::pair mLastValidLedger{uint256(), 0}; diff --git a/src/xrpld/app/ledger/detail/InboundLedger.cpp b/src/xrpld/app/ledger/detail/InboundLedger.cpp index b98f24aed43..5b0c3469111 100644 --- a/src/xrpld/app/ledger/detail/InboundLedger.cpp +++ b/src/xrpld/app/ledger/detail/InboundLedger.cpp @@ -25,7 +25,6 @@ #include #include #include -#include #include #include #include @@ -112,40 +111,6 @@ InboundLedger::init(ScopedLockType& collectionLock) if (failed_) return; - if (!complete_) - { - auto shardStore = app_.getShardStore(); - if (mReason == Reason::SHARD) - { - if (!shardStore) - { - JLOG(journal_.error()) - << "Acquiring shard with no shard store available"; - failed_ = true; - return; - } - - mHaveHeader = false; - mHaveTransactions = false; - mHaveState = false; - mLedger.reset(); - - tryDB(app_.getShardFamily()->db()); - if (failed_) - return; - } - else if (shardStore && mSeq >= shardStore->earliestLedgerSeq()) - { - if (auto l = shardStore->fetchLedger(hash_, mSeq)) - { - mHaveHeader = true; - mHaveTransactions = true; - mHaveState = true; - complete_ = true; - mLedger = std::move(l); - } - } - } if (!complete_) { addPeers(); @@ -160,7 +125,7 @@ InboundLedger::init(ScopedLockType& collectionLock) mLedger->read(keylet::fees())); mLedger->setImmutable(); - if (mReason == Reason::HISTORY || mReason == Reason::SHARD) + if (mReason == Reason::HISTORY) return; app_.getLedgerMaster().storeLedger(mLedger); @@ -200,8 +165,6 @@ InboundLedger::checkLocal() { if (mLedger) tryDB(mLedger->stateMap().family().db()); - else if (mReason == Reason::SHARD) - tryDB(app_.getShardFamily()->db()); else tryDB(app_.getNodeFamily().db()); if (failed_ || complete_) @@ -283,8 +246,7 @@ InboundLedger::tryDB(NodeStore::Database& srcDB) mLedger = std::make_shared( deserializePrefixedHeader(makeSlice(data)), app_.config(), - mReason == Reason::SHARD ? *app_.getShardFamily() - : app_.getNodeFamily()); + app_.getNodeFamily()); if (mLedger->info().hash != hash_ || (mSeq != 0 && mSeq != mLedger->info().seq)) { @@ -495,9 +457,6 @@ InboundLedger::done() mLedger->setImmutable(); switch (mReason) { - case Reason::SHARD: - app_.getShardStore()->setStored(mLedger); - [[fallthrough]]; case Reason::HISTORY: app_.getInboundLedgers().onLedgerFetched(); break; @@ -551,9 +510,7 @@ InboundLedger::trigger(std::shared_ptr const& peer, TriggerReason reason) if (!mHaveHeader) { - tryDB( - mReason == Reason::SHARD ? app_.getShardFamily()->db() - : app_.getNodeFamily().db()); + tryDB(app_.getNodeFamily().db()); if (failed_) { JLOG(journal_.warn()) << " failed local for " << hash_; @@ -854,8 +811,7 @@ InboundLedger::takeHeader(std::string const& data) if (complete_ || failed_ || mHaveHeader) return true; - auto* f = mReason == Reason::SHARD ? 
app_.getShardFamily() - : &app_.getNodeFamily(); + auto* f = &app_.getNodeFamily(); mLedger = std::make_shared( deserializeHeader(makeSlice(data)), app_.config(), *f); if (mLedger->info().hash != hash_ || diff --git a/src/xrpld/app/ledger/detail/InboundLedgers.cpp b/src/xrpld/app/ledger/detail/InboundLedgers.cpp index 04964d2a921..2b4d2161b63 100644 --- a/src/xrpld/app/ledger/detail/InboundLedgers.cpp +++ b/src/xrpld/app/ledger/detail/InboundLedgers.cpp @@ -22,7 +22,6 @@ #include #include #include -#include #include #include #include @@ -70,9 +69,6 @@ class InboundLedgersImp : public InboundLedgers InboundLedger::Reason reason) override { assert(hash.isNonZero()); - assert( - reason != InboundLedger::Reason::SHARD || - (seq != 0 && app_.getShardStore())); // probably not the right rule if (app_.getOPs().isNeedNetworkLedger() && @@ -119,25 +115,6 @@ class InboundLedgersImp : public InboundLedgers if (!inbound->isComplete()) return {}; - if (reason == InboundLedger::Reason::HISTORY) - { - if (inbound->getLedger()->stateMap().family().isShardBacked()) - app_.getNodeStore().storeLedger(inbound->getLedger()); - } - else if (reason == InboundLedger::Reason::SHARD) - { - auto shardStore = app_.getShardStore(); - if (!shardStore) - { - JLOG(j_.error()) - << "Acquiring shard with no shard store available"; - return {}; - } - if (inbound->getLedger()->stateMap().family().isShardBacked()) - shardStore->setStored(inbound->getLedger()); - else - shardStore->storeLedger(inbound->getLedger()); - } return inbound->getLedger(); } @@ -285,7 +262,7 @@ class InboundLedgersImp : public InboundLedgers } // Should only be called with an inboundledger that has - // a reason of history or shard + // a reason of history void onLedgerFetched() override { diff --git a/src/xrpld/app/ledger/detail/LedgerMaster.cpp b/src/xrpld/app/ledger/detail/LedgerMaster.cpp index f03004fd14c..dab8f838249 100644 --- a/src/xrpld/app/ledger/detail/LedgerMaster.cpp +++ b/src/xrpld/app/ledger/detail/LedgerMaster.cpp @@ -39,7 +39,6 @@ #include #include #include -#include #include #include #include @@ -830,38 +829,13 @@ LedgerMaster::tryFill(std::shared_ptr ledger) void LedgerMaster::getFetchPack(LedgerIndex missing, InboundLedger::Reason reason) { - LedgerIndex const ledgerIndex([&]() { - if (reason == InboundLedger::Reason::SHARD) - { - // Do not acquire a ledger sequence greater - // than the last ledger in the shard - auto const shardStore{app_.getShardStore()}; - auto const shardIndex{shardStore->seqToShardIndex(missing)}; - return std::min(missing + 1, shardStore->lastLedgerSeq(shardIndex)); - } - return missing + 1; - }()); + LedgerIndex const ledgerIndex = missing + 1; auto const haveHash{getLedgerHashForHistory(ledgerIndex, reason)}; if (!haveHash || haveHash->isZero()) { - if (reason == InboundLedger::Reason::SHARD) - { - auto const shardStore{app_.getShardStore()}; - auto const shardIndex{shardStore->seqToShardIndex(missing)}; - if (missing < shardStore->lastLedgerSeq(shardIndex)) - { - JLOG(m_journal.error()) - << "No hash for fetch pack. " - << "Missing ledger sequence " << missing - << " while acquiring shard " << shardIndex; - } - } - else - { - JLOG(m_journal.error()) - << "No hash for fetch pack. Missing Index " << missing; - } + JLOG(m_journal.error()) + << "No hash for fetch pack. Missing Index " << missing; return; } @@ -1342,8 +1316,7 @@ LedgerMaster::getLedgerHashForHistory( { // Try to get the hash of a ledger we need to fetch for history std::optional ret; - auto const& l{ - reason == InboundLedger::Reason::SHARD ? 
mShardLedger : mHistLedger}; + auto const& l{mHistLedger}; if (l && l->info().seq >= index) { @@ -2001,54 +1974,35 @@ LedgerMaster::fetchForHistory( auto seq = ledger->info().seq; assert(seq == missing); JLOG(m_journal.trace()) << "fetchForHistory acquired " << seq; - if (reason == InboundLedger::Reason::SHARD) + setFullLedger(ledger, false, false); + int fillInProgress; { - ledger->setFull(); - { - std::lock_guard lock(m_mutex); - mShardLedger = ledger; - } - if (!ledger->stateMap().family().isShardBacked()) - app_.getShardStore()->storeLedger(ledger); + std::lock_guard lock(m_mutex); + mHistLedger = ledger; + fillInProgress = mFillInProgress; } - else + if (fillInProgress == 0 && + app_.getRelationalDatabase().getHashByIndex(seq - 1) == + ledger->info().parentHash) { - setFullLedger(ledger, false, false); - int fillInProgress; { + // Previous ledger is in DB std::lock_guard lock(m_mutex); - mHistLedger = ledger; - fillInProgress = mFillInProgress; - } - if (fillInProgress == 0 && - app_.getRelationalDatabase().getHashByIndex(seq - 1) == - ledger->info().parentHash) - { - { - // Previous ledger is in DB - std::lock_guard lock(m_mutex); - mFillInProgress = seq; - } - app_.getJobQueue().addJob( - jtADVANCE, "tryFill", [this, ledger]() { - tryFill(ledger); - }); + mFillInProgress = seq; } + app_.getJobQueue().addJob( + jtADVANCE, "tryFill", [this, ledger]() { + tryFill(ledger); + }); } progress = true; } else { std::uint32_t fetchSz; - if (reason == InboundLedger::Reason::SHARD) - // Do not fetch ledger sequences lower - // than the shard's first ledger sequence - fetchSz = app_.getShardStore()->firstLedgerSeq( - app_.getShardStore()->seqToShardIndex(missing)); - else - // Do not fetch ledger sequences lower - // than the earliest ledger sequence - fetchSz = app_.getNodeStore().earliestLedgerSeq(); + // Do not fetch ledger sequences lower + // than the earliest ledger sequence + fetchSz = app_.getNodeStore().earliestLedgerSeq(); fetchSz = missing >= fetchSz ? std::min(ledger_fetch_size_, (missing - fetchSz) + 1) : 0; @@ -2081,7 +2035,8 @@ LedgerMaster::fetchForHistory( << "Ledgers: " << app_.getLedgerMaster().getCompleteLedgers(); JLOG(m_journal.fatal()) << "Acquire reason: " - << (reason == InboundLedger::Reason::HISTORY ? "HISTORY" : "SHARD"); + << (reason == InboundLedger::Reason::HISTORY ? 
"HISTORY" + : "NOT HISTORY"); clearLedger(missing + 1); progress = true; } @@ -2133,15 +2088,6 @@ LedgerMaster::doAdvance(std::unique_lock& sl) else missing = std::nullopt; } - if (!missing && mFillInProgress == 0) - { - if (auto shardStore = app_.getShardStore()) - { - missing = shardStore->prepareLedger(mValidLedgerSeq); - if (missing) - reason = InboundLedger::Reason::SHARD; - } - } if (missing) { fetchForHistory(*missing, progress, reason, sl); @@ -2156,7 +2102,6 @@ LedgerMaster::doAdvance(std::unique_lock& sl) else { mHistLedger.reset(); - mShardLedger.reset(); JLOG(m_journal.trace()) << "tryAdvance not fetching history"; } } diff --git a/src/xrpld/app/main/Application.cpp b/src/xrpld/app/main/Application.cpp index ff6cc0584ca..f3308a091dc 100644 --- a/src/xrpld/app/main/Application.cpp +++ b/src/xrpld/app/main/Application.cpp @@ -50,17 +50,14 @@ #include #include #include -#include #include #include #include #include #include #include -#include #include #include -#include #include #include #include @@ -192,9 +189,6 @@ class ApplicationImp : public Application, public BasicApp std::unique_ptr m_nodeStore; NodeFamily nodeFamily_; - std::unique_ptr shardStore_; - std::unique_ptr shardFamily_; - std::unique_ptr shardArchiveHandler_; // VFALCO TODO Make OrderBookDB abstract OrderBookDB m_orderBookDB; std::unique_ptr m_pathRequests; @@ -361,13 +355,6 @@ class ApplicationImp : public Application, public BasicApp , nodeFamily_(*this, *m_collectorManager) - // The shard store is optional and make_ShardStore can return null. - , shardStore_(make_ShardStore( - *this, - m_nodeStoreScheduler, - 4, - logs_->journal("ShardStore"))) - , m_orderBookDB(*this) , m_pathRequests(std::make_unique( @@ -565,14 +552,6 @@ class ApplicationImp : public Application, public BasicApp return nodeFamily_; } - // The shard store is an optional feature. If the sever is configured for - // shards, this function will return a valid pointer, otherwise a nullptr. - Family* - getShardFamily() override - { - return shardFamily_.get(); - } - TimeKeeper& timeKeeper() override { @@ -696,72 +675,6 @@ class ApplicationImp : public Application, public BasicApp return *m_nodeStore; } - // The shard store is an optional feature. If the sever is configured for - // shards, this function will return a valid pointer, otherwise a nullptr. - NodeStore::DatabaseShard* - getShardStore() override - { - return shardStore_.get(); - } - - RPC::ShardArchiveHandler* - getShardArchiveHandler(bool tryRecovery) override - { - static std::mutex handlerMutex; - std::lock_guard lock(handlerMutex); - - // After constructing the handler, try to - // initialize it. Log on error; set the - // member variable on success. - auto initAndSet = - [this](std::unique_ptr&& handler) { - if (!handler) - return false; - - if (!handler->init()) - { - JLOG(m_journal.error()) - << "Failed to initialize ShardArchiveHandler."; - - return false; - } - - shardArchiveHandler_ = std::move(handler); - return true; - }; - - // Need to resume based on state from a previous - // run. 
- if (tryRecovery) - { - if (shardArchiveHandler_ != nullptr) - { - JLOG(m_journal.error()) - << "ShardArchiveHandler already created at startup."; - - return nullptr; - } - - auto handler = - RPC::ShardArchiveHandler::tryMakeRecoveryHandler(*this); - - if (!initAndSet(std::move(handler))) - return nullptr; - } - - // Construct the ShardArchiveHandler - if (shardArchiveHandler_ == nullptr) - { - auto handler = - RPC::ShardArchiveHandler::makeShardArchiveHandler(*this); - - if (!initAndSet(std::move(handler))) - return nullptr; - } - - return shardArchiveHandler_.get(); - } - Application::MutexType& getMasterMutex() override { @@ -1075,10 +988,10 @@ class ApplicationImp : public Application, public BasicApp { std::shared_ptr const fullBelowCache = - nodeFamily_.getFullBelowCache(0); + nodeFamily_.getFullBelowCache(); std::shared_ptr const treeNodeCache = - nodeFamily_.getTreeNodeCache(0); + nodeFamily_.getTreeNodeCache(); std::size_t const oldFullBelowSize = fullBelowCache->size(); std::size_t const oldTreeNodeSize = treeNodeCache->size(); @@ -1094,25 +1007,6 @@ class ApplicationImp : public Application, public BasicApp << "NodeFamily::TreeNodeCache sweep. Size before: " << oldTreeNodeSize << "; size after: " << treeNodeCache->size(); } - if (shardFamily_) - { - std::size_t const oldFullBelowSize = - shardFamily_->getFullBelowCacheSize(); - std::size_t const oldTreeNodeSize = - shardFamily_->getTreeNodeCacheSize().second; - - shardFamily_->sweep(); - - JLOG(m_journal.debug()) - << "ShardFamily::FullBelowCache sweep. Size before: " - << oldFullBelowSize - << "; size after: " << shardFamily_->getFullBelowCacheSize(); - - JLOG(m_journal.debug()) - << "ShardFamily::TreeNodeCache sweep. Size before: " - << oldTreeNodeSize << "; size after: " - << shardFamily_->getTreeNodeCacheSize().second; - } { TaggedCache const& masterTxCache = getMasterTransaction().getCache(); @@ -1129,11 +1023,6 @@ class ApplicationImp : public Application, public BasicApp // Does not appear to have an associated cache. getNodeStore().sweep(); } - if (shardStore_) - { - // Does not appear to have an associated cache. - shardStore_->sweep(); - } { std::size_t const oldLedgerMasterCacheSize = getLedgerMaster().getFetchPackCacheSize(); @@ -1266,9 +1155,6 @@ class ApplicationImp : public Application, public BasicApp // and new validations must be greater than this. 
std::atomic maxDisallowedLedger_{0}; - bool - nodeToShards(); - void startGenesisLedger(); @@ -1348,15 +1234,6 @@ ApplicationImp::setup(boost::program_options::variables_map const& cmdline) if (!initRelationalDatabase() || !initNodeStore()) return false; - if (shardStore_) - { - shardFamily_ = - std::make_unique(*this, *m_collectorManager); - - if (!shardStore_->init()) - return false; - } - if (!peerReservations_->load(getWalletDB())) { JLOG(m_journal.fatal()) << "Cannot find peer reservations!"; @@ -1543,13 +1420,6 @@ ApplicationImp::setup(boost::program_options::variables_map const& cmdline) add(*overlay_); // add to PropertyStream } - if (!config_->standalone()) - { - // NodeStore import into the ShardStore requires the SQLite database - if (config_->nodeToShard && !nodeToShards()) - return false; - } - // start first consensus round if (!config_->reporting() && !m_networkOPs->beginConsensus( @@ -1664,38 +1534,6 @@ ApplicationImp::setup(boost::program_options::variables_map const& cmdline) } } - RPC::ShardArchiveHandler* shardArchiveHandler = nullptr; - if (shardStore_) - { - try - { - // Create a ShardArchiveHandler if recovery - // is needed (there's a state database left - // over from a previous run). - auto handler = getShardArchiveHandler(true); - - // Recovery is needed. - if (handler) - shardArchiveHandler = handler; - } - catch (std::exception const& e) - { - JLOG(m_journal.fatal()) - << "Exception when starting ShardArchiveHandler from " - "state database: " - << e.what(); - - return false; - } - } - - if (shardArchiveHandler && !shardArchiveHandler->start()) - { - JLOG(m_journal.fatal()) << "Failed to start ShardArchiveHandler."; - - return false; - } - validatorSites_->start(); if (reportingETL_) @@ -1807,12 +1645,8 @@ ApplicationImp::run() m_loadManager->stop(); m_shaMapStore->stop(); m_jobQueue->stop(); - if (shardArchiveHandler_) - shardArchiveHandler_->stop(); if (overlay_) overlay_->stop(); - if (shardStore_) - shardStore_->stop(); grpcServer_->stop(); m_networkOPs->stop(); serverHandler_->stop(); @@ -1876,9 +1710,6 @@ ApplicationImp::fdRequired() const // doubled if online delete is enabled). needed += std::max(5, m_shaMapStore->fdRequired()); - if (shardStore_) - needed += shardStore_->fdRequired(); - // One fd per incoming connection a port can accept, or // if no limit is set, assume it'll handle 256 clients. 
for (auto const& p : serverHandler_->setup().ports) @@ -2345,27 +2176,6 @@ ApplicationImp::journal(std::string const& name) return logs_->journal(name); } -bool -ApplicationImp::nodeToShards() -{ - assert(overlay_); - assert(!config_->standalone()); - - if (config_->section(ConfigSection::shardDatabase()).empty()) - { - JLOG(m_journal.fatal()) - << "The [shard_db] configuration setting must be set"; - return false; - } - if (!shardStore_) - { - JLOG(m_journal.fatal()) << "Invalid [shard_db] configuration"; - return false; - } - shardStore_->importDatabase(getNodeStore()); - return true; -} - void ApplicationImp::setMaxDisallowedLedger() { diff --git a/src/xrpld/app/main/Application.h b/src/xrpld/app/main/Application.h index 57e6f1730e5..d4871317e73 100644 --- a/src/xrpld/app/main/Application.h +++ b/src/xrpld/app/main/Application.h @@ -42,14 +42,10 @@ class Manager; } namespace NodeStore { class Database; -class DatabaseShard; } // namespace NodeStore namespace perf { class PerfLog; } -namespace RPC { -class ShardArchiveHandler; -} // VFALCO TODO Fix forward declares required for header dependency loops class AmendmentTable; @@ -172,8 +168,6 @@ class Application : public beast::PropertyStream::Source getCollectorManager() = 0; virtual Family& getNodeFamily() = 0; - virtual Family* - getShardFamily() = 0; virtual TimeKeeper& timeKeeper() = 0; virtual JobQueue& @@ -210,10 +204,6 @@ class Application : public beast::PropertyStream::Source getValidations() = 0; virtual NodeStore::Database& getNodeStore() = 0; - virtual NodeStore::DatabaseShard* - getShardStore() = 0; - virtual RPC::ShardArchiveHandler* - getShardArchiveHandler(bool tryRecovery = false) = 0; virtual InboundLedgers& getInboundLedgers() = 0; virtual InboundTransactions& diff --git a/src/xrpld/app/main/DBInit.h b/src/xrpld/app/main/DBInit.h index 3d2f42717b2..528f2e8105e 100644 --- a/src/xrpld/app/main/DBInit.h +++ b/src/xrpld/app/main/DBInit.h @@ -124,96 +124,6 @@ inline constexpr std::array TxDBInit{ //////////////////////////////////////////////////////////////////////////////// -// The Ledger Meta database maps ledger hashes to shard indexes -inline constexpr auto LgrMetaDBName{"ledger_meta.db"}; - -// In C++17 omitting the explicit template parameters caused -// a crash -inline constexpr std::array LgrMetaDBPragma -{ - "PRAGMA page_size=4096;", "PRAGMA journal_size_limit=1582080;", - "PRAGMA max_page_count=2147483646;", - -#if (ULONG_MAX > UINT_MAX) && !defined(NO_SQLITE_MMAP) - "PRAGMA mmap_size=17179869184;" -#else - - // Provide an explicit `no-op` SQL statement - // in order to keep the size of the array - // constant regardless of the preprocessor - // condition evaluation - "PRAGMA sqlite_noop_statement;" -#endif -}; - -inline constexpr std::array LgrMetaDBInit{ - {"BEGIN TRANSACTION;", - - "CREATE TABLE IF NOT EXISTS LedgerMeta ( \ - LedgerHash CHARACTER(64) PRIMARY KEY, \ - ShardIndex INTEGER \ - );", - - "END TRANSACTION;"}}; - -//////////////////////////////////////////////////////////////////////////////// - -// Transaction Meta database maps transaction IDs to shard indexes -inline constexpr auto TxMetaDBName{"transaction_meta.db"}; - -// In C++17 omitting the explicit template parameters caused -// a crash -inline constexpr std::array TxMetaDBPragma -{ - "PRAGMA page_size=4096;", "PRAGMA journal_size_limit=1582080;", - "PRAGMA max_page_count=2147483646;", - -#if (ULONG_MAX > UINT_MAX) && !defined(NO_SQLITE_MMAP) - "PRAGMA mmap_size=17179869184;" -#else - - // Provide an explicit `no-op` SQL statement - // in 
order to keep the size of the array - // constant regardless of the preprocessor - // condition evaluation - "PRAGMA sqlite_noop_statement;" -#endif -}; - -inline constexpr std::array TxMetaDBInit{ - {"BEGIN TRANSACTION;", - - "CREATE TABLE IF NOT EXISTS TransactionMeta ( \ - TransID CHARACTER(64) PRIMARY KEY, \ - ShardIndex INTEGER \ - );", - - "END TRANSACTION;"}}; - -//////////////////////////////////////////////////////////////////////////////// - -// Temporary database used with an incomplete shard that is being acquired -inline constexpr auto AcquireShardDBName{"acquire.db"}; - -inline constexpr std::array AcquireShardDBPragma{ - {"PRAGMA journal_size_limit=1582080;"}}; - -inline constexpr std::array AcquireShardDBInit{ - {"CREATE TABLE IF NOT EXISTS Shard ( \ - ShardIndex INTEGER PRIMARY KEY, \ - LastLedgerHash CHARACTER(64), \ - StoredLedgerSeqs BLOB \ - );"}}; - -//////////////////////////////////////////////////////////////////////////////// - -// Pragma for Ledger and Transaction databases with final shards -// These override the CommonDBPragma values defined above. -inline constexpr std::array FinalShardDBPragma{ - {"PRAGMA synchronous=OFF;", "PRAGMA journal_mode=OFF;"}}; - -//////////////////////////////////////////////////////////////////////////////// - inline constexpr auto WalletDBName{"wallet.db"}; inline constexpr std::array WalletDBInit{ @@ -247,36 +157,6 @@ inline constexpr std::array WalletDBInit{ "END TRANSACTION;"}}; -//////////////////////////////////////////////////////////////////////////////// - -static constexpr auto stateDBName{"state.db"}; - -// These override the CommonDBPragma values defined above. -static constexpr std::array DownloaderDBPragma{ - {"PRAGMA synchronous=FULL;", "PRAGMA journal_mode=DELETE;"}}; - -static constexpr std::array ShardArchiveHandlerDBInit{ - {"BEGIN TRANSACTION;", - - "CREATE TABLE IF NOT EXISTS State ( \ - ShardIndex INTEGER PRIMARY KEY, \ - URL TEXT \ - );", - - "END TRANSACTION;"}}; - -static constexpr std::array DatabaseBodyDBInit{ - {"BEGIN TRANSACTION;", - - "CREATE TABLE IF NOT EXISTS download ( \ - Path TEXT, \ - Data BLOB, \ - Size BIGINT UNSIGNED, \ - Part BIGINT UNSIGNED PRIMARY KEY \ - );", - - "END TRANSACTION;"}}; - } // namespace ripple #endif diff --git a/src/xrpld/app/main/Main.cpp b/src/xrpld/app/main/Main.cpp index 059d9758d39..799911f63dd 100644 --- a/src/xrpld/app/main/Main.cpp +++ b/src/xrpld/app/main/Main.cpp @@ -144,7 +144,6 @@ printHelp(const po::options_description& desc) " consensus_info\n" " deposit_authorized " "[]\n" - " download_shard [[ ]]\n" " feature [ [accept|reject]]\n" " fetch_info [clear]\n" " gateway_balances [] [ [ " @@ -160,7 +159,6 @@ printHelp(const po::options_description& desc) " log_level [[] ]\n" " logrotate\n" " manifest \n" - " node_to_shard [status|start|stop]\n" " peers\n" " ping\n" " random\n" @@ -398,7 +396,6 @@ run(int argc, char** argv) "Load the specified ledger file.")( "load", "Load the current ledger from the local DB.")( "net", "Get the initial ledger from the network.")( - "nodetoshard", "Import node store into shards")( "replay", "Replay a ledger close.")( "trap_tx_hash", po::value(), @@ -676,9 +673,6 @@ run(int argc, char** argv) if (vm.count("import")) config->doImport = true; - if (vm.count("nodetoshard")) - config->nodeToShard = true; - if (vm.count("ledger")) { config->START_LEDGER = vm["ledger"].as(); diff --git a/src/xrpld/app/misc/NetworkOPs.cpp b/src/xrpld/app/misc/NetworkOPs.cpp index 9cf5d097099..a7ee935f102 100644 --- a/src/xrpld/app/misc/NetworkOPs.cpp +++ 
b/src/xrpld/app/misc/NetworkOPs.cpp @@ -44,7 +44,6 @@ #include #include #include -#include #include #include #include @@ -2472,10 +2471,7 @@ NetworkOPsImp::getServerInfo(bool human, bool admin, bool counters) info[jss::counters] = app_.getPerfLog().countersJson(); Json::Value nodestore(Json::objectValue); - if (app_.getShardStore()) - app_.getShardStore()->getCountsJson(nodestore); - else - app_.getNodeStore().getCountsJson(nodestore); + app_.getNodeStore().getCountsJson(nodestore); info[jss::counters][jss::nodestore] = nodestore; info[jss::current_activities] = app_.getPerfLog().currentJson(); } diff --git a/src/xrpld/app/misc/SHAMapStoreImp.cpp b/src/xrpld/app/misc/SHAMapStoreImp.cpp index d32556a4b29..9344463295b 100644 --- a/src/xrpld/app/misc/SHAMapStoreImp.cpp +++ b/src/xrpld/app/misc/SHAMapStoreImp.cpp @@ -290,8 +290,8 @@ SHAMapStoreImp::run() LedgerIndex lastRotated = state_db_.getState().lastRotated; netOPs_ = &app_.getOPs(); ledgerMaster_ = &app_.getLedgerMaster(); - fullBelowCache_ = &(*app_.getNodeFamily().getFullBelowCache(0)); - treeNodeCache_ = &(*app_.getNodeFamily().getTreeNodeCache(0)); + fullBelowCache_ = &(*app_.getNodeFamily().getFullBelowCache()); + treeNodeCache_ = &(*app_.getNodeFamily().getTreeNodeCache()); if (advisoryDelete_) canDelete_ = state_db_.getCanDelete(); @@ -329,27 +329,8 @@ SHAMapStoreImp::run() validatedSeq >= lastRotated + deleteInterval_ && canDelete_ >= lastRotated - 1 && healthWait() == keepGoing; - // Make sure we don't delete ledgers currently being - // imported into the ShardStore - bool const waitForImport = readyToRotate && [this, lastRotated] { - if (auto shardStore = app_.getShardStore()) - { - if (auto sequence = shardStore->getDatabaseImportSequence()) - return sequence <= lastRotated - 1; - } - - return false; - }(); - - if (waitForImport) - { - JLOG(journal_.info()) - << "NOT rotating validatedSeq " << validatedSeq - << " as rotation would interfere with ShardStore import"; - } - // will delete up to (not including) lastRotated - if (readyToRotate && !waitForImport) + if (readyToRotate) { JLOG(journal_.warn()) << "rotating validatedSeq " << validatedSeq << " lastRotated " diff --git a/src/xrpld/app/rdb/Download.h b/src/xrpld/app/rdb/Download.h deleted file mode 100644 index 6ee02d4c100..00000000000 --- a/src/xrpld/app/rdb/Download.h +++ /dev/null @@ -1,79 +0,0 @@ -//------------------------------------------------------------------------------ -/* - This file is part of rippled: https://github.com/ripple/rippled - Copyright (c) 2021 Ripple Labs Inc. - - Permission to use, copy, modify, and/or distribute this software for any - purpose with or without fee is hereby granted, provided that the above - copyright notice and this permission notice appear in all copies. - - THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
-*/ -//============================================================================== - -#ifndef RIPPLE_APP_RDB_DOWNLOAD_H_INCLUDED -#define RIPPLE_APP_RDB_DOWNLOAD_H_INCLUDED - -#include -#include -#include -#include -#include -#include -#include - -namespace ripple { - -/** - * @brief openDatabaseBodyDb Opens a database that will store the contents of a - * file being downloaded, returns its descriptor, and starts a new - * download process or continues an existing one. - * @param setup Path to the database and other opening parameters. - * @param path Path of the new file to download. - * @return Pair containing a unique pointer to the database and the amount of - * bytes already downloaded if a download is being continued. - */ -std::pair, std::optional> -openDatabaseBodyDb( - DatabaseCon::Setup const& setup, - boost::filesystem::path const& path); - -/** - * @brief databaseBodyDoPut Saves a new fragment of a downloaded file. - * @param session Session with the database. - * @param data Downloaded fragment of file data to save. - * @param path Path to the file currently being downloaded. - * @param fileSize Size of the portion of the file already downloaded. - * @param part The index of the most recently updated database row. - * @param maxRowSizePad A constant padding value that accounts for other data - * stored in each row of the database. - * @return Index of the most recently updated database row. - */ -std::uint64_t -databaseBodyDoPut( - soci::session& session, - std::string const& data, - std::string const& path, - std::uint64_t fileSize, - std::uint64_t part, - std::uint16_t maxRowSizePad); - -/** - * @brief databaseBodyFinish Finishes the download process and writes the file - * to disk. - * @param session Session with the database. - * @param fout Opened file into which the downloaded data from the database will - * be written. - */ -void -databaseBodyFinish(soci::session& session, std::ofstream& fout); - -} // namespace ripple - -#endif diff --git a/src/xrpld/app/rdb/README.md b/src/xrpld/app/rdb/README.md index 1a68a1ae5e3..f4cb5f203a4 100644 --- a/src/xrpld/app/rdb/README.md +++ b/src/xrpld/app/rdb/README.md @@ -2,9 +2,8 @@ The guiding principles of the Relational Database Interface are summarized below: -* All hard-coded SQL statements should be stored in the [files](#source-files) under the `ripple/app/rdb` directory. With the exception of test modules, no hard-coded SQL should be added to any other file in rippled. +* All hard-coded SQL statements should be stored in the [files](#source-files) under the `xrpld/app/rdb` directory. With the exception of test modules, no hard-coded SQL should be added to any other file in rippled. * The base class `RelationalDatabase` is inherited by derived classes that each provide an interface for operating on distinct relational database systems. -* For future use, the shard store will be used if the node store is absent. ## Overview @@ -12,7 +11,7 @@ Firstly, the interface `RelationalDatabase` is inherited by the classes `SQLiteD ## Configuration -The config section `[relational_db]` has a property named `backend` whose value designates which database implementation will be used for node or shard databases. Presently the only valid value for this property is `sqlite`: +The config section `[relational_db]` has a property named `backend` whose value designates which database implementation will be used for node databases. 
Presently the only valid value for this property is `sqlite`: ``` [relational_db] @@ -24,35 +23,25 @@ backend=sqlite The Relational Database Interface consists of the following directory structure (as of November 2021): ``` -src/ripple/app/rdb/ +src/xrpld/app/rdb/ ├── backend │   ├── detail -│   │   ├── impl -│   │   │   ├── Node.cpp -│   │   │   └── Shard.cpp +│   │   ├── Node.cpp │   │   ├── Node.h -│   │   └── Shard.h -│   ├── impl │   │   ├── PostgresDatabase.cpp │   │   └── SQLiteDatabase.cpp │   ├── PostgresDatabase.h │   └── SQLiteDatabase.h -├── impl -│   ├── Download.cpp +├── detail │   ├── PeerFinder.cpp │   ├── RelationalDatabase.cpp -│   ├── ShardArchive.cpp │   ├── State.cpp -│   ├── UnitaryShard.cpp │   ├── Vacuum.cpp │   └── Wallet.cpp -├── Download.h ├── PeerFinder.h ├── RelationalDatabase.h ├── README.md -├── ShardArchive.h ├── State.h -├── UnitaryShard.h ├── Vacuum.h └── Wallet.h ``` @@ -61,16 +50,12 @@ src/ripple/app/rdb/ | File | Contents | | ----------- | ----------- | | `Node.[h\|cpp]` | Defines/Implements methods used by `SQLiteDatabase` for interacting with SQLite node databases| -| `Shard.[h\|cpp]` | Defines/Implements methods used by `SQLiteDatabase` for interacting with SQLite shard databases | | `PostgresDatabase.[h\|cpp]` | Defines/Implements the class `PostgresDatabase`/`PostgresDatabaseImp` which inherits from `RelationalDatabase` and is used to operate on the main stores | |`SQLiteDatabase.[h\|cpp]`| Defines/Implements the class `SQLiteDatabase`/`SQLiteDatabaseImp` which inherits from `RelationalDatabase` and is used to operate on the main stores | -| `Download.[h\|cpp]` | Defines/Implements methods for persisting file downloads to a SQLite database | | `PeerFinder.[h\|cpp]` | Defines/Implements methods for interacting with the PeerFinder SQLite database | |`RelationalDatabase.cpp`| Implements the static method `RelationalDatabase::init` which is used to initialize an instance of `RelationalDatabase` | | `RelationalDatabase.h` | Defines the abstract class `RelationalDatabase`, the primary class of the Relational Database Interface | -| `ShardArchive.[h\|cpp]` | Defines/Implements methods used by `ShardArchiveHandler` for interacting with SQLite databases containing metadata regarding shard downloads | | `State.[h\|cpp]` | Defines/Implements methods for interacting with the State SQLite database which concerns ledger deletion and database rotation | -| `UnitaryShard.[h\|cpp]` | Defines/Implements methods used by a unitary instance of `Shard` for interacting with the various SQLite databases thereof. These files are distinct from `Shard.[h\|cpp]` which contain methods used by `SQLiteDatabaseImp` | | `Vacuum.[h\|cpp]` | Defines/Implements a method for performing the `VACUUM` operation on SQLite databases | | `Wallet.[h\|cpp]` | Defines/Implements methods for interacting with Wallet SQLite databases | @@ -84,19 +69,15 @@ The Relational Database Interface provides three categories of methods for inter * Free functions for interacting with SQLite databases used by various components of the software. These methods feature a `soci::session` parameter which facilitates connecting to SQLite databases, and are defined and implemented in the following files: - * `Download.[h\|cpp]` * `PeerFinder.[h\|cpp]` - * `ShardArchive.[h\|cpp]` * `State.[h\|cpp]` - * `UnitaryShard.[h\|cpp]` * `Vacuum.[h\|cpp]` * `Wallet.[h\|cpp]` -* Free functions used exclusively by `SQLiteDatabaseImp` for interacting with SQLite databases owned by the node store or shard store. 
Unlike the free functions in the files listed above, these are not intended to be invoked directly by clients. Rather, these methods are invoked by derived instances of `RelationalDatabase`. These methods are defined in the following files: +* Free functions used exclusively by `SQLiteDatabaseImp` for interacting with SQLite databases owned by the node store. Unlike the free functions in the files listed above, these are not intended to be invoked directly by clients. Rather, these methods are invoked by derived instances of `RelationalDatabase`. These methods are defined in the following files: * `Node.[h|cpp]` - * `Shard.[h|cpp]` -* Member functions of `RelationalDatabase`, `SQLiteDatabase`, and `PostgresDatabase` which are used to access the main stores (node store, shard store). The `SQLiteDatabase` class will access the node store by default, but will use shard databases if the node store is not present and the shard store is available. The class `PostgresDatabase` uses only the node store. +* Member functions of `RelationalDatabase`, `SQLiteDatabase`, and `PostgresDatabase` which are used to access the node store. diff --git a/src/xrpld/app/rdb/ShardArchive.h b/src/xrpld/app/rdb/ShardArchive.h deleted file mode 100644 index 44f990ab5f5..00000000000 --- a/src/xrpld/app/rdb/ShardArchive.h +++ /dev/null @@ -1,78 +0,0 @@ -//------------------------------------------------------------------------------ -/* - This file is part of rippled: https://github.com/ripple/rippled - Copyright (c) 2021 Ripple Labs Inc. - - Permission to use, copy, modify, and/or distribute this software for any - purpose with or without fee is hereby granted, provided that the above - copyright notice and this permission notice appear in all copies. - - THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. -*/ -//============================================================================== - -#ifndef RIPPLE_APP_RDB_SHARDARCHIVE_H_INCLUDED -#define RIPPLE_APP_RDB_SHARDARCHIVE_H_INCLUDED - -#include -#include - -namespace ripple { - -/** - * @brief makeArchiveDB Opens the shard archive database and returns its - * descriptor. - * @param dir Path to the database to open. - * @param dbName Name of the database. - * @return Unique pointer to the opened database. - */ -std::unique_ptr -makeArchiveDB(boost::filesystem::path const& dir, std::string const& dbName); - -/** - * @brief readArchiveDB Reads entries from the shard archive database and - * invokes the given callback for each entry. - * @param db Session with the database. - * @param func Callback to invoke for each entry. - */ -void -readArchiveDB( - DatabaseCon& db, - std::function const& func); - -/** - * @brief insertArchiveDB Adds an entry to the shard archive database. - * @param db Session with the database. - * @param shardIndex Shard index to add. - * @param url Shard download url to add. - */ -void -insertArchiveDB( - DatabaseCon& db, - std::uint32_t shardIndex, - std::string const& url); - -/** - * @brief deleteFromArchiveDB Deletes an entry from the shard archive database. - * @param db Session with the database. 
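The first of the three method categories listed in the README above — free functions that receive an open `soci::session` and run their hard-coded SQL against it — follows one simple shape. Below is a minimal sketch of that shape only: the function name, table handling, and return convention are hypothetical and not part of the rippled interface; the only assumption is soci's documented one-shot query syntax.

```
// Hypothetical sketch of the soci free-function pattern described in the
// README: the caller owns the session; the function only supplies the SQL.
#include <optional>
#include <string>
#include <soci/soci.h>

std::optional<long long>
selectRowCount(soci::session& session, std::string const& table)
{
    long long count = 0;
    // One-shot statement: execute immediately, bind the result into count.
    session << "SELECT COUNT(*) FROM " + table + ";", soci::into(count);
    if (!session.got_data())
        return std::nullopt;
    return count;
}
```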
- * @param shardIndex Shard index to remove from the database. - */ -void -deleteFromArchiveDB(DatabaseCon& db, std::uint32_t shardIndex); - -/** - * @brief dropArchiveDB Removes a table in the shard archive database. - * @param db Session with the database. - */ -void -dropArchiveDB(DatabaseCon& db); - -} // namespace ripple - -#endif diff --git a/src/xrpld/app/rdb/State.h b/src/xrpld/app/rdb/State.h index b245270cda2..e65e9d4d57a 100644 --- a/src/xrpld/app/rdb/State.h +++ b/src/xrpld/app/rdb/State.h @@ -24,7 +24,6 @@ #include #include #include -#include #include #include diff --git a/src/xrpld/app/rdb/UnitaryShard.h b/src/xrpld/app/rdb/UnitaryShard.h deleted file mode 100644 index e848000221d..00000000000 --- a/src/xrpld/app/rdb/UnitaryShard.h +++ /dev/null @@ -1,155 +0,0 @@ -//------------------------------------------------------------------------------ -/* - This file is part of rippled: https://github.com/ripple/rippled - Copyright (c) 2021 Ripple Labs Inc. - - Permission to use, copy, modify, and/or distribute this software for any - purpose with or without fee is hereby granted, provided that the above - copyright notice and this permission notice appear in all copies. - - THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. -*/ -//============================================================================== - -#ifndef RIPPLE_APP_RDB_UNITARYSHARD_H_INCLUDED -#define RIPPLE_APP_RDB_UNITARYSHARD_H_INCLUDED - -#include -#include -#include -#include -#include - -namespace ripple { - -struct DatabasePair -{ - std::unique_ptr ledgerDb; - std::unique_ptr transactionDb; -}; - -/** - * @brief makeShardCompleteLedgerDBs Opens shard databases for verified shards - * and returns their descriptors. - * @param config Config object. - * @param setup Path to the databases and other opening parameters. - * @return Pair of unique pointers to the opened ledger and transaction - * databases. - */ -DatabasePair -makeShardCompleteLedgerDBs( - Config const& config, - DatabaseCon::Setup const& setup); - -/** - * @brief makeShardIncompleteLedgerDBs Opens shard databases for partially - * downloaded or unverified shards and returns their descriptors. - * @param config Config object. - * @param setup Path to the databases and other opening parameters. - * @param checkpointerSetup Checkpointer parameters. - * @return Pair of unique pointers to the opened ledger and transaction - * databases. - */ -DatabasePair -makeShardIncompleteLedgerDBs( - Config const& config, - DatabaseCon::Setup const& setup, - DatabaseCon::CheckpointerSetup const& checkpointerSetup); - -/** - * @brief updateLedgerDBs Saves the given ledger to shard databases. - * @param txdb Session with the transaction databases. - * @param lgrdb Session with the ledger databases. - * @param ledger Ledger to save. - * @param index Index of the shard that owns the ledger. - * @param stop Reference to an atomic flag that can stop the process if raised. - * @param j Journal - * @return True if the ledger was successfully saved. 
- */ -bool -updateLedgerDBs( - soci::session& txdb, - soci::session& lgrdb, - std::shared_ptr const& ledger, - std::uint32_t index, - std::atomic& stop, - beast::Journal j); - -/** - * @brief makeAcquireDB Opens the shard acquire database and returns its - * descriptor. - * @param setup Path to the database and other opening parameters. - * @param checkpointerSetup Checkpointer parameters. - * @return Unique pointer to the opened database. - */ -std::unique_ptr -makeAcquireDB( - DatabaseCon::Setup const& setup, - DatabaseCon::CheckpointerSetup const& checkpointerSetup); - -/** - * @brief insertAcquireDBIndex Adds a new shard index to the shard acquire - * database. - * @param session Session with the database. - * @param index Index to add. - */ -void -insertAcquireDBIndex(soci::session& session, std::uint32_t index); - -/** - * @brief selectAcquireDBLedgerSeqs Returns the set of acquired ledgers for - * the given shard. - * @param session Session with the database. - * @param index Shard index. - * @return Pair which contains true if such an index was found in the database, - * and a string which contains the set of ledger sequences. - * If no sequences were saved then the optional will have no value. - */ -std::pair> -selectAcquireDBLedgerSeqs(soci::session& session, std::uint32_t index); - -struct AcquireShardSeqsHash -{ - std::optional sequences; - std::optional hash; -}; - -/** - * @brief selectAcquireDBLedgerSeqsHash Returns the set of acquired ledger - * sequences and the last ledger hash for the shard with the provided - * index. - * @param session Session with the database. - * @param index Shard index. - * @return Pair which contains true if such an index was found in the database - * and the AcquireShardSeqsHash structure which contains a string with - * the ledger sequences and a string with last ledger hash. If the set - * of sequences or hash were not saved then no value is returned. - */ -std::pair -selectAcquireDBLedgerSeqsHash(soci::session& session, std::uint32_t index); - -/** - * @brief updateAcquireDB Updates information in the acquire DB. - * @param session Session with the database. - * @param ledger Ledger to save into the database. - * @param index Shard index. - * @param lastSeq Last acquired ledger sequence. - * @param seqs Current set of acquired ledger sequences if it's not empty. - */ -void -updateAcquireDB( - soci::session& session, - std::shared_ptr const& ledger, - std::uint32_t index, - std::uint32_t lastSeq, - std::optional const& seqs); - -} // namespace ripple - -#endif diff --git a/src/xrpld/app/rdb/backend/detail/detail/Node.cpp b/src/xrpld/app/rdb/backend/detail/Node.cpp similarity index 93% rename from src/xrpld/app/rdb/backend/detail/detail/Node.cpp rename to src/xrpld/app/rdb/backend/detail/Node.cpp index 67a80b43cf3..70e42b0ae85 100644 --- a/src/xrpld/app/rdb/backend/detail/detail/Node.cpp +++ b/src/xrpld/app/rdb/backend/detail/Node.cpp @@ -622,8 +622,7 @@ getTxHistory( soci::session& session, Application& app, LedgerIndex startIndex, - int quantity, - bool count) + int quantity) { std::string sql = boost::str( boost::format( @@ -663,13 +662,6 @@ getTxHistory( txs.push_back(trans); } } - - if (!total && count) - { - session << "SELECT COUNT(*) FROM Transactions;", soci::into(total); - - total = -total; - } } return {txs, total}; @@ -685,9 +677,6 @@ getTxHistory( * the account, the ledger search range, the offset of the first entry to * return, the number of transactions to return, and a flag if this * number is unlimited. 
- * @param limit_used Number of transactions already returned in calls - * to other shard databases, if shard databases are used. - * No value if the node database is used. * @param descending True for descending order, false for ascending. * @param binary True for binary form, false for decoded. * @param count True for counting the number of transactions, false for @@ -700,7 +689,6 @@ transactionsSQL( Application& app, std::string selection, RelationalDatabase::AccountTxOptions const& options, - std::optional const& limit_used, bool descending, bool binary, bool count, @@ -729,14 +717,6 @@ transactionsSQL( numberOfResults = options.limit; } - if (limit_used) - { - if (numberOfResults <= *limit_used) - return ""; - else - numberOfResults -= *limit_used; - } - std::string maxClause = ""; std::string minClause = ""; @@ -790,9 +770,6 @@ transactionsSQL( * the account, the ledger search range, the offset of the first entry to * return, the number of transactions to return, and a flag if this * number is unlimited. - * @param limit_used Number of transactions already returned in calls - * to other shard databases, if shard databases are used. - * No value if the node database is used. * @param descending True for descending order, false for ascending. * @param j Journal. * @return Vector of pairs of found transactions and their metadata sorted by @@ -809,7 +786,6 @@ getAccountTxs( Application& app, LedgerMaster& ledgerMaster, RelationalDatabase::AccountTxOptions const& options, - std::optional const& limit_used, bool descending, beast::Journal j) { @@ -819,7 +795,6 @@ getAccountTxs( app, "AccountTransactions.LedgerSeq,Status,RawTxn,TxnMeta", options, - limit_used, descending, false, false, @@ -880,18 +855,6 @@ getAccountTxs( total++; } } - - if (!total && limit_used) - { - RelationalDatabase::AccountTxOptions opt = options; - opt.offset = 0; - std::string sql1 = transactionsSQL( - app, "COUNT(*)", opt, limit_used, descending, false, false, j); - - session << sql1, soci::into(total); - - total = -total; - } } return {ret, total}; @@ -903,11 +866,9 @@ getOldestAccountTxs( Application& app, LedgerMaster& ledgerMaster, RelationalDatabase::AccountTxOptions const& options, - std::optional const& limit_used, beast::Journal j) { - return getAccountTxs( - session, app, ledgerMaster, options, limit_used, false, j); + return getAccountTxs(session, app, ledgerMaster, options, false, j); } std::pair @@ -916,11 +877,9 @@ getNewestAccountTxs( Application& app, LedgerMaster& ledgerMaster, RelationalDatabase::AccountTxOptions const& options, - std::optional const& limit_used, beast::Journal j) { - return getAccountTxs( - session, app, ledgerMaster, options, limit_used, true, j); + return getAccountTxs(session, app, ledgerMaster, options, true, j); } /** @@ -933,9 +892,6 @@ getNewestAccountTxs( * the account, the ledger search range, the offset of the first entry to * return, the number of transactions to return, and a flag if this * number is unlimited. - * @param limit_used Number of transactions already returned in calls to other - * shard databases, if shard databases are used. No value if the node - * database is used. * @param descending True for descending order, false for ascending. * @param j Journal. 
* @return Vector of tuples each containing (the found transactions, their @@ -951,7 +907,6 @@ getAccountTxsB( soci::session& session, Application& app, RelationalDatabase::AccountTxOptions const& options, - std::optional const& limit_used, bool descending, beast::Journal j) { @@ -961,7 +916,6 @@ getAccountTxsB( app, "AccountTransactions.LedgerSeq,Status,RawTxn,TxnMeta", options, - limit_used, descending, true /*binary*/, false, @@ -1001,18 +955,6 @@ getAccountTxsB( ret.emplace_back(std::move(rawTxn), std::move(txnMeta), seq); total++; } - - if (!total && limit_used) - { - RelationalDatabase::AccountTxOptions opt = options; - opt.offset = 0; - std::string sql1 = transactionsSQL( - app, "COUNT(*)", opt, limit_used, descending, true, false, j); - - session << sql1, soci::into(total); - - total = -total; - } } return {ret, total}; @@ -1023,10 +965,9 @@ getOldestAccountTxsB( soci::session& session, Application& app, RelationalDatabase::AccountTxOptions const& options, - std::optional const& limit_used, beast::Journal j) { - return getAccountTxsB(session, app, options, limit_used, false, j); + return getAccountTxsB(session, app, options, false, j); } std::pair, int> @@ -1034,10 +975,9 @@ getNewestAccountTxsB( soci::session& session, Application& app, RelationalDatabase::AccountTxOptions const& options, - std::optional const& limit_used, beast::Journal j) { - return getAccountTxsB(session, app, options, limit_used, true, j); + return getAccountTxsB(session, app, options, true, j); } /** @@ -1052,8 +992,6 @@ getNewestAccountTxsB( * match: the account, the ledger search range, the marker of the first * returned entry, the number of transactions to return, and a flag if * this number unlimited. - * @param limit_used Number of transactions already returned in calls - * to other shard databases. * @param page_length Total number of transactions to return. * @param forward True for ascending order, false for descending. * @return Vector of tuples of found transactions, their metadata and account @@ -1069,7 +1007,6 @@ accountTxPage( void(std::uint32_t, std::string const&, Blob&&, Blob&&)> const& onTransaction, RelationalDatabase::AccountTxPageOptions const& options, - int limit_used, std::uint32_t page_length, bool forward) { @@ -1085,10 +1022,6 @@ accountTxPage( else numberOfResults = options.limit; - if (numberOfResults < limit_used) - return {options.marker, -1}; - numberOfResults -= limit_used; - // As an account can have many thousands of transactions, there is a limit // placed on the amount of transactions returned. 
If the limit is reached // before the result set has been exhausted (we always query for one more @@ -1104,8 +1037,6 @@ accountTxPage( } std::optional newmarker; - if (limit_used > 0) - newmarker = options.marker; static std::string const prefix( R"(SELECT AccountTransactions.LedgerSeq,AccountTransactions.TxnSeq, @@ -1251,17 +1182,10 @@ oldestAccountTxPage( void(std::uint32_t, std::string const&, Blob&&, Blob&&)> const& onTransaction, RelationalDatabase::AccountTxPageOptions const& options, - int limit_used, std::uint32_t page_length) { return accountTxPage( - session, - onUnsavedLedger, - onTransaction, - options, - limit_used, - page_length, - true); + session, onUnsavedLedger, onTransaction, options, page_length, true); } std::pair, int> @@ -1272,17 +1196,10 @@ newestAccountTxPage( void(std::uint32_t, std::string const&, Blob&&, Blob&&)> const& onTransaction, RelationalDatabase::AccountTxPageOptions const& options, - int limit_used, std::uint32_t page_length) { return accountTxPage( - session, - onUnsavedLedger, - onTransaction, - options, - limit_used, - page_length, - false); + session, onUnsavedLedger, onTransaction, options, page_length, false); } std::variant diff --git a/src/xrpld/app/rdb/backend/detail/Node.h b/src/xrpld/app/rdb/backend/detail/Node.h index d7c170663a5..5564adf8954 100644 --- a/src/xrpld/app/rdb/backend/detail/Node.h +++ b/src/xrpld/app/rdb/backend/detail/Node.h @@ -249,7 +249,6 @@ getHashesByIndex( * @param app Application object. * @param startIndex Offset of first returned entry. * @param quantity Number of returned entries. - * @param count True if counting of all transaction in that shard required. * @return Vector of shared pointers to transactions sorted in * descending order by ledger sequence. Also number of transactions * if count == true. @@ -259,8 +258,7 @@ getTxHistory( soci::session& session, Application& app, LedgerIndex startIndex, - int quantity, - bool count); + int quantity); /** * @brief getOldestAccountTxs Returns oldest transactions for given @@ -272,9 +270,6 @@ getTxHistory( * the account, minimum and maximum ledger numbers to search, * offset of first entry to return, number of transactions to return, * flag if this number unlimited. - * @param limit_used Number or transactions already returned in calls - * to another shard databases, if shard databases are used. - * None if node database is used. * @param j Journal. * @return Vector of pairs of found transactions and their metadata * sorted in ascending order by account sequence. @@ -290,7 +285,6 @@ getOldestAccountTxs( Application& app, LedgerMaster& ledgerMaster, RelationalDatabase::AccountTxOptions const& options, - std::optional const& limit_used, beast::Journal j); /** @@ -303,9 +297,6 @@ getOldestAccountTxs( * the account, minimum and maximum ledger numbers to search, * offset of first entry to return, number of transactions to return, * flag if this number unlimited. - * @param limit_used Number or transactions already returned in calls - * to another shard databases, if shard databases are used. - * None if node database is used. * @param j Journal. * @return Vector of pairs of found transactions and their metadata * sorted in descending order by account sequence. 
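The paging contract that `accountTxPage` implements — query one row beyond the requested page length, and hand back a resume marker only if that extra row actually arrives — is easy to lose in the SQL above. The following self-contained toy reproduces just that contract; every name in it is illustrative and none of it is rippled code.

```
// Toy model of the marker-based paging used above: emit up to pageLength
// rows; if one more row exists, stop early and return a marker identifying
// where the next call should resume. Illustrative only.
#include <cstdint>
#include <iostream>
#include <optional>
#include <utility>
#include <vector>

struct Marker
{
    std::uint32_t ledgerSeq;
    std::uint32_t txnSeq;
};

using Rows = std::vector<std::pair<std::uint32_t, std::uint32_t>>;

std::optional<Marker>
fetchPage(
    Rows const& rows,
    std::optional<Marker> const& marker,
    std::size_t pageLength)
{
    // Resume at the row the previous call handed back, if any.
    std::size_t start = 0;
    if (marker)
    {
        while (start < rows.size() &&
               !(rows[start].first == marker->ledgerSeq &&
                 rows[start].second == marker->txnSeq))
            ++start;
    }

    std::size_t emitted = 0;
    for (std::size_t i = start; i < rows.size(); ++i)
    {
        if (emitted == pageLength)
            return Marker{rows[i].first, rows[i].second};  // more remains
        std::cout << "tx " << rows[i].first << '/' << rows[i].second << '\n';
        ++emitted;
    }
    return std::nullopt;  // result set exhausted; no marker
}

int
main()
{
    Rows const rows{{10, 0}, {10, 1}, {11, 0}, {12, 0}, {12, 1}};

    std::optional<Marker> marker;
    do
        marker = fetchPage(rows, marker, 2);
    while (marker);
}
```

A caller simply loops until no marker comes back, exactly as an RPC client re-submits the marker it received, so termination depends only on the result set being finite and the marker pointing at an unreturned row.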
@@ -321,7 +312,6 @@ getNewestAccountTxs( Application& app, LedgerMaster& ledgerMaster, RelationalDatabase::AccountTxOptions const& options, - std::optional const& limit_used, beast::Journal j); /** @@ -334,9 +324,6 @@ getNewestAccountTxs( * the account, minimum and maximum ledger numbers to search, * offset of first entry to return, number of transactions to return, * flag if this number unlimited. - * @param limit_used Number or transactions already returned in calls - * to another shard databases, if shard databases are used. - * None if node database is used. * @param j Journal. * @return Vector of tuples of found transactions, their metadata and * account sequences sorted in ascending order by account @@ -351,7 +338,6 @@ getOldestAccountTxsB( soci::session& session, Application& app, RelationalDatabase::AccountTxOptions const& options, - std::optional const& limit_used, beast::Journal j); /** @@ -364,9 +350,6 @@ getOldestAccountTxsB( * the account, minimum and maximum ledger numbers to search, * offset of first entry to return, number of transactions to return, * flag if this number unlimited. - * @param limit_used Number or transactions already returned in calls - * to another shard databases, if shard databases are used. - * None if node database is used. * @param j Journal. * @return Vector of tuples of found transactions, their metadata and * account sequences sorted in descending order by account @@ -381,7 +364,6 @@ getNewestAccountTxsB( soci::session& session, Application& app, RelationalDatabase::AccountTxOptions const& options, - std::optional const& limit_used, beast::Journal j); /** @@ -396,8 +378,6 @@ getNewestAccountTxsB( * match: the account, minimum and maximum ledger numbers to search, * marker of first returned entry, number of transactions to return, * flag if this number unlimited. - * @param limit_used Number or transactions already returned in calls - * to another shard databases. * @param page_length Total number of transactions to return. * @return Vector of tuples of found transactions, their metadata and * account sequences sorted in ascending order by account @@ -412,7 +392,6 @@ oldestAccountTxPage( void(std::uint32_t, std::string const&, Blob&&, Blob&&)> const& onTransaction, RelationalDatabase::AccountTxPageOptions const& options, - int limit_used, std::uint32_t page_length); /** @@ -427,8 +406,6 @@ oldestAccountTxPage( * match: the account, minimum and maximum ledger numbers to search, * marker of first returned entry, number of transactions to return, * flag if this number unlimited. - * @param limit_used Number or transactions already returned in calls - * to another shard databases. * @param page_length Total number of transactions to return. 
* @return Vector of tuples of found transactions, their metadata and * account sequences sorted in descending order by account @@ -443,7 +420,6 @@ newestAccountTxPage( void(std::uint32_t, std::string const&, Blob&&, Blob&&)> const& onTransaction, RelationalDatabase::AccountTxPageOptions const& options, - int limit_used, std::uint32_t page_length); /** diff --git a/src/xrpld/app/rdb/backend/detail/PostgresDatabase.cpp b/src/xrpld/app/rdb/backend/detail/PostgresDatabase.cpp index ac998991a6d..ac1a9813c2b 100644 --- a/src/xrpld/app/rdb/backend/detail/PostgresDatabase.cpp +++ b/src/xrpld/app/rdb/backend/detail/PostgresDatabase.cpp @@ -29,7 +29,6 @@ #include #include #include -#include #include #include #include diff --git a/src/xrpld/app/rdb/backend/detail/SQLiteDatabase.cpp b/src/xrpld/app/rdb/backend/detail/SQLiteDatabase.cpp index 3edc5d163d8..bc5992d03d0 100644 --- a/src/xrpld/app/rdb/backend/detail/SQLiteDatabase.cpp +++ b/src/xrpld/app/rdb/backend/detail/SQLiteDatabase.cpp @@ -25,10 +25,8 @@ #include #include #include -#include #include #include -#include #include #include #include @@ -59,19 +57,6 @@ class SQLiteDatabaseImp final : public SQLiteDatabase JLOG(j_.fatal()) << error; Throw(error.data()); } - - if (app.getShardStore() && - !makeMetaDBs( - config, - setup, - DatabaseCon::CheckpointerSetup{&jobQueue, &app_.logs()})) - { - std::string_view constexpr error = - "Failed to create metadata databases"; - - JLOG(j_.fatal()) << error; - Throw(error.data()); - } } std::optional @@ -195,7 +180,6 @@ class SQLiteDatabaseImp final : public SQLiteDatabase bool const useTxTables_; beast::Journal j_; std::unique_ptr lgrdb_, txdb_; - std::unique_ptr lgrMetaDB_, txMetaDB_; /** * @brief makeLedgerDBs Opens ledger and transaction databases for the node @@ -211,56 +195,6 @@ class SQLiteDatabaseImp final : public SQLiteDatabase DatabaseCon::Setup const& setup, DatabaseCon::CheckpointerSetup const& checkpointerSetup); - /** - * @brief makeMetaDBs Opens shard index lookup databases, and stores - * their descriptors in private member variables. - * @param config Config object. - * @param setup Path to the databases and other opening parameters. - * @param checkpointerSetup Checkpointer parameters. - * @return True if node databases opened successfully. - */ - bool - makeMetaDBs( - Config const& config, - DatabaseCon::Setup const& setup, - DatabaseCon::CheckpointerSetup const& checkpointerSetup); - - /** - * @brief seqToShardIndex Provides the index of the shard that stores the - * ledger with the given sequence. - * @param ledgerSeq Ledger sequence. - * @return Shard index. - */ - std::uint32_t - seqToShardIndex(LedgerIndex ledgerSeq) - { - return app_.getShardStore()->seqToShardIndex(ledgerSeq); - } - - /** - * @brief firstLedgerSeq Returns the sequence of the first ledger stored in - * the shard specified by the shard index parameter. - * @param shardIndex Shard Index. - * @return First ledger sequence. - */ - LedgerIndex - firstLedgerSeq(std::uint32_t shardIndex) - { - return app_.getShardStore()->firstLedgerSeq(shardIndex); - } - - /** - * @brief lastLedgerSeq Returns the sequence of the last ledger stored in - * the shard specified by the shard index parameter. - * @param shardIndex Shard Index. - * @return Last ledger sequence. - */ - LedgerIndex - lastLedgerSeq(std::uint32_t shardIndex) - { - return app_.getShardStore()->lastLedgerSeq(shardIndex); - } - /** * @brief existsLedger Checks if the node store ledger database exists. * @return True if the node store ledger database exists. 
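With the shard meta databases gone, `makeLedgerDBs` is the only database-opening path left in `SQLiteDatabaseImp`, and its shape reduces to "open the ledger and transaction databases and keep both handles as members." Here is a minimal sketch of that shape, assuming only soci's sqlite3 backend; the struct, file paths, and direct soci usage are illustrative, whereas the real code goes through `DatabaseCon` and runs the schema statements from `DBInit.h`.

```
// Minimal sketch: open both node-store databases and return their handles.
// Names and paths are illustrative; rippled wraps this in DatabaseCon and
// applies the CREATE TABLE statements defined in DBInit.h.
#include <memory>
#include <string>
#include <soci/soci.h>
#include <soci/sqlite3/soci-sqlite3.h>

struct LedgerDBs
{
    std::unique_ptr<soci::session> ledgerDb;
    std::unique_ptr<soci::session> transactionDb;
};

LedgerDBs
makeLedgerDBs(std::string const& dir)
{
    LedgerDBs dbs;
    dbs.ledgerDb =
        std::make_unique<soci::session>(soci::sqlite3, dir + "/ledger.db");
    dbs.transactionDb = std::make_unique<soci::session>(
        soci::sqlite3, dir + "/transaction.db");
    return dbs;
}
```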
@@ -282,16 +216,6 @@ class SQLiteDatabaseImp final : public SQLiteDatabase return static_cast(txdb_); } - /** - * shardStoreExists Checks whether the shard store exists - * @return True if the shard store exists - */ - bool - shardStoreExists() - { - return app_.getShardStore() != nullptr; - } - /** * @brief checkoutTransaction Checks out and returns node store ledger * database. @@ -313,131 +237,6 @@ class SQLiteDatabaseImp final : public SQLiteDatabase { return txdb_->checkoutDb(); } - - /** - * @brief doLedger Checks out the ledger database owned by the shard - * containing the given ledger, and invokes the provided callback - * with a session to that database. - * @param ledgerSeq Ledger sequence. - * @param callback Callback function to call. - * @return Value returned by callback function. - */ - bool - doLedger( - LedgerIndex ledgerSeq, - std::function const& callback) - { - return app_.getShardStore()->callForLedgerSQLByLedgerSeq( - ledgerSeq, callback); - } - - /** - * @brief doTransaction Checks out the transaction database owned by the - * shard containing the given ledger, and invokes the provided - * callback with a session to that database. - * @param ledgerSeq Ledger sequence. - * @param callback Callback function to call. - * @return Value returned by callback function. - */ - bool - doTransaction( - LedgerIndex ledgerSeq, - std::function const& callback) - { - return app_.getShardStore()->callForTransactionSQLByLedgerSeq( - ledgerSeq, callback); - } - - /** - * @brief iterateLedgerForward Checks out ledger databases for all shards in - * ascending order starting from the given shard index, until all - * shards in range have been visited or the callback returns false. - * For each visited shard, we invoke the provided callback with a - * session to the database and the current shard index. - * @param firstIndex First shard index to visit or no value if all shards - * should be visited. - * @param callback Callback function to call. - * @return True if each callback function returned true, false otherwise. - */ - bool - iterateLedgerForward( - std::optional firstIndex, - std::function< - bool(soci::session& session, std::uint32_t shardIndex)> const& - callback) - { - return app_.getShardStore()->iterateLedgerSQLsForward( - firstIndex, callback); - } - - /** - * @brief iterateTransactionForward Checks out transaction databases for all - * shards in ascending order starting from the given shard index, - * until all shards in range have been visited or the callback - * returns false. For each visited shard, we invoke the provided - * callback with a session to the database and the current shard - * index. - * @param firstIndex First shard index to visit or no value if all shards - * should be visited. - * @param callback Callback function to call. - * @return True if each callback function returned true, false otherwise. - */ - bool - iterateTransactionForward( - std::optional firstIndex, - std::function< - bool(soci::session& session, std::uint32_t shardIndex)> const& - callback) - { - return app_.getShardStore()->iterateLedgerSQLsForward( - firstIndex, callback); - } - - /** - * @brief iterateLedgerBack Checks out ledger databases for all - * shards in descending order starting from the given shard index, - * until all shards in range have been visited or the callback - * returns false. For each visited shard, we invoke the provided - * callback with a session to the database and the current shard - * index. 
- * @param firstIndex First shard index to visit or no value if all shards - * should be visited. - * @param callback Callback function to call. - * @return True if each callback function returned true, false otherwise. - */ - bool - iterateLedgerBack( - std::optional firstIndex, - std::function< - bool(soci::session& session, std::uint32_t shardIndex)> const& - callback) - { - return app_.getShardStore()->iterateLedgerSQLsBack( - firstIndex, callback); - } - - /** - * @brief iterateTransactionBack Checks out transaction databases for all - * shards in descending order starting from the given shard index, - * until all shards in range have been visited or the callback - * returns false. For each visited shard, we invoke the provided - * callback with a session to the database and the current shard - * index. - * @param firstIndex First shard index to visit or no value if all shards - * should be visited. - * @param callback Callback function to call. - * @return True if each callback function returned true, false otherwise. - */ - bool - iterateTransactionBack( - std::optional firstIndex, - std::function< - bool(soci::session& session, std::uint32_t shardIndex)> const& - callback) - { - return app_.getShardStore()->iterateLedgerSQLsBack( - firstIndex, callback); - } }; bool @@ -453,21 +252,6 @@ SQLiteDatabaseImp::makeLedgerDBs( return res; } -bool -SQLiteDatabaseImp::makeMetaDBs( - Config const& config, - DatabaseCon::Setup const& setup, - DatabaseCon::CheckpointerSetup const& checkpointerSetup) -{ - auto [lgrMetaDB, txMetaDB] = - detail::makeMetaDBs(config, setup, checkpointerSetup); - - txMetaDB_ = std::move(txMetaDB); - lgrMetaDB_ = std::move(lgrMetaDB); - - return true; -} - std::optional SQLiteDatabaseImp::getMinLedgerSeq() { @@ -478,19 +262,6 @@ SQLiteDatabaseImp::getMinLedgerSeq() return detail::getMinLedgerSeq(*db, detail::TableType::Ledgers); } - /* else use shard databases, if available */ - if (shardStoreExists()) - { - std::optional res; - iterateLedgerForward( - {}, [&](soci::session& session, std::uint32_t shardIndex) { - res = detail::getMinLedgerSeq( - session, detail::TableType::Ledgers); - return !res; - }); - return res; - } - /* else return empty value */ return {}; } @@ -507,18 +278,6 @@ SQLiteDatabaseImp::getTransactionsMinLedgerSeq() return detail::getMinLedgerSeq(*db, detail::TableType::Transactions); } - if (shardStoreExists()) - { - std::optional res; - iterateTransactionForward( - {}, [&](soci::session& session, std::uint32_t shardIndex) { - res = detail::getMinLedgerSeq( - session, detail::TableType::Transactions); - return !res; - }); - return res; - } - return {}; } @@ -535,18 +294,6 @@ SQLiteDatabaseImp::getAccountTransactionsMinLedgerSeq() *db, detail::TableType::AccountTransactions); } - if (shardStoreExists()) - { - std::optional res; - iterateTransactionForward( - {}, [&](soci::session& session, std::uint32_t shardIndex) { - res = detail::getMinLedgerSeq( - session, detail::TableType::AccountTransactions); - return !res; - }); - return res; - } - return {}; } @@ -559,18 +306,6 @@ SQLiteDatabaseImp::getMaxLedgerSeq() return detail::getMaxLedgerSeq(*db, detail::TableType::Ledgers); } - if (shardStoreExists()) - { - std::optional res; - iterateLedgerBack( - {}, [&](soci::session& session, std::uint32_t shardIndex) { - res = detail::getMaxLedgerSeq( - session, detail::TableType::Ledgers); - return !res; - }); - return res; - } - return {}; } @@ -587,15 +322,6 @@ SQLiteDatabaseImp::deleteTransactionByLedgerSeq(LedgerIndex ledgerSeq) *db, 
detail::TableType::Transactions, ledgerSeq); return; } - - if (shardStoreExists()) - { - doTransaction(ledgerSeq, [&](soci::session& session) { - detail::deleteByLedgerSeq( - session, detail::TableType::Transactions, ledgerSeq); - return true; - }); - } } void @@ -608,17 +334,6 @@ SQLiteDatabaseImp::deleteBeforeLedgerSeq(LedgerIndex ledgerSeq) *db, detail::TableType::Ledgers, ledgerSeq); return; } - - if (shardStoreExists()) - { - iterateLedgerBack( - seqToShardIndex(ledgerSeq), - [&](soci::session& session, std::uint32_t shardIndex) { - detail::deleteBeforeLedgerSeq( - session, detail::TableType::Ledgers, ledgerSeq); - return true; - }); - } } void @@ -634,17 +349,6 @@ SQLiteDatabaseImp::deleteTransactionsBeforeLedgerSeq(LedgerIndex ledgerSeq) *db, detail::TableType::Transactions, ledgerSeq); return; } - - if (shardStoreExists()) - { - iterateTransactionBack( - seqToShardIndex(ledgerSeq), - [&](soci::session& session, std::uint32_t shardIndex) { - detail::deleteBeforeLedgerSeq( - session, detail::TableType::Transactions, ledgerSeq); - return true; - }); - } } void @@ -661,17 +365,6 @@ SQLiteDatabaseImp::deleteAccountTransactionsBeforeLedgerSeq( *db, detail::TableType::AccountTransactions, ledgerSeq); return; } - - if (shardStoreExists()) - { - iterateTransactionBack( - seqToShardIndex(ledgerSeq), - [&](soci::session& session, std::uint32_t shardIndex) { - detail::deleteBeforeLedgerSeq( - session, detail::TableType::AccountTransactions, ledgerSeq); - return true; - }); - } } std::size_t @@ -686,18 +379,6 @@ SQLiteDatabaseImp::getTransactionCount() return detail::getRows(*db, detail::TableType::Transactions); } - if (shardStoreExists()) - { - std::size_t rows = 0; - iterateTransactionForward( - {}, [&](soci::session& session, std::uint32_t shardIndex) { - rows += - detail::getRows(session, detail::TableType::Transactions); - return true; - }); - return rows; - } - return 0; } @@ -713,18 +394,6 @@ SQLiteDatabaseImp::getAccountTransactionCount() return detail::getRows(*db, detail::TableType::AccountTransactions); } - if (shardStoreExists()) - { - std::size_t rows = 0; - iterateTransactionForward( - {}, [&](soci::session& session, std::uint32_t shardIndex) { - rows += detail::getRows( - session, detail::TableType::AccountTransactions); - return true; - }); - return rows; - } - return 0; } @@ -737,25 +406,6 @@ SQLiteDatabaseImp::getLedgerCountMinMax() return detail::getRowsMinMax(*db, detail::TableType::Ledgers); } - if (shardStoreExists()) - { - CountMinMax res{0, 0, 0}; - iterateLedgerForward( - {}, [&](soci::session& session, std::uint32_t shardIndex) { - auto r = - detail::getRowsMinMax(session, detail::TableType::Ledgers); - if (r.numberOfRows) - { - res.numberOfRows += r.numberOfRows; - if (res.minLedgerSequence == 0) - res.minLedgerSequence = r.minLedgerSequence; - res.maxLedgerSequence = r.maxLedgerSequence; - } - return true; - }); - return res; - } - return {0, 0, 0}; } @@ -771,27 +421,6 @@ SQLiteDatabaseImp::saveValidatedLedger( return false; } - if (auto shardStore = app_.getShardStore(); shardStore) - { - if (ledger->info().seq < shardStore->earliestLedgerSeq()) - // For the moment return false only when the ShardStore - // should accept the ledger, but fails when attempting - // to do so, i.e. when saveLedgerMeta fails. Later when - // the ShardStore supercedes the NodeStore, change this - // line to return false if the ledger is too early. 
- return true; - - auto lgrMetaSession = lgrMetaDB_->checkoutDb(); - auto txMetaSession = txMetaDB_->checkoutDb(); - - return detail::saveLedgerMeta( - ledger, - app_, - *lgrMetaSession, - *txMetaSession, - shardStore->seqToShardIndex(ledger->info().seq)); - } - return true; } @@ -807,16 +436,6 @@ SQLiteDatabaseImp::getLedgerInfoByIndex(LedgerIndex ledgerSeq) return res; } - if (shardStoreExists()) - { - std::optional res; - doLedger(ledgerSeq, [&](soci::session& session) { - res = detail::getLedgerInfoByIndex(session, ledgerSeq, j_); - return true; - }); - return res; - } - return {}; } @@ -832,22 +451,6 @@ SQLiteDatabaseImp::getNewestLedgerInfo() return res; } - if (shardStoreExists()) - { - std::optional res; - iterateLedgerBack( - {}, [&](soci::session& session, std::uint32_t shardIndex) { - if (auto info = detail::getNewestLedgerInfo(session, j_)) - { - res = info; - return false; - } - return true; - }); - - return res; - } - return {}; } @@ -864,24 +467,6 @@ SQLiteDatabaseImp::getLimitedOldestLedgerInfo(LedgerIndex ledgerFirstIndex) return res; } - if (shardStoreExists()) - { - std::optional res; - iterateLedgerForward( - seqToShardIndex(ledgerFirstIndex), - [&](soci::session& session, std::uint32_t shardIndex) { - if (auto info = detail::getLimitedOldestLedgerInfo( - session, ledgerFirstIndex, j_)) - { - res = info; - return false; - } - return true; - }); - - return res; - } - return {}; } @@ -898,23 +483,6 @@ SQLiteDatabaseImp::getLimitedNewestLedgerInfo(LedgerIndex ledgerFirstIndex) return res; } - if (shardStoreExists()) - { - std::optional res; - iterateLedgerBack( - {}, [&](soci::session& session, std::uint32_t shardIndex) { - if (auto info = detail::getLimitedNewestLedgerInfo( - session, ledgerFirstIndex, j_)) - { - res = info; - return false; - } - return shardIndex >= seqToShardIndex(ledgerFirstIndex); - }); - - return res; - } - return {}; } @@ -930,24 +498,6 @@ SQLiteDatabaseImp::getLedgerInfoByHash(uint256 const& ledgerHash) return res; } - if (auto shardStore = app_.getShardStore()) - { - std::optional res; - auto lgrMetaSession = lgrMetaDB_->checkoutDb(); - - if (auto const shardIndex = - detail::getShardIndexforLedger(*lgrMetaSession, ledgerHash)) - { - shardStore->callForLedgerSQLByShardIndex( - *shardIndex, [&](soci::session& session) { - res = detail::getLedgerInfoByHash(session, ledgerHash, j_); - return false; // unused - }); - } - - return res; - } - return {}; } @@ -963,16 +513,6 @@ SQLiteDatabaseImp::getHashByIndex(LedgerIndex ledgerIndex) return res; } - if (shardStoreExists()) - { - uint256 hash; - doLedger(ledgerIndex, [&](soci::session& session) { - hash = detail::getHashByIndex(session, ledgerIndex); - return true; - }); - return hash; - } - return uint256(); } @@ -988,16 +528,6 @@ SQLiteDatabaseImp::getHashesByIndex(LedgerIndex ledgerIndex) return res; } - if (shardStoreExists()) - { - std::optional res; - doLedger(ledgerIndex, [&](soci::session& session) { - res = detail::getHashesByIndex(session, ledgerIndex, j_); - return true; - }); - return res; - } - return {}; } @@ -1013,26 +543,6 @@ SQLiteDatabaseImp::getHashesByIndex(LedgerIndex minSeq, LedgerIndex maxSeq) return res; } - if (shardStoreExists()) - { - std::map res; - while (minSeq <= maxSeq) - { - LedgerIndex shardMaxSeq = lastLedgerSeq(seqToShardIndex(minSeq)); - if (shardMaxSeq > maxSeq) - shardMaxSeq = maxSeq; - doLedger(minSeq, [&](soci::session& session) { - auto r = - detail::getHashesByIndex(session, minSeq, shardMaxSeq, j_); - res.insert(r.begin(), r.end()); - return true; - }); - 
minSeq = shardMaxSeq + 1; - } - - return res; - } - return {}; } @@ -1045,39 +555,12 @@ SQLiteDatabaseImp::getTxHistory(LedgerIndex startIndex) if (existsTransaction()) { auto db = checkoutTransaction(); - auto const res = - detail::getTxHistory(*db, app_, startIndex, 20, false).first; + auto const res = detail::getTxHistory(*db, app_, startIndex, 20).first; if (!res.empty()) return res; } - if (shardStoreExists()) - { - std::vector> txs; - int quantity = 20; - iterateTransactionBack( - {}, [&](soci::session& session, std::uint32_t shardIndex) { - auto [tx, total] = detail::getTxHistory( - session, app_, startIndex, quantity, true); - txs.insert(txs.end(), tx.begin(), tx.end()); - if (total > 0) - { - quantity -= total; - if (quantity <= 0) - return false; - startIndex = 0; - } - else - { - startIndex += total; - } - return true; - }); - - return txs; - } - return {}; } @@ -1092,52 +575,10 @@ SQLiteDatabaseImp::getOldestAccountTxs(AccountTxOptions const& options) if (existsTransaction()) { auto db = checkoutTransaction(); - return detail::getOldestAccountTxs( - *db, app_, ledgerMaster, options, {}, j_) + return detail::getOldestAccountTxs(*db, app_, ledgerMaster, options, j_) .first; } - if (shardStoreExists()) - { - AccountTxs ret; - AccountTxOptions opt = options; - int limit_used = 0; - iterateTransactionForward( - opt.minLedger ? seqToShardIndex(opt.minLedger) - : std::optional(), - [&](soci::session& session, std::uint32_t shardIndex) { - if (opt.maxLedger && - shardIndex > seqToShardIndex(opt.maxLedger)) - return false; - auto [r, total] = detail::getOldestAccountTxs( - session, app_, ledgerMaster, opt, limit_used, j_); - ret.insert(ret.end(), r.begin(), r.end()); - if (!total) - return false; - if (total > 0) - { - limit_used += total; - opt.offset = 0; - } - else - { - /* - * If total < 0, then -total means number of transactions - * skipped, see definition of return value of function - * ripple::getOldestAccountTxs(). - */ - total = -total; - if (opt.offset <= total) - opt.offset = 0; - else - opt.offset -= total; - } - return true; - }); - - return ret; - } - return {}; } @@ -1152,52 +593,10 @@ SQLiteDatabaseImp::getNewestAccountTxs(AccountTxOptions const& options) if (existsTransaction()) { auto db = checkoutTransaction(); - return detail::getNewestAccountTxs( - *db, app_, ledgerMaster, options, {}, j_) + return detail::getNewestAccountTxs(*db, app_, ledgerMaster, options, j_) .first; } - if (shardStoreExists()) - { - AccountTxs ret; - AccountTxOptions opt = options; - int limit_used = 0; - iterateTransactionBack( - opt.maxLedger ? seqToShardIndex(opt.maxLedger) - : std::optional(), - [&](soci::session& session, std::uint32_t shardIndex) { - if (opt.minLedger && - shardIndex < seqToShardIndex(opt.minLedger)) - return false; - auto [r, total] = detail::getNewestAccountTxs( - session, app_, ledgerMaster, opt, limit_used, j_); - ret.insert(ret.end(), r.begin(), r.end()); - if (!total) - return false; - if (total > 0) - { - limit_used += total; - opt.offset = 0; - } - else - { - /* - * If total < 0, then -total means number of transactions - * skipped, see definition of return value of function - * ripple::getNewestAccountTxs(). 
- */ - total = -total; - if (opt.offset <= total) - opt.offset = 0; - else - opt.offset -= total; - } - return true; - }); - - return ret; - } - return {}; } @@ -1210,48 +609,7 @@ SQLiteDatabaseImp::getOldestAccountTxsB(AccountTxOptions const& options) if (existsTransaction()) { auto db = checkoutTransaction(); - return detail::getOldestAccountTxsB(*db, app_, options, {}, j_).first; - } - - if (shardStoreExists()) - { - MetaTxsList ret; - AccountTxOptions opt = options; - int limit_used = 0; - iterateTransactionForward( - opt.minLedger ? seqToShardIndex(opt.minLedger) - : std::optional(), - [&](soci::session& session, std::uint32_t shardIndex) { - if (opt.maxLedger && - shardIndex > seqToShardIndex(opt.maxLedger)) - return false; - auto [r, total] = detail::getOldestAccountTxsB( - session, app_, opt, limit_used, j_); - ret.insert(ret.end(), r.begin(), r.end()); - if (!total) - return false; - if (total > 0) - { - limit_used += total; - opt.offset = 0; - } - else - { - /* - * If total < 0, then -total means number of transactions - * skipped, see definition of return value of function - * ripple::getOldestAccountTxsB(). - */ - total = -total; - if (opt.offset <= total) - opt.offset = 0; - else - opt.offset -= total; - } - return true; - }); - - return ret; + return detail::getOldestAccountTxsB(*db, app_, options, j_).first; } return {}; @@ -1266,48 +624,7 @@ SQLiteDatabaseImp::getNewestAccountTxsB(AccountTxOptions const& options) if (existsTransaction()) { auto db = checkoutTransaction(); - return detail::getNewestAccountTxsB(*db, app_, options, {}, j_).first; - } - - if (shardStoreExists()) - { - MetaTxsList ret; - AccountTxOptions opt = options; - int limit_used = 0; - iterateTransactionBack( - opt.maxLedger ? seqToShardIndex(opt.maxLedger) - : std::optional(), - [&](soci::session& session, std::uint32_t shardIndex) { - if (opt.minLedger && - shardIndex < seqToShardIndex(opt.minLedger)) - return false; - auto [r, total] = detail::getNewestAccountTxsB( - session, app_, opt, limit_used, j_); - ret.insert(ret.end(), r.begin(), r.end()); - if (!total) - return false; - if (total > 0) - { - limit_used += total; - opt.offset = 0; - } - else - { - /* - * If total < 0, then -total means number of transactions - * skipped, see definition of return value of function - * ripple::getNewestAccountTxsB(). - */ - total = -total; - if (opt.offset <= total) - opt.offset = 0; - else - opt.offset -= total; - } - return true; - }); - - return ret; + return detail::getNewestAccountTxsB(*db, app_, options, j_).first; } return {}; @@ -1339,39 +656,11 @@ SQLiteDatabaseImp::oldestAccountTxPage(AccountTxPageOptions const& options) auto db = checkoutTransaction(); auto newmarker = detail::oldestAccountTxPage( - *db, onUnsavedLedger, onTransaction, options, 0, page_length) + *db, onUnsavedLedger, onTransaction, options, page_length) .first; return {ret, newmarker}; } - if (shardStoreExists()) - { - AccountTxPageOptions opt = options; - int limit_used = 0; - iterateTransactionForward( - opt.minLedger ? 
seqToShardIndex(opt.minLedger) - : std::optional(), - [&](soci::session& session, std::uint32_t shardIndex) { - if (opt.maxLedger != UINT32_MAX && - shardIndex > seqToShardIndex(opt.minLedger)) - return false; - auto [marker, total] = detail::oldestAccountTxPage( - session, - onUnsavedLedger, - onTransaction, - opt, - limit_used, - page_length); - opt.marker = marker; - if (total < 0) - return false; - limit_used += total; - return true; - }); - - return {ret, opt.marker}; - } - return {}; } @@ -1401,39 +690,11 @@ SQLiteDatabaseImp::newestAccountTxPage(AccountTxPageOptions const& options) auto db = checkoutTransaction(); auto newmarker = detail::newestAccountTxPage( - *db, onUnsavedLedger, onTransaction, options, 0, page_length) + *db, onUnsavedLedger, onTransaction, options, page_length) .first; return {ret, newmarker}; } - if (shardStoreExists()) - { - AccountTxPageOptions opt = options; - int limit_used = 0; - iterateTransactionBack( - opt.maxLedger != UINT32_MAX ? seqToShardIndex(opt.maxLedger) - : std::optional(), - [&](soci::session& session, std::uint32_t shardIndex) { - if (opt.minLedger && - shardIndex < seqToShardIndex(opt.minLedger)) - return false; - auto [marker, total] = detail::newestAccountTxPage( - session, - onUnsavedLedger, - onTransaction, - opt, - limit_used, - page_length); - opt.marker = marker; - if (total < 0) - return false; - limit_used += total; - return true; - }); - - return {ret, opt.marker}; - } - return {}; } @@ -1462,39 +723,11 @@ SQLiteDatabaseImp::oldestAccountTxPageB(AccountTxPageOptions const& options) auto db = checkoutTransaction(); auto newmarker = detail::oldestAccountTxPage( - *db, onUnsavedLedger, onTransaction, options, 0, page_length) + *db, onUnsavedLedger, onTransaction, options, page_length) .first; return {ret, newmarker}; } - if (shardStoreExists()) - { - AccountTxPageOptions opt = options; - int limit_used = 0; - iterateTransactionForward( - opt.minLedger ? seqToShardIndex(opt.minLedger) - : std::optional(), - [&](soci::session& session, std::uint32_t shardIndex) { - if (opt.maxLedger != UINT32_MAX && - shardIndex > seqToShardIndex(opt.minLedger)) - return false; - auto [marker, total] = detail::oldestAccountTxPage( - session, - onUnsavedLedger, - onTransaction, - opt, - limit_used, - page_length); - opt.marker = marker; - if (total < 0) - return false; - limit_used += total; - return true; - }); - - return {ret, opt.marker}; - } - return {}; } @@ -1523,39 +756,11 @@ SQLiteDatabaseImp::newestAccountTxPageB(AccountTxPageOptions const& options) auto db = checkoutTransaction(); auto newmarker = detail::newestAccountTxPage( - *db, onUnsavedLedger, onTransaction, options, 0, page_length) + *db, onUnsavedLedger, onTransaction, options, page_length) .first; return {ret, newmarker}; } - if (shardStoreExists()) - { - AccountTxPageOptions opt = options; - int limit_used = 0; - iterateTransactionBack( - opt.maxLedger != UINT32_MAX ? 
seqToShardIndex(opt.maxLedger) - : std::optional(), - [&](soci::session& session, std::uint32_t shardIndex) { - if (opt.minLedger && - shardIndex < seqToShardIndex(opt.minLedger)) - return false; - auto [marker, total] = detail::newestAccountTxPage( - session, - onUnsavedLedger, - onTransaction, - opt, - limit_used, - page_length); - opt.marker = marker; - if (total < 0) - return false; - limit_used += total; - return true; - }); - - return {ret, opt.marker}; - } - return {}; } @@ -1574,37 +779,6 @@ SQLiteDatabaseImp::getTransaction( return detail::getTransaction(*db, app_, id, range, ec); } - if (auto shardStore = app_.getShardStore(); shardStore) - { - std::variant res(TxSearched::unknown); - auto txMetaSession = txMetaDB_->checkoutDb(); - - if (auto const shardIndex = - detail::getShardIndexforTransaction(*txMetaSession, id)) - { - shardStore->callForTransactionSQLByShardIndex( - *shardIndex, [&](soci::session& session) { - std::optional> range1; - if (range) - { - std::uint32_t const low = std::max( - range->lower(), firstLedgerSeq(*shardIndex)); - std::uint32_t const high = std::min( - range->upper(), lastLedgerSeq(*shardIndex)); - if (low <= high) - range1 = ClosedInterval(low, high); - } - res = detail::getTransaction(session, app_, id, range1, ec); - - return res.index() == 1 && - std::get(res) != - TxSearched::unknown; // unused - }); - } - - return res; - } - return TxSearched::unknown; } @@ -1617,14 +791,6 @@ SQLiteDatabaseImp::ledgerDbHasSpace(Config const& config) return detail::dbHasSpace(*db, config, j_); } - if (shardStoreExists()) - { - return iterateLedgerBack( - {}, [&](soci::session& session, std::uint32_t shardIndex) { - return detail::dbHasSpace(session, config, j_); - }); - } - return true; } @@ -1640,14 +806,6 @@ SQLiteDatabaseImp::transactionDbHasSpace(Config const& config) return detail::dbHasSpace(*db, config, j_); } - if (shardStoreExists()) - { - return iterateTransactionBack( - {}, [&](soci::session& session, std::uint32_t shardIndex) { - return detail::dbHasSpace(session, config, j_); - }); - } - return true; } @@ -1659,17 +817,6 @@ SQLiteDatabaseImp::getKBUsedAll() return ripple::getKBUsedAll(lgrdb_->getSession()); } - if (shardStoreExists()) - { - std::uint32_t sum = 0; - iterateLedgerBack( - {}, [&](soci::session& session, std::uint32_t shardIndex) { - sum += ripple::getKBUsedAll(session); - return true; - }); - return sum; - } - return 0; } @@ -1681,17 +828,6 @@ SQLiteDatabaseImp::getKBUsedLedger() return ripple::getKBUsedDB(lgrdb_->getSession()); } - if (shardStoreExists()) - { - std::uint32_t sum = 0; - iterateLedgerBack( - {}, [&](soci::session& session, std::uint32_t shardIndex) { - sum += ripple::getKBUsedDB(session); - return true; - }); - return sum; - } - return 0; } @@ -1706,17 +842,6 @@ SQLiteDatabaseImp::getKBUsedTransaction() return ripple::getKBUsedDB(txdb_->getSession()); } - if (shardStoreExists()) - { - std::uint32_t sum = 0; - iterateTransactionBack( - {}, [&](soci::session& session, std::uint32_t shardIndex) { - sum += ripple::getKBUsedDB(session); - return true; - }); - return sum; - } - return 0; } diff --git a/src/xrpld/app/rdb/backend/detail/Shard.h b/src/xrpld/app/rdb/backend/detail/Shard.h deleted file mode 100644 index 870b6b82fe4..00000000000 --- a/src/xrpld/app/rdb/backend/detail/Shard.h +++ /dev/null @@ -1,90 +0,0 @@ -//------------------------------------------------------------------------------ -/* - This file is part of rippled: https://github.com/ripple/rippled - Copyright (c) 2020 Ripple Labs Inc. 
- - Permission to use, copy, modify, and/or distribute this software for any - purpose with or without fee is hereby granted, provided that the above - copyright notice and this permission notice appear in all copies. - - THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. -*/ -//============================================================================== - -#ifndef RIPPLE_APP_RDB_BACKEND_DETAIL_SHARD_H_INCLUDED -#define RIPPLE_APP_RDB_BACKEND_DETAIL_SHARD_H_INCLUDED - -#include -#include -#include -#include -#include -#include - -namespace ripple { -namespace detail { - -/** - * @brief makeMetaDBs Opens ledger and transaction 'meta' databases which - * map ledger hashes and transaction IDs to the index of the shard - * that holds the ledger or transaction. - * @param config Config object. - * @param setup Path to database and opening parameters. - * @param checkpointerSetup Database checkpointer setup. - * @return Struct DatabasePair which contains unique pointers to the ledger - * and transaction databases. - */ -DatabasePair -makeMetaDBs( - Config const& config, - DatabaseCon::Setup const& setup, - DatabaseCon::CheckpointerSetup const& checkpointerSetup); - -/** - * @brief saveLedgerMeta Stores (transaction ID -> shard index) and - * (ledger hash -> shard index) mappings in the meta databases. - * @param ledger The ledger. - * @param app Application object. - * @param lgrMetaSession Session to ledger meta database. - * @param txnMetaSession Session to transaction meta database. - * @param shardIndex The index of the shard that contains this ledger. - * @return True on success. - */ -bool -saveLedgerMeta( - std::shared_ptr const& ledger, - Application& app, - soci::session& lgrMetaSession, - soci::session& txnMetaSession, - std::uint32_t shardIndex); - -/** - * @brief getShardIndexforLedger Queries the ledger meta database to - * retrieve the index of the shard that contains this ledger. - * @param session Session to the database. - * @param hash Hash of the ledger. - * @return The index of the shard on success, otherwise an unseated value. - */ -std::optional -getShardIndexforLedger(soci::session& session, LedgerHash const& hash); - -/** - * @brief getShardIndexforTransaction Queries the transaction meta database to - * retrieve the index of the shard that contains this transaction. - * @param session Session to the database. - * @param id ID of the transaction. - * @return The index of the shard on success, otherwise an unseated value. - */ -std::optional -getShardIndexforTransaction(soci::session& session, TxID const& id); - -} // namespace detail -} // namespace ripple - -#endif diff --git a/src/xrpld/app/rdb/backend/detail/detail/Shard.cpp b/src/xrpld/app/rdb/backend/detail/detail/Shard.cpp deleted file mode 100644 index 6db64b1249b..00000000000 --- a/src/xrpld/app/rdb/backend/detail/detail/Shard.cpp +++ /dev/null @@ -1,147 +0,0 @@ -//------------------------------------------------------------------------------ -/* - This file is part of rippled: https://github.com/ripple/rippled - Copyright (c) 2020 Ripple Labs Inc. 
- - Permission to use, copy, modify, and/or distribute this software for any - purpose with or without fee is hereby granted, provided that the above - copyright notice and this permission notice appear in all copies. - - THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. -*/ -//============================================================================== - -#include -#include -#include -#include -#include -#include -#include - -namespace ripple { -namespace detail { - -DatabasePair -makeMetaDBs( - Config const& config, - DatabaseCon::Setup const& setup, - DatabaseCon::CheckpointerSetup const& checkpointerSetup) -{ - // ledger meta database - auto lgrMetaDB{std::make_unique( - setup, - LgrMetaDBName, - LgrMetaDBPragma, - LgrMetaDBInit, - checkpointerSetup)}; - - if (!config.useTxTables()) - return {std::move(lgrMetaDB), nullptr}; - - // transaction meta database - auto txMetaDB{std::make_unique( - setup, TxMetaDBName, TxMetaDBPragma, TxMetaDBInit, checkpointerSetup)}; - - return {std::move(lgrMetaDB), std::move(txMetaDB)}; -} - -bool -saveLedgerMeta( - std::shared_ptr const& ledger, - Application& app, - soci::session& lgrMetaSession, - soci::session& txnMetaSession, - std::uint32_t const shardIndex) -{ - std::string_view constexpr lgrSQL = - R"sql(INSERT OR REPLACE INTO LedgerMeta VALUES - (:ledgerHash,:shardIndex);)sql"; - - auto const hash = to_string(ledger->info().hash); - lgrMetaSession << lgrSQL, soci::use(hash), soci::use(shardIndex); - - if (!app.config().useTxTables()) - return true; - - auto const aLedger = [&app, ledger]() -> std::shared_ptr { - try - { - auto aLedger = - app.getAcceptedLedgerCache().fetch(ledger->info().hash); - if (!aLedger) - { - aLedger = std::make_shared(ledger, app); - app.getAcceptedLedgerCache().canonicalize_replace_client( - ledger->info().hash, aLedger); - } - - return aLedger; - } - catch (std::exception const&) - { - JLOG(app.journal("Ledger").warn()) - << "An accepted ledger was missing nodes"; - } - - return {}; - }(); - - if (!aLedger) - return false; - - soci::transaction tr(txnMetaSession); - - for (auto const& acceptedLedgerTx : *aLedger) - { - std::string_view constexpr txnSQL = - R"sql(INSERT OR REPLACE INTO TransactionMeta VALUES - (:transactionID,:shardIndex);)sql"; - - auto const transactionID = - to_string(acceptedLedgerTx->getTransactionID()); - - txnMetaSession << txnSQL, soci::use(transactionID), - soci::use(shardIndex); - } - - tr.commit(); - return true; -} - -std::optional -getShardIndexforLedger(soci::session& session, LedgerHash const& hash) -{ - std::uint32_t shardIndex; - session << "SELECT ShardIndex FROM LedgerMeta WHERE LedgerHash = '" << hash - << "';", - soci::into(shardIndex); - - if (!session.got_data()) - return std::nullopt; - - return shardIndex; -} - -std::optional -getShardIndexforTransaction(soci::session& session, TxID const& id) -{ - std::uint32_t shardIndex; - session << "SELECT ShardIndex FROM TransactionMeta WHERE TransID = '" << id - << "';", - soci::into(shardIndex); - - if (!session.got_data()) - return std::nullopt; - - return shardIndex; -} - -} // namespace detail 
-} // namespace ripple diff --git a/src/xrpld/app/rdb/detail/Download.cpp b/src/xrpld/app/rdb/detail/Download.cpp deleted file mode 100644 index 012d60b3734..00000000000 --- a/src/xrpld/app/rdb/detail/Download.cpp +++ /dev/null @@ -1,152 +0,0 @@ -//------------------------------------------------------------------------------ -/* - This file is part of rippled: https://github.com/ripple/rippled - Copyright (c) 2021 Ripple Labs Inc. - - Permission to use, copy, modify, and/or distribute this software for any - purpose with or without fee is hereby granted, provided that the above - copyright notice and this permission notice appear in all copies. - - THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. -*/ -//============================================================================== - -#include -#include - -namespace ripple { - -std::pair, std::optional> -openDatabaseBodyDb( - DatabaseCon::Setup const& setup, - boost::filesystem::path const& path) -{ - // SOCI requires boost::optional (not std::optional) as the parameter. - boost::optional pathFromDb; - boost::optional size; - - auto conn = std::make_unique( - setup, "Download", DownloaderDBPragma, DatabaseBodyDBInit); - - auto& session = *conn->checkoutDb(); - - session << "SELECT Path FROM Download WHERE Part=0;", - soci::into(pathFromDb); - - // Try to reuse preexisting - // database. - if (pathFromDb) - { - // Can't resuse - database was - // from a different file download. - if (pathFromDb != path.string()) - { - session << "DROP TABLE Download;"; - } - - // Continuing a file download. - else - { - session << "SELECT SUM(LENGTH(Data)) FROM Download;", - soci::into(size); - } - } - - return {std::move(conn), (size ? *size : std::optional())}; -} - -std::uint64_t -databaseBodyDoPut( - soci::session& session, - std::string const& data, - std::string const& path, - std::uint64_t fileSize, - std::uint64_t part, - std::uint16_t maxRowSizePad) -{ - std::uint64_t rowSize = 0; - soci::indicator rti; - - std::uint64_t remainingInRow = 0; - - auto be = - dynamic_cast(session.get_backend()); - BOOST_ASSERT(be); - - // This limits how large we can make the blob - // in each row. Also subtract a pad value to - // account for the other values in the row. 
- auto const blobMaxSize = - sqlite_api::sqlite3_limit(be->conn_, SQLITE_LIMIT_LENGTH, -1) - - maxRowSizePad; - - std::string newpath; - - auto rowInit = [&] { - session << "INSERT INTO Download VALUES (:path, zeroblob(0), 0, :part)", - soci::use(newpath), soci::use(part); - - remainingInRow = blobMaxSize; - rowSize = 0; - }; - - session << "SELECT Path,Size,Part FROM Download ORDER BY Part DESC " - "LIMIT 1", - soci::into(newpath), soci::into(rowSize), soci::into(part, rti); - - if (!session.got_data()) - { - newpath = path; - rowInit(); - } - else - remainingInRow = blobMaxSize - rowSize; - - auto insert = [&session, &rowSize, &part, &fs = fileSize]( - auto const& data) { - std::uint64_t updatedSize = rowSize + data.size(); - - session << "UPDATE Download SET Data = CAST(Data || :data AS blob), " - "Size = :size WHERE Part = :part;", - soci::use(data), soci::use(updatedSize), soci::use(part); - - fs += data.size(); - }; - - size_t currentBase = 0; - - while (currentBase + remainingInRow < data.size()) - { - if (remainingInRow) - { - insert(data.substr(currentBase, remainingInRow)); - currentBase += remainingInRow; - } - - ++part; - rowInit(); - } - - insert(data.substr(currentBase)); - - return part; -} - -void -databaseBodyFinish(soci::session& session, std::ofstream& fout) -{ - soci::rowset rs = - (session.prepare << "SELECT Data FROM Download ORDER BY PART ASC;"); - - // iteration through the resultset: - for (auto it = rs.begin(); it != rs.end(); ++it) - fout.write(it->data(), it->size()); -} - -} // namespace ripple diff --git a/src/xrpld/app/rdb/detail/RelationalDatabase.cpp b/src/xrpld/app/rdb/detail/RelationalDatabase.cpp index 874550abd97..07dc27fd1d3 100644 --- a/src/xrpld/app/rdb/detail/RelationalDatabase.cpp +++ b/src/xrpld/app/rdb/detail/RelationalDatabase.cpp @@ -20,7 +20,6 @@ #include #include #include -#include namespace ripple { diff --git a/src/xrpld/app/rdb/detail/ShardArchive.cpp b/src/xrpld/app/rdb/detail/ShardArchive.cpp deleted file mode 100644 index 81b99348cb4..00000000000 --- a/src/xrpld/app/rdb/detail/ShardArchive.cpp +++ /dev/null @@ -1,68 +0,0 @@ -//------------------------------------------------------------------------------ -/* - This file is part of rippled: https://github.com/ripple/rippled - Copyright (c) 2021 Ripple Labs Inc. - - Permission to use, copy, modify, and/or distribute this software for any - purpose with or without fee is hereby granted, provided that the above - copyright notice and this permission notice appear in all copies. - - THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
-*/ -//============================================================================== - -#include - -namespace ripple { - -std::unique_ptr -makeArchiveDB(boost::filesystem::path const& dir, std::string const& dbName) -{ - return std::make_unique( - dir, dbName, DownloaderDBPragma, ShardArchiveHandlerDBInit); -} - -void -readArchiveDB( - DatabaseCon& db, - std::function const& func) -{ - soci::rowset rs = - (db.getSession().prepare << "SELECT * FROM State;"); - - for (auto it = rs.begin(); it != rs.end(); ++it) - { - func(it->get(1), it->get(0)); - } -} - -void -insertArchiveDB( - DatabaseCon& db, - std::uint32_t shardIndex, - std::string const& url) -{ - db.getSession() << "INSERT INTO State VALUES (:index, :url);", - soci::use(shardIndex), soci::use(url); -} - -void -deleteFromArchiveDB(DatabaseCon& db, std::uint32_t shardIndex) -{ - db.getSession() << "DELETE FROM State WHERE ShardIndex = :index;", - soci::use(shardIndex); -} - -void -dropArchiveDB(DatabaseCon& db) -{ - db.getSession() << "DROP TABLE State;"; -} - -} // namespace ripple diff --git a/src/xrpld/app/rdb/detail/UnitaryShard.cpp b/src/xrpld/app/rdb/detail/UnitaryShard.cpp deleted file mode 100644 index dd64759f4a8..00000000000 --- a/src/xrpld/app/rdb/detail/UnitaryShard.cpp +++ /dev/null @@ -1,320 +0,0 @@ -//------------------------------------------------------------------------------ -/* - This file is part of rippled: https://github.com/ripple/rippled - Copyright (c) 2021 Ripple Labs Inc. - - Permission to use, copy, modify, and/or distribute this software for any - purpose with or without fee is hereby granted, provided that the above - copyright notice and this permission notice appear in all copies. - - THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
-*/ -//============================================================================== - -#include -#include -#include -#include - -namespace ripple { - -DatabasePair -makeShardCompleteLedgerDBs( - Config const& config, - DatabaseCon::Setup const& setup) -{ - auto tx{std::make_unique( - setup, TxDBName, FinalShardDBPragma, TxDBInit)}; - tx->getSession() << boost::str( - boost::format("PRAGMA cache_size=-%d;") % - kilobytes(config.getValueFor(SizedItem::txnDBCache, std::nullopt))); - - auto lgr{std::make_unique( - setup, LgrDBName, FinalShardDBPragma, LgrDBInit)}; - lgr->getSession() << boost::str( - boost::format("PRAGMA cache_size=-%d;") % - kilobytes(config.getValueFor(SizedItem::lgrDBCache, std::nullopt))); - - return {std::move(lgr), std::move(tx)}; -} - -DatabasePair -makeShardIncompleteLedgerDBs( - Config const& config, - DatabaseCon::Setup const& setup, - DatabaseCon::CheckpointerSetup const& checkpointerSetup) -{ - // transaction database - auto tx{std::make_unique( - setup, TxDBName, TxDBPragma, TxDBInit, checkpointerSetup)}; - tx->getSession() << boost::str( - boost::format("PRAGMA cache_size=-%d;") % - kilobytes(config.getValueFor(SizedItem::txnDBCache))); - - // ledger database - auto lgr{std::make_unique( - setup, LgrDBName, LgrDBPragma, LgrDBInit, checkpointerSetup)}; - lgr->getSession() << boost::str( - boost::format("PRAGMA cache_size=-%d;") % - kilobytes(config.getValueFor(SizedItem::lgrDBCache))); - - return {std::move(lgr), std::move(tx)}; -} - -bool -updateLedgerDBs( - soci::session& txsession, - soci::session& lgrsession, - std::shared_ptr const& ledger, - std::uint32_t index, - std::atomic& stop, - beast::Journal j) -{ - auto const ledgerSeq{ledger->info().seq}; - - // Update the transactions database - { - auto& session{txsession}; - soci::transaction tr(session); - - session << "DELETE FROM Transactions " - "WHERE LedgerSeq = :seq;", - soci::use(ledgerSeq); - session << "DELETE FROM AccountTransactions " - "WHERE LedgerSeq = :seq;", - soci::use(ledgerSeq); - - if (ledger->info().txHash.isNonZero()) - { - auto const sSeq{std::to_string(ledgerSeq)}; - if (!ledger->txMap().isValid()) - { - JLOG(j.error()) - << "shard " << index << " has an invalid transaction map" - << " on sequence " << sSeq; - return false; - } - - for (auto const& item : ledger->txs) - { - if (stop.load(std::memory_order_relaxed)) - return false; - - TxMeta const txMeta{ - item.first->getTransactionID(), - ledger->seq(), - *item.second}; - - auto const sTxID = to_string(txMeta.getTxID()); - - session << "DELETE FROM AccountTransactions " - "WHERE TransID = :txID;", - soci::use(sTxID); - - auto const& accounts = txMeta.getAffectedAccounts(); - if (!accounts.empty()) - { - auto const sTxnSeq{std::to_string(txMeta.getIndex())}; - auto const s{boost::str( - boost::format("('%s','%s',%s,%s)") % sTxID % "%s" % - sSeq % sTxnSeq)}; - std::string sql; - sql.reserve((accounts.size() + 1) * 128); - sql = - "INSERT INTO AccountTransactions " - "(TransID, Account, LedgerSeq, TxnSeq) VALUES "; - sql += boost::algorithm::join( - accounts | - boost::adaptors::transformed( - [&](AccountID const& accountID) { - return boost::str( - boost::format(s) % - ripple::toBase58(accountID)); - }), - ","); - sql += ';'; - session << sql; - - JLOG(j.trace()) - << "shard " << index << " account transaction: " << sql; - } - else if (!isPseudoTx(*item.first)) - { - // It's okay for pseudo transactions to not affect any - // accounts. But otherwise... 
- JLOG(j.warn()) - << "shard " << index << " transaction in ledger " - << sSeq << " affects no accounts"; - } - - Serializer s; - item.second->add(s); - session - << (STTx::getMetaSQLInsertReplaceHeader() + - item.first->getMetaSQL( - ledgerSeq, sqlBlobLiteral(s.modData())) + - ';'); - } - } - - tr.commit(); - } - - auto const sHash{to_string(ledger->info().hash)}; - - // Update the ledger database - { - auto& session{lgrsession}; - soci::transaction tr(session); - - auto const sParentHash{to_string(ledger->info().parentHash)}; - auto const sDrops{to_string(ledger->info().drops)}; - auto const closingTime{ - ledger->info().closeTime.time_since_epoch().count()}; - auto const prevClosingTime{ - ledger->info().parentCloseTime.time_since_epoch().count()}; - auto const closeTimeRes{ledger->info().closeTimeResolution.count()}; - auto const sAccountHash{to_string(ledger->info().accountHash)}; - auto const sTxHash{to_string(ledger->info().txHash)}; - - session << "DELETE FROM Ledgers " - "WHERE LedgerSeq = :seq;", - soci::use(ledgerSeq); - session << "INSERT OR REPLACE INTO Ledgers (" - "LedgerHash, LedgerSeq, PrevHash, TotalCoins, ClosingTime," - "PrevClosingTime, CloseTimeRes, CloseFlags, AccountSetHash," - "TransSetHash)" - "VALUES (" - ":ledgerHash, :ledgerSeq, :prevHash, :totalCoins," - ":closingTime, :prevClosingTime, :closeTimeRes," - ":closeFlags, :accountSetHash, :transSetHash);", - soci::use(sHash), soci::use(ledgerSeq), soci::use(sParentHash), - soci::use(sDrops), soci::use(closingTime), - soci::use(prevClosingTime), soci::use(closeTimeRes), - soci::use(ledger->info().closeFlags), soci::use(sAccountHash), - soci::use(sTxHash); - - tr.commit(); - } - - return true; -} - -std::unique_ptr -makeAcquireDB( - DatabaseCon::Setup const& setup, - DatabaseCon::CheckpointerSetup const& checkpointerSetup) -{ - return std::make_unique( - setup, - AcquireShardDBName, - AcquireShardDBPragma, - AcquireShardDBInit, - checkpointerSetup); -} - -void -insertAcquireDBIndex(soci::session& session, std::uint32_t index) -{ - session << "INSERT INTO Shard (ShardIndex) " - "VALUES (:shardIndex);", - soci::use(index); -} - -std::pair> -selectAcquireDBLedgerSeqs(soci::session& session, std::uint32_t index) -{ - // resIndex and must be boost::optional (not std) because that's - // what SOCI expects in its interface. - boost::optional resIndex; - soci::blob sociBlob(session); - soci::indicator blobPresent; - - session << "SELECT ShardIndex, StoredLedgerSeqs " - "FROM Shard " - "WHERE ShardIndex = :index;", - soci::into(resIndex), soci::into(sociBlob, blobPresent), - soci::use(index); - - if (!resIndex || index != resIndex) - return {false, {}}; - - if (blobPresent != soci::i_ok) - return {true, {}}; - - std::string s; - convert(sociBlob, s); - - return {true, s}; -} - -std::pair -selectAcquireDBLedgerSeqsHash(soci::session& session, std::uint32_t index) -{ - // resIndex and sHash0 must be boost::optional (not std) because that's - // what SOCI expects in its interface. - boost::optional resIndex; - boost::optional sHash0; - soci::blob sociBlob(session); - soci::indicator blobPresent; - - session << "SELECT ShardIndex, LastLedgerHash, StoredLedgerSeqs " - "FROM Shard " - "WHERE ShardIndex = :index;", - soci::into(resIndex), soci::into(sHash0), - soci::into(sociBlob, blobPresent), soci::use(index); - - std::optional sHash = - (sHash0 ? 
*sHash0 : std::optional()); - - if (!resIndex || index != resIndex) - return {false, {{}, {}}}; - - if (blobPresent != soci::i_ok) - return {true, {{}, sHash}}; - - std::string s; - convert(sociBlob, s); - - return {true, {s, sHash}}; -} - -void -updateAcquireDB( - soci::session& session, - std::shared_ptr const& ledger, - std::uint32_t index, - std::uint32_t lastSeq, - std::optional const& seqs) -{ - soci::blob sociBlob(session); - auto const sHash{to_string(ledger->info().hash)}; - - if (seqs) - convert(*seqs, sociBlob); - - if (ledger->info().seq == lastSeq) - { - // Store shard's last ledger hash - session << "UPDATE Shard " - "SET LastLedgerHash = :lastLedgerHash," - "StoredLedgerSeqs = :storedLedgerSeqs " - "WHERE ShardIndex = :shardIndex;", - soci::use(sHash), soci::use(sociBlob), soci::use(index); - } - else - { - session << "UPDATE Shard " - "SET StoredLedgerSeqs = :storedLedgerSeqs " - "WHERE ShardIndex = :shardIndex;", - soci::use(sociBlob), soci::use(index); - } -} - -} // namespace ripple diff --git a/src/xrpld/core/Config.h b/src/xrpld/core/Config.h index 2872193f0ee..e63e6d2f356 100644 --- a/src/xrpld/core/Config.h +++ b/src/xrpld/core/Config.h @@ -146,7 +146,6 @@ class Config : public BasicConfig public: bool doImport = false; - bool nodeToShard = false; bool ELB_SUPPORT = false; // Entries from [ips] config stanza diff --git a/src/xrpld/core/ConfigSections.h b/src/xrpld/core/ConfigSections.h index b4e460f1cfc..8685d29a4d0 100644 --- a/src/xrpld/core/ConfigSections.h +++ b/src/xrpld/core/ConfigSections.h @@ -35,11 +35,6 @@ struct ConfigSection return "node_db"; } static std::string - shardDatabase() - { - return "shard_db"; - } - static std::string importNodeDatabase() { return "import_db"; @@ -56,7 +51,6 @@ struct ConfigSection #define SECTION_ELB_SUPPORT "elb_support" #define SECTION_FEE_DEFAULT "fee_default" #define SECTION_FETCH_DEPTH "fetch_depth" -#define SECTION_HISTORICAL_SHARD_PATHS "historical_shard_paths" #define SECTION_INSIGHT "insight" #define SECTION_IO_WORKERS "io_workers" #define SECTION_IPS "ips" diff --git a/src/xrpld/core/Job.h b/src/xrpld/core/Job.h index c5926ae2e08..76d26c39e72 100644 --- a/src/xrpld/core/Job.h +++ b/src/xrpld/core/Job.h @@ -47,7 +47,6 @@ enum JobType { jtCLIENT_FEE_CHANGE, // Subscription for fee change by a client jtCLIENT_CONSENSUS, // Subscription for consensus state change by a client jtCLIENT_ACCT_HIST, // Subscription for account history by a client - jtCLIENT_SHARD, // Client request for shard archiving jtCLIENT_RPC, // Client RPC request jtCLIENT_WEBSOCKET, // Client websocket request jtRPC, // A websocket command from the client diff --git a/src/xrpld/core/JobTypes.h b/src/xrpld/core/JobTypes.h index 2dbc45ca1b5..3b41ce7ff47 100644 --- a/src/xrpld/core/JobTypes.h +++ b/src/xrpld/core/JobTypes.h @@ -84,7 +84,6 @@ class JobTypes add(jtCLIENT_FEE_CHANGE, "clientFeeChange", maxLimit, 2000ms, 5000ms); add(jtCLIENT_CONSENSUS, "clientConsensus", maxLimit, 2000ms, 5000ms); add(jtCLIENT_ACCT_HIST, "clientAccountHistory", maxLimit, 2000ms, 5000ms); - add(jtCLIENT_SHARD, "clientShardArchive", maxLimit, 2000ms, 5000ms); add(jtCLIENT_RPC, "clientRPC", maxLimit, 2000ms, 5000ms); add(jtCLIENT_WEBSOCKET, "clientWebsocket", maxLimit, 2000ms, 5000ms); add(jtRPC, "RPC", maxLimit, 0ms, 0ms); diff --git a/src/xrpld/net/DatabaseBody.h b/src/xrpld/net/DatabaseBody.h deleted file mode 100644 index 15780ced313..00000000000 --- a/src/xrpld/net/DatabaseBody.h +++ /dev/null @@ -1,179 +0,0 @@ 
-//------------------------------------------------------------------------------ -/* - This file is part of rippled: https://github.com/ripple/rippled - Copyright (c) 2020 Ripple Labs Inc. - - Permission to use, copy, modify, and/or distribute this software for any - purpose with or without fee is hereby granted, provided that the above - copyright notice and this permission notice appear in all copies. - - THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. -*/ -//============================================================================== - -#ifndef RIPPLE_NET_DATABASEBODY_H -#define RIPPLE_NET_DATABASEBODY_H - -#include -#include -#include -#include -#include - -namespace ripple { - -// DatabaseBody needs to meet requirements -// from asio which is why some conventions -// used elsewhere in this code base are not -// followed. -struct DatabaseBody -{ - // Algorithm for storing buffers when parsing. - class reader; - - // The type of the @ref message::body member. - class value_type; - - /** Returns the size of the body - - @param body The database body to use - */ - static std::uint64_t - size(value_type const& body); -}; - -class DatabaseBody::value_type -{ - // This body container holds a connection to the - // database, and also caches the size when set. - - friend class reader; - friend struct DatabaseBody; - - // The cached file size - std::uint64_t fileSize_ = 0; - boost::filesystem::path path_; - std::unique_ptr conn_; - std::string batch_; - std::shared_ptr strand_; - std::mutex m_; - std::condition_variable c_; - std::uint64_t handlerCount_ = 0; - std::uint64_t part_ = 0; - bool closing_ = false; - -public: - /// Destructor - ~value_type() = default; - - /// Constructor - value_type() = default; - - /// Returns `true` if the file is open - bool - is_open() const - { - return static_cast(conn_); - } - - /// Returns the size of the file if open - std::uint64_t - size() const - { - return fileSize_; - } - - /// Close the file if open - void - close(); - - /** Open a file at the given path with the specified mode - - @param path The utf-8 encoded path to the file - - @param config The configuration settings - - @param io_service The asio context for running a strand. - - @param ec Set to the error, if any occurred - */ - void - open( - boost::filesystem::path const& path, - Config const& config, - boost::asio::io_service& io_service, - boost::system::error_code& ec); -}; - -/** Algorithm for storing buffers when parsing. - - Objects of this type are created during parsing - to store incoming buffers representing the body. -*/ -class DatabaseBody::reader -{ - value_type& body_; // The body we are writing to - - static constexpr std::uint32_t FLUSH_SIZE = 50000000; - static constexpr std::uint8_t MAX_HANDLERS = 3; - static constexpr std::uint16_t MAX_ROW_SIZE_PAD = 500; - -public: - // Constructor. - // - // This is called after the header is parsed and - // indicates that a non-zero sized body may be present. - // `h` holds the received message headers. - // `b` is an instance of `DatabaseBody`. 
- // - template - explicit reader( - boost::beast::http::header& h, - value_type& b); - - // Initializer - // - // This is called before the body is parsed and - // gives the reader a chance to do something that might - // need to return an error code. It informs us of - // the payload size (`content_length`) which we can - // optionally use for optimization. - // - // Note: boost::Beast calls init() and requires a - // boost::optional (not a std::optional) as the - // parameter. - void - init(boost::optional const&, boost::system::error_code& ec); - - // This function is called one or more times to store - // buffer sequences corresponding to the incoming body. - // - template - std::size_t - put(ConstBufferSequence const& buffers, boost::system::error_code& ec); - - void - do_put(std::string const& data); - - // This function is called when writing is complete. - // It is an opportunity to perform any final actions - // which might fail, in order to return an error code. - // Operations that might fail should not be attempted in - // destructors, since an exception thrown from there - // would terminate the program. - // - void - finish(boost::system::error_code& ec); -}; - -} // namespace ripple - -#include - -#endif // RIPPLE_NET_DATABASEBODY_H diff --git a/src/xrpld/net/DatabaseDownloader.h b/src/xrpld/net/DatabaseDownloader.h deleted file mode 100644 index 490e7c62e16..00000000000 --- a/src/xrpld/net/DatabaseDownloader.h +++ /dev/null @@ -1,76 +0,0 @@ -//------------------------------------------------------------------------------ -/* - This file is part of rippled: https://github.com/ripple/rippled - Copyright (c) 2020 Ripple Labs Inc. - - Permission to use, copy, modify, and/or distribute this software for any - purpose with or without fee is hereby granted, provided that the above - copyright notice and this permission notice appear in all copies. - - THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
-*/ -//============================================================================== - -#ifndef RIPPLE_NET_DATABASEDOWNLOADER_H -#define RIPPLE_NET_DATABASEDOWNLOADER_H - -#include -#include - -namespace ripple { - -class DatabaseDownloader : public HTTPDownloader -{ -public: - virtual ~DatabaseDownloader() = default; - -private: - DatabaseDownloader( - boost::asio::io_service& io_service, - Config const& config, - beast::Journal j); - - static const std::uint8_t MAX_PATH_LEN = - std::numeric_limits::max(); - - std::shared_ptr - getParser( - boost::filesystem::path dstPath, - std::function complete, - boost::system::error_code& ec) override; - - bool - checkPath(boost::filesystem::path const& dstPath) override; - - void - closeBody(std::shared_ptr p) override; - - std::uint64_t - size(std::shared_ptr p) override; - - Config const& config_; - boost::asio::io_service& io_service_; - - friend std::shared_ptr - make_DatabaseDownloader( - boost::asio::io_service& io_service, - Config const& config, - beast::Journal j); -}; - -// DatabaseDownloader must be a shared_ptr because it uses shared_from_this -std::shared_ptr -make_DatabaseDownloader( - boost::asio::io_service& io_service, - Config const& config, - beast::Journal j); - -} // namespace ripple - -#endif // RIPPLE_NET_DATABASEDOWNLOADER_H diff --git a/src/xrpld/net/HTTPDownloader.h b/src/xrpld/net/HTTPDownloader.h deleted file mode 100644 index f96fb8e572b..00000000000 --- a/src/xrpld/net/HTTPDownloader.h +++ /dev/null @@ -1,130 +0,0 @@ -//------------------------------------------------------------------------------ -/* - This file is part of rippled: https://github.com/ripple/rippled - Copyright (c) 2012, 2018 Ripple Labs Inc. - - Permission to use, copy, modify, and/or distribute this software for any - purpose with or without fee is hereby granted, provided that the above - copyright notice and this permission notice appear in all copies. - - THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
-*/ -//============================================================================== - -#ifndef RIPPLE_NET_HTTPDOWNLOADER_H_INCLUDED -#define RIPPLE_NET_HTTPDOWNLOADER_H_INCLUDED - -#include -#include -#include - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include - -namespace ripple { - -/** Provides an asynchronous HTTP[S] file downloader - */ -class HTTPDownloader : public std::enable_shared_from_this -{ -public: - using error_code = boost::system::error_code; - - bool - download( - std::string const& host, - std::string const& port, - std::string const& target, - int version, - boost::filesystem::path const& dstPath, - std::function complete, - bool ssl = true); - - void - stop(); - - virtual ~HTTPDownloader() = default; - - bool - sessionIsActive() const; - - bool - isStopping() const; - -protected: - // must be accessed through a shared_ptr - // use make_XXX functions to create - HTTPDownloader( - boost::asio::io_service& io_service, - Config const& config, - beast::Journal j); - - using parser = boost::beast::http::basic_parser; - - beast::Journal const j_; - - void - fail( - boost::filesystem::path dstPath, - boost::system::error_code const& ec, - std::string const& errMsg, - std::shared_ptr parser); - -private: - Config const& config_; - boost::asio::io_service::strand strand_; - std::unique_ptr stream_; - boost::beast::flat_buffer read_buf_; - std::atomic stop_; - - // Used to protect sessionActive_ - mutable std::mutex m_; - bool sessionActive_; - std::condition_variable c_; - - void - do_session( - std::string host, - std::string port, - std::string target, - int version, - boost::filesystem::path dstPath, - std::function complete, - bool ssl, - boost::asio::yield_context yield); - - virtual std::shared_ptr - getParser( - boost::filesystem::path dstPath, - std::function complete, - boost::system::error_code& ec) = 0; - - virtual bool - checkPath(boost::filesystem::path const& dstPath) = 0; - - virtual void - closeBody(std::shared_ptr p) = 0; - - virtual uint64_t - size(std::shared_ptr p) = 0; -}; - -} // namespace ripple - -#endif diff --git a/src/xrpld/net/HTTPStream.h b/src/xrpld/net/HTTPStream.h deleted file mode 100644 index 275d8ca9544..00000000000 --- a/src/xrpld/net/HTTPStream.h +++ /dev/null @@ -1,165 +0,0 @@ -//------------------------------------------------------------------------------ -/* - This file is part of rippled: https://github.com/ripple/rippled - Copyright (c) 2020 Ripple Labs Inc. - - Permission to use, copy, modify, and/or distribute this software for any - purpose with or without fee is hereby granted, provided that the above - copyright notice and this permission notice appear in all copies. - - THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
-*/
-//==============================================================================
-
-#ifndef RIPPLE_NET_HTTPSTREAM_H_INCLUDED
-#define RIPPLE_NET_HTTPSTREAM_H_INCLUDED
-
-#include
-#include
-
-#include
-#include
-#include
-#include
-
-#include
-
-namespace ripple {
-
-class HTTPStream
-{
-public:
-    using request = boost::beast::http::request<boost::beast::http::empty_body>;
-    using parser = boost::beast::http::basic_parser<false>;
-
-    virtual ~HTTPStream() = default;
-
-    [[nodiscard]] virtual boost::asio::ip::tcp::socket&
-    getStream() = 0;
-
-    [[nodiscard]] virtual bool
-    connect(
-        std::string& errorOut,
-        std::string const& host,
-        std::string const& port,
-        boost::asio::yield_context& yield) = 0;
-
-    virtual void
-    asyncWrite(
-        request& req,
-        boost::asio::yield_context& yield,
-        boost::system::error_code& ec) = 0;
-
-    virtual void
-    asyncRead(
-        boost::beast::flat_buffer& buf,
-        parser& p,
-        boost::asio::yield_context& yield,
-        boost::system::error_code& ec) = 0;
-
-    virtual void
-    asyncReadSome(
-        boost::beast::flat_buffer& buf,
-        parser& p,
-        boost::asio::yield_context& yield,
-        boost::system::error_code& ec) = 0;
-};
-
-class SSLStream : public HTTPStream
-{
-public:
-    SSLStream(
-        Config const& config,
-        boost::asio::io_service::strand& strand,
-        beast::Journal j);
-
-    virtual ~SSLStream() = default;
-
-    boost::asio::ip::tcp::socket&
-    getStream() override;
-
-    bool
-    connect(
-        std::string& errorOut,
-        std::string const& host,
-        std::string const& port,
-        boost::asio::yield_context& yield) override;
-
-    void
-    asyncWrite(
-        request& req,
-        boost::asio::yield_context& yield,
-        boost::system::error_code& ec) override;
-
-    void
-    asyncRead(
-        boost::beast::flat_buffer& buf,
-        parser& p,
-        boost::asio::yield_context& yield,
-        boost::system::error_code& ec) override;
-
-    void
-    asyncReadSome(
-        boost::beast::flat_buffer& buf,
-        parser& p,
-        boost::asio::yield_context& yield,
-        boost::system::error_code& ec) override;
-
-private:
-    HTTPClientSSLContext ssl_ctx_;
-    std::optional<boost::asio::ssl::stream<boost::asio::ip::tcp::socket>>
-        stream_;
-    boost::asio::io_service::strand& strand_;
-};
-
-class RawStream : public HTTPStream
-{
-public:
-    RawStream(boost::asio::io_service::strand& strand);
-
-    virtual ~RawStream() = default;
-
-    boost::asio::ip::tcp::socket&
-    getStream() override;
-
-    bool
-    connect(
-        std::string& errorOut,
-        std::string const& host,
-        std::string const& port,
-        boost::asio::yield_context& yield) override;
-
-    void
-    asyncWrite(
-        request& req,
-        boost::asio::yield_context& yield,
-        boost::system::error_code& ec) override;
-
-    void
-    asyncRead(
-        boost::beast::flat_buffer& buf,
-        parser& p,
-        boost::asio::yield_context& yield,
-        boost::system::error_code& ec) override;
-
-    void
-    asyncReadSome(
-        boost::beast::flat_buffer& buf,
-        parser& p,
-        boost::asio::yield_context& yield,
-        boost::system::error_code& ec) override;
-
-private:
-    std::optional<boost::asio::ip::tcp::socket> stream_;
-    boost::asio::io_service::strand& strand_;
-};
-
-} // namespace ripple
-
-#endif // RIPPLE_NET_HTTPSTREAM_H
diff --git a/src/xrpld/net/ShardDownloader.md b/src/xrpld/net/ShardDownloader.md
deleted file mode 100644
index d961df61c65..00000000000
--- a/src/xrpld/net/ShardDownloader.md
+++ /dev/null
@@ -1,311 +0,0 @@
-# Shard Downloader
-
-## Overview
-
-This document describes the mechanics of the `HTTPDownloader`, a class that performs
-the task of downloading shards from remote web servers via HTTP. The downloader
-utilizes a strand (`boost::asio::io_service::strand`) to ensure that downloads
-are never executed concurrently. 
Hence, if a download is in progress when
-another download is initiated, the second download will be queued and invoked
-only when the first download is completed.
-
-## Motivation
-
-In March 2020 the downloader was modified to add two key features:
-
-- The ability to stop downloads during a graceful shutdown.
-- The ability to resume partial downloads after a crash or shutdown.
-
-This document describes the changes that introduced those features.
-
-## Classes
-
-Much of the shard downloading process concerns the following classes:
-
-- `HTTPDownloader`
-
-  This is a generic class designed for serially executing downloads via HTTP.
-
-- `ShardArchiveHandler`
-
-  This class uses the `HTTPDownloader` to fetch shards from remote web servers.
-  Additionally, the archive handler performs validity checks on the downloaded
-  files and imports the validated files into the local shard store.
-
-  The `ShardArchiveHandler` exposes a simple public interface:
-
-  ```C++
-  /** Add an archive to be downloaded and imported.
-      @param shardIndex the index of the shard to be imported.
-      @param url the location of the archive.
-      @return `true` if successfully added.
-      @note Returns false if called while downloading.
-  */
-  bool
-  add(std::uint32_t shardIndex, std::pair<parsedURL, std::string>&& url);
-
-  /** Starts downloading and importing archives. */
-  bool
-  start();
-  ```
-
-  When a client submits a `download_shard` command via the RPC interface, each
-  of the requested files is registered with the handler via the `add` method.
-  After all the files have been registered, the handler's `start` method is
-  invoked, which in turn creates an instance of the `HTTPDownloader` and begins
-  the first download. When the download is completed, the downloader invokes
-  the handler's `complete` method, which will initiate the download of the next
-  file, or simply return if there are no more downloads to process. When
-  `complete` is invoked with no remaining files to be downloaded, the handler
-  and downloader are not destroyed automatically, but persist for the duration
-  of the application to assist with graceful shutdowns.
-
-- `DatabaseBody`
-
-  This class defines a custom message body type, allowing an
-  `http::response_parser` to write to an SQLite database rather than to a flat
-  file. This class is discussed in further detail in the Recovery section.
-
-## Graceful Shutdowns & Recovery
-
-This section describes in greater detail how the shutdown and recovery features
-of the downloader are implemented in C++ using the `boost::asio` framework.
-
-##### Member Variables:
-
-The variables shown here are members of the `HTTPDownloader` class and
-will be used in the following code examples.
-
-```c++
-std::unique_ptr<HTTPStream> stream_;
-std::condition_variable c_;
-std::atomic<bool> stop_;
-```
-
-### Graceful Shutdowns
-
-##### Thread 1:
-
-A graceful shutdown begins when the `stop()` method of the
-`ShardArchiveHandler` is invoked:
-
-```c++
-void
-ShardArchiveHandler::stop()
-{
-    std::lock_guard lock(m_);
-
-    if (downloader_)
-    {
-        downloader_->stop();
-        downloader_.reset();
-    }
-
-    stopped();
-}
-```
-
-Inside `HTTPDownloader::stop()`, if a download is currently in progress,
-the `stop_` member variable is set and the thread waits for the
-download to stop:
-
-```c++
-void
-HTTPDownloader::stop()
-{
-    std::unique_lock lock(m_);
-
-    stop_ = true;
-
-    if (sessionActive_)
-    {
-        // Wait for the handler to exit.
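-        // (The wait predicate protects against spurious wakeups.)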
-        c_.wait(lock,
-            [this]()
-            {
-                return !sessionActive_;
-            });
-    }
-}
-```
-
-##### Thread 2:
-
-The graceful shutdown is realized when the thread executing the download polls
-`stop_` after this variable has been set to `true`. Polling occurs
-while the file is being downloaded, in between calls to `async_read_some()`. The
-stop takes effect when the socket is closed and the handler function
-(`do_session()`) is exited.
-
-```c++
-void HTTPDownloader::do_session()
-{
-
-    // (Connection initialization logic) . . .
-
-
-    // (In between calls to async_read_some):
-    if (stop_.load())
-    {
-        close(p);
-        return exit();
-    }
-
-    // . . .
-
-    break;
-}
-```
-
-### Recovery
-
-Persisting the current state of both the archive handler and the downloader is
-achieved by leveraging an SQLite database rather than flat files, as the
-database protects against data corruption that could result from a system crash.
-
-##### ShardArchiveHandler
-
-Although `HTTPDownloader` is a generic class that could be used to download a
-variety of file types, currently it is used exclusively by the
-`ShardArchiveHandler` to download shards. In order to provide resilience, the
-`ShardArchiveHandler` will use an SQLite database to preserve its current state
-whenever there are active, paused, or queued downloads. The `shard_db` section
-in the configuration file allows users to specify the location of the database
-to use for this purpose.
-
-###### SQLite Table Format
-
-| Index |              URL              |
-|:-----:|:-----------------------------:|
-|   1   | https://example.com/1.tar.lz4 |
-|   2   | https://example.com/2.tar.lz4 |
-|   5   | https://example.com/5.tar.lz4 |
-
-##### HTTPDownloader
-
-While the archive handler maintains a list of all partial and queued downloads,
-the `HTTPDownloader` stores the raw bytes of the file currently being
-downloaded. The partially downloaded file will be represented as one or more
-`BLOB` entries in an SQLite database. As the maximum size of a `BLOB` entry is
-currently limited to roughly 2.1 GB, a 5 GB shard file, for instance, will
-occupy three database entries upon completion.
-
-###### SQLite Table Format
-
-Since downloads execute serially by design, the entries in this table always
-correspond to the contents of a single file.
-
-| Bytes |    Size    | Part |
-|:-----:|:----------:|:----:|
-| 0x... | 2147483647 |  0   |
-| 0x... | 2147483647 |  1   |
-| 0x... | 705032706  |  2   |
-
-##### Config File Entry
-
-The `download_path` field of the `shard_db` entry is used to determine where to
-store the recovery database. If this field is omitted, the `path` field will be
-used instead.
-
-```dosini
-# This is the persistent datastore for shards. It is important for the health
-# of the network that rippled operators shard as much as practical.
-# NuDB requires SSD storage. Helpful information can be found at
-# https://xrpl.org/history-sharding.html
-[shard_db]
-type=NuDB
-path=/var/lib/rippled/db/shards/nudb
-download_path=/var/lib/rippled/db/shards/
-max_historical_shards=50
-```
-
-##### Resuming Partial Downloads
-
-When resuming downloads after a shutdown, crash, or other interruption, the
-`HTTPDownloader` will utilize the `range` field of the HTTP header to download
-only the remainder of the partially downloaded file. 
-
-```C++
-auto downloaded = getPartialFileSize();
-auto total = getTotalFileSize();
-
-http::request<http::empty_body> req {http::verb::head,
-    target,
-    version};
-
-if (downloaded < total)
-{
-    // If we already downloaded 1000 bytes to the database,
-    // the range header will look like:
-    // Range: "bytes=1000-"
-    req.set(http::field::range, "bytes=" + std::to_string(downloaded) + "-");
-}
-else if (downloaded == total)
-{
-    // Download is already complete. (Interruption must
-    // have occurred after the file was downloaded but before
-    // the state file was updated.)
-}
-else
-{
-    // The size of the partially downloaded file exceeds
-    // the total download size. Error condition. Handle
-    // appropriately.
-}
-```
-
-##### DatabaseBody
-
-Previously, the `HTTPDownloader` leveraged an `http::response_parser`
-instantiated with an `http::file_body`. The `file_body` class declares a nested
-type, `reader`, which does the task of writing HTTP message payloads
-(constituting a requested file) to the filesystem. In order for the
-`http::response_parser` to interface with the database, we implement a custom
-body type that declares a nested `reader` type which has been outfitted to
-persist octets received from the remote host to a local SQLite database. The
-code snippet below illustrates the customization points available to
-user-defined body types:
-
-```C++
-/// Defines a Body type
-struct body
-{
-    /// This determines the return type of the `message::body` member function
-    using value_type = ...;
-
-    /// An optional function, returns the body's payload size (which may be
-    /// zero)
-    static
-    std::uint64_t
-    size(value_type const& v);
-
-    /// The algorithm used for extracting buffers
-    class reader;
-
-    /// The algorithm used for inserting buffers
-    class writer;
-};
-```
-
-Note that the `DatabaseBody` class is specifically designed to work with `asio`
-and follows `asio` conventions.
-
-The method invoked to write data to the filesystem (or SQLite database in our
-case) has the following signature:
-
-```C++
-std::size_t
-body::reader::put(ConstBufferSequence const& buffers, error_code& ec);
-```
-
-## Sequence Diagram
-
-This sequence diagram demonstrates a scenario wherein the `ShardArchiveHandler`
-leverages the state persisted in the database to recover from a crash and resume
-the requested downloads.
-
-![alt_text](./images/interrupt_sequence.png "Resuming downloads post abort")
-
-## State Diagram
-
-This diagram illustrates the various states of the Shard Downloader module.
-
-![alt_text](./images/states.png "Shard Downloader states")
diff --git a/src/xrpld/net/detail/DatabaseBody.ipp b/src/xrpld/net/detail/DatabaseBody.ipp
deleted file mode 100644
index 76223ca6a35..00000000000
--- a/src/xrpld/net/detail/DatabaseBody.ipp
+++ /dev/null
@@ -1,231 +0,0 @@
-//------------------------------------------------------------------------------
-/*
-    This file is part of rippled: https://github.com/ripple/rippled
-    Copyright (c) 2020 Ripple Labs Inc.
-
-    Permission to use, copy, modify, and/or distribute this software for any
-    purpose with or without fee is hereby granted, provided that the above
-    copyright notice and this permission notice appear in all copies.
-
-    THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
-    WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
-    MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. -*/ -//============================================================================== - -#include - -namespace ripple { - -inline void -DatabaseBody::value_type::close() -{ - { - std::unique_lock lock(m_); - - // Stop all scheduled and currently - // executing handlers before closing. - if (handlerCount_) - { - closing_ = true; - - auto predicate = [&] { return !handlerCount_; }; - c_.wait(lock, predicate); - } - - conn_.reset(); - } -} - -inline void -DatabaseBody::value_type::open( - boost::filesystem::path const& path, - Config const& config, - boost::asio::io_service& io_service, - boost::system::error_code& ec) -{ - strand_.reset(new boost::asio::io_service::strand(io_service)); - path_ = path; - - auto setup = setup_DatabaseCon(config); - setup.dataDir = path.parent_path(); - setup.useGlobalPragma = false; - - auto [conn, size] = openDatabaseBodyDb(setup, path); - conn_ = std::move(conn); - if (size) - fileSize_ = *size; -} - -// This is called from message::payload_size -inline std::uint64_t -DatabaseBody::size(value_type const& body) -{ - // Forward the call to the body - return body.size(); -} - -// We don't do much in the reader constructor since the -// database is already open. -// -template -DatabaseBody::reader::reader( - boost::beast::http::header&, - value_type& body) - : body_(body) -{ -} - -// We don't do anything with content_length but a sophisticated -// application might check available space on the device -// to see if there is enough room to store the body. -inline void -DatabaseBody::reader::init( - boost::optional const& /*content_length*/, - boost::system::error_code& ec) -{ - // The connection must already be available for writing - assert(body_.conn_); - - // The error_code specification requires that we - // either set the error to some value, or set it - // to indicate no error. - // - // We don't do anything fancy so set "no error" - ec = {}; -} - -// This will get called one or more times with body buffers -// -template -std::size_t -DatabaseBody::reader::put( - ConstBufferSequence const& buffers, - boost::system::error_code& ec) -{ - // This function must return the total number of - // bytes transferred from the input buffers. - std::size_t nwritten = 0; - - // Loop over all the buffers in the sequence, - // and write each one to the database. 
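-    // Buffers are first accumulated in batch_; once more than FLUSH_SIZE
-    // bytes are pending, the batch is posted to the strand for an
-    // asynchronous database write, with MAX_HANDLERS bounding the number
-    // of writes in flight.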
- for (auto it = buffer_sequence_begin(buffers); - it != buffer_sequence_end(buffers); - ++it) - { - boost::asio::const_buffer buffer = *it; - - body_.batch_.append( - static_cast(buffer.data()), buffer.size()); - - // Write this buffer to the database - if (body_.batch_.size() > FLUSH_SIZE) - { - bool post = true; - - { - std::lock_guard lock(body_.m_); - - if (body_.handlerCount_ >= MAX_HANDLERS) - post = false; - else - ++body_.handlerCount_; - } - - if (post) - { - body_.strand_->post( - [data = body_.batch_, this] { this->do_put(data); }); - - body_.batch_.clear(); - } - } - - nwritten += it->size(); - } - - // Indicate success - // This is required by the error_code specification - ec = {}; - - return nwritten; -} - -inline void -DatabaseBody::reader::do_put(std::string const& data) -{ - using namespace boost::asio; - - { - std::unique_lock lock(body_.m_); - - // The download is being halted. - if (body_.closing_) - { - if (--body_.handlerCount_ == 0) - { - lock.unlock(); - body_.c_.notify_one(); - } - - return; - } - } - - auto path = body_.path_.string(); - - { - auto db = body_.conn_->checkoutDb(); - body_.part_ = databaseBodyDoPut( - *db, data, path, body_.fileSize_, body_.part_, MAX_ROW_SIZE_PAD); - } - - bool const notify = [this] { - std::lock_guard lock(body_.m_); - return --body_.handlerCount_ == 0; - }(); - - if (notify) - body_.c_.notify_one(); -} - -// Called after writing is done when there's no error. -inline void -DatabaseBody::reader::finish(boost::system::error_code& ec) -{ - { - std::unique_lock lock(body_.m_); - - // Wait for scheduled DB writes - // to complete. - if (body_.handlerCount_) - { - auto predicate = [&] { return !body_.handlerCount_; }; - body_.c_.wait(lock, predicate); - } - } - - std::ofstream fout; - fout.open(body_.path_.string(), std::ios::binary | std::ios::out); - - { - auto db = body_.conn_->checkoutDb(); - databaseBodyFinish(*db, fout); - } - - // Flush any pending data that hasn't - // been been written to the DB. - if (body_.batch_.size()) - { - fout.write(body_.batch_.data(), body_.batch_.size()); - body_.batch_.clear(); - } - - fout.close(); -} - -} // namespace ripple diff --git a/src/xrpld/net/detail/DatabaseDownloader.cpp b/src/xrpld/net/detail/DatabaseDownloader.cpp deleted file mode 100644 index b39e6904c46..00000000000 --- a/src/xrpld/net/detail/DatabaseDownloader.cpp +++ /dev/null @@ -1,92 +0,0 @@ -//------------------------------------------------------------------------------ -/* - This file is part of rippled: https://github.com/ripple/rippled - Copyright (c) 2020 Ripple Labs Inc. - - Permission to use, copy, modify, and/or distribute this software for any - purpose with or without fee is hereby granted, provided that the above - copyright notice and this permission notice appear in all copies. - - THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
-*/ -//============================================================================== - -#include - -namespace ripple { - -std::shared_ptr -make_DatabaseDownloader( - boost::asio::io_service& io_service, - Config const& config, - beast::Journal j) -{ - return std::shared_ptr( - new DatabaseDownloader(io_service, config, j)); -} - -DatabaseDownloader::DatabaseDownloader( - boost::asio::io_service& io_service, - Config const& config, - beast::Journal j) - : HTTPDownloader(io_service, config, j) - , config_(config) - , io_service_(io_service) -{ -} - -auto -DatabaseDownloader::getParser( - boost::filesystem::path dstPath, - std::function complete, - boost::system::error_code& ec) -> std::shared_ptr -{ - using namespace boost::beast; - - auto p = std::make_shared>(); - p->body_limit(std::numeric_limits::max()); - p->get().body().open(dstPath, config_, io_service_, ec); - - if (ec) - p->get().body().close(); - - return p; -} - -bool -DatabaseDownloader::checkPath(boost::filesystem::path const& dstPath) -{ - return dstPath.string().size() <= MAX_PATH_LEN; -} - -void -DatabaseDownloader::closeBody(std::shared_ptr p) -{ - using namespace boost::beast; - - auto databaseBodyParser = - std::dynamic_pointer_cast>(p); - assert(databaseBodyParser); - - databaseBodyParser->get().body().close(); -} - -std::uint64_t -DatabaseDownloader::size(std::shared_ptr p) -{ - using namespace boost::beast; - - auto databaseBodyParser = - std::dynamic_pointer_cast>(p); - assert(databaseBodyParser); - - return databaseBodyParser->get().body().size(); -} - -} // namespace ripple diff --git a/src/xrpld/net/detail/HTTPDownloader.cpp b/src/xrpld/net/detail/HTTPDownloader.cpp deleted file mode 100644 index 760aa020e4a..00000000000 --- a/src/xrpld/net/detail/HTTPDownloader.cpp +++ /dev/null @@ -1,340 +0,0 @@ -//------------------------------------------------------------------------------ -/* - This file is part of rippled: https://github.com/ripple/rippled - Copyright (c) 2012, 2013 Ripple Labs Inc. - - Permission to use, copy, modify, and/or distribute this software for any - purpose with or without fee is hereby granted, provided that the above - copyright notice and this permission notice appear in all copies. - - THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
-*/ -//============================================================================== - -#include -#include - -namespace ripple { - -HTTPDownloader::HTTPDownloader( - boost::asio::io_service& io_service, - Config const& config, - beast::Journal j) - : j_(j) - , config_(config) - , strand_(io_service) - , stop_(false) - , sessionActive_(false) -{ -} - -bool -HTTPDownloader::download( - std::string const& host, - std::string const& port, - std::string const& target, - int version, - boost::filesystem::path const& dstPath, - std::function complete, - bool ssl) -{ - if (!checkPath(dstPath)) - return false; - - if (stop_) - return true; - - { - std::lock_guard lock(m_); - sessionActive_ = true; - } - - if (!strand_.running_in_this_thread()) - strand_.post(std::bind( - &HTTPDownloader::download, - shared_from_this(), - host, - port, - target, - version, - dstPath, - complete, - ssl)); - else - boost::asio::spawn( - strand_, - std::bind( - &HTTPDownloader::do_session, - shared_from_this(), - host, - port, - target, - version, - dstPath, - complete, - ssl, - std::placeholders::_1)); - return true; -} - -void -HTTPDownloader::do_session( - std::string const host, - std::string const port, - std::string const target, - int version, - boost::filesystem::path dstPath, - std::function complete, - bool ssl, - boost::asio::yield_context yield) -{ - using namespace boost::asio; - using namespace boost::beast; - - boost::system::error_code ec; - bool skip = false; - - ////////////////////////////////////////////// - // Define lambdas for encapsulating download - // operations: - auto close = [&](auto p) { - closeBody(p); - - // Gracefully close the stream - stream_->getStream().shutdown(socket_base::shutdown_both, ec); - if (ec == boost::asio::error::eof) - ec.assign(0, ec.category()); - if (ec) - { - // Most web servers don't bother with performing - // the SSL shutdown handshake, for speed. - JLOG(j_.trace()) << "shutdown: " << ec.message(); - } - - // The stream cannot be reused - stream_.reset(); - }; - - // When the downloader is being stopped - // because the server is shutting down, - // this method notifies a caller of `onStop` - // (`RPC::ShardArchiveHandler` to be specific) - // that the session has ended. 
- auto exit = [this, &dstPath, complete] { - if (!stop_) - complete(std::move(dstPath)); - - std::lock_guard lock(m_); - sessionActive_ = false; - c_.notify_one(); - }; - - auto failAndExit = [&exit, &dstPath, complete, &ec, this]( - std::string const& errMsg, auto p) { - fail(dstPath, ec, errMsg, p); - exit(); - }; - // end lambdas - //////////////////////////////////////////////////////////// - - if (stop_.load()) - return exit(); - - auto p = this->getParser(dstPath, complete, ec); - if (ec) - return failAndExit("getParser", p); - - ////////////////////////////////////////////// - // Prepare for download and establish the - // connection: - if (ssl) - stream_ = std::make_unique(config_, strand_, j_); - else - stream_ = std::make_unique(strand_); - - std::string error; - if (!stream_->connect(error, host, port, yield)) - return failAndExit(error, p); - - // Set up an HTTP HEAD request message to find the file size - http::request req{http::verb::head, target, version}; - req.set(http::field::host, host); - req.set(http::field::user_agent, BOOST_BEAST_VERSION_STRING); - - std::uint64_t const rangeStart = size(p); - - // Requesting a portion of the file - if (rangeStart) - { - req.set( - http::field::range, - (boost::format("bytes=%llu-") % rangeStart).str()); - } - - stream_->asyncWrite(req, yield, ec); - if (ec) - return failAndExit("async_write", p); - - { - // Read the response - http::response_parser connectParser; - connectParser.skip(true); - stream_->asyncRead(read_buf_, connectParser, yield, ec); - if (ec) - return failAndExit("async_read", p); - - // Range request was rejected - if (connectParser.get().result() == http::status::range_not_satisfiable) - { - req.erase(http::field::range); - - stream_->asyncWrite(req, yield, ec); - if (ec) - return failAndExit("async_write_range_verify", p); - - http::response_parser rangeParser; - rangeParser.skip(true); - - stream_->asyncRead(read_buf_, rangeParser, yield, ec); - if (ec) - return failAndExit("async_read_range_verify", p); - - // The entire file is downloaded already. 
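-            // If the server reports exactly rangeStart bytes, nothing is
-            // left to fetch; any other length is a genuine error.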
- if (rangeParser.content_length() == rangeStart) - skip = true; - else - return failAndExit("range_not_satisfiable", p); - } - else if ( - rangeStart && - connectParser.get().result() != http::status::partial_content) - { - ec.assign( - boost::system::errc::not_supported, - boost::system::generic_category()); - - return failAndExit("Range request ignored", p); - } - else if (auto len = connectParser.content_length()) - { - try - { - // Ensure sufficient space is available - if (*len > space(dstPath.parent_path()).available) - { - return failAndExit( - "Insufficient disk space for download", p); - } - } - catch (std::exception const& e) - { - return failAndExit(std::string("exception: ") + e.what(), p); - } - } - } - - if (!skip) - { - // Set up an HTTP GET request message to download the file - req.method(http::verb::get); - - if (rangeStart) - { - req.set( - http::field::range, - (boost::format("bytes=%llu-") % rangeStart).str()); - } - } - - stream_->asyncWrite(req, yield, ec); - if (ec) - return failAndExit("async_write", p); - - // end prepare and connect - //////////////////////////////////////////////////////////// - - if (skip) - p->skip(true); - - // Download the file - while (!p->is_done()) - { - if (stop_.load()) - { - close(p); - return exit(); - } - - stream_->asyncReadSome(read_buf_, *p, yield, ec); - } - - JLOG(j_.trace()) << "download completed: " << dstPath.string(); - - close(p); - exit(); -} - -void -HTTPDownloader::stop() -{ - stop_ = true; - - std::unique_lock lock(m_); - if (sessionActive_) - { - // Wait for the handler to exit. - c_.wait(lock, [this]() { return !sessionActive_; }); - } -} - -bool -HTTPDownloader::sessionIsActive() const -{ - std::lock_guard lock(m_); - return sessionActive_; -} - -bool -HTTPDownloader::isStopping() const -{ - std::lock_guard lock(m_); - return stop_; -} - -void -HTTPDownloader::fail( - boost::filesystem::path dstPath, - boost::system::error_code const& ec, - std::string const& errMsg, - std::shared_ptr parser) -{ - if (!ec) - { - JLOG(j_.error()) << errMsg; - } - else if (ec != boost::asio::error::operation_aborted) - { - JLOG(j_.error()) << errMsg << ": " << ec.message(); - } - - if (parser) - closeBody(parser); - - try - { - remove(dstPath); - } - catch (std::exception const& e) - { - JLOG(j_.error()) << "exception: " << e.what() - << " in function: " << __func__; - } -} - -} // namespace ripple diff --git a/src/xrpld/net/detail/HTTPStream.cpp b/src/xrpld/net/detail/HTTPStream.cpp deleted file mode 100644 index b94f8959ec9..00000000000 --- a/src/xrpld/net/detail/HTTPStream.cpp +++ /dev/null @@ -1,203 +0,0 @@ -//------------------------------------------------------------------------------ -/* - This file is part of rippled: https://github.com/ripple/rippled - Copyright (c) 2020 Ripple Labs Inc. - - Permission to use, copy, modify, and/or distribute this software for any - purpose with or without fee is hereby granted, provided that the above - copyright notice and this permission notice appear in all copies. - - THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
-*/ -//============================================================================== - -#include - -namespace ripple { - -SSLStream::SSLStream( - Config const& config, - boost::asio::io_service::strand& strand, - beast::Journal j) - : ssl_ctx_(config, j, boost::asio::ssl::context::tlsv12_client) - , strand_(strand) -{ -} - -boost::asio::ip::tcp::socket& -SSLStream::getStream() -{ - assert(stream_); - return stream_->next_layer(); -} - -bool -SSLStream::connect( - std::string& errorOut, - std::string const& host, - std::string const& port, - boost::asio::yield_context& yield) -{ - using namespace boost::asio; - using namespace boost::beast; - - boost::system::error_code ec; - - auto fail = [&errorOut, &ec]( - std::string const& errorIn, - std::string const& message = "") { - errorOut = errorIn + ": " + (message.empty() ? ec.message() : message); - return false; - }; - - ip::tcp::resolver resolver{strand_.context()}; - auto const endpoints = resolver.async_resolve(host, port, yield[ec]); - if (ec) - return fail("async_resolve"); - - try - { - stream_.emplace(strand_.context(), ssl_ctx_.context()); - } - catch (std::exception const& e) - { - return fail("exception", e.what()); - } - - ec = ssl_ctx_.preConnectVerify(*stream_, host); - if (ec) - return fail("preConnectVerify"); - - boost::asio::async_connect( - stream_->next_layer(), endpoints.begin(), endpoints.end(), yield[ec]); - if (ec) - return fail("async_connect"); - - ec = ssl_ctx_.postConnectVerify(*stream_, host); - if (ec) - return fail("postConnectVerify"); - - stream_->async_handshake(ssl::stream_base::client, yield[ec]); - if (ec) - return fail("async_handshake"); - - return true; -} - -void -SSLStream::asyncWrite( - request& req, - boost::asio::yield_context& yield, - boost::system::error_code& ec) -{ - boost::beast::http::async_write(*stream_, req, yield[ec]); -} - -void -SSLStream::asyncRead( - boost::beast::flat_buffer& buf, - parser& p, - boost::asio::yield_context& yield, - boost::system::error_code& ec) -{ - boost::beast::http::async_read(*stream_, buf, p, yield[ec]); -} - -void -SSLStream::asyncReadSome( - boost::beast::flat_buffer& buf, - parser& p, - boost::asio::yield_context& yield, - boost::system::error_code& ec) -{ - boost::beast::http::async_read_some(*stream_, buf, p, yield[ec]); -} - -RawStream::RawStream(boost::asio::io_service::strand& strand) : strand_(strand) -{ -} - -boost::asio::ip::tcp::socket& -RawStream::getStream() -{ - assert(stream_); - return *stream_; -} - -bool -RawStream::connect( - std::string& errorOut, - std::string const& host, - std::string const& port, - boost::asio::yield_context& yield) -{ - using namespace boost::asio; - using namespace boost::beast; - - boost::system::error_code ec; - - auto fail = [&errorOut, &ec]( - std::string const& errorIn, - std::string const& message = "") { - errorOut = errorIn + ": " + (message.empty() ? 
ec.message() : message); - return false; - }; - - ip::tcp::resolver resolver{strand_.context()}; - auto const endpoints = resolver.async_resolve(host, port, yield[ec]); - if (ec) - return fail("async_resolve"); - - try - { - stream_.emplace(strand_.context()); - } - catch (std::exception const& e) - { - return fail("exception", e.what()); - } - - boost::asio::async_connect( - *stream_, endpoints.begin(), endpoints.end(), yield[ec]); - if (ec) - return fail("async_connect"); - - return true; -} - -void -RawStream::asyncWrite( - request& req, - boost::asio::yield_context& yield, - boost::system::error_code& ec) -{ - boost::beast::http::async_write(*stream_, req, yield[ec]); -} - -void -RawStream::asyncRead( - boost::beast::flat_buffer& buf, - parser& p, - boost::asio::yield_context& yield, - boost::system::error_code& ec) -{ - boost::beast::http::async_read(*stream_, buf, p, yield[ec]); -} - -void -RawStream::asyncReadSome( - boost::beast::flat_buffer& buf, - parser& p, - boost::asio::yield_context& yield, - boost::system::error_code& ec) -{ - boost::beast::http::async_read_some(*stream_, buf, p, yield[ec]); -} - -} // namespace ripple diff --git a/src/xrpld/net/detail/RPCCall.cpp b/src/xrpld/net/detail/RPCCall.cpp index 533878ab1b0..997d6463f23 100644 --- a/src/xrpld/net/detail/RPCCall.cpp +++ b/src/xrpld/net/detail/RPCCall.cpp @@ -189,36 +189,6 @@ class RPCParser return v; } - Json::Value - parseDownloadShard(Json::Value const& jvParams) - { - Json::Value jvResult(Json::objectValue); - unsigned int sz{jvParams.size()}; - unsigned int i{0}; - - // If odd number of params then 'novalidate' may have been specified - if (sz & 1) - { - if (boost::iequals(jvParams[0u].asString(), "novalidate")) - ++i; - else if (!boost::iequals(jvParams[--sz].asString(), "novalidate")) - return rpcError(rpcINVALID_PARAMS); - } - - // Create the 'shards' array - Json::Value shards(Json::arrayValue); - for (; i < sz; i += 2) - { - Json::Value shard(Json::objectValue); - shard[jss::index] = jvParams[i].asUInt(); - shard[jss::url] = jvParams[i + 1].asString(); - shards.append(std::move(shard)); - } - jvResult[jss::shards] = std::move(shards); - - return jvResult; - } - Json::Value parseInternal(Json::Value const& jvParams) { @@ -870,15 +840,6 @@ class RPCParser return jvRequest; } - Json::Value - parseNodeToShard(Json::Value const& jvParams) - { - Json::Value jvRequest; - jvRequest[jss::action] = jvParams[0u].asString(); - - return jvRequest; - } - // peer_reservations_add [] Json::Value parsePeerReservationsAdd(Json::Value const& jvParams) @@ -1200,9 +1161,7 @@ class RPCParser {"channel_verify", &RPCParser::parseChannelVerify, 4, 4}, {"connect", &RPCParser::parseConnect, 1, 2}, {"consensus_info", &RPCParser::parseAsIs, 0, 0}, - {"crawl_shards", &RPCParser::parseAsIs, 0, 2}, {"deposit_authorized", &RPCParser::parseDepositAuthorized, 2, 3}, - {"download_shard", &RPCParser::parseDownloadShard, 2, -1}, {"feature", &RPCParser::parseFeature, 0, 2}, {"fetch_info", &RPCParser::parseFetchInfo, 0, 1}, {"gateway_balances", &RPCParser::parseGatewayBalances, 1, -1}, @@ -1220,7 +1179,6 @@ class RPCParser {"log_level", &RPCParser::parseLogLevel, 0, 2}, {"logrotate", &RPCParser::parseAsIs, 0, 0}, {"manifest", &RPCParser::parseManifest, 1, 1}, - {"node_to_shard", &RPCParser::parseNodeToShard, 1, 1}, {"owner_info", &RPCParser::parseAccountItems, 1, 3}, {"peers", &RPCParser::parseAsIs, 0, 0}, {"ping", &RPCParser::parseAsIs, 0, 0}, diff --git a/src/xrpld/net/uml/interrupt_sequence.pu b/src/xrpld/net/uml/interrupt_sequence.pu deleted 
file mode 100644 index ba046d084f8..00000000000 --- a/src/xrpld/net/uml/interrupt_sequence.pu +++ /dev/null @@ -1,233 +0,0 @@ -@startuml - - -skinparam shadowing false - -/' -skinparam sequence { - ArrowColor #e1e4e8 - ActorBorderColor #e1e4e8 - DatabaseBorderColor #e1e4e8 - LifeLineBorderColor Black - LifeLineBackgroundColor #d3d6d9 - - ParticipantBorderColor DeepSkyBlue - ParticipantBackgroundColor DodgerBlue - ParticipantFontName Impact - ParticipantFontSize 17 - ParticipantFontColor #A9DCDF - - NoteBackgroundColor #6a737d - - ActorBackgroundColor #f6f8fa - ActorFontColor #6a737d - ActorFontSize 17 - ActorFontName Aapex - - EntityBackgroundColor #f6f8fa - EntityFontColor #6a737d - EntityFontSize 17 - EntityFontName Aapex - - DatabaseBackgroundColor #f6f8fa - DatabaseFontColor #6a737d - DatabaseFontSize 17 - DatabaseFontName Aapex - - CollectionsBackgroundColor #f6f8fa - ActorFontColor #6a737d - ActorFontSize 17 - ActorFontName Aapex -} - -skinparam note { - BackgroundColor #fafbfc - BorderColor #e1e4e8 -} -'/ - -'skinparam monochrome true - -actor Client as c -entity RippleNode as rn -entity ShardArchiveHandler as sa -entity SSLHTTPDownloader as d -database Database as db -collections Fileserver as s - -c -> rn: Launch RippleNode -activate rn - -c -> rn: Issue download request - -note right of c - **Download Request:** - - { - "method": "download_shard", - "params": - [ - { - "shards": - [ - {"index": 1, "url": "https://example.com/1.tar.lz4"}, - {"index": 2, "url": "https://example.com/2.tar.lz4"}, - {"index": 5, "url": "https://example.com/5.tar.lz4"} - ] - } - ] - } -end note - -rn -> sa: Create instance of Handler -activate sa - -rn -> sa: Add three downloads -sa -> sa: Validate requested downloads - -rn -> sa: Initiate Downloads -sa -> rn: ACK: Initiating -rn -> c: Initiating requested downloads - -sa -> db: Save state to the database\n(Processing three downloads) - -note right of db - - **ArchiveHandler State (SQLite Table):** - - | Index | URL | - | 1 | https://example.com/1.tar.lz4 | - | 2 | https://example.com/2.tar.lz4 | - | 5 | https://example.com/5.tar.lz4 | - -end note - -sa -> d: Create instance of Downloader -activate d - -group Download 1 - - note over sa - **Download 1:** - - This encapsulates the download of the first file - at URL "https://example.com/1.tar.lz4". 
-
-    end note
-
-    sa -> d: Start download
-
-    d -> s: Connect and request file
-    s -> d: Send file
-    d -> sa: Invoke completion handler
-
-end
-
-sa -> sa: Import and validate shard
-
-sa -> db: Update persisted state\n(Remove download)
-
-note right of db
-    **ArchiveHandler State:**
-
-    | Index |              URL              |
-    |   2   | https://example.com/2.tar.lz4 |
-    |   5   | https://example.com/5.tar.lz4 |
-
-end note
-
-group Download 2
-
-    sa -> d: Start download
-
-    d -> s: Connect and request file
-
-end
-
-rn -> rn: **RippleNode crashes**
-
-deactivate sa
-deactivate rn
-deactivate d
-
-c -> rn: Restart RippleNode
-activate rn
-
-rn -> db: Detect non-empty state database
-
-rn -> sa: Create instance of Handler
-activate sa
-
-sa -> db: Load state
-
-note right of db
-    **ArchiveHandler State:**
-
-    | Index |              URL              |
-    |   2   | https://example.com/2.tar.lz4 |
-    |   5   | https://example.com/5.tar.lz4 |
-
-end note
-
-sa -> d: Create instance of Downloader
-activate d
-
-sa -> sa: Resume Download 2
-
-group Download 2
-
-    sa -> d: Start download
-
-    d -> s: Connect and request file
-    s -> d: Send file
-    d -> sa: Invoke completion handler
-
-end
-
-sa -> sa: Import and validate shard
-
-sa -> db: Update persisted state \n(Remove download)
-
-note right of db
-    **ArchiveHandler State:**
-
-    | Index |              URL              |
-    |   5   | https://example.com/5.tar.lz4 |
-
-end note
-
-group Download 3
-
-    sa -> d: Start download
-
-    d -> s: Connect and request file
-    s -> d: Send file
-    d -> sa: Invoke completion handler
-
-end
-
-sa -> sa: Import and validate shard
-
-sa -> db: Update persisted state \n(Remove download)
-
-note right of db
-    **ArchiveHandler State:**
-
-    ***empty***
-
-end note
-
-sa -> db: Remove empty database
-
-sa -> sa: Automatically destroyed
-deactivate sa
-
-d -> d: Destroyed via reference\ncounting
-deactivate d
-
-c -> rn: Poll RippleNode to verify successful\nimport of all requested shards.
-c -> rn: Shutdown RippleNode
-
-deactivate rn
-
-@enduml
diff --git a/src/xrpld/net/uml/states.pu b/src/xrpld/net/uml/states.pu
deleted file mode 100644
index b5db8ee48f4..00000000000
--- a/src/xrpld/net/uml/states.pu
+++ /dev/null
@@ -1,69 +0,0 @@
-@startuml
-
-state "Updating Database" as UD4 {
-    UD4: Update the database to reflect
-    UD4: the current state.
-}
-state "Initiating Download" as ID {
-    ID: Omit the range header to download
-    ID: the entire file.
-}
-
-state "Evaluate Database" as ED {
-    ED: Determine the current state
-    ED: based on the contents of the
-    ED: database from a previous run.
-}
-
-state "Remove Database" as RD {
-    RD: The database is destroyed when
-    RD: empty.
-}
-
-state "Download in Progress" as DP
-
-state "Download Completed" as DC {
-
-    state "Updating Database" as UD {
-        UD: Update the database to reflect
-        UD: the current state.
-    }
-
-    state "Queue Check" as QC {
-        QC: Check the queue for any remaining
-        QC: downloads.
-    }
-
-    [*] --> UD
-    UD --> QC
-}
-
-state "Check Resume" as CR {
-    CR: Determine whether we're resuming
-    CR: a previous download or starting a
-    CR: new one.
-}
-
-state "Resuming Download" as IPD {
-    IPD: Set the range header in the
-    IPD: HTTP request as needed. 
-} - -[*] --> ED : State DB is present at\nnode launch -ED --> RD : State DB is empty -ED --> CR : There are downloads queued -RD --> [*] - -[*] --> UD4 : Client invokes <>\ncommand -UD4 --> ID : Database updated -ID --> DP : Download started -DP --> DC : Download completed -DC --> ID : There **are** additional downloads\nqueued -DP --> [*] : A graceful shutdown is\nin progress -DC --> RD : There **are no** additional\ndownloads queued - -CR --> IPD : Resuming an interrupted\ndownload -IPD --> DP: Download started -CR --> ID : Initiating a new\ndownload - -@enduml diff --git a/src/xrpld/nodestore/Database.h b/src/xrpld/nodestore/Database.h index ad843c55d52..daf0483e890 100644 --- a/src/xrpld/nodestore/Database.h +++ b/src/xrpld/nodestore/Database.h @@ -31,8 +31,6 @@ namespace ripple { -class Ledger; - namespace NodeStore { /** Persistency layer for NodeObject @@ -153,7 +151,7 @@ class Database @note This can be called concurrently. @param hash The key of the object to retrieve @param ledgerSeq The sequence of the ledger where the - object is stored, used by the shard store. + object is stored. @param callback Callback function when read completes */ virtual void @@ -162,14 +160,6 @@ class Database std::uint32_t ledgerSeq, std::function const&)>&& callback); - /** Store a ledger from a different database. - - @param srcLedger The ledger to store. - @return true if the operation was successful - */ - virtual bool - storeLedger(std::shared_ptr const& srcLedger) = 0; - /** Remove expired entries from the positive and negative caches. */ virtual void sweep() = 0; @@ -224,14 +214,6 @@ class Database bool isStopping() const; - /** @return The maximum number of ledgers stored in a shard - */ - [[nodiscard]] std::uint32_t - ledgersPerShard() const noexcept - { - return ledgersPerShard_; - } - /** @return The earliest ledger sequence allowed */ [[nodiscard]] std::uint32_t @@ -240,63 +222,6 @@ class Database return earliestLedgerSeq_; } - /** @return The earliest shard index - */ - [[nodiscard]] std::uint32_t - earliestShardIndex() const noexcept - { - return earliestShardIndex_; - } - - /** Calculates the first ledger sequence for a given shard index - - @param shardIndex The shard index considered - @return The first ledger sequence pertaining to the shard index - */ - [[nodiscard]] std::uint32_t - firstLedgerSeq(std::uint32_t shardIndex) const noexcept - { - assert(shardIndex >= earliestShardIndex_); - if (shardIndex <= earliestShardIndex_) - return earliestLedgerSeq_; - return 1 + (shardIndex * ledgersPerShard_); - } - - /** Calculates the last ledger sequence for a given shard index - - @param shardIndex The shard index considered - @return The last ledger sequence pertaining to the shard index - */ - [[nodiscard]] std::uint32_t - lastLedgerSeq(std::uint32_t shardIndex) const noexcept - { - assert(shardIndex >= earliestShardIndex_); - return (shardIndex + 1) * ledgersPerShard_; - } - - /** Calculates the shard index for a given ledger sequence - - @param ledgerSeq ledger sequence - @return The shard index of the ledger sequence - */ - [[nodiscard]] std::uint32_t - seqToShardIndex(std::uint32_t ledgerSeq) const noexcept - { - assert(ledgerSeq >= earliestLedgerSeq_); - return (ledgerSeq - 1) / ledgersPerShard_; - } - - /** Calculates the maximum ledgers for a given shard index - - @param shardIndex The shard index considered - @return The maximum ledgers pertaining to the shard index - - @note The earliest shard may store less if the earliest ledger - sequence truncates its beginning - */ - 
[[nodiscard]] std::uint32_t - maxLedgers(std::uint32_t shardIndex) const noexcept; - protected: beast::Journal const j_; Scheduler& scheduler_; @@ -305,25 +230,14 @@ class Database std::atomic fetchHitCount_{0}; std::atomic fetchSz_{0}; - // The default is DEFAULT_LEDGERS_PER_SHARD (16384) to match the XRP ledger - // network. Can be set through the configuration file using the - // 'ledgers_per_shard' field under the 'node_db' and 'shard_db' stanzas. - // If specified, the value must be a multiple of 256 and equally assigned - // in both stanzas. Only unit tests or alternate networks should change - // this value. - std::uint32_t const ledgersPerShard_; - // The default is XRP_LEDGER_EARLIEST_SEQ (32570) to match the XRP ledger // network's earliest allowed ledger sequence. Can be set through the // configuration file using the 'earliest_seq' field under the 'node_db' - // and 'shard_db' stanzas. If specified, the value must be greater than zero - // and equally assigned in both stanzas. Only unit tests or alternate + // stanza. If specified, the value must be greater than zero. + // Only unit tests or alternate // networks should change this value. std::uint32_t const earliestLedgerSeq_; - // The earliest shard index - std::uint32_t const earliestShardIndex_; - // The maximum number of requests a thread extracts from the queue in an // attempt to minimize the overhead of mutex acquisition. This is an // advanced tunable, via the config file. The default value is 4. @@ -341,10 +255,6 @@ class Database void importInternal(Backend& dstBackend, Database& srcDB); - // Called by the public storeLedger function - bool - storeLedger(Ledger const& srcLedger, std::shared_ptr dstBackend); - void updateFetchMetrics(uint64_t fetches, uint64_t hits, uint64_t duration) { diff --git a/src/xrpld/nodestore/DatabaseShard.h b/src/xrpld/nodestore/DatabaseShard.h deleted file mode 100644 index 408ac3501d3..00000000000 --- a/src/xrpld/nodestore/DatabaseShard.h +++ /dev/null @@ -1,298 +0,0 @@ -//------------------------------------------------------------------------------ -/* - This file is part of rippled: https://github.com/ripple/rippled - Copyright (c) 2012, 2017 Ripple Labs Inc. - - Permission to use, copy, modify, and/or distribute this software for any - purpose with or without fee is hereby granted, provided that the above - copyright notice and this permission notice appear in all copies. - - THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
-*/ -//============================================================================== - -#ifndef RIPPLE_NODESTORE_DATABASESHARD_H_INCLUDED -#define RIPPLE_NODESTORE_DATABASESHARD_H_INCLUDED - -#include -#include -#include -#include -#include - -#include -#include - -namespace ripple { -namespace NodeStore { - -/** A collection of historical shards - */ -class DatabaseShard : public Database -{ -public: - /** Construct a shard store - - @param scheduler The scheduler to use for performing asynchronous tasks - @param readThreads The number of asynchronous read threads to create - @param config The shard configuration section for the database - @param journal Destination for logging output - */ - DatabaseShard( - Scheduler& scheduler, - int readThreads, - Section const& config, - beast::Journal journal) - : Database(scheduler, readThreads, config, journal) - { - } - - /** Initialize the database - - @return `true` if the database initialized without error - */ - [[nodiscard]] virtual bool - init() = 0; - - /** Prepare to store a new ledger in the shard being acquired - - @param validLedgerSeq The sequence of the maximum valid ledgers - @return If a ledger should be fetched and stored, then returns the - ledger sequence of the ledger to request. Otherwise returns - std::nullopt. - Some reasons this may return std::nullopt are: all shards are - stored and full, max allowed disk space would be exceeded, or a - ledger was recently requested and not enough time has passed - between requests. - @implNote adds a new writable shard if necessary - */ - [[nodiscard]] virtual std::optional - prepareLedger(std::uint32_t validLedgerSeq) = 0; - - /** Prepare one or more shard indexes to be imported into the database - - @param shardIndexes Shard indexes to be prepared for import - @return true if all shard indexes successfully prepared for import - */ - [[nodiscard]] virtual bool - prepareShards(std::vector const& shardIndexes) = 0; - - /** Remove a previously prepared shard index for import - - @param shardIndex Shard index to be removed from import - */ - virtual void - removePreShard(std::uint32_t shardIndex) = 0; - - /** Get shard indexes being imported - - @return a string representing the shards prepared for import - */ - [[nodiscard]] virtual std::string - getPreShards() = 0; - - /** Import a shard from the shard archive handler into the - shard database. This differs from 'importDatabase' which - imports the contents of the NodeStore - - @param shardIndex Shard index to import - @param srcDir The directory to import from - @return true If the shard was successfully imported - @implNote if successful, srcDir is moved to the database directory - */ - [[nodiscard]] virtual bool - importShard( - std::uint32_t shardIndex, - boost::filesystem::path const& srcDir) = 0; - - /** Fetch a ledger from the shard store - - @param hash The key of the ledger to retrieve - @param seq The sequence of the ledger - @return The ledger if found, nullptr otherwise - */ - [[nodiscard]] virtual std::shared_ptr - fetchLedger(uint256 const& hash, std::uint32_t seq) = 0; - - /** Notifies the database that the given ledger has been - fully acquired and stored. - - @param ledger The stored ledger to be marked as complete - */ - virtual void - setStored(std::shared_ptr const& ledger) = 0; - - /** Invoke a callback on the SQLite db holding the - corresponding ledger - - @return Value returned by callback function. 
- */ - virtual bool - callForLedgerSQLByLedgerSeq( - LedgerIndex ledgerSeq, - std::function const& callback) = 0; - - /** Invoke a callback on the ledger SQLite db for the - corresponding shard - - @return Value returned by callback function. - */ - virtual bool - callForLedgerSQLByShardIndex( - std::uint32_t shardIndex, - std::function const& callback) = 0; - - /** Invoke a callback on the transaction SQLite db - for the corresponding ledger - - @return Value returned by callback function. - */ - virtual bool - callForTransactionSQLByLedgerSeq( - LedgerIndex ledgerSeq, - std::function const& callback) = 0; - - /** Invoke a callback on the transaction SQLite db - for the corresponding shard - - @return Value returned by callback function. - */ - virtual bool - callForTransactionSQLByShardIndex( - std::uint32_t shardIndex, - std::function const& callback) = 0; - - /** - * @brief iterateLedgerSQLsForward Checks out ledger databases for all - * shards in ascending order starting from given shard index until - * shard with the largest index visited or callback returned false. - * For each visited shard calls given callback function passing - * shard index and session with the database to it. - * @param minShardIndex Start shard index to visit or none if all shards - * should be visited. - * @param callback Callback function to call. - * @return True if each callback function returns true, false otherwise. - */ - virtual bool - iterateLedgerSQLsForward( - std::optional minShardIndex, - std::function< - bool(soci::session& session, std::uint32_t shardIndex)> const& - callback) = 0; - - /** - * @brief iterateTransactionSQLsForward Checks out transaction databases for - * all shards in ascending order starting from given shard index - * until shard with the largest index visited or callback returned - * false. For each visited shard calls given callback function - * passing shard index and session with the database to it. - * @param minShardIndex Start shard index to visit or none if all shards - * should be visited. - * @param callback Callback function to call. - * @return True if each callback function returns true, false otherwise. - */ - virtual bool - iterateTransactionSQLsForward( - std::optional minShardIndex, - std::function< - bool(soci::session& session, std::uint32_t shardIndex)> const& - callback) = 0; - - /** - * @brief iterateLedgerSQLsBack Checks out ledger databases for - * all shards in descending order starting from given shard index - * until shard with the smallest index visited or callback returned - * false. For each visited shard calls given callback function - * passing shard index and session with the database to it. - * @param maxShardIndex Start shard index to visit or none if all shards - * should be visited. - * @param callback Callback function to call. - * @return True if each callback function returns true, false otherwise. - */ - virtual bool - iterateLedgerSQLsBack( - std::optional maxShardIndex, - std::function< - bool(soci::session& session, std::uint32_t shardIndex)> const& - callback) = 0; - - /** - * @brief iterateTransactionSQLsBack Checks out transaction databases for - * all shards in descending order starting from given shard index - * until shard with the smallest index visited or callback returned - * false. For each visited shard calls given callback function - * passing shard index and session with the database to it. - * @param maxShardIndex Start shard index to visit or none if all shards - * should be visited. 
- * @param callback Callback function to call. - * @return True if each callback function returns true, false otherwise. - */ - virtual bool - iterateTransactionSQLsBack( - std::optional maxShardIndex, - std::function< - bool(soci::session& session, std::uint32_t shardIndex)> const& - callback) = 0; - - /** Query information about shards held - - @return Information about shards held by this node - */ - [[nodiscard]] virtual std::unique_ptr - getShardInfo() const = 0; - - /** Returns the root database directory - */ - [[nodiscard]] virtual boost::filesystem::path const& - getRootDir() const = 0; - - /** Returns a JSON object detailing the status of an ongoing - database import if one is running, otherwise an error - object. - */ - virtual Json::Value - getDatabaseImportStatus() const = 0; - - /** Initiates a NodeStore to ShardStore import and returns - the result in a JSON object. - */ - virtual Json::Value - startNodeToShard() = 0; - - /** Terminates a NodeStore to ShardStore import and returns - the result in a JSON object. - */ - virtual Json::Value - stopNodeToShard() = 0; - - /** Returns the first ledger sequence of the shard currently being imported - from the NodeStore - - @return The ledger sequence or an unseated value if no import is running - */ - virtual std::optional - getDatabaseImportSequence() const = 0; - - /** Returns the number of queued tasks - */ - [[nodiscard]] virtual size_t - getNumTasks() const = 0; -}; - -extern std::unique_ptr -make_ShardStore( - Application& app, - Scheduler& scheduler, - int readThreads, - beast::Journal j); - -} // namespace NodeStore -} // namespace ripple - -#endif diff --git a/src/xrpld/nodestore/DeterministicShard.md b/src/xrpld/nodestore/DeterministicShard.md deleted file mode 100644 index 70d0584567b..00000000000 --- a/src/xrpld/nodestore/DeterministicShard.md +++ /dev/null @@ -1,162 +0,0 @@ -# Deterministic Database Shards - -This doc describes the standard way to assemble the database shard. -A shard assembled using this approach becomes deterministic i.e. -if two independent sides assemble a shard consisting of the same ledgers, -accounts and transactions, then they will obtain the same shard files -`nudb.dat` and `nudb.key`. The approach deals with the `NuDB` database -format only, refer to `https://github.com/vinniefalco/NuDB`. - - -## Headers - -Due to NuDB database definition, the following headers are used for -database files: - -nudb.key: -``` -char[8] Type The characters "nudb.key" -uint16 Version Holds the version number -uint64 UID Unique ID generated on creation -uint64 Appnum Application defined constant -uint16 KeySize Key size in bytes -uint64 Salt A random seed -uint64 Pepper The salt hashed -uint16 BlockSize size of a file block in bytes -uint16 LoadFactor Target fraction in 65536ths -uint8[56] Reserved Zeroes -uint8[] Reserved Zero-pad to block size -``` - -nudb.dat: -``` -char[8] Type The characters "nudb.dat" -uint16 Version Holds the version number -uint64 UID Unique ID generated on creation -uint64 Appnum Application defined constant -uint16 KeySize Key size in bytes -uint8[64] (reserved) Zeroes -``` -All of these fields are saved using network byte order -(bigendian: most significant byte first). - -To make the shard deterministic the following parameters are used -as values of header field both for `nudb.key` and `nudb.dat` files. 
-```
-Version 2
-UID digest(0)
-Appnum digest(2) | 0x5348524400000000 /* 'SHRD' */
-KeySize 32
-Salt digest(1)
-Pepper XXH64(Salt)
-BlockSize 0x1000 (4096 bytes)
-LoadFactor 0.5 (numeric 0x8000)
-```
-Note: XXH64() is a well-known hash algorithm.
-
-The `digest(i)` values mentioned above are defined as follows:
-
-First, the RIPEMD160 hash `H` is calculated over the following structure
-(the same as the final key of the shard):
-```
-uint32 version Version of shard, 2 at present
-uint32 firstSeq Sequence number of first ledger in the shard
-uint32 lastSeq Sequence number of last ledger in the shard
-uint256 lastHash Hash of last ledger in shard
-```
-where all 32-bit integers are hashed in network byte order
-(bigendian: most significant byte first).
-
-Then `digest(i)` is defined as the following part of the above hash `H`:
-```
-digest(0) = H[0] << 56 | H[1] << 48 | ... | H[7] << 0,
-digest(1) = H[8] << 56 | H[9] << 48 | ... | H[15] << 0,
-digest(2) = H[16] << 24 | H[17] << 16 | ... | H[19] << 0,
-```
-where `H[i]` denotes the `i`-th byte of hash `H`.
-
-
-## Contents
-
-After the deterministic shard is created using the headers described above,
-it is filled with objects using the following steps.
-
-1. All objects within the shard are visited in the order described in the
-next section. The objects are: ledger headers, SHAmap tree nodes
-(both state and transaction nodes), and the final key.
-
-2. The set of all visited objects is divided into groups. Each group except
-the last contains 16384 objects in visiting order. The last group
-may contain fewer than 16384 objects.
-
-3. The objects within each group are sorted by ascending hash; precisely,
-the hex representations of the hashes are sorted in lexicographic order.
-For example, the following is a sorted sequence of hashes in their
-hex representation:
-```
-0000000000000000000000000000000000000000000000000000000000000000
-154F29A919B30F50443A241C466691B046677C923EE7905AB97A4DBE8A5C2429
-2231553FC01D37A66C61BBEEACBB8C460994493E5659D118E19A8DDBB1444273
-272DCBFD8E4D5D786CF11A5444B30FB35435933B5DE6C660AA46E68CF0F5C441
-3C062FD9F0BCDCA31ACEBCD8E530D0BDAD1F1D1257B89C435616506A3EE6CB9E
-58A0E5AE427CDDC1C7C06448E8C3E4BF718DE036D827881624B20465C3E1336F
-...
-```
-
-4. Finally, the objects are added to the deterministic shard group by group,
-in sorted order within each group from low to high hashes.
-
-
-## Order of visiting objects
-
-The shard consists of 16384 ledgers and the final key with hash 0.
-Each ledger has a header object and two SHAmaps: state and transaction.
-A SHAmap is a rooted tree in which each node has a maximum of 16 descendants,
-enumerated by indexes 0..15. Each node in a SHAmap is visited by the
-functions visitNodes and visitDifferences, implemented in the file
-`ripple/shamap/impl/SHAMapSync.cpp`.
-
-The function visitNodes works as follows: it visits the root first.
-Then it visits all nodes in the 1st layer, i.e. the nodes that are
-immediate descendants of the root, sequentially from index 0 to 15.
-Then it visits all nodes in the 2nd layer, i.e. the nodes that are immediate
-descendants of the nodes from the 1st layer. The order of visiting 2nd layer
-nodes is the following: first, the descendants of the 1st layer node with
-index 0 are visited sequentially from index 0 to 15, then the descendants of
-the 1st layer node with index 1, and so on. After all nodes of the 2nd layer
-have been visited, the nodes of the 3rd layer are visited, and so on.
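-
-In other words, visitNodes performs a breadth-first (layer by layer)
-traversal. The following is a minimal illustrative sketch of that visiting
-order, not the actual visitNodes implementation; `Node` and `child` are
-hypothetical names used only for this example:
-
-```C++
-#include <deque>
-
-struct Node;                // hypothetical SHAmap node
-Node* child(Node&, int i);  // hypothetical accessor: i-th descendant or null
-
-template <class Visitor>
-void
-visitBreadthFirst(Node& root, Visitor visit)
-{
-    std::deque<Node*> queue{&root};
-    while (!queue.empty())
-    {
-        Node* node = queue.front();
-        queue.pop_front();
-        visit(*node);  // the root is visited first, then layer 1, and so on
-
-        // Enqueue descendants sequentially from index 0 to 15 so that each
-        // layer is fully visited before the next layer begins.
-        for (int i = 0; i != 16; ++i)
-            if (Node* c = child(*node, i))
-                queue.push_back(c);
-    }
-}
-```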
-
-The function visitDifferences works similarly to visitNodes, with the
-following exceptions. The first exception is that visitDifferences takes two
-arguments, the current SHAmap and the previous SHAmap, and visits only those
-nodes of the current SHAmap which are not present in the previous SHAmap.
-The second exception is that visitDifferences visits all non-leaf nodes in
-the same order as the visitNodes function, but all leaf nodes are visited
-immediately after their parent node, sequentially from index 0 to 15.
-
-Finally, all objects within the shard are visited in the following order.
-All ledgers are visited from the ledger with the highest index to the ledger
-with the lowest index, in descending order. For each ledger the state SHAmap
-is visited first, using the visitNodes function for the ledger with the
-highest index and the visitDifferences function for the other ledgers. Then
-the transaction SHAmap is visited using the visitNodes function. Lastly, the
-ledger header object is visited. The final key of the shard is visited at
-the very end.
-
-
-## Tests
-
-To test the deterministic shards implementation, run the following command:
-```
-rippled --unittest ripple.NodeStore.DatabaseShard
-```
-
-The following is the expected output of the deterministic shards test:
-```
-ripple.NodeStore.DatabaseShard DatabaseShard deterministic_shard
-with backend nudb
-Iteration 0: RIPEMD160[nudb.key] = F96BF2722AB2EE009FFAE4A36AAFC4F220E21951
-Iteration 0: RIPEMD160[nudb.dat] = FAE6AE84C15968B0419FDFC014931EA12A396C71
-Iteration 1: RIPEMD160[nudb.key] = F96BF2722AB2EE009FFAE4A36AAFC4F220E21951
-Iteration 1: RIPEMD160[nudb.dat] = FAE6AE84C15968B0419FDFC014931EA12A396C71
-```
diff --git a/src/xrpld/nodestore/Manager.h b/src/xrpld/nodestore/Manager.h
index 5a4d46068be..89ed165b483 100644
--- a/src/xrpld/nodestore/Manager.h
+++ b/src/xrpld/nodestore/Manager.h
@@ -21,7 +21,6 @@
 #define RIPPLE_NODESTORE_MANAGER_H_INCLUDED
 
 #include
-#include
 #include
 
 namespace ripple {
diff --git a/src/xrpld/nodestore/README.md b/src/xrpld/nodestore/README.md
index 7b004f67a42..1549c1ef968 100644
--- a/src/xrpld/nodestore/README.md
+++ b/src/xrpld/nodestore/README.md
@@ -1,8 +1,6 @@
 # Database Documentation
 * [NodeStore](#nodestore)
 * [Benchmarks](#benchmarks)
-* [Downloaded Shard Validation](#downloaded-shard-validation)
-* [Shard Storage Paths](#shard-storage-paths)
 
 # NodeStore
 
@@ -176,195 +174,3 @@ instruction not being used.
 * An important point to note is that if this factory is tested with an existing
 set of sst files none of the old sst files will benefit from indexing changes
 until they are compacted at a future point in time.
-
-# Downloaded Shard Validation
-
-## Overview
-
-In order to validate shards that have been downloaded from file servers (as
-opposed to shards acquired from peers), the application must confirm the
-validity of the downloaded shard's last ledger. So before initiating the
-download, we first confirm that we are able to retrieve the shard's last ledger
-hash. The following sections describe this confirmation process in greater
-detail.
-
-## Hash Verification
-
-### Flag Ledger
-
-Since the number of ledgers contained in each shard is always a multiple of 256,
-a shard's last ledger is always a flag ledger. Conveniently, the skip list
-stored within a ledger will provide us with a series of flag ledger hashes,
-enabling the software to corroborate a shard's last ledger hash.
We access the -skip list by calling `LedgerMaster::walkHashBySeq` and providing the sequence of -a shard's last ledger: - -```C++ -std::optional expectedHash; -expectedHash = - app_.getLedgerMaster().walkHashBySeq(lastLedgerSeq(shardIndex)); -``` - -When a user requests a shard download, the `ShardArchiveHandler` will first use -this function to retrieve the hash of the shard's last ledger. If the function -returns a hash, downloading the shard can proceed. Once the download completes, -the server can reliably retrieve this last ledger hash to complete validation of -the shard. - -### Caveats - -#### Later Ledger - -The `walkHashBySeq` function will provide the hash of a flag ledger only if the -application has stored a later ledger. When verifying the last ledger hash of a -pending shard download, if there is no later ledger stored, the download will be -deferred until a later ledger has been stored. - -We use the presence (or absence) of a validated ledger with a sequence number -later than the sequence of the shard's last ledger as a heuristic for -determining whether or not we should have the shard's last ledger hash. A later -ledger must be present in order to reliably retrieve the hash of the shard's -last ledger. The hash will only be retrieved when a later ledger is present. -Otherwise verification of the shard will be deferred. - -### Retries - -#### Retry Limit - -If the server must defer hash verification, the software will initiate a timer -that upon expiration, will re-attempt verifying the last ledger hash. We place -an upper limit on the number of attempts the server makes to achieve this -verification. When the maximum number of attempts has been reached, the download -request will fail, and the `ShardArchiveHandler` will proceed with any remaining -downloads. An attempt counts toward the limit only when we are able to get a -later validated ledger (implying a current view of the network), but are unable -to retrieve the last ledger hash. Retries that occur because no validated ledger -was available are not counted. - -# Shard Storage Paths - -## Overview - -The shard database stores validated ledgers in logical groups called shards. As -of June 2020, a shard stores 16384 ledgers by default. In order to allow users -to store shards on multiple devices, the shard database can be configured with -several file system paths. Each path provided should refer to a directory on a -distinct filesystem, and no two paths should ever correspond to the same -filesystem. Violating this restriction will cause the server to inaccurately -estimate the amount of space available for storing shards. In the absence of a -suitable platform agnostic solution, this requirement is enforced only on -Linux. However, on other platforms we employ a heuristic that issues a warning -if we suspect that this restriction is violated. - -## Configuration - -The `shard_db` and `historical_shard_paths` sections of the server's -configuration file will be used to determine where the server stores shards. -Minimally, the `shard_db` section must contain a single `path` key. -If this is the only storage path provided, all shards will be stored at this -location. If the configuration also lists one or more lines in the -`historical_shard_paths` section, all older shards will be stored at these -locations, and the `path` will be used only to store the current -and previous shards. 
The goal is to allow users to provide an efficient SSD for
-storing recent shards, as these will be accessed more frequently, while using
-large mechanical drives for storing older shards that will be accessed less
-frequently.
-
-Below is a sample configuration snippet that provides a path for main storage
-and several paths for historical storage:
-
-```dosini
-# This is the persistent datastore for shards. It is important for the health
-# of the network that server operators shard as much as practical.
-# NuDB requires SSD storage. Helpful information can be found on
-# https://xrpl.org/history-sharding.html
-[shard_db]
-type=NuDB
-
-# A single path for storing
-# the current and previous
-# shards:
-# -------------------------
-path=/var/lib/rippled/db/shards/nudb
-
-# Path where shards are stored
-# while being downloaded:
-# ----------------------------
-download_path=/var/lib/rippled/db/shards/
-
-# The number of historical shards to store.
-# The default value is 0, which means that
-# the server won't store any historical
-# shards - only the current and previous
-# shards will be stored.
-# ------------------------------------
-max_historical_shards=100
-
-# List of paths for storing older shards.
-[historical_shard_paths]
-/mnt/disk1
-/mnt/disk2
-/mnt/disk3
-
-```
-## Shard Migration
-
-When a new shard (*current shard*) is confirmed by the network, the recent
-shards will shift. The *previous shard* will become a *historical shard*, the
-*current shard* will become the *previous shard*, and the new shard will become
-the *current shard*. These are just logical labels, and the shards themselves
-don't change to reflect being current, previous, or historical. However, if the
-server's configuration specifies one or more paths for historical storage,
-during this shift the formerly *previous shard* will be migrated to one of the
-historical paths. If multiple paths are provided, the server dynamically
-chooses one with sufficient space for storing the shard.
-
-**Note:** As of June 2020, the shard database does not store the partial shard
-currently being built by live network transactions, but this is planned to
-change. When this feature is implemented, the *current shard* will refer to this
-partial shard, and the *previous shard* will refer to the most recently
-validated shard.
-
-### Selecting a Historical Storage Path
-
-When storing historical shards, if multiple historical paths are provided, the
-path to use for each shard will be selected in a random fashion. By using all
-available storage devices, we create a uniform distribution of disk utilization
-for disks of equivalent size (provided that the disks are used only to store
-shards). In theory, selecting devices in this manner will also increase our
-chances for concurrent access to stored shards; however, as of June 2020,
-concurrent shard access is not implemented. Lastly, a storage path is included
-in the random distribution only if it has enough storage capacity to hold the
-next shard.
-
-## Shard Acquisition
-
-When the server is acquiring shard history, these acquired shards will be stored
-at a path designated for historical storage (`historical_shard_paths`). If no
-such path is provided, acquired shards will be stored at the
-`path`.
-
-## Storage capacity
-
-### Filesystem Capacity
-
-When the shard database updates its record of disk utilization, it trusts that
-the provided historical paths refer to distinct devices, or at least distinct
-filesystems.
If this requirement is violated, the database will operate with an
-inaccurate view of how many shards it can store. Violation of this requirement
-won't necessarily impede database operations, but the database will fail to
-identify scenarios wherein storing the maximum number of historical shards (as
-per the `max_historical_shards` parameter in the configuration file) would
-exceed the amount of storage space available.
-
-### Shard Migration
-
-During a "recent shard shift", if the server has already reached the configured
-limit of stored historical shards, then instead of moving the formerly *previous
-shard* to a historical drive (or keeping it at the 'path'), the
-shard will be dropped and removed from the filesystem.
-
-### Shard Acquisition
-
-Once the configured limit of stored historical shards has been reached, shard
-acquisition halts, and no additional shards will be acquired.
diff --git a/src/xrpld/nodestore/ShardInfo.h b/src/xrpld/nodestore/ShardInfo.h
deleted file mode 100644
index b894ddc34a3..00000000000
--- a/src/xrpld/nodestore/ShardInfo.h
+++ /dev/null
@@ -1,122 +0,0 @@
-//------------------------------------------------------------------------------
-/*
-    This file is part of rippled: https://github.com/ripple/rippled
-    Copyright (c) 2020 Ripple Labs Inc.
-
-    Permission to use, copy, modify, and/or distribute this software for any
-    purpose with or without fee is hereby granted, provided that the above
-    copyright notice and this permission notice appear in all copies.
-
-    THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
-    WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
-    MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
-    ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
-    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
-    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
-    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-*/
-//==============================================================================
-
-#ifndef RIPPLE_NODESTORE_SHARDINFO_H_INCLUDED
-#define RIPPLE_NODESTORE_SHARDINFO_H_INCLUDED
-
-#include
-#include
-#include
-
-namespace ripple {
-namespace NodeStore {
-
-/* Contains information on the status of shards for a node
- */
-class ShardInfo
-{
-private:
-    class Incomplete
-    {
-    public:
-        Incomplete() = delete;
-        Incomplete(ShardState state, std::uint32_t percentProgress)
-            : state_(state), percentProgress_(percentProgress)
-        {
-        }
-
-        [[nodiscard]] ShardState
-        state() const noexcept
-        {
-            return state_;
-        }
-
-        [[nodiscard]] std::uint32_t
-        percentProgress() const noexcept
-        {
-            return percentProgress_;
-        }
-
-    private:
-        ShardState state_;
-        std::uint32_t percentProgress_;
-    };
-
-public:
-    [[nodiscard]] NetClock::time_point const&
-    msgTimestamp() const
-    {
-        return msgTimestamp_;
-    }
-
-    void
-    setMsgTimestamp(NetClock::time_point const& timestamp)
-    {
-        msgTimestamp_ = timestamp;
-    }
-
-    [[nodiscard]] std::string
-    finalizedToString() const;
-
-    [[nodiscard]] bool
-    setFinalizedFromString(std::string const& str)
-    {
-        return from_string(finalized_, str);
-    }
-
-    [[nodiscard]] RangeSet const&
-    finalized() const
-    {
-        return finalized_;
-    }
-
-    [[nodiscard]] std::string
-    incompleteToString() const;
-
-    [[nodiscard]] std::map const&
-    incomplete() const
-    {
-        return incomplete_;
-    }
-
-    // Returns true if successful or false because of a duplicate index
-    bool
-    update(
-        std::uint32_t shardIndex,
-        ShardState state,
-        std::uint32_t percentProgress);
-
-    [[nodiscard]] protocol::TMPeerShardInfoV2
-    makeMessage(Application& app);
-
-private:
-    // Finalized immutable shards
-    RangeSet finalized_;
-
-    // Incomplete shards being acquired or finalized
-    std::map incomplete_;
-
-    // Message creation time
-    NetClock::time_point msgTimestamp_;
-};
-
-} // namespace NodeStore
-} // namespace ripple
-
-#endif
diff --git a/src/xrpld/nodestore/ShardPool.md b/src/xrpld/nodestore/ShardPool.md
deleted file mode 100644
index 2079feabb4e..00000000000
--- a/src/xrpld/nodestore/ShardPool.md
+++ /dev/null
@@ -1,43 +0,0 @@
-# Open Shard Management
-
-## Overview
-
-Shard NuDB and SQLite databases consume server resources. This can be unnecessarily taxing on servers with many shards. The open shard management feature aims to improve the situation by managing a limited number of open shard database connections. The feature, which is integrated into the existing DatabaseShardImp and Shard classes, maintains a limited pool of open databases prioritized by their last-use time stamp. The following sections describe the feature in greater detail.
-
-### Open Shard Management
-
-Each time the periodically invoked DatabaseShardImp sweep function runs, the number of open finalized shards, which constitute the open pool, is examined. When the pool exceeds its limit, an attempt is made to close enough open shards to remain within the limit. Shards to be closed are selected based on their last-use time stamp, which is automatically updated on database access. If necessary, shards will automatically open their databases when accessed.
-
-```C++
-    if (openFinals.size() > openFinalLimit_)
-    {
-        // Try to close enough shards to be within the limit.
-        // Sort on largest elapsed time since last use.
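-        // The comparator below orders the pool accordingly, so the shards
-        // that have gone longest without use come first; the loop that
-        // follows then closes shards from the front until the pool is back
-        // within openFinalLimit_, skipping any shard that cannot be closed
-        // yet.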
-        std::sort(
-            openFinals.begin(),
-            openFinals.end(),
-            [&](std::shared_ptr const& lhsShard,
-                std::shared_ptr const& rhsShard) {
-                return lhsShard->getLastUse() > rhsShard->getLastUse();
-            });
-
-        for (auto it{openFinals.cbegin()};
-             it != openFinals.cend() && openFinals.size() > openFinalLimit_;)
-        {
-            if ((*it)->tryClose())
-                it = openFinals.erase(it);
-            else
-                ++it;
-        }
-    }
-```
-
-### Shard
-
-When closing an open shard, DatabaseShardImp will call the Shard 'tryClose' function. This function will close the shard databases only if there are no outstanding references.
-
-DatabaseShardImp will use the Shard 'isOpen' function to determine the state of a shard's databases.
-
-### Caveats
-
-The Shard class must check the state of its databases before use. Prior use assumed that databases were always open; that is no longer the case with the open shard management feature.
diff --git a/src/xrpld/nodestore/ShardSizeTuning.md b/src/xrpld/nodestore/ShardSizeTuning.md
deleted file mode 100644
index bded73c43c5..00000000000
--- a/src/xrpld/nodestore/ShardSizeTuning.md
+++ /dev/null
@@ -1,213 +0,0 @@
-# Shard Size Tuning
-
-The purpose of this document is to compare the sizes of shards containing
-varying numbers of ledgers.
-
-## Methodology
-
-One can see visually from a block explorer that a typical mainnet ledger
-consists of about 30 offer transactions issued by about 8 different accounts,
-and several transactions of other types. To simulate this and similar
-situations we have constructed deterministic shards of different sizes,
-with varying numbers of offers per ledger and varying numbers of accounts
-issuing these offers.
-
-In the following results table, the number of ledgers per shard ranges from 256
-to 16K, doubling at each step. We considered the following numbers of offers
-per ledger: 0, 1, 5, 10 and 30. We also considered the cases of 1 and 8
-accounts issuing offers. For each constructed deterministic shard we counted
-its size. Finally, we compared the doubled size of the shard with N ledgers
-against the size of a shard with 2*N ledgers, where other parameters such as
-the number of offers and accounts are the same. This comparison is sufficient
-to determine which number of ledgers per shard leads to the smallest total
-storage size on disk.
-
-Note that we minimize the total storage size on disk, not the size of each
-shard, because the data below show that the size of a typical shard is no
-larger than 10G, while the sizes of modern disks, even SSDs, start from 250G.
-So fitting a single shard on a disk, even a small one, is not a problem.
-
-
-## Raw results table
-
-All sizes of the constructed shards are shown in the following table.
-Rows correspond to shard sizes (S) counted in ledgers; columns correspond
-to numbers of offers (O) per ledger. In each cell there are two numbers:
-the first corresponds to the case of 1 account issuing offers, and the second
-corresponds to 8 accounts. Each number is the size of the shard with the
-given parameters, measured in megabytes.
-
-|S\O|0|1|5|10|30|
-|---|---|---|---|---|---|
-|256|2.2/2.2|3.4/3.3|5.3/7.3|7.7/10.9|17.1/21.9|
-|512|4.4/4.5|7.0/7.0|11.2/15.6|16.4/23.7|36.9/47.9|
-|1K|8.9/9.0|14.7/14.6|23.7/33.6|35.0/51.0|78.2/ 102.9|
-|2K|17.8/18.0|30.5/30.7|50.4/72.2|74.3/ 111.9|166.2/ 221.0|
-|4K|35.5/35.9|63.6/64.2|106.2/ 154.8|156.1/ 238.7|354.7/ 476.0|
-|8K|71.1/71.9|133.4/ 134.5|222.2/ 328.1|329.1/ 511.2|754.6/ 1021.0|
-|16K|142.3/ 143.9|279.9/ 280.8|465.7/ 698.1|696.4/ 1094.2|1590.5/ 2166.6|
-
-## Preliminary conclusion
-
-If one compares the doubled size of a shard with N ledgers against the size
-of a shard with 2*N ledgers anywhere in the above table, then the conclusion
-will be that the second number is greater. For example, the following table
-shows the percentage by which the second number is greater for the most
-interesting case of 30 offers per ledger. The first row corresponds to the
-case of 1 account issuing offers, and the second row corresponds to the case
-of 8 issuing accounts.
-
-|A\N|256|512|1K|2K|4K|8K|
-|---|---|---|---|---|---|---|
-|1|8%|6%|6%|7%|6%|5%|
-|8|9%|7%|7%|8%|7%|6%|
-
-The common conclusion in this model is that if one doubles the number of
-ledgers in a shard, then the total disk space utilized rises by 5-9%.
-
-## Adding accounts into consideration
-
-The previous model does not take into account that there is a large number of
-XRP accounts on the mainnet, and each shard should contain information
-about each of these accounts. As of January 2020, there were about 1.9 million
-XRP accounts, and the stored information about each of them is at least 133
-bytes. The constant 133 was obtained from a debug print of the rippled program
-when it saves an account object to the database. So the actual size of each
-shard from the raw table should be increased by at least 1.9M * 133 = 252.7M.
-Thus we obtained the following table of shard sizes for the most interesting
-case (30 offers per ledger and 8 issuing accounts), where S is the shard size
-in ledgers and M is the shard size in megabytes:
-
-|S|256|512|1K|2K|4K|8K|16K|
-|---|---|---|---|---|---|---|---|
-|M|274.6|300.6|355.6|473.7|728.7|1273.7|2419.3|
-
-Now we can see from the last table that even under this minimal assumption
-about the number of accounts and the corresponding additional shard size, the
-doubled size of a shard with N ledgers is larger than the size of a shard with
-2*N ledgers. If the number of accounts increases, this inequality becomes
-even stronger.
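-
-As a quick check of the arithmetic above, the following minimal sketch
-recomputes the adjusted sizes in the last table; it is not part of any
-rippled tooling, and the account count and per-account constant are simply
-the assumptions stated above:
-
-```C++
-#include <cstdio>
-
-int
-main()
-{
-    // ~1.9 million accounts at >= 133 bytes each: about 252.7 MB per shard
-    double const accountsMB = 1.9e6 * 133 / 1e6;
-
-    // raw shard sizes in MB (30 offers per ledger, 8 issuing accounts)
-    double const rawMB[] = {21.9, 47.9, 102.9, 221.0, 476.0, 1021.0, 2166.6};
-
-    for (double raw : rawMB)
-        std::printf("%.1f\n", raw + accountsMB);  // 274.6, 300.6, ..., 2419.3
-    return 0;
-}
-```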
-
-## Using mainnet data
-
-The next idea for improving the model is to measure real shard sizes from the
-mainnet. We used real 16k-ledgers shards with indexes from 2600 to 3600 with
-step 100, and the corresponding real 8k-ledgers shards. Each 16k-ledgers shard
-consists of two 8k-ledgers shards, which are called "corresponding". For
-example, the 16k-ledgers shard with index 2600 consists of the two 8k-ledgers
-shards with indexes 5200 and 5201.
-
-In the following table we compare the size of a 16k-ledgers shard with the sum
-of the sizes of the two corresponding 8k-ledgers shards. Here we count only
-the size of the nudb.dat file; sizes are in GB. Ratio is the combined size of
-the two 8k-ledgers shards divided by the size of the 16k-ledgers shard.
-
-|Index|16k-ledgers|8k-ledgers sum|Ratio|
-|---|---|---|---|
-|2600|2.39|1.49 + 1.63 = 3.12|1.31|
-|2700|2.95|1.77 + 1.94 = 3.71|1.26|
-|2800|2.53|1.54 + 1.75 = 3.29|1.30|
-|2900|3.83|2.26 + 2.35 = 4.61|1.20|
-|3000|4.49|2.70 + 2.59 = 5.29|1.18|
-|3100|3.79|2.35 + 2.25 = 4.60|1.21|
-|3200|4.15|2.54 + 2.43 = 4.97|1.20|
-|3300|5.19|3.23 + 2.80 = 6.03|1.16|
-|3400|4.18|2.53 + 2.51 = 5.04|1.21|
-|3500|5.06|2.90 + 3.04 = 5.94|1.17|
-|3600|4.18|2.56 + 2.51 = 5.07|1.21|
-|Average|3.89|2.35 + 2.35 = 4.70|1.21|
-
-Note that a shard on disk consists of four files, each of which can be large
-too. These files are nudb.dat, nudb.key, ledger.db and transaction.db. The
-next table is similar to the previous one, with the following exception: each
-number is the total size of two files, nudb.dat and nudb.key. We decided not
-to count the sizes of ledger.db and transaction.db because those sizes are not
-permanent, unlike the sizes of the nudb.* files, which are permanent for
-deterministic shards.
-
-|Index|16k-ledgers|8k-ledgers sum|Ratio|
-|---|---|---|---|
-|2600|2.76|1.73 + 1.89 = 3.62|1.31|
-|2700|3.40|2.05 + 2.25 = 4.30|1.26|
-|2800|2.91|1.79 + 2.02 = 3.81|1.31|
-|2900|4.40|2.62 + 2.71 = 5.33|1.21|
-|3000|5.09|3.09 + 2.96 = 6.05|1.19|
-|3100|4.29|2.69 + 2.57 = 5.26|1.23|
-|3200|4.69|2.90 + 2.78 = 5.68|1.21|
-|3300|5.92|3.72 + 3.21 = 6.93|1.17|
-|3400|4.77|2.91 + 2.89 = 5.80|1.22|
-|3500|5.73|3.31 + 3.47 = 6.78|1.18|
-|3600|4.77|2.95 + 2.90 = 5.85|1.23|
-|Average|4.43|2.70 + 2.70 = 5.40|1.22|
-
-We can see that in all tables the ratio is greater than 1, so using shards
-with 16k ledgers is preferred.
-
-## Compare 16K shards and 32K shards
-
-To confirm that shards with 16K ledgers are the best choice, we also assembled
-shards with 32k ledgers per shard, with indexes from 1300 to 1800 with step
-50, and the corresponding shards with 16k ledgers per shard. For example, the
-32k-ledgers shard 1800 corresponds to the 16k-ledgers shards with indexes 3600
-and 3601, etc.
-
-Here are the result tables for these shards, similar to the tables in the
-previous part. In the first table we take into consideration only the sizes
-of the nudb.dat files.
-
-|Index|32k-ledgers|16k-ledgers sum|Ratio|
-|---|---|---|---|
-|1300|4.00|2.39 + 2.32 = 4.71|1.18|
-|1350|5.23|2.95 + 3.02 = 5.97|1.14|
-|1400|4.37|2.53 + 2.59 = 5.12|1.17|
-|1450|7.02|3.83 + 3.98 = 7.81|1.11|
-|1500|7.53|4.49 + 3.86 = 8.35|1.11|
-|1550|6.85|3.79 + 3.89 = 7.68|1.12|
-|1600|7.28|4.15 + 3.99 = 8.14|1.12|
-|1650|8.10|5.19 + 3.76 = 8.95|1.10|
-|1700|7.58|4.18 + 4.27 = 8.45|1.11|
-|1750|8.95|5.06 + 4.77 = 9.83|1.10|
-|1800|7.29|4.18 + 4.02 = 8.20|1.12|
-|Average|6.75|3.88 + 3.68 = 7.56|1.12|
-
-In the second table we take into consideration the total sizes of the files
-nudb.dat and nudb.key.
-
-|Index|32k-ledgers|16k-ledgers sum|Ratio|
-|---|---|---|---|
-|1300|4.59|2.76 + 2.68 = 5.44|1.19|
-|1350|5.98|3.40 + 3.47 = 6.87|1.15|
-|1400|4.99|2.91 + 2.98 = 5.89|1.18|
-|1450|8.02|4.40 + 4.56 = 8.96|1.12|
-|1500|8.51|5.09 + 4.39 = 9.48|1.11|
-|1550|7.73|4.29 + 4.42 = 8.71|1.13|
-|1600|8.20|4.69 + 4.52 = 9.21|1.12|
-|1650|9.20|5.92 + 4.29 = 10.21|1.11|
-|1700|8.61|4.77 + 4.87 = 9.64|1.12|
-|1750|10.09|5.73 + 5.41 = 11.14|1.10|
-|1800|8.27|4.77 + 4.59 = 9.36|1.13|
-|Average|7.69|4.43 + 4.20 = 8.63|1.12|
-
-## Conclusion
-
-We showed that using shards with 8k ledgers raises the required disk size by
-22% in comparison with shards of 16k ledgers. In the same way, using shards
-with 16k ledgers raises the required disk space by 12% in comparison with
-shards of 32k ledgers.
Note that the 12% increase is much smaller than the 22%, so using 32k-ledgers
-shards would not save us much disk space.
-
-At the same time, size is one thing to compare, but there are other aspects.
-Smaller shards have the advantage that they take less time to acquire and
-finalize. They also make for smaller archived shards, which take less time to
-download and import. Having more/smaller shards might also lead to better
-database concurrency/performance.
-
-It is hard to capture both the size and time parameters in a single
-optimization formula, because different choices for the weights of size and
-time may lead to different results. But using "common sense" arguments we can
-compare 16k-ledgers and 32k-ledgers shards as follows: using 32k-ledgers
-shards gives us a 12% advantage in size and about a 44% disadvantage in time,
-because the average size of a 16k-ledgers shard is about 56% of the average
-32k-ledgers shard. At the same time, if we compare 16k-ledgers shards with
-8k-ledgers shards, the former have a 22% advantage in size and a 39%
-disadvantage in time. So the balance of advantages/disadvantages is better
-when we use 16k-ledgers shards.
-
-Thus we recommend using shards with 16K ledgers.
diff --git a/src/xrpld/nodestore/Types.h b/src/xrpld/nodestore/Types.h
index 39104f946e3..a5792fe7df3 100644
--- a/src/xrpld/nodestore/Types.h
+++ b/src/xrpld/nodestore/Types.h
@@ -56,15 +56,6 @@ using Batch = std::vector>;
 
 } // namespace NodeStore
 
-/** Shard states. */
-enum class ShardState : std::uint32_t {
-    acquire,     // Acquiring ledgers
-    complete,    // Backend is ledger complete, database is unverified
-    finalizing,  // Verifying database
-    finalized,   // Database verified, shard is immutable
-    queued       // Queued to be finalized
-};
-
 } // namespace ripple
 
 #endif
diff --git a/src/xrpld/nodestore/backend/NuDBFactory.cpp b/src/xrpld/nodestore/backend/NuDBFactory.cpp
index 742bf05031b..14cd84a1ad7 100644
--- a/src/xrpld/nodestore/backend/NuDBFactory.cpp
+++ b/src/xrpld/nodestore/backend/NuDBFactory.cpp
@@ -38,11 +38,11 @@ namespace NodeStore {
 class NuDBBackend : public Backend
 {
 public:
-    static constexpr std::uint64_t currentType = 1;
-    static constexpr std::uint64_t deterministicMask = 0xFFFFFFFF00000000ull;
-
-    /* "SHRD" in ASCII */
-    static constexpr std::uint64_t deterministicType = 0x5348524400000000ull;
+    // "appnum" is an application-defined constant stored in the header of a
+    // NuDB database. We used it to identify shard databases before that code
+    // was removed. For now, its only use is a sanity check that the database
+    // was created by xrpld.
+    static constexpr std::uint64_t appnum = 1;
 
     beast::Journal const j_;
     size_t const keyBytes_;
@@ -149,16 +149,7 @@
         if (ec)
            Throw(ec);
 
-        /** Old value currentType is accepted for appnum in traditional
-         * databases, new value is used for deterministic shard databases.
-         * New 64-bit value is constructed from fixed and random parts.
-         * Fixed part is bounded by bitmask deterministicMask,
-         * and the value of fixed part is deterministicType.
-         * Random part depends on the contents of the shard and may be any.
-         * The contents of appnum field should match either old or new rule.
- */ - if (db_.appnum() != currentType && - (db_.appnum() & deterministicMask) != deterministicType) + if (db_.appnum() != appnum) Throw("nodestore: unknown appnum"); db_.set_burst(burstSize_); } @@ -172,7 +163,7 @@ class NuDBBackend : public Backend void open(bool createIfMissing) override { - open(createIfMissing, currentType, nudb::make_uid(), nudb::make_salt()); + open(createIfMissing, appnum, nudb::make_uid(), nudb::make_salt()); } void diff --git a/src/xrpld/nodestore/detail/Database.cpp b/src/xrpld/nodestore/detail/Database.cpp index 93468eb6084..60cfb35051c 100644 --- a/src/xrpld/nodestore/detail/Database.cpp +++ b/src/xrpld/nodestore/detail/Database.cpp @@ -17,7 +17,6 @@ */ //============================================================================== -#include #include #include #include @@ -36,21 +35,13 @@ Database::Database( beast::Journal journal) : j_(journal) , scheduler_(scheduler) - , ledgersPerShard_(get( - config, - "ledgers_per_shard", - DEFAULT_LEDGERS_PER_SHARD)) , earliestLedgerSeq_( get(config, "earliest_seq", XRP_LEDGER_EARLIEST_SEQ)) - , earliestShardIndex_((earliestLedgerSeq_ - 1) / ledgersPerShard_) , requestBundle_(get(config, "rq_bundle", 4)) , readThreads_(std::max(1, readThreads)) { assert(readThreads != 0); - if (ledgersPerShard_ == 0 || ledgersPerShard_ % 256 != 0) - Throw("Invalid ledgers_per_shard"); - if (earliestLedgerSeq_ < 1) Throw("Invalid earliest_seq"); @@ -148,19 +139,6 @@ Database::isStopping() const return readStopping_.load(std::memory_order_relaxed); } -std::uint32_t -Database::maxLedgers(std::uint32_t shardIndex) const noexcept -{ - if (shardIndex > earliestShardIndex_) - return ledgersPerShard_; - - if (shardIndex == earliestShardIndex_) - return lastLedgerSeq(shardIndex) - firstLedgerSeq(shardIndex) + 1; - - assert(!"Invalid shard index"); - return 0; -} - void Database::stop() { @@ -275,105 +253,6 @@ Database::fetchNodeObject( return nodeObject; } -bool -Database::storeLedger( - Ledger const& srcLedger, - std::shared_ptr dstBackend) -{ - auto fail = [&](std::string const& msg) { - JLOG(j_.error()) << "Source ledger sequence " << srcLedger.info().seq - << ". " << msg; - return false; - }; - - if (srcLedger.info().hash.isZero()) - return fail("Invalid hash"); - if (srcLedger.info().accountHash.isZero()) - return fail("Invalid account hash"); - - auto& srcDB = const_cast(srcLedger.stateMap().family().db()); - if (&srcDB == this) - return fail("Source and destination databases are the same"); - - Batch batch; - batch.reserve(batchWritePreallocationSize); - auto storeBatch = [&, fname = __func__]() { - std::uint64_t sz{0}; - for (auto const& nodeObject : batch) - sz += nodeObject->getData().size(); - - try - { - dstBackend->storeBatch(batch); - } - catch (std::exception const& e) - { - fail( - std::string("Exception caught in function ") + fname + - ". 
Error: " + e.what()); - return false; - } - - storeStats(batch.size(), sz); - batch.clear(); - return true; - }; - - // Store ledger header - { - Serializer s(sizeof(std::uint32_t) + sizeof(LedgerInfo)); - s.add32(HashPrefix::ledgerMaster); - addRaw(srcLedger.info(), s); - auto nObj = NodeObject::createObject( - hotLEDGER, std::move(s.modData()), srcLedger.info().hash); - batch.emplace_back(std::move(nObj)); - } - - bool error = false; - auto visit = [&](SHAMapTreeNode& node) { - if (!isStopping()) - { - if (auto nodeObject = srcDB.fetchNodeObject( - node.getHash().as_uint256(), srcLedger.info().seq)) - { - batch.emplace_back(std::move(nodeObject)); - if (batch.size() < batchWritePreallocationSize || storeBatch()) - return true; - } - } - - error = true; - return false; - }; - - // Store the state map - if (srcLedger.stateMap().getHash().isNonZero()) - { - if (!srcLedger.stateMap().isValid()) - return fail("Invalid state map"); - - srcLedger.stateMap().snapShot(false)->visitNodes(visit); - if (error) - return fail("Failed to store state map"); - } - - // Store the transaction map - if (srcLedger.info().txHash.isNonZero()) - { - if (!srcLedger.txMap().isValid()) - return fail("Invalid transaction map"); - - srcLedger.txMap().snapShot(false)->visitNodes(visit); - if (error) - return fail("Failed to store transaction map"); - } - - if (!batch.empty() && !storeBatch()) - return fail("Failed to store"); - - return true; -} - void Database::getCountsJson(Json::Value& obj) { diff --git a/src/xrpld/nodestore/detail/DatabaseNodeImp.cpp b/src/xrpld/nodestore/detail/DatabaseNodeImp.cpp index d61c68e759a..85e5d3c0da9 100644 --- a/src/xrpld/nodestore/detail/DatabaseNodeImp.cpp +++ b/src/xrpld/nodestore/detail/DatabaseNodeImp.cpp @@ -17,7 +17,6 @@ */ //============================================================================== -#include #include #include diff --git a/src/xrpld/nodestore/detail/DatabaseNodeImp.h b/src/xrpld/nodestore/detail/DatabaseNodeImp.h index f5f5f64bd1d..c2bf237b943 100644 --- a/src/xrpld/nodestore/detail/DatabaseNodeImp.h +++ b/src/xrpld/nodestore/detail/DatabaseNodeImp.h @@ -128,12 +128,6 @@ class DatabaseNodeImp : public Database std::function const&)>&& callback) override; - bool - storeLedger(std::shared_ptr const& srcLedger) override - { - return Database::storeLedger(*srcLedger, backend_); - } - void sweep() override; diff --git a/src/xrpld/nodestore/detail/DatabaseRotatingImp.cpp b/src/xrpld/nodestore/detail/DatabaseRotatingImp.cpp index b1283d7de71..58cc3599dc6 100644 --- a/src/xrpld/nodestore/detail/DatabaseRotatingImp.cpp +++ b/src/xrpld/nodestore/detail/DatabaseRotatingImp.cpp @@ -17,7 +17,6 @@ */ //============================================================================== -#include #include #include @@ -79,17 +78,6 @@ DatabaseRotatingImp::importDatabase(Database& source) importInternal(*backend, source); } -bool -DatabaseRotatingImp::storeLedger(std::shared_ptr const& srcLedger) -{ - auto const backend = [&] { - std::lock_guard lock(mutex_); - return writableBackend_; - }(); - - return Database::storeLedger(*srcLedger, backend); -} - void DatabaseRotatingImp::sync() { diff --git a/src/xrpld/nodestore/detail/DatabaseRotatingImp.h b/src/xrpld/nodestore/detail/DatabaseRotatingImp.h index ec46fc687be..0c17dc59ceb 100644 --- a/src/xrpld/nodestore/detail/DatabaseRotatingImp.h +++ b/src/xrpld/nodestore/detail/DatabaseRotatingImp.h @@ -75,9 +75,6 @@ class DatabaseRotatingImp : public DatabaseRotating void sync() override; - bool - storeLedger(std::shared_ptr const& 
srcLedger) override; - void sweep() override; diff --git a/src/xrpld/nodestore/detail/DatabaseShardImp.cpp b/src/xrpld/nodestore/detail/DatabaseShardImp.cpp deleted file mode 100644 index c7e45641d7f..00000000000 --- a/src/xrpld/nodestore/detail/DatabaseShardImp.cpp +++ /dev/null @@ -1,2253 +0,0 @@ -//------------------------------------------------------------------------------ -/* - This file is part of rippled: https://github.com/ripple/rippled - Copyright (c) 2012, 2017 Ripple Labs Inc. - - Permission to use, copy, modify, and/or distribute this software for any - purpose with or without fee is hereby granted, provided that the above - copyright notice and this permission notice appear in all copies. - - THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. -*/ -//============================================================================== - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include - -#if BOOST_OS_LINUX -#include -#endif - -namespace ripple { - -namespace NodeStore { - -DatabaseShardImp::DatabaseShardImp( - Application& app, - Scheduler& scheduler, - int readThreads, - beast::Journal j) - : DatabaseShard( - scheduler, - readThreads, - app.config().section(ConfigSection::shardDatabase()), - j) - , app_(app) - , avgShardFileSz_(ledgersPerShard_ * kilobytes(192ull)) - , openFinalLimit_( - app.config().getValueFor(SizedItem::openFinalLimit, std::nullopt)) -{ - if (app.config().reporting()) - { - Throw( - "Attempted to create DatabaseShardImp in reporting mode. Reporting " - "does not support shards. 
Remove shards info from config"); - } -} - -bool -DatabaseShardImp::init() -{ - { - std::lock_guard lock(mutex_); - if (init_) - { - JLOG(j_.error()) << "already initialized"; - return false; - } - - if (!initConfig(lock)) - { - JLOG(j_.error()) << "invalid configuration file settings"; - return false; - } - - try - { - using namespace boost::filesystem; - - // Consolidate the main storage path and all historical paths - std::vector paths{dir_}; - paths.insert( - paths.end(), historicalPaths_.begin(), historicalPaths_.end()); - - for (auto const& path : paths) - { - if (exists(path)) - { - if (!is_directory(path)) - { - JLOG(j_.error()) << path << " must be a directory"; - return false; - } - } - else if (!create_directories(path)) - { - JLOG(j_.error()) - << "failed to create path: " + path.string(); - return false; - } - } - - if (!app_.config().standalone() && !historicalPaths_.empty()) - { - // Check historical paths for duplicated file systems - if (!checkHistoricalPaths(lock)) - return false; - } - - ctx_ = std::make_unique(); - ctx_->start(); - - // Find shards - std::uint32_t openFinals{0}; - for (auto const& path : paths) - { - for (auto const& it : directory_iterator(path)) - { - // Ignore files - if (!is_directory(it)) - continue; - - // Ignore nonnumerical directory names - auto const shardDir{it.path()}; - auto dirName{shardDir.stem().string()}; - if (!std::all_of( - dirName.begin(), dirName.end(), [](auto c) { - return ::isdigit(static_cast(c)); - })) - { - continue; - } - - // Ignore values below the earliest shard index - auto const shardIndex{std::stoul(dirName)}; - if (shardIndex < earliestShardIndex_) - { - JLOG(j_.debug()) - << "shard " << shardIndex - << " ignored, comes before earliest shard index " - << earliestShardIndex_; - continue; - } - - // Check if a previous database import failed - if (is_regular_file(shardDir / databaseImportMarker_)) - { - JLOG(j_.warn()) - << "shard " << shardIndex - << " previously failed database import, removing"; - remove_all(shardDir); - continue; - } - - auto shard{std::make_shared( - app_, *this, shardIndex, shardDir.parent_path(), j_)}; - if (!shard->init(scheduler_, *ctx_)) - { - // Remove corrupted or legacy shard - shard->removeOnDestroy(); - JLOG(j_.warn()) - << "shard " << shardIndex << " removed, " - << (shard->isLegacy() ? "legacy" : "corrupted") - << " shard"; - continue; - } - - switch (shard->getState()) - { - case ShardState::finalized: - if (++openFinals > openFinalLimit_) - shard->tryClose(); - shards_.emplace(shardIndex, std::move(shard)); - break; - - case ShardState::complete: - finalizeShard( - shards_.emplace(shardIndex, std::move(shard)) - .first->second, - true, - std::nullopt); - break; - - case ShardState::acquire: - if (acquireIndex_ != 0) - { - JLOG(j_.error()) - << "more than one shard being acquired"; - return false; - } - - shards_.emplace(shardIndex, std::move(shard)); - acquireIndex_ = shardIndex; - break; - - default: - JLOG(j_.error()) - << "shard " << shardIndex << " invalid state"; - return false; - } - } - } - } - catch (std::exception const& e) - { - JLOG(j_.fatal()) << "Exception caught in function " << __func__ - << ". 
Error: " << e.what(); - return false; - } - - init_ = true; - } - - updateFileStats(); - return true; -} - -std::optional -DatabaseShardImp::prepareLedger(std::uint32_t validLedgerSeq) -{ - std::optional shardIndex; - - { - std::lock_guard lock(mutex_); - assert(init_); - - if (acquireIndex_ != 0) - { - if (auto const it{shards_.find(acquireIndex_)}; it != shards_.end()) - return it->second->prepare(); - - // Should never get here - assert(false); - return std::nullopt; - } - - if (!canAdd_) - return std::nullopt; - - shardIndex = findAcquireIndex(validLedgerSeq, lock); - } - - if (!shardIndex) - { - JLOG(j_.debug()) << "no new shards to add"; - { - std::lock_guard lock(mutex_); - canAdd_ = false; - } - return std::nullopt; - } - - auto const pathDesignation = [this, shardIndex = *shardIndex]() { - std::lock_guard lock(mutex_); - return prepareForNewShard(shardIndex, numHistoricalShards(lock), lock); - }(); - - if (!pathDesignation) - return std::nullopt; - - auto const needsHistoricalPath = - *pathDesignation == PathDesignation::historical; - - auto shard = [this, shardIndex, needsHistoricalPath] { - std::lock_guard lock(mutex_); - return std::make_unique( - app_, - *this, - *shardIndex, - (needsHistoricalPath ? chooseHistoricalPath(lock) : ""), - j_); - }(); - - if (!shard->init(scheduler_, *ctx_)) - return std::nullopt; - - auto const ledgerSeq{shard->prepare()}; - { - std::lock_guard lock(mutex_); - shards_.emplace(*shardIndex, std::move(shard)); - acquireIndex_ = *shardIndex; - updatePeers(lock); - } - - return ledgerSeq; -} - -bool -DatabaseShardImp::prepareShards(std::vector const& shardIndexes) -{ - auto fail = [j = j_, &shardIndexes]( - std::string const& msg, - std::optional shardIndex = std::nullopt) { - auto multipleIndexPrequel = [&shardIndexes] { - std::vector indexesAsString(shardIndexes.size()); - std::transform( - shardIndexes.begin(), - shardIndexes.end(), - indexesAsString.begin(), - [](uint32_t const index) { return std::to_string(index); }); - - return std::string("shard") + - (shardIndexes.size() > 1 ? "s " : " ") + - boost::algorithm::join(indexesAsString, ", "); - }; - - JLOG(j.error()) << (shardIndex ? "shard " + std::to_string(*shardIndex) - : multipleIndexPrequel()) - << " " << msg; - return false; - }; - - if (shardIndexes.empty()) - return fail("invalid shard indexes"); - - std::lock_guard lock(mutex_); - assert(init_); - - if (!canAdd_) - return fail("cannot be stored at this time"); - - auto historicalShardsToPrepare = 0; - - for (auto const shardIndex : shardIndexes) - { - if (shardIndex < earliestShardIndex_) - { - return fail( - "comes before earliest shard index " + - std::to_string(earliestShardIndex_), - shardIndex); - } - - // If we are synced to the network, check if the shard index is - // greater or equal to the current or validated shard index. 
- auto seqCheck = [&](std::uint32_t ledgerSeq) { - if (ledgerSeq >= earliestLedgerSeq_ && - shardIndex >= seqToShardIndex(ledgerSeq)) - { - return fail("invalid index", shardIndex); - } - return true; - }; - if (!seqCheck(app_.getLedgerMaster().getValidLedgerIndex() + 1) || - !seqCheck(app_.getLedgerMaster().getCurrentLedgerIndex())) - { - return fail("invalid index", shardIndex); - } - - if (shards_.find(shardIndex) != shards_.end()) - return fail("is already stored", shardIndex); - - if (preparedIndexes_.find(shardIndex) != preparedIndexes_.end()) - return fail( - "is already queued for import from the shard archive handler", - shardIndex); - - if (databaseImportStatus_) - { - if (auto shard = databaseImportStatus_->currentShard.lock(); shard) - { - if (shard->index() == shardIndex) - return fail( - "is being imported from the nodestore", shardIndex); - } - } - - // Any shard earlier than the two most recent shards - // is a historical shard - if (shardIndex < shardBoundaryIndex()) - ++historicalShardsToPrepare; - } - - auto const numHistShards = numHistoricalShards(lock); - - // Check shard count and available storage space - if (numHistShards + historicalShardsToPrepare > maxHistoricalShards_) - return fail("maximum number of historical shards reached"); - - if (historicalShardsToPrepare) - { - // Check available storage space for historical shards - if (!sufficientStorage( - historicalShardsToPrepare, PathDesignation::historical, lock)) - return fail("insufficient storage space available"); - } - - if (auto const recentShardsToPrepare = - shardIndexes.size() - historicalShardsToPrepare; - recentShardsToPrepare) - { - // Check available storage space for recent shards - if (!sufficientStorage( - recentShardsToPrepare, PathDesignation::none, lock)) - return fail("insufficient storage space available"); - } - - for (auto const shardIndex : shardIndexes) - preparedIndexes_.emplace(shardIndex); - - updatePeers(lock); - return true; -} - -void -DatabaseShardImp::removePreShard(std::uint32_t shardIndex) -{ - std::lock_guard lock(mutex_); - assert(init_); - - if (preparedIndexes_.erase(shardIndex)) - updatePeers(lock); -} - -std::string -DatabaseShardImp::getPreShards() -{ - RangeSet rs; - { - std::lock_guard lock(mutex_); - assert(init_); - - for (auto const& shardIndex : preparedIndexes_) - rs.insert(shardIndex); - } - - if (rs.empty()) - return {}; - - return ripple::to_string(rs); -}; - -bool -DatabaseShardImp::importShard( - std::uint32_t shardIndex, - boost::filesystem::path const& srcDir) -{ - auto fail = [&](std::string const& msg, - std::lock_guard const& lock) { - JLOG(j_.error()) << "shard " << shardIndex << " " << msg; - - // Remove the failed import shard index so it can be retried - preparedIndexes_.erase(shardIndex); - updatePeers(lock); - return false; - }; - - using namespace boost::filesystem; - try - { - if (!is_directory(srcDir) || is_empty(srcDir)) - { - return fail( - "invalid source directory " + srcDir.string(), - std::lock_guard(mutex_)); - } - } - catch (std::exception const& e) - { - return fail( - std::string(". Exception caught in function ") + __func__ + - ". 
Error: " + e.what(), - std::lock_guard(mutex_)); - } - - auto const expectedHash{app_.getLedgerMaster().walkHashBySeq( - lastLedgerSeq(shardIndex), InboundLedger::Reason::GENERIC)}; - if (!expectedHash) - return fail("expected hash not found", std::lock_guard(mutex_)); - - path dstDir; - { - std::lock_guard lock(mutex_); - if (shards_.find(shardIndex) != shards_.end()) - return fail("already exists", lock); - - // Check shard was prepared for import - if (preparedIndexes_.find(shardIndex) == preparedIndexes_.end()) - return fail("was not prepared for import", lock); - - auto const pathDesignation{ - prepareForNewShard(shardIndex, numHistoricalShards(lock), lock)}; - if (!pathDesignation) - return fail("failed to import", lock); - - if (*pathDesignation == PathDesignation::historical) - dstDir = chooseHistoricalPath(lock); - else - dstDir = dir_; - } - dstDir /= std::to_string(shardIndex); - - auto renameDir = [&, fname = __func__](path const& src, path const& dst) { - try - { - rename(src, dst); - } - catch (std::exception const& e) - { - return fail( - std::string(". Exception caught in function ") + fname + - ". Error: " + e.what(), - std::lock_guard(mutex_)); - } - return true; - }; - - // Rename source directory to the shard database directory - if (!renameDir(srcDir, dstDir)) - return false; - - // Create the new shard - auto shard{std::make_unique( - app_, *this, shardIndex, dstDir.parent_path(), j_)}; - - if (!shard->init(scheduler_, *ctx_) || - shard->getState() != ShardState::complete) - { - shard.reset(); - renameDir(dstDir, srcDir); - return fail("failed to import", std::lock_guard(mutex_)); - } - - auto const [it, inserted] = [&]() { - std::lock_guard lock(mutex_); - preparedIndexes_.erase(shardIndex); - return shards_.emplace(shardIndex, std::move(shard)); - }(); - - if (!inserted) - { - shard.reset(); - renameDir(dstDir, srcDir); - return fail("failed to import", std::lock_guard(mutex_)); - } - - finalizeShard(it->second, true, expectedHash); - return true; -} - -std::shared_ptr -DatabaseShardImp::fetchLedger(uint256 const& hash, std::uint32_t ledgerSeq) -{ - auto const shardIndex{seqToShardIndex(ledgerSeq)}; - { - std::shared_ptr shard; - { - std::lock_guard lock(mutex_); - assert(init_); - - auto const it{shards_.find(shardIndex)}; - if (it == shards_.end()) - return nullptr; - shard = it->second; - } - - // Ledger must be stored in a final or acquiring shard - switch (shard->getState()) - { - case ShardState::finalized: - break; - case ShardState::acquire: - if (shard->containsLedger(ledgerSeq)) - break; - [[fallthrough]]; - default: - return nullptr; - } - } - - auto const nodeObject{Database::fetchNodeObject(hash, ledgerSeq)}; - if (!nodeObject) - return nullptr; - - auto fail = [&](std::string const& msg) -> std::shared_ptr { - JLOG(j_.error()) << "shard " << shardIndex << " " << msg; - return nullptr; - }; - - auto ledger{std::make_shared( - deserializePrefixedHeader(makeSlice(nodeObject->getData())), - app_.config(), - *app_.getShardFamily())}; - - if (ledger->info().seq != ledgerSeq) - { - return fail( - "encountered invalid ledger sequence " + std::to_string(ledgerSeq)); - } - if (ledger->info().hash != hash) - { - return fail( - "encountered invalid ledger hash " + to_string(hash) + - " on sequence " + std::to_string(ledgerSeq)); - } - - ledger->setFull(); - if (!ledger->stateMap().fetchRoot( - SHAMapHash{ledger->info().accountHash}, nullptr)) - { - return fail( - "is missing root STATE node on hash " + to_string(hash) + - " on sequence " + 
std::to_string(ledgerSeq)); - } - - if (ledger->info().txHash.isNonZero()) - { - if (!ledger->txMap().fetchRoot( - SHAMapHash{ledger->info().txHash}, nullptr)) - { - return fail( - "is missing root TXN node on hash " + to_string(hash) + - " on sequence " + std::to_string(ledgerSeq)); - } - } - return ledger; -} - -void -DatabaseShardImp::setStored(std::shared_ptr const& ledger) -{ - auto const ledgerSeq{ledger->info().seq}; - if (ledger->info().hash.isZero()) - { - JLOG(j_.error()) << "zero ledger hash for ledger sequence " - << ledgerSeq; - return; - } - if (ledger->info().accountHash.isZero()) - { - JLOG(j_.error()) << "zero account hash for ledger sequence " - << ledgerSeq; - return; - } - if (ledger->stateMap().getHash().isNonZero() && - !ledger->stateMap().isValid()) - { - JLOG(j_.error()) << "invalid state map for ledger sequence " - << ledgerSeq; - return; - } - if (ledger->info().txHash.isNonZero() && !ledger->txMap().isValid()) - { - JLOG(j_.error()) << "invalid transaction map for ledger sequence " - << ledgerSeq; - return; - } - - auto const shardIndex{seqToShardIndex(ledgerSeq)}; - std::shared_ptr shard; - { - std::lock_guard lock(mutex_); - assert(init_); - - if (shardIndex != acquireIndex_) - { - JLOG(j_.trace()) - << "shard " << shardIndex << " is not being acquired"; - return; - } - - auto const it{shards_.find(shardIndex)}; - if (it == shards_.end()) - { - JLOG(j_.error()) - << "shard " << shardIndex << " is not being acquired"; - return; - } - shard = it->second; - } - - if (shard->containsLedger(ledgerSeq)) - { - JLOG(j_.trace()) << "shard " << shardIndex << " ledger already stored"; - return; - } - - setStoredInShard(shard, ledger); -} - -std::unique_ptr -DatabaseShardImp::getShardInfo() const -{ - std::lock_guard lock(mutex_); - return getShardInfo(lock); -} - -void -DatabaseShardImp::stop() -{ - // Stop read threads in base before data members are destroyed - Database::stop(); - std::vector> shards; - { - std::lock_guard lock(mutex_); - shards.reserve(shards_.size()); - for (auto const& [_, shard] : shards_) - { - shards.push_back(shard); - shard->stop(); - } - shards_.clear(); - } - taskQueue_.stop(); - - // All shards should be expired at this point - for (auto const& wptr : shards) - { - if (auto const shard{wptr.lock()}) - { - JLOG(j_.warn()) << " shard " << shard->index() << " unexpired"; - } - } - - std::unique_lock lock(mutex_); - - // Notify the shard being imported - // from the node store to stop - if (databaseImportStatus_) - { - // A node store import is in progress - if (auto importShard = databaseImportStatus_->currentShard.lock(); - importShard) - importShard->stop(); - } - - // Wait for the node store import thread - // if necessary - if (databaseImporter_.joinable()) - { - // Tells the import function to halt - haltDatabaseImport_ = true; - - // Wait for the function to exit - while (databaseImportStatus_) - { - // Unlock just in case the import - // function is waiting on the mutex - lock.unlock(); - - std::this_thread::sleep_for(std::chrono::milliseconds(100)); - lock.lock(); - } - - // Calling join while holding the mutex_ without - // first making sure that doImportDatabase has - // exited could lead to deadlock via the mutex - // acquisition that occurs in that function - if (databaseImporter_.joinable()) - databaseImporter_.join(); - } -} - -void -DatabaseShardImp::importDatabase(Database& source) -{ - std::lock_guard lock(mutex_); - assert(init_); - - // Only the application local node store can be imported - assert(&source == 
&app_.getNodeStore()); - - if (databaseImporter_.joinable()) - { - assert(false); - JLOG(j_.error()) << "database import already in progress"; - return; - } - - startDatabaseImportThread(lock); -} - -void -DatabaseShardImp::doImportDatabase() -{ - auto shouldHalt = [this] { - bool expected = true; - return haltDatabaseImport_.compare_exchange_strong(expected, false) || - isStopping(); - }; - - if (shouldHalt()) - return; - - auto loadLedger = - [this](char const* const sortOrder) -> std::optional { - std::shared_ptr ledger; - std::uint32_t ledgerSeq{0}; - std::optional info; - if (sortOrder == std::string("asc")) - { - info = dynamic_cast(&app_.getRelationalDatabase()) - ->getLimitedOldestLedgerInfo(earliestLedgerSeq()); - } - else - { - info = dynamic_cast(&app_.getRelationalDatabase()) - ->getLimitedNewestLedgerInfo(earliestLedgerSeq()); - } - if (info) - { - ledger = loadLedgerHelper(*info, app_, false); - ledgerSeq = info->seq; - } - if (!ledger || ledgerSeq == 0) - { - JLOG(j_.error()) << "no suitable ledgers were found in" - " the SQLite database to import"; - return std::nullopt; - } - return ledgerSeq; - }; - - // Find earliest ledger sequence stored - auto const earliestLedgerSeq{loadLedger("asc")}; - if (!earliestLedgerSeq) - return; - - auto const earliestIndex = [&] { - auto earliestIndex = seqToShardIndex(*earliestLedgerSeq); - - // Consider only complete shards - if (earliestLedgerSeq != firstLedgerSeq(earliestIndex)) - ++earliestIndex; - - return earliestIndex; - }(); - - // Find last ledger sequence stored - auto const latestLedgerSeq = loadLedger("desc"); - if (!latestLedgerSeq) - return; - - auto const latestIndex = [&] { - auto latestIndex = seqToShardIndex(*latestLedgerSeq); - - // Consider only complete shards - if (latestLedgerSeq != lastLedgerSeq(latestIndex)) - --latestIndex; - - return latestIndex; - }(); - - if (latestIndex < earliestIndex) - { - JLOG(j_.error()) << "no suitable ledgers were found in" - " the SQLite database to import"; - return; - } - - JLOG(j_.debug()) << "Importing ledgers for shards " << earliestIndex - << " through " << latestIndex; - - { - std::lock_guard lock(mutex_); - - assert(!databaseImportStatus_); - databaseImportStatus_ = std::make_unique( - earliestIndex, latestIndex, 0); - } - - // Import the shards - for (std::uint32_t shardIndex = earliestIndex; shardIndex <= latestIndex; - ++shardIndex) - { - if (shouldHalt()) - return; - - auto const pathDesignation = [this, shardIndex] { - std::lock_guard lock(mutex_); - - auto const numHistShards = numHistoricalShards(lock); - auto const pathDesignation = - prepareForNewShard(shardIndex, numHistShards, lock); - - return pathDesignation; - }(); - - if (!pathDesignation) - break; - - { - std::lock_guard lock(mutex_); - - // Skip if being acquired - if (shardIndex == acquireIndex_) - { - JLOG(j_.debug()) - << "shard " << shardIndex << " already being acquired"; - continue; - } - - // Skip if being imported from the shard archive handler - if (preparedIndexes_.find(shardIndex) != preparedIndexes_.end()) - { - JLOG(j_.debug()) - << "shard " << shardIndex << " already being imported"; - continue; - } - - // Skip if stored - if (shards_.find(shardIndex) != shards_.end()) - { - JLOG(j_.debug()) << "shard " << shardIndex << " already stored"; - continue; - } - } - - std::uint32_t const firstSeq = firstLedgerSeq(shardIndex); - std::uint32_t const lastSeq = - std::max(firstSeq, lastLedgerSeq(shardIndex)); - - // Verify SQLite ledgers are in the node store - { - auto const ledgerHashes{ - 
app_.getRelationalDatabase().getHashesByIndex( - firstSeq, lastSeq)}; - if (ledgerHashes.size() != maxLedgers(shardIndex)) - continue; - - auto& source = app_.getNodeStore(); - bool valid{true}; - - for (std::uint32_t n = firstSeq; n <= lastSeq; ++n) - { - if (!source.fetchNodeObject(ledgerHashes.at(n).ledgerHash, n)) - { - JLOG(j_.warn()) << "SQLite ledger sequence " << n - << " mismatches node store"; - valid = false; - break; - } - } - if (!valid) - continue; - } - - if (shouldHalt()) - return; - - bool const needsHistoricalPath = - *pathDesignation == PathDesignation::historical; - - auto const path = needsHistoricalPath - ? chooseHistoricalPath(std::lock_guard(mutex_)) - : dir_; - - // Create the new shard - auto shard{std::make_shared(app_, *this, shardIndex, path, j_)}; - if (!shard->init(scheduler_, *ctx_)) - continue; - - { - std::lock_guard lock(mutex_); - - if (shouldHalt()) - return; - - databaseImportStatus_->currentIndex = shardIndex; - databaseImportStatus_->currentShard = shard; - databaseImportStatus_->firstSeq = firstSeq; - databaseImportStatus_->lastSeq = lastSeq; - } - - // Create a marker file to signify a database import in progress - auto const shardDir{path / std::to_string(shardIndex)}; - auto const markerFile{shardDir / databaseImportMarker_}; - { - std::ofstream ofs{markerFile.string()}; - if (!ofs.is_open()) - { - JLOG(j_.error()) << "shard " << shardIndex - << " failed to create temp marker file"; - shard->removeOnDestroy(); - continue; - } - } - - // Copy the ledgers from node store - std::shared_ptr recentStored; - std::optional lastLedgerHash; - - while (auto const ledgerSeq = shard->prepare()) - { - if (shouldHalt()) - return; - - // Not const so it may be moved later - auto ledger{loadByIndex(*ledgerSeq, app_, false)}; - if (!ledger || ledger->info().seq != ledgerSeq) - break; - - auto const result{shard->storeLedger(ledger, recentStored)}; - storeStats(result.count, result.size); - if (result.error) - break; - - if (!shard->setLedgerStored(ledger)) - break; - - if (!lastLedgerHash && ledgerSeq == lastSeq) - lastLedgerHash = ledger->info().hash; - - recentStored = std::move(ledger); - } - - if (shouldHalt()) - return; - - using namespace boost::filesystem; - bool success{false}; - if (lastLedgerHash && shard->getState() == ShardState::complete) - { - // Store shard final key - Serializer s; - s.add32(Shard::version); - s.add32(firstLedgerSeq(shardIndex)); - s.add32(lastLedgerSeq(shardIndex)); - s.addBitString(*lastLedgerHash); - auto const nodeObject{NodeObject::createObject( - hotUNKNOWN, std::move(s.modData()), Shard::finalKey)}; - - if (shard->storeNodeObject(nodeObject)) - { - try - { - std::lock_guard lock(mutex_); - - // The database import process is complete and the - // marker file is no longer required - remove_all(markerFile); - - JLOG(j_.debug()) << "shard " << shardIndex - << " was successfully imported" - " from the NodeStore"; - finalizeShard( - shards_.emplace(shardIndex, std::move(shard)) - .first->second, - true, - std::nullopt); - - // This variable is meant to capture the success - // of everything up to the point of shard finalization. - // If the shard fails to finalize, this condition will - // be handled by the finalization function itself, and - // not here. - success = true; - } - catch (std::exception const& e) - { - JLOG(j_.fatal()) << "shard index " << shardIndex - << ". Exception caught in function " - << __func__ << ". 
Error: " << e.what(); - } - } - } - - if (!success) - { - JLOG(j_.error()) << "shard " << shardIndex - << " failed to import from the NodeStore"; - - if (shard) - shard->removeOnDestroy(); - } - } - - if (shouldHalt()) - return; - - updateFileStats(); -} - -std::int32_t -DatabaseShardImp::getWriteLoad() const -{ - std::shared_ptr shard; - { - std::lock_guard lock(mutex_); - assert(init_); - - auto const it{shards_.find(acquireIndex_)}; - if (it == shards_.end()) - return 0; - shard = it->second; - } - - return shard->getWriteLoad(); -} - -void -DatabaseShardImp::store( - NodeObjectType type, - Blob&& data, - uint256 const& hash, - std::uint32_t ledgerSeq) -{ - auto const shardIndex{seqToShardIndex(ledgerSeq)}; - std::shared_ptr shard; - { - std::lock_guard lock(mutex_); - if (shardIndex != acquireIndex_) - { - JLOG(j_.trace()) - << "shard " << shardIndex << " is not being acquired"; - return; - } - - auto const it{shards_.find(shardIndex)}; - if (it == shards_.end()) - { - JLOG(j_.error()) - << "shard " << shardIndex << " is not being acquired"; - return; - } - shard = it->second; - } - - auto const nodeObject{ - NodeObject::createObject(type, std::move(data), hash)}; - if (shard->storeNodeObject(nodeObject)) - storeStats(1, nodeObject->getData().size()); -} - -bool -DatabaseShardImp::storeLedger(std::shared_ptr const& srcLedger) -{ - auto const ledgerSeq{srcLedger->info().seq}; - auto const shardIndex{seqToShardIndex(ledgerSeq)}; - std::shared_ptr shard; - { - std::lock_guard lock(mutex_); - assert(init_); - - if (shardIndex != acquireIndex_) - { - JLOG(j_.trace()) - << "shard " << shardIndex << " is not being acquired"; - return false; - } - - auto const it{shards_.find(shardIndex)}; - if (it == shards_.end()) - { - JLOG(j_.error()) - << "shard " << shardIndex << " is not being acquired"; - return false; - } - shard = it->second; - } - - auto const result{shard->storeLedger(srcLedger, nullptr)}; - storeStats(result.count, result.size); - if (result.error || result.count == 0 || result.size == 0) - return false; - - return setStoredInShard(shard, srcLedger); -} - -void -DatabaseShardImp::sweep() -{ - std::vector> shards; - { - std::lock_guard lock(mutex_); - assert(init_); - - shards.reserve(shards_.size()); - for (auto const& e : shards_) - shards.push_back(e.second); - } - - std::vector> openFinals; - openFinals.reserve(openFinalLimit_); - - for (auto const& weak : shards) - { - if (auto const shard{weak.lock()}; shard && shard->isOpen()) - { - if (shard->getState() == ShardState::finalized) - openFinals.emplace_back(std::move(shard)); - } - } - - if (openFinals.size() > openFinalLimit_) - { - JLOG(j_.trace()) << "Open shards exceed configured limit of " - << openFinalLimit_ << " by " - << (openFinals.size() - openFinalLimit_); - - // Try to close enough shards to be within the limit. - // Sort ascending on last use so the oldest are removed first. 
- std::sort( - openFinals.begin(), - openFinals.end(), - [&](std::shared_ptr const& lhsShard, - std::shared_ptr const& rhsShard) { - return lhsShard->getLastUse() < rhsShard->getLastUse(); - }); - - for (auto it{openFinals.cbegin()}; - it != openFinals.cend() && openFinals.size() > openFinalLimit_;) - { - if ((*it)->tryClose()) - it = openFinals.erase(it); - else - ++it; - } - } -} - -Json::Value -DatabaseShardImp::getDatabaseImportStatus() const -{ - if (std::lock_guard lock(mutex_); databaseImportStatus_) - { - Json::Value ret(Json::objectValue); - - ret[jss::firstShardIndex] = databaseImportStatus_->earliestIndex; - ret[jss::lastShardIndex] = databaseImportStatus_->latestIndex; - ret[jss::currentShardIndex] = databaseImportStatus_->currentIndex; - - Json::Value currentShard(Json::objectValue); - currentShard[jss::firstSequence] = databaseImportStatus_->firstSeq; - currentShard[jss::lastSequence] = databaseImportStatus_->lastSeq; - - if (auto shard = databaseImportStatus_->currentShard.lock(); shard) - currentShard[jss::storedSeqs] = shard->getStoredSeqs(); - - ret[jss::currentShard] = currentShard; - - if (haltDatabaseImport_) - ret[jss::message] = "Database import halt initiated..."; - - return ret; - } - - return RPC::make_error(rpcINTERNAL, "Database import not running"); -} - -Json::Value -DatabaseShardImp::startNodeToShard() -{ - std::lock_guard lock(mutex_); - - if (!init_) - return RPC::make_error(rpcINTERNAL, "Shard store not initialized"); - - if (databaseImporter_.joinable()) - return RPC::make_error( - rpcINTERNAL, "Database import already in progress"); - - if (isStopping()) - return RPC::make_error(rpcINTERNAL, "Node is shutting down"); - - startDatabaseImportThread(lock); - - Json::Value result(Json::objectValue); - result[jss::message] = "Database import initiated..."; - - return result; -} - -Json::Value -DatabaseShardImp::stopNodeToShard() -{ - std::lock_guard lock(mutex_); - - if (!init_) - return RPC::make_error(rpcINTERNAL, "Shard store not initialized"); - - if (!databaseImporter_.joinable()) - return RPC::make_error(rpcINTERNAL, "Database import not running"); - - if (isStopping()) - return RPC::make_error(rpcINTERNAL, "Node is shutting down"); - - haltDatabaseImport_ = true; - - Json::Value result(Json::objectValue); - result[jss::message] = "Database import halt initiated..."; - - return result; -} - -std::optional -DatabaseShardImp::getDatabaseImportSequence() const -{ - std::lock_guard lock(mutex_); - - if (!databaseImportStatus_) - return {}; - - return databaseImportStatus_->firstSeq; -} - -bool -DatabaseShardImp::initConfig(std::lock_guard const&) -{ - auto fail = [j = j_](std::string const& msg) { - JLOG(j.error()) << "[" << ConfigSection::shardDatabase() << "] " << msg; - return false; - }; - - Config const& config{app_.config()}; - Section const& section{config.section(ConfigSection::shardDatabase())}; - - auto compare = [&](std::string const& name, std::uint32_t defaultValue) { - std::uint32_t shardDBValue{defaultValue}; - get_if_exists(section, name, shardDBValue); - - std::uint32_t nodeDBValue{defaultValue}; - get_if_exists( - config.section(ConfigSection::nodeDatabase()), name, nodeDBValue); - - return shardDBValue == nodeDBValue; - }; - - // If ledgers_per_shard or earliest_seq are specified, - // they must be equally assigned in 'node_db' - if (!compare("ledgers_per_shard", DEFAULT_LEDGERS_PER_SHARD)) - { - return fail( - "and [" + ConfigSection::nodeDatabase() + "] define different '" + - "ledgers_per_shard" + "' values"); - } - if 
(!compare("earliest_seq", XRP_LEDGER_EARLIEST_SEQ)) - { - return fail( - "and [" + ConfigSection::nodeDatabase() + "] define different '" + - "earliest_seq" + "' values"); - } - - using namespace boost::filesystem; - if (!get_if_exists(section, "path", dir_)) - return fail("'path' missing"); - - { - get_if_exists(section, "max_historical_shards", maxHistoricalShards_); - - Section const& historicalShardPaths = - config.section(SECTION_HISTORICAL_SHARD_PATHS); - - auto values = historicalShardPaths.values(); - - std::sort(values.begin(), values.end()); - values.erase(std::unique(values.begin(), values.end()), values.end()); - - for (auto const& s : values) - { - auto const dir = path(s); - if (dir_ == dir) - { - return fail( - "the 'path' cannot also be in the " - "'historical_shard_path' section"); - } - - historicalPaths_.push_back(s); - } - } - - // NuDB is the default and only supported permanent storage backend - backendName_ = get(section, "type", "nudb"); - if (!boost::iequals(backendName_, "NuDB")) - return fail("'type' value unsupported"); - - return true; -} - -std::shared_ptr -DatabaseShardImp::fetchNodeObject( - uint256 const& hash, - std::uint32_t ledgerSeq, - FetchReport& fetchReport, - bool duplicate) -{ - auto const shardIndex{seqToShardIndex(ledgerSeq)}; - std::shared_ptr shard; - { - std::lock_guard lock(mutex_); - auto const it{shards_.find(shardIndex)}; - if (it == shards_.end()) - return nullptr; - shard = it->second; - } - - return shard->fetchNodeObject(hash, fetchReport); -} - -std::optional -DatabaseShardImp::findAcquireIndex( - std::uint32_t validLedgerSeq, - std::lock_guard const&) -{ - if (validLedgerSeq < earliestLedgerSeq_) - return std::nullopt; - - auto const maxShardIndex{[this, validLedgerSeq]() { - auto shardIndex{seqToShardIndex(validLedgerSeq)}; - if (validLedgerSeq != lastLedgerSeq(shardIndex)) - --shardIndex; - return shardIndex; - }()}; - auto const maxNumShards{maxShardIndex - earliestShardIndex_ + 1}; - - // Check if the shard store has all shards - if (shards_.size() >= maxNumShards) - return std::nullopt; - - if (maxShardIndex < 1024 || - static_cast(shards_.size()) / maxNumShards > 0.5f) - { - // Small or mostly full index space to sample - // Find the available indexes and select one at random - std::vector available; - available.reserve(maxNumShards - shards_.size()); - - for (auto shardIndex = earliestShardIndex_; shardIndex <= maxShardIndex; - ++shardIndex) - { - if (shards_.find(shardIndex) == shards_.end() && - preparedIndexes_.find(shardIndex) == preparedIndexes_.end()) - { - available.push_back(shardIndex); - } - } - - if (available.empty()) - return std::nullopt; - - if (available.size() == 1) - return available.front(); - - return available[rand_int( - 0u, static_cast(available.size() - 1))]; - } - - // Large, sparse index space to sample - // Keep choosing indexes at random until an available one is found - // chances of running more than 30 times is less than 1 in a billion - for (int i = 0; i < 40; ++i) - { - auto const shardIndex{rand_int(earliestShardIndex_, maxShardIndex)}; - if (shards_.find(shardIndex) == shards_.end() && - preparedIndexes_.find(shardIndex) == preparedIndexes_.end()) - { - return shardIndex; - } - } - - assert(false); - return std::nullopt; -} - -void -DatabaseShardImp::finalizeShard( - std::shared_ptr& shard, - bool const writeSQLite, - std::optional const& expectedHash) -{ - taskQueue_.addTask([this, - wptr = std::weak_ptr(shard), - writeSQLite, - expectedHash]() { - if (isStopping()) - return; - - auto 
shard{wptr.lock()}; - if (!shard) - { - JLOG(j_.debug()) << "Shard removed before being finalized"; - return; - } - - if (!shard->finalize(writeSQLite, expectedHash)) - { - if (isStopping()) - return; - - // Invalid or corrupt shard, remove it - removeFailedShard(shard); - return; - } - - if (isStopping()) - return; - - { - auto const boundaryIndex{shardBoundaryIndex()}; - std::lock_guard lock(mutex_); - - if (shard->index() < boundaryIndex) - { - // This is a historical shard - if (!historicalPaths_.empty() && - shard->getDir().parent_path() == dir_) - { - // Shard wasn't placed at a separate historical path - JLOG(j_.warn()) << "shard " << shard->index() - << " is not stored at a historical path"; - } - } - else - { - // Not a historical shard. Shift recent shards if necessary - assert(!boundaryIndex || shard->index() - boundaryIndex <= 1); - relocateOutdatedShards(lock); - - // Set the appropriate recent shard index - if (shard->index() == boundaryIndex) - secondLatestShardIndex_ = shard->index(); - else - latestShardIndex_ = shard->index(); - - if (shard->getDir().parent_path() != dir_) - { - JLOG(j_.warn()) << "shard " << shard->index() - << " is not stored at the path"; - } - } - - updatePeers(lock); - } - - updateFileStats(); - }); -} - -void -DatabaseShardImp::updateFileStats() -{ - std::vector> shards; - { - std::lock_guard lock(mutex_); - if (shards_.empty()) - return; - - shards.reserve(shards_.size()); - for (auto const& e : shards_) - shards.push_back(e.second); - } - - std::uint64_t sumSz{0}; - std::uint32_t sumFd{0}; - std::uint32_t numShards{0}; - for (auto const& weak : shards) - { - if (auto const shard{weak.lock()}; shard) - { - auto const [sz, fd] = shard->getFileInfo(); - sumSz += sz; - sumFd += fd; - ++numShards; - } - } - - std::lock_guard lock(mutex_); - fileSz_ = sumSz; - fdRequired_ = sumFd; - avgShardFileSz_ = (numShards == 0 ? fileSz_ : fileSz_ / numShards); - - if (!canAdd_) - return; - - if (auto const count = numHistoricalShards(lock); - count >= maxHistoricalShards_) - { - if (maxHistoricalShards_) - { - // In order to avoid excessive output, don't produce - // this warning if the server isn't configured to - // store historical shards. - JLOG(j_.warn()) << "maximum number of historical shards reached"; - } - - canAdd_ = false; - } - else if (!sufficientStorage( - maxHistoricalShards_ - count, - PathDesignation::historical, - lock)) - { - JLOG(j_.warn()) - << "maximum shard store size exceeds available storage space"; - - canAdd_ = false; - } -} - -bool -DatabaseShardImp::sufficientStorage( - std::uint32_t numShards, - PathDesignation pathDesignation, - std::lock_guard const&) const -{ - try - { - std::vector capacities; - - if (pathDesignation == PathDesignation::historical && - !historicalPaths_.empty()) - { - capacities.reserve(historicalPaths_.size()); - - for (auto const& path : historicalPaths_) - { - // Get the available storage for each historical path - auto const availableSpace = - boost::filesystem::space(path).available; - - capacities.push_back(availableSpace); - } - } - else - { - // Get the available storage for the main shard path - capacities.push_back(boost::filesystem::space(dir_).available); - } - - for (std::uint64_t const capacity : capacities) - { - // Leverage all the historical shard paths to - // see if collectively they can fit the specified - // number of shards. For this to work properly, - // each historical path must correspond to a separate - // physical device or filesystem. 
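-            // For example, with an average shard file size of 4 GB and
-            // two historical paths having 10 GB and 6 GB available,
-            // shardCap evaluates to 2 and then 1, so a request for up
-            // to 3 historical shards succeeds while 4 or more fail.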
-
-            auto const shardCap = capacity / avgShardFileSz_;
-            if (numShards <= shardCap)
-                return true;
-
-            numShards -= shardCap;
-        }
-    }
-    catch (std::exception const& e)
-    {
-        JLOG(j_.fatal()) << "Exception caught in function " << __func__
-                         << ". Error: " << e.what();
-        return false;
-    }
-
-    return false;
-}
-
-bool
-DatabaseShardImp::setStoredInShard(
-    std::shared_ptr<Shard>& shard,
-    std::shared_ptr<Ledger const> const& ledger)
-{
-    if (!shard->setLedgerStored(ledger))
-    {
-        // Invalid or corrupt shard, remove it
-        removeFailedShard(shard);
-        return false;
-    }
-
-    if (shard->getState() == ShardState::complete)
-    {
-        std::lock_guard lock(mutex_);
-        if (auto const it{shards_.find(shard->index())}; it != shards_.end())
-        {
-            if (shard->index() == acquireIndex_)
-                acquireIndex_ = 0;
-
-            finalizeShard(it->second, false, std::nullopt);
-        }
-        else
-        {
-            JLOG(j_.debug())
-                << "shard " << shard->index() << " is no longer being acquired";
-        }
-    }
-
-    updateFileStats();
-    return true;
-}
-
-void
-DatabaseShardImp::removeFailedShard(std::shared_ptr<Shard>& shard)
-{
-    {
-        std::lock_guard lock(mutex_);
-
-        if (shard->index() == acquireIndex_)
-            acquireIndex_ = 0;
-
-        if (shard->index() == latestShardIndex_)
-            latestShardIndex_ = std::nullopt;
-
-        if (shard->index() == secondLatestShardIndex_)
-            secondLatestShardIndex_ = std::nullopt;
-    }
-
-    shard->removeOnDestroy();
-
-    // Reset the shared_ptr to invoke the shard's
-    // destructor and remove it from the server
-    shard.reset();
-    updateFileStats();
-}
-
-std::uint32_t
-DatabaseShardImp::shardBoundaryIndex() const
-{
-    auto const validIndex = app_.getLedgerMaster().getValidLedgerIndex();
-
-    if (validIndex < earliestLedgerSeq_)
-        return 0;
-
-    // Shards with an index earlier than the recent shard boundary index
-    // are considered historical. The three shards at or later than
-    // this index consist of the two most recently validated shards
-    // and the shard still in the process of being built by live
-    // transactions.
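-    // For example, if the last validated ledger falls in shard index 61,
-    // this returns 60, so shards with indexes below 60 are treated as
-    // historical while 60 and 61 remain recent.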
- return seqToShardIndex(validIndex) - 1; -} - -std::uint32_t -DatabaseShardImp::numHistoricalShards( - std::lock_guard const& lock) const -{ - auto const boundaryIndex{shardBoundaryIndex()}; - return std::count_if( - shards_.begin(), shards_.end(), [boundaryIndex](auto const& entry) { - return entry.first < boundaryIndex; - }); -} - -void -DatabaseShardImp::relocateOutdatedShards( - std::lock_guard const& lock) -{ - auto& cur{latestShardIndex_}; - auto& prev{secondLatestShardIndex_}; - if (!cur && !prev) - return; - - auto const latestShardIndex = - seqToShardIndex(app_.getLedgerMaster().getValidLedgerIndex()); - auto const separateHistoricalPath = !historicalPaths_.empty(); - - auto const removeShard = [this](std::uint32_t const shardIndex) -> void { - canAdd_ = false; - - if (auto it = shards_.find(shardIndex); it != shards_.end()) - { - if (it->second) - removeFailedShard(it->second); - else - { - JLOG(j_.warn()) << "can't find shard to remove"; - } - } - else - { - JLOG(j_.warn()) << "can't find shard to remove"; - } - }; - - auto const keepShard = [this, &lock, removeShard, separateHistoricalPath]( - std::uint32_t const shardIndex) -> bool { - if (numHistoricalShards(lock) >= maxHistoricalShards_) - { - JLOG(j_.error()) << "maximum number of historical shards reached"; - removeShard(shardIndex); - return false; - } - if (separateHistoricalPath && - !sufficientStorage(1, PathDesignation::historical, lock)) - { - JLOG(j_.error()) << "insufficient storage space available"; - removeShard(shardIndex); - return false; - } - - return true; - }; - - // Move a shard from the main shard path to a historical shard - // path by copying the contents, and creating a new shard. - auto const moveShard = [this, - &lock](std::uint32_t const shardIndex) -> void { - auto it{shards_.find(shardIndex)}; - if (it == shards_.end()) - { - JLOG(j_.warn()) << "can't find shard to move to historical path"; - return; - } - - auto& shard{it->second}; - - // Close any open file descriptors before moving the shard - // directory. Don't call removeOnDestroy since that would - // attempt to close the fds after the directory has been moved. - if (!shard->tryClose()) - { - JLOG(j_.warn()) << "can't close shard to move to historical path"; - return; - } - - auto const dst{chooseHistoricalPath(lock)}; - try - { - // Move the shard directory to the new path - boost::filesystem::rename( - shard->getDir().string(), dst / std::to_string(shardIndex)); - } - catch (...) - { - JLOG(j_.error()) << "shard " << shardIndex - << " failed to move to historical storage"; - return; - } - - // Create a shard instance at the new location - shard = std::make_shared(app_, *this, shardIndex, dst, j_); - - // Open the new shard - if (!shard->init(scheduler_, *ctx_)) - { - JLOG(j_.error()) << "shard " << shardIndex - << " failed to open in historical storage"; - shard->removeOnDestroy(); - shard.reset(); - } - }; - - // See if either of the recent shards needs to be updated - bool const curNotSynched = - latestShardIndex_ && *latestShardIndex_ != latestShardIndex; - bool const prevNotSynched = secondLatestShardIndex_ && - *secondLatestShardIndex_ != latestShardIndex - 1; - - // A new shard has been published. 
Move outdated - // shards to historical storage as needed - if (curNotSynched || prevNotSynched) - { - if (prev) - { - // Move the formerly second latest shard to historical storage - if (keepShard(*prev) && separateHistoricalPath) - moveShard(*prev); - - prev = std::nullopt; - } - - if (cur) - { - // The formerly latest shard is now the second latest - if (cur == latestShardIndex - 1) - prev = cur; - - // The formerly latest shard is no longer a 'recent' shard - else - { - // Move the formerly latest shard to historical storage - if (keepShard(*cur) && separateHistoricalPath) - moveShard(*cur); - } - - cur = std::nullopt; - } - } -} - -auto -DatabaseShardImp::prepareForNewShard( - std::uint32_t shardIndex, - std::uint32_t numHistoricalShards, - std::lock_guard const& lock) -> std::optional -{ - // Any shard earlier than the two most recent shards is a historical shard - auto const boundaryIndex{shardBoundaryIndex()}; - auto const isHistoricalShard = shardIndex < boundaryIndex; - - auto const designation = isHistoricalShard && !historicalPaths_.empty() - ? PathDesignation::historical - : PathDesignation::none; - - // Check shard count and available storage space - if (isHistoricalShard && numHistoricalShards >= maxHistoricalShards_) - { - JLOG(j_.error()) << "maximum number of historical shards reached"; - canAdd_ = false; - return std::nullopt; - } - if (!sufficientStorage(1, designation, lock)) - { - JLOG(j_.error()) << "insufficient storage space available"; - canAdd_ = false; - return std::nullopt; - } - - return designation; -} - -boost::filesystem::path -DatabaseShardImp::chooseHistoricalPath(std::lock_guard const&) const -{ - // If not configured with separate historical paths, - // use the main path (dir_) by default. - if (historicalPaths_.empty()) - return dir_; - - boost::filesystem::path historicalShardPath; - std::vector potentialPaths; - - for (boost::filesystem::path const& path : historicalPaths_) - { - if (boost::filesystem::space(path).available >= avgShardFileSz_) - potentialPaths.push_back(path); - } - - if (potentialPaths.empty()) - { - JLOG(j_.error()) << "failed to select a historical shard path"; - return ""; - } - - std::sample( - potentialPaths.begin(), - potentialPaths.end(), - &historicalShardPath, - 1, - default_prng()); - - return historicalShardPath; -} - -bool -DatabaseShardImp::checkHistoricalPaths(std::lock_guard const&) const -{ -#if BOOST_OS_LINUX - // Each historical shard path must correspond - // to a directory on a distinct device or file system. - // Currently, this constraint is enforced only on Linux. - std::unordered_map> filesystemIDs( - historicalPaths_.size()); - - for (auto const& path : historicalPaths_) - { - struct statvfs buffer; - if (statvfs(path.c_str(), &buffer)) - { - JLOG(j_.error()) - << "failed to acquire stats for 'historical_shard_path': " - << path; - return false; - } - - filesystemIDs[buffer.f_fsid].push_back(path.string()); - } - - bool ret = true; - for (auto const& entry : filesystemIDs) - { - // Check to see if any of the paths are stored on the same file system - if (entry.second.size() > 1) - { - // Two or more historical storage paths - // correspond to the same file system. - JLOG(j_.error()) - << "The following paths correspond to the same filesystem: " - << boost::algorithm::join(entry.second, ", ") - << ". 
Each configured historical storage path should" - " be on a unique device or filesystem."; - - ret = false; - } - } - - return ret; - -#else - // The requirement that each historical storage path - // corresponds to a distinct device or file system is - // enforced only on Linux, so on other platforms - // keep track of the available capacities for each - // path. Issue a warning if we suspect any of the paths - // may violate this requirement. - - // Map byte counts to each path that shares that byte count. - std::unordered_map> - uniqueCapacities(historicalPaths_.size()); - - for (auto const& path : historicalPaths_) - uniqueCapacities[boost::filesystem::space(path).available].push_back( - path.string()); - - for (auto const& entry : uniqueCapacities) - { - // Check to see if any paths have the same amount of available bytes. - if (entry.second.size() > 1) - { - // Two or more historical storage paths may - // correspond to the same device or file system. - JLOG(j_.warn()) - << "Each of the following paths have " << entry.first - << " bytes free, and may be located on the same device" - " or file system: " - << boost::algorithm::join(entry.second, ", ") - << ". Each configured historical storage path should" - " be on a unique device or file system."; - } - } -#endif - - return true; -} - -bool -DatabaseShardImp::callForLedgerSQLByLedgerSeq( - LedgerIndex ledgerSeq, - std::function const& callback) -{ - if (ledgerSeq < earliestLedgerSeq_) - { - JLOG(j_.warn()) << "callForLedgerSQLByLedgerSeq ledger seq too early: " - << ledgerSeq; - return false; - } - - return callForLedgerSQLByShardIndex(seqToShardIndex(ledgerSeq), callback); -} - -bool -DatabaseShardImp::callForLedgerSQLByShardIndex( - const uint32_t shardIndex, - std::function const& callback) -{ - std::lock_guard lock(mutex_); - - auto const it{shards_.find(shardIndex)}; - - return it != shards_.end() && - it->second->getState() == ShardState::finalized && - it->second->callForLedgerSQL(callback); -} - -bool -DatabaseShardImp::callForTransactionSQLByLedgerSeq( - LedgerIndex ledgerSeq, - std::function const& callback) -{ - return callForTransactionSQLByShardIndex( - seqToShardIndex(ledgerSeq), callback); -} - -bool -DatabaseShardImp::callForTransactionSQLByShardIndex( - std::uint32_t const shardIndex, - std::function const& callback) -{ - std::lock_guard lock(mutex_); - - auto const it{shards_.find(shardIndex)}; - - return it != shards_.end() && - it->second->getState() == ShardState::finalized && - it->second->callForTransactionSQL(callback); -} - -bool -DatabaseShardImp::iterateShardsForward( - std::optional minShardIndex, - std::function const& visit) -{ - std::lock_guard lock(mutex_); - - std::map>::iterator it, eit; - - if (!minShardIndex) - it = shards_.begin(); - else - it = shards_.lower_bound(*minShardIndex); - - eit = shards_.end(); - - for (; it != eit; it++) - { - if (it->second->getState() == ShardState::finalized) - { - if (!visit(*it->second)) - return false; - } - } - - return true; -} - -bool -DatabaseShardImp::iterateLedgerSQLsForward( - std::optional minShardIndex, - std::function const& - callback) -{ - return iterateShardsForward( - minShardIndex, [&callback](Shard& shard) -> bool { - return shard.callForLedgerSQL(callback); - }); -} - -bool -DatabaseShardImp::iterateTransactionSQLsForward( - std::optional minShardIndex, - std::function const& - callback) -{ - return iterateShardsForward( - minShardIndex, [&callback](Shard& shard) -> bool { - return shard.callForTransactionSQL(callback); - }); -} - -bool 
-DatabaseShardImp::iterateShardsBack(
-    std::optional<std::uint32_t> maxShardIndex,
-    std::function<bool(Shard& shard)> const& visit)
-{
-    std::lock_guard lock(mutex_);
-
-    std::map<std::uint32_t, std::shared_ptr<Shard>>::reverse_iterator it, eit;
-
-    if (!maxShardIndex)
-        it = shards_.rbegin();
-    else
-        it = std::make_reverse_iterator(shards_.upper_bound(*maxShardIndex));
-
-    eit = shards_.rend();
-
-    for (; it != eit; it++)
-    {
-        if (it->second->getState() == ShardState::finalized &&
-            (!maxShardIndex || it->first <= *maxShardIndex))
-        {
-            if (!visit(*it->second))
-                return false;
-        }
-    }
-
-    return true;
-}
-
-bool
-DatabaseShardImp::iterateLedgerSQLsBack(
-    std::optional<std::uint32_t> maxShardIndex,
-    std::function<bool(soci::session& session, std::uint32_t shardIndex)> const&
-        callback)
-{
-    return iterateShardsBack(maxShardIndex, [&callback](Shard& shard) -> bool {
-        return shard.callForLedgerSQL(callback);
-    });
-}
-
-bool
-DatabaseShardImp::iterateTransactionSQLsBack(
-    std::optional<std::uint32_t> maxShardIndex,
-    std::function<bool(soci::session& session, std::uint32_t shardIndex)> const&
-        callback)
-{
-    return iterateShardsBack(maxShardIndex, [&callback](Shard& shard) -> bool {
-        return shard.callForTransactionSQL(callback);
-    });
-}
-
-std::unique_ptr<ShardInfo>
-DatabaseShardImp::getShardInfo(std::lock_guard<std::mutex> const&) const
-{
-    auto shardInfo{std::make_unique<ShardInfo>()};
-    for (auto const& [_, shard] : shards_)
-    {
-        shardInfo->update(
-            shard->index(), shard->getState(), shard->getPercentProgress());
-    }
-
-    for (auto const shardIndex : preparedIndexes_)
-        shardInfo->update(shardIndex, ShardState::queued, 0);
-
-    return shardInfo;
-}
-
-size_t
-DatabaseShardImp::getNumTasks() const
-{
-    std::lock_guard lock(mutex_);
-    return taskQueue_.size();
-}
-
-void
-DatabaseShardImp::updatePeers(std::lock_guard<std::mutex> const& lock) const
-{
-    if (!app_.config().standalone() &&
-        app_.getOPs().getOperatingMode() != OperatingMode::DISCONNECTED)
-    {
-        auto const message{getShardInfo(lock)->makeMessage(app_)};
-        app_.overlay().foreach(send_always(std::make_shared<Message>(
-            message, protocol::mtPEER_SHARD_INFO_V2)));
-    }
-}
-
-void
-DatabaseShardImp::startDatabaseImportThread(std::lock_guard<std::mutex> const&)
-{
-    // Run the lengthy node store import process in the background
-    // on a dedicated thread.
-    databaseImporter_ = std::thread([this] {
-        doImportDatabase();
-
-        std::lock_guard lock(mutex_);
-
-        // Make sure to clear this in case the import
-        // exited early.
-        databaseImportStatus_.reset();
-
-        // Detach the thread so subsequent attempts
-        // to start the import won't get held up by
-        // the old thread of execution
-        databaseImporter_.detach();
-    });
-}
-
-//------------------------------------------------------------------------------
-
-std::unique_ptr<DatabaseShard>
-make_ShardStore(
-    Application& app,
-    Scheduler& scheduler,
-    int readThreads,
-    beast::Journal j)
-{
-    // The shard store is optional. Future changes will require it.
-    Section const& section{
-        app.config().section(ConfigSection::shardDatabase())};
-    if (section.empty())
-        return nullptr;
-
-    return std::make_unique<DatabaseShardImp>(app, scheduler, readThreads, j);
-}
-
-} // namespace NodeStore
-} // namespace ripple
diff --git a/src/xrpld/nodestore/detail/DatabaseShardImp.h b/src/xrpld/nodestore/detail/DatabaseShardImp.h
deleted file mode 100644
index df740cf407c..00000000000
--- a/src/xrpld/nodestore/detail/DatabaseShardImp.h
+++ /dev/null
@@ -1,429 +0,0 @@
-//------------------------------------------------------------------------------
-/*
-    This file is part of rippled: https://github.com/ripple/rippled
-    Copyright (c) 2012, 2017 Ripple Labs Inc.
- - Permission to use, copy, modify, and/or distribute this software for any - purpose with or without fee is hereby granted, provided that the above - copyright notice and this permission notice appear in all copies. - - THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. -*/ -//============================================================================== - -#ifndef RIPPLE_NODESTORE_DATABASESHARDIMP_H_INCLUDED -#define RIPPLE_NODESTORE_DATABASESHARDIMP_H_INCLUDED - -#include -#include -#include - -#include - -namespace ripple { -namespace NodeStore { - -class DatabaseShardImp : public DatabaseShard -{ -public: - DatabaseShardImp() = delete; - DatabaseShardImp(DatabaseShardImp const&) = delete; - DatabaseShardImp(DatabaseShardImp&&) = delete; - DatabaseShardImp& - operator=(DatabaseShardImp const&) = delete; - DatabaseShardImp& - operator=(DatabaseShardImp&&) = delete; - - DatabaseShardImp( - Application& app, - Scheduler& scheduler, - int readThreads, - beast::Journal j); - - ~DatabaseShardImp() - { - stop(); - } - - [[nodiscard]] bool - init() override; - - std::optional - prepareLedger(std::uint32_t validLedgerSeq) override; - - bool - prepareShards(std::vector const& shardIndexes) override; - - void - removePreShard(std::uint32_t shardIndex) override; - - std::string - getPreShards() override; - - bool - importShard(std::uint32_t shardIndex, boost::filesystem::path const& srcDir) - override; - - std::shared_ptr - fetchLedger(uint256 const& hash, std::uint32_t ledgerSeq) override; - - void - setStored(std::shared_ptr const& ledger) override; - - std::unique_ptr - getShardInfo() const override; - - size_t - getNumTasks() const override; - - boost::filesystem::path const& - getRootDir() const override - { - return dir_; - } - - std::string - getName() const override - { - return backendName_; - } - - void - stop() override; - - /** Import the application local node store - - @param source The application node store. 
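-
-        @note Only one import may run at a time. If an import thread
-        is already active, the call logs an error and returns without
-        starting another.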
- */ - void - importDatabase(Database& source) override; - - void - doImportDatabase(); - - std::int32_t - getWriteLoad() const override; - - bool - isSameDB(std::uint32_t s1, std::uint32_t s2) override - { - return seqToShardIndex(s1) == seqToShardIndex(s2); - } - - void - store( - NodeObjectType type, - Blob&& data, - uint256 const& hash, - std::uint32_t ledgerSeq) override; - - void - sync() override{}; - - bool - storeLedger(std::shared_ptr const& srcLedger) override; - - void - sweep() override; - - Json::Value - getDatabaseImportStatus() const override; - - Json::Value - startNodeToShard() override; - - Json::Value - stopNodeToShard() override; - - std::optional - getDatabaseImportSequence() const override; - - bool - callForLedgerSQLByLedgerSeq( - LedgerIndex ledgerSeq, - std::function const& callback) override; - - bool - callForLedgerSQLByShardIndex( - std::uint32_t const shardIndex, - std::function const& callback) override; - - bool - callForTransactionSQLByLedgerSeq( - LedgerIndex ledgerSeq, - std::function const& callback) override; - - bool - callForTransactionSQLByShardIndex( - std::uint32_t const shardIndex, - std::function const& callback) override; - - bool - iterateLedgerSQLsForward( - std::optional minShardIndex, - std::function< - bool(soci::session& session, std::uint32_t shardIndex)> const& - callback) override; - - bool - iterateTransactionSQLsForward( - std::optional minShardIndex, - std::function< - bool(soci::session& session, std::uint32_t shardIndex)> const& - callback) override; - - bool - iterateLedgerSQLsBack( - std::optional maxShardIndex, - std::function< - bool(soci::session& session, std::uint32_t shardIndex)> const& - callback) override; - - bool - iterateTransactionSQLsBack( - std::optional maxShardIndex, - std::function< - bool(soci::session& session, std::uint32_t shardIndex)> const& - callback) override; - -private: - enum class PathDesignation : uint8_t { - none, // No path specified - historical // Needs a historical path - }; - - struct DatabaseImportStatus - { - DatabaseImportStatus( - std::uint32_t const earliestIndex, - std::uint32_t const latestIndex, - std::uint32_t const currentIndex) - : earliestIndex(earliestIndex) - , latestIndex(latestIndex) - , currentIndex(currentIndex) - { - } - - // Index of the first shard to be imported - std::uint32_t earliestIndex{0}; - - // Index of the last shard to be imported - std::uint32_t latestIndex{0}; - - // Index of the shard currently being imported - std::uint32_t currentIndex{0}; - - // First ledger sequence of the current shard - std::uint32_t firstSeq{0}; - - // Last ledger sequence of the current shard - std::uint32_t lastSeq{0}; - - // The shard currently being imported - std::weak_ptr currentShard; - }; - - Application& app_; - mutable std::mutex mutex_; - bool init_{false}; - - // The context shared with all shard backend databases - std::unique_ptr ctx_; - - // Queue of background tasks to be performed - TaskQueue taskQueue_; - - // Shards held by this server - std::map> shards_; - - // Shard indexes being imported from the shard archive handler - std::set preparedIndexes_; - - // Shard index being acquired from the peer network - std::uint32_t acquireIndex_{0}; - - // The shard store root directory - boost::filesystem::path dir_; - - // If new shards can be stored - bool canAdd_{true}; - - // The name associated with the backend used with the shard store - std::string backendName_; - - // Maximum number of historical shards to store. 
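-    // A value of zero (the default) means the server keeps no
-    // historical shards; once the historical count reaches this limit,
-    // canAdd_ is cleared and no additional shards are accepted.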
- std::uint32_t maxHistoricalShards_{0}; - - // Contains historical shard paths - std::vector historicalPaths_; - - // Storage space utilized by the shard store (in bytes) - std::uint64_t fileSz_{0}; - - // Average storage space required by a shard (in bytes) - std::uint64_t avgShardFileSz_; - - // The limit of final shards with open databases at any time - std::uint32_t const openFinalLimit_; - - // File name used to mark shards being imported from node store - static constexpr auto databaseImportMarker_ = "database_import"; - - // latestShardIndex_ and secondLatestShardIndex hold the indexes - // of the shards most recently confirmed by the network. These - // values are not updated in real time and are modified only - // when adding shards to the database, in order to determine where - // pending shards will be stored on the filesystem. A value of - // std::nullopt indicates that the corresponding shard is not held - // by the database. - std::optional latestShardIndex_; - std::optional secondLatestShardIndex_; - - // Struct used for node store import progress - std::unique_ptr databaseImportStatus_; - - // Thread for running node store import - std::thread databaseImporter_; - - // Indicates whether the import should stop - std::atomic_bool haltDatabaseImport_{false}; - - // Initialize settings from the configuration file - // Lock must be held - bool - initConfig(std::lock_guard const&); - - std::shared_ptr - fetchNodeObject( - uint256 const& hash, - std::uint32_t ledgerSeq, - FetchReport& fetchReport, - bool duplicate) override; - - void - for_each(std::function)> f) override - { - Throw("Import from shard store not supported"); - } - - // Randomly select a shard index not stored - // Lock must be held - std::optional - findAcquireIndex( - std::uint32_t validLedgerSeq, - std::lock_guard const&); - - // Queue a task to finalize a shard by verifying its databases - // Lock must be held - void - finalizeShard( - std::shared_ptr& shard, - bool writeSQLite, - std::optional const& expectedHash); - - // Update storage and file descriptor usage stats - void - updateFileStats(); - - // Returns true if the file system has enough storage - // available to hold the specified number of shards. - // The value of pathDesignation determines whether - // the shard(s) in question are historical and thus - // meant to be stored at a path designated for historical - // shards. - bool - sufficientStorage( - std::uint32_t numShards, - PathDesignation pathDesignation, - std::lock_guard const&) const; - - bool - setStoredInShard( - std::shared_ptr& shard, - std::shared_ptr const& ledger); - - void - removeFailedShard(std::shared_ptr& shard); - - // Returns the index that represents the logical - // partition between historical and recent shards - std::uint32_t - shardBoundaryIndex() const; - - std::uint32_t - numHistoricalShards(std::lock_guard const& lock) const; - - // Shifts the recent and second most recent (by index) - // shards as new shards become available on the network. - // Older shards are moved to a historical shard path. - void - relocateOutdatedShards(std::lock_guard const& lock); - - // Checks whether the shard can be stored. If - // the new shard can't be stored, returns - // std::nullopt. Otherwise returns an enum - // indicating whether the new shard should be - // placed in a separate directory for historical - // shards. 
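-    // For example, a shard older than the boundary index with
-    // 'historical_shard_paths' configured yields
-    // PathDesignation::historical, while one of the two most recent
-    // shards yields PathDesignation::none.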
- std::optional - prepareForNewShard( - std::uint32_t shardIndex, - std::uint32_t numHistoricalShards, - std::lock_guard const& lock); - - boost::filesystem::path - chooseHistoricalPath(std::lock_guard const&) const; - - /** - * @brief iterateShardsForward Visits all shards starting from given - * in ascending order and calls given callback function to each - * of them passing shard as parameter. - * @param minShardIndex Start shard index to visit or none if all shards - * should be visited. - * @param visit Callback function to call. - * @return True if each callback function returned true, false otherwise. - */ - bool - iterateShardsForward( - std::optional minShardIndex, - std::function const& visit); - - /** - * @brief iterateShardsBack Visits all shards starting from given - * in descending order and calls given callback function to each - * of them passing shard as parameter. - * @param maxShardIndex Start shard index to visit or none if all shards - * should be visited. - * @param visit Callback function to call. - * @return True if each callback function returned true, false otherwise. - */ - bool - iterateShardsBack( - std::optional maxShardIndex, - std::function const& visit); - - bool - checkHistoricalPaths(std::lock_guard const&) const; - - std::unique_ptr - getShardInfo(std::lock_guard const&) const; - - // Update peers with the status of every complete and incomplete shard - void - updatePeers(std::lock_guard const& lock) const; - - // Start the node store import process - void - startDatabaseImportThread(std::lock_guard const&); -}; - -} // namespace NodeStore -} // namespace ripple - -#endif diff --git a/src/xrpld/nodestore/detail/DeterministicShard.cpp b/src/xrpld/nodestore/detail/DeterministicShard.cpp deleted file mode 100644 index c575a685ded..00000000000 --- a/src/xrpld/nodestore/detail/DeterministicShard.cpp +++ /dev/null @@ -1,216 +0,0 @@ -//------------------------------------------------------------------------------ -/* - This file is part of rippled: https://github.com/ripple/rippled - Copyright (c) 2020 Ripple Labs Inc. - - Permission to use, copy, modify, and/or distribute this software for any - purpose with or without fee is hereby granted, provided that the above - copyright notice and this permission notice appear in all copies. - - THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. -*/ -//============================================================================== - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -namespace ripple { -namespace NodeStore { - -DeterministicShard::DeterministicShard( - Application& app, - boost::filesystem::path const& dir, - std::uint32_t index, - beast::Journal j) - : app_(app) - , index_(index) - , dir_(dir / "tmp") - , ctx_(std::make_unique()) - , j_(j) - , curMemObjs_(0) - , maxMemObjs_( - app_.getShardStore()->ledgersPerShard() <= 256 ? 
maxMemObjsTest - : maxMemObjsDefault) -{ -} - -DeterministicShard::~DeterministicShard() -{ - close(true); -} - -bool -DeterministicShard::init(Serializer const& finalKey) -{ - auto db = app_.getShardStore(); - - auto fail = [&](std::string const& msg) { - JLOG(j_.error()) << "deterministic shard " << index_ - << " not created: " << msg; - backend_.reset(); - try - { - remove_all(dir_); - } - catch (std::exception const& e) - { - JLOG(j_.error()) << "deterministic shard " << index_ - << ". Exception caught in function " << __func__ - << ". Error: " << e.what(); - } - return false; - }; - - if (!db) - return fail("shard store not exists"); - - if (index_ < db->earliestShardIndex()) - return fail("Invalid shard index"); - - Config const& config{app_.config()}; - Section section{config.section(ConfigSection::shardDatabase())}; - auto const type{get(section, "type", "nudb")}; - auto const factory{Manager::instance().find(type)}; - if (!factory) - return fail("failed to find factory for " + type); - - section.set("path", dir_.string()); - backend_ = factory->createInstance( - NodeObject::keyBytes, section, 1, scheduler_, *ctx_, j_); - - if (!backend_) - return fail("failed to create database"); - - ripemd160_hasher h; - h(finalKey.data(), finalKey.size()); - auto const result{static_cast(h)}; - auto const hash{uint160::fromVoid(result.data())}; - - auto digest = [&](int n) { - auto const data{hash.data()}; - std::uint64_t result{0}; - - switch (n) - { - case 0: - case 1: - // Construct 64 bits from sequential eight bytes - for (int i = 0; i < 8; i++) - result = (result << 8) + data[n * 8 + i]; - break; - - case 2: - // Construct 64 bits using the last four bytes of data - result = (static_cast(data[16]) << 24) + - (static_cast(data[17]) << 16) + - (static_cast(data[18]) << 8) + - (static_cast(data[19])); - break; - } - - return result; - }; - auto const uid{digest(0)}; - auto const salt{digest(1)}; - auto const appType{digest(2) | deterministicType}; - - // Open or create the NuDB key/value store - try - { - if (exists(dir_)) - remove_all(dir_); - - backend_->open(true, appType, uid, salt); - } - catch (std::exception const& e) - { - return fail( - std::string(". Exception caught in function ") + __func__ + - ". Error: " + e.what()); - } - - return true; -} - -std::shared_ptr -make_DeterministicShard( - Application& app, - boost::filesystem::path const& shardDir, - std::uint32_t shardIndex, - Serializer const& finalKey, - beast::Journal j) -{ - std::shared_ptr dShard( - new DeterministicShard(app, shardDir, shardIndex, j)); - if (!dShard->init(finalKey)) - return {}; - return dShard; -} - -void -DeterministicShard::close(bool cancel) -{ - try - { - if (cancel) - { - backend_.reset(); - remove_all(dir_); - } - else - { - ctx_->flush(); - curMemObjs_ = 0; - backend_.reset(); - } - } - catch (std::exception const& e) - { - JLOG(j_.error()) << "deterministic shard " << index_ - << ". Exception caught in function " << __func__ - << ". Error: " << e.what(); - } -} - -bool -DeterministicShard::store(std::shared_ptr const& nodeObject) -{ - try - { - backend_->store(nodeObject); - - // Flush to the backend if at threshold - if (++curMemObjs_ >= maxMemObjs_) - { - ctx_->flush(); - curMemObjs_ = 0; - } - } - catch (std::exception const& e) - { - JLOG(j_.error()) << "deterministic shard " << index_ - << ". Exception caught in function " << __func__ - << ". 
Error: " << e.what(); - return false; - } - - return true; -} - -} // namespace NodeStore -} // namespace ripple diff --git a/src/xrpld/nodestore/detail/DeterministicShard.h b/src/xrpld/nodestore/detail/DeterministicShard.h deleted file mode 100644 index 3eb5eaa8144..00000000000 --- a/src/xrpld/nodestore/detail/DeterministicShard.h +++ /dev/null @@ -1,174 +0,0 @@ -//------------------------------------------------------------------------------ -/* - This file is part of rippled: https://github.com/ripple/rippled - Copyright (c) 2020 Ripple Labs Inc. - - Permission to use, copy, modify, and/or distribute this software for any - purpose with or without fee is hereby granted, provided that the above - copyright notice and this permission notice appear in all copies. - - THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. -*/ -//============================================================================== - -#ifndef RIPPLE_NODESTORE_DETERMINISTICSHARD_H_INCLUDED -#define RIPPLE_NODESTORE_DETERMINISTICSHARD_H_INCLUDED - -#include -#include -#include -#include - -namespace ripple { -namespace NodeStore { - -/** DeterministicShard class. - * - * 1. The init() method creates temporary folder dir_, - * and the deterministic shard is initialized in that folder. - * 2. The store() method adds object to memory pool. - * 3. The flush() method stores all objects from memory pool to the shard - * located in dir_ in sorted order. - * 4. The close(true) method closes the backend and removes the directory. - */ -class DeterministicShard -{ - constexpr static std::uint32_t maxMemObjsDefault = 16384u; - constexpr static std::uint32_t maxMemObjsTest = 16u; - - /* "SHRD" in ASCII */ - constexpr static std::uint64_t deterministicType = 0x5348524400000000ll; - -private: - DeterministicShard(DeterministicShard const&) = delete; - DeterministicShard& - operator=(DeterministicShard const&) = delete; - - /** Creates the object for shard database - * - * @param app Application object - * @param dir Directory where shard is located - * @param index Index of the shard - * @param j Journal to logging - */ - DeterministicShard( - Application& app, - boost::filesystem::path const& dir, - std::uint32_t index, - beast::Journal j); - - /** Initializes the deterministic shard. - * - * @param finalKey Serializer of shard's final key which consists of: - * shard version (32 bit) - * first ledger sequence in the shard (32 bit) - * last ledger sequence in the shard (32 bit) - * hash of last ledger (256 bits) - * @return true if no error, false if error - */ - bool - init(Serializer const& finalKey); - -public: - ~DeterministicShard(); - - /** Finalizes and closes the shard. - */ - void - close() - { - close(false); - } - - [[nodiscard]] boost::filesystem::path const& - getDir() const - { - return dir_; - } - - /** Store a node object in memory. - * - * @param nodeObject The node object to store - * @return true on success. 
- * @note Flushes all objects in memory to the backend when the number - * of node objects held in memory exceed a threshold - */ - [[nodiscard]] bool - store(std::shared_ptr const& nodeObject); - -private: - /** Finalizes and closes the shard. - * - * @param cancel True if reject the shard and delete all files, - * false if finalize the shard and store them - */ - void - close(bool cancel); - - // Application reference - Application& app_; - - // Shard Index - std::uint32_t const index_; - - // Path to temporary database files - boost::filesystem::path const dir_; - - // Dummy scheduler for deterministic write - DummyScheduler scheduler_; - - // NuDB context - std::unique_ptr ctx_; - - // NuDB key/value store for node objects - std::shared_ptr backend_; - - // Journal - beast::Journal const j_; - - // Current number of in-cache objects - std::uint32_t curMemObjs_; - - // Maximum number of in-cache objects - std::uint32_t const maxMemObjs_; - - friend std::shared_ptr - make_DeterministicShard( - Application& app, - boost::filesystem::path const& shardDir, - std::uint32_t shardIndex, - Serializer const& finalKey, - beast::Journal j); -}; - -/** Creates shared pointer to deterministic shard and initializes it. - * - * @param app Application object - * @param shardDir Directory where shard is located - * @param shardIndex Index of the shard - * @param finalKey Serializer of shard's ginal key which consists of: - * shard version (32 bit) - * first ledger sequence in the shard (32 bit) - * last ledger sequence in the shard (32 bit) - * hash of last ledger (256 bits) - * @param j Journal to logging - * @return Shared pointer to deterministic shard or {} in case of error. - */ -std::shared_ptr -make_DeterministicShard( - Application& app, - boost::filesystem::path const& shardDir, - std::uint32_t shardIndex, - Serializer const& finalKey, - beast::Journal j); - -} // namespace NodeStore -} // namespace ripple - -#endif diff --git a/src/xrpld/nodestore/detail/Shard.cpp b/src/xrpld/nodestore/detail/Shard.cpp deleted file mode 100644 index 8c2e9997fbf..00000000000 --- a/src/xrpld/nodestore/detail/Shard.cpp +++ /dev/null @@ -1,1272 +0,0 @@ -//------------------------------------------------------------------------------ -/* - This file is part of rippled: https://github.com/ripple/rippled - Copyright (c) 2012, 2017 Ripple Labs Inc. - - Permission to use, copy, modify, and/or distribute this software for any - purpose with or without fee is hereby granted, provided that the above - copyright notice and this permission notice appear in all copies. - - THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
-*/ -//============================================================================== - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -namespace ripple { -namespace NodeStore { - -uint256 const Shard::finalKey{0}; - -Shard::Shard( - Application& app, - DatabaseShard const& db, - std::uint32_t index, - beast::Journal j) - : Shard(app, db, index, "", j) -{ -} - -Shard::Shard( - Application& app, - DatabaseShard const& db, - std::uint32_t index, - boost::filesystem::path const& dir, - beast::Journal j) - : app_(app) - , j_(j) - , index_(index) - , firstSeq_(db.firstLedgerSeq(index)) - , lastSeq_(std::max(firstSeq_, db.lastLedgerSeq(index))) - , maxLedgers_(db.maxLedgers(index)) - , dir_((dir.empty() ? db.getRootDir() : dir) / std::to_string(index_)) -{ -} - -Shard::~Shard() -{ - if (!removeOnDestroy_) - return; - - if (backend_) - { - // Abort removal if the backend is in use - if (backendCount_ > 0) - { - JLOG(j_.error()) << "shard " << index_ - << " backend in use, unable to remove directory"; - return; - } - - // Release database files first otherwise remove_all may fail - backend_.reset(); - lgrSQLiteDB_.reset(); - txSQLiteDB_.reset(); - acquireInfo_.reset(); - } - - try - { - boost::filesystem::remove_all(dir_); - } - catch (std::exception const& e) - { - JLOG(j_.fatal()) << "shard " << index_ - << ". Exception caught in function " << __func__ - << ". Error: " << e.what(); - } -} - -bool -Shard::init(Scheduler& scheduler, nudb::context& context) -{ - Section section{app_.config().section(ConfigSection::shardDatabase())}; - std::string const type{get(section, "type", "nudb")}; - auto const factory{Manager::instance().find(type)}; - if (!factory) - { - JLOG(j_.error()) << "shard " << index_ << " failed to find factory for " - << type; - return false; - } - section.set("path", dir_.string()); - - std::lock_guard lock{mutex_}; - if (backend_) - { - JLOG(j_.error()) << "shard " << index_ << " already initialized"; - return false; - } - backend_ = factory->createInstance( - NodeObject::keyBytes, - section, - megabytes( - app_.config().getValueFor(SizedItem::burstSize, std::nullopt)), - scheduler, - context, - j_); - - return open(lock); -} - -bool -Shard::isOpen() const -{ - std::lock_guard lock(mutex_); - if (!backend_) - { - JLOG(j_.error()) << "shard " << index_ << " not initialized"; - return false; - } - - return backend_->isOpen(); -} - -bool -Shard::tryClose() -{ - // Keep database open if being acquired or finalized - if (state_ != ShardState::finalized) - return false; - - std::lock_guard lock(mutex_); - - // Keep database open if in use - if (backendCount_ > 0) - return false; - - if (!backend_) - { - JLOG(j_.error()) << "shard " << index_ << " not initialized"; - return false; - } - if (!backend_->isOpen()) - return false; - - try - { - backend_->close(); - } - catch (std::exception const& e) - { - JLOG(j_.fatal()) << "shard " << index_ - << ". Exception caught in function " << __func__ - << ". 
Error: " << e.what(); - return false; - } - - lgrSQLiteDB_.reset(); - txSQLiteDB_.reset(); - acquireInfo_.reset(); - - // Reset caches to reduce memory use - app_.getShardFamily()->getFullBelowCache(lastSeq_)->reset(); - app_.getShardFamily()->getTreeNodeCache(lastSeq_)->reset(); - - return true; -} - -std::optional -Shard::prepare() -{ - if (state_ != ShardState::acquire) - { - JLOG(j_.warn()) << "shard " << index_ - << " prepare called when not acquiring"; - return std::nullopt; - } - - std::lock_guard lock(mutex_); - if (!acquireInfo_) - { - JLOG(j_.error()) << "shard " << index_ - << " missing acquire SQLite database"; - return std::nullopt; - } - - if (acquireInfo_->storedSeqs.empty()) - return lastSeq_; - return prevMissing(acquireInfo_->storedSeqs, 1 + lastSeq_, firstSeq_); -} - -bool -Shard::storeNodeObject(std::shared_ptr const& nodeObject) -{ - if (state_ != ShardState::acquire) - { - // The import node store case is an exception - if (nodeObject->getHash() != finalKey) - { - // Ignore residual calls from InboundLedgers - JLOG(j_.trace()) << "shard " << index_ << " not acquiring"; - return false; - } - } - - auto const scopedCount{makeBackendCount()}; - if (!scopedCount) - return false; - - try - { - std::lock_guard lock(mutex_); - backend_->store(nodeObject); - } - catch (std::exception const& e) - { - JLOG(j_.fatal()) << "shard " << index_ - << ". Exception caught in function " << __func__ - << ". Error: " << e.what(); - return false; - } - - return true; -} - -std::shared_ptr -Shard::fetchNodeObject(uint256 const& hash, FetchReport& fetchReport) -{ - auto const scopedCount{makeBackendCount()}; - if (!scopedCount) - return nullptr; - - std::shared_ptr nodeObject; - - // Try the backend - Status status; - try - { - std::lock_guard lock(mutex_); - status = backend_->fetch(hash.data(), &nodeObject); - } - catch (std::exception const& e) - { - JLOG(j_.fatal()) << "shard " << index_ - << ". Exception caught in function " << __func__ - << ". Error: " << e.what(); - return nullptr; - } - - switch (status) - { - case ok: - case notFound: - break; - case dataCorrupt: { - JLOG(j_.fatal()) - << "shard " << index_ << ". Corrupt node object at hash " - << to_string(hash); - break; - } - default: { - JLOG(j_.warn()) - << "shard " << index_ << ". Unknown status=" << status - << " fetching node object at hash " << to_string(hash); - break; - } - } - - if (nodeObject) - fetchReport.wasFound = true; - - return nodeObject; -} - -Shard::StoreLedgerResult -Shard::storeLedger( - std::shared_ptr const& srcLedger, - std::shared_ptr const& next) -{ - StoreLedgerResult result; - if (state_ != ShardState::acquire) - { - // Ignore residual calls from InboundLedgers - JLOG(j_.trace()) << "shard " << index_ << ". Not acquiring"; - return result; - } - if (containsLedger(srcLedger->info().seq)) - { - JLOG(j_.trace()) << "shard " << index_ << ". Ledger already stored"; - return result; - } - - auto fail = [&](std::string const& msg) { - JLOG(j_.error()) << "shard " << index_ << ". Source ledger sequence " - << srcLedger->info().seq << ". 
" << msg; - result.error = true; - return result; - }; - - if (srcLedger->info().hash.isZero()) - return fail("Invalid hash"); - if (srcLedger->info().accountHash.isZero()) - return fail("Invalid account hash"); - - auto& srcDB{const_cast(srcLedger->stateMap().family().db())}; - if (&srcDB == &(app_.getShardFamily()->db())) - return fail("Source and destination databases are the same"); - - auto const scopedCount{makeBackendCount()}; - if (!scopedCount) - return fail("Failed to lock backend"); - - Batch batch; - batch.reserve(batchWritePreallocationSize); - auto storeBatch = [&]() { - std::uint64_t sz{0}; - for (auto const& nodeObject : batch) - sz += nodeObject->getData().size(); - - try - { - std::lock_guard lock(mutex_); - backend_->storeBatch(batch); - } - catch (std::exception const& e) - { - fail( - std::string(". Exception caught in function ") + __func__ + - ". Error: " + e.what()); - return false; - } - - result.count += batch.size(); - result.size += sz; - batch.clear(); - return true; - }; - - // Store ledger header - { - Serializer s(sizeof(std::uint32_t) + sizeof(LedgerInfo)); - s.add32(HashPrefix::ledgerMaster); - addRaw(srcLedger->info(), s); - auto nodeObject = NodeObject::createObject( - hotLEDGER, std::move(s.modData()), srcLedger->info().hash); - batch.emplace_back(std::move(nodeObject)); - } - - bool error = false; - auto visit = [&](SHAMapTreeNode const& node) { - if (!stop_) - { - if (auto nodeObject = srcDB.fetchNodeObject( - node.getHash().as_uint256(), srcLedger->info().seq)) - { - batch.emplace_back(std::move(nodeObject)); - if (batch.size() < batchWritePreallocationSize || storeBatch()) - return true; - } - } - - error = true; - return false; - }; - - // Store the state map - if (srcLedger->stateMap().getHash().isNonZero()) - { - if (!srcLedger->stateMap().isValid()) - return fail("Invalid state map"); - - if (next && next->info().parentHash == srcLedger->info().hash) - { - auto have = next->stateMap().snapShot(false); - srcLedger->stateMap().snapShot(false)->visitDifferences( - &(*have), visit); - } - else - srcLedger->stateMap().snapShot(false)->visitNodes(visit); - if (error) - return fail("Failed to store state map"); - } - - // Store the transaction map - if (srcLedger->info().txHash.isNonZero()) - { - if (!srcLedger->txMap().isValid()) - return fail("Invalid transaction map"); - - srcLedger->txMap().snapShot(false)->visitNodes(visit); - if (error) - return fail("Failed to store transaction map"); - } - - if (!batch.empty() && !storeBatch()) - return fail("Failed to store"); - - return result; -} - -bool -Shard::setLedgerStored(std::shared_ptr const& ledger) -{ - if (state_ != ShardState::acquire) - { - // Ignore residual calls from InboundLedgers - JLOG(j_.trace()) << "shard " << index_ << " not acquiring"; - return false; - } - - auto fail = [&](std::string const& msg) { - JLOG(j_.error()) << "shard " << index_ << ". 
" << msg; - return false; - }; - - auto const ledgerSeq{ledger->info().seq}; - if (ledgerSeq < firstSeq_ || ledgerSeq > lastSeq_) - return fail("Invalid ledger sequence " + std::to_string(ledgerSeq)); - - auto const scopedCount{makeBackendCount()}; - if (!scopedCount) - return false; - - // This lock is used as an optimization to prevent unneeded - // calls to storeSQLite before acquireInfo_ is updated - std::lock_guard storedLock(storedMutex_); - - { - std::lock_guard lock(mutex_); - if (!acquireInfo_) - return fail("Missing acquire SQLite database"); - - if (boost::icl::contains(acquireInfo_->storedSeqs, ledgerSeq)) - { - // Ignore redundant calls - JLOG(j_.debug()) << "shard " << index_ << " ledger sequence " - << ledgerSeq << " already stored"; - return true; - } - } - - if (!storeSQLite(ledger)) - return fail("Failed to store ledger"); - - std::lock_guard lock(mutex_); - - // Update the acquire database - acquireInfo_->storedSeqs.insert(ledgerSeq); - - try - { - auto session{acquireInfo_->SQLiteDB->checkoutDb()}; - soci::blob sociBlob(*session); - convert(to_string(acquireInfo_->storedSeqs), sociBlob); - if (ledgerSeq == lastSeq_) - { - // Store shard's last ledger hash - auto const sHash{to_string(ledger->info().hash)}; - *session << "UPDATE Shard " - "SET LastLedgerHash = :lastLedgerHash," - "StoredLedgerSeqs = :storedLedgerSeqs " - "WHERE ShardIndex = :shardIndex;", - soci::use(sHash), soci::use(sociBlob), soci::use(index_); - } - else - { - *session << "UPDATE Shard " - "SET StoredLedgerSeqs = :storedLedgerSeqs " - "WHERE ShardIndex = :shardIndex;", - soci::use(sociBlob), soci::use(index_); - } - } - catch (std::exception const& e) - { - acquireInfo_->storedSeqs.erase(ledgerSeq); - return fail( - std::string(". Exception caught in function ") + __func__ + - ". Error: " + e.what()); - } - - // Update progress - progress_ = boost::icl::length(acquireInfo_->storedSeqs); - if (progress_ == maxLedgers_) - state_ = ShardState::complete; - - setFileStats(lock); - JLOG(j_.trace()) << "shard " << index_ << " stored ledger sequence " - << ledgerSeq; - return true; -} - -bool -Shard::containsLedger(std::uint32_t ledgerSeq) const -{ - if (ledgerSeq < firstSeq_ || ledgerSeq > lastSeq_) - return false; - if (state_ != ShardState::acquire) - return true; - - std::lock_guard lock(mutex_); - if (!acquireInfo_) - { - JLOG(j_.error()) << "shard " << index_ - << " missing acquire SQLite database"; - return false; - } - return boost::icl::contains(acquireInfo_->storedSeqs, ledgerSeq); -} - -std::chrono::steady_clock::time_point -Shard::getLastUse() const -{ - std::lock_guard lock(mutex_); - return lastAccess_; -} - -std::pair -Shard::getFileInfo() const -{ - std::lock_guard lock(mutex_); - return {fileSz_, fdRequired_}; -} - -std::int32_t -Shard::getWriteLoad() -{ - auto const scopedCount{makeBackendCount()}; - if (!scopedCount) - return 0; - std::lock_guard lock(mutex_); - return backend_->getWriteLoad(); -} - -bool -Shard::isLegacy() const -{ - std::lock_guard lock(mutex_); - return legacy_; -} - -bool -Shard::finalize(bool writeSQLite, std::optional const& referenceHash) -{ - auto const scopedCount{makeBackendCount()}; - if (!scopedCount) - return false; - - uint256 hash{0}; - std::uint32_t ledgerSeq{0}; - auto fail = [&](std::string const& msg) { - JLOG(j_.fatal()) << "shard " << index_ << ". " << msg - << (hash.isZero() ? "" - : ". Ledger hash " + to_string(hash)) - << (ledgerSeq == 0 ? "" - : ". 
Ledger sequence " + - std::to_string(ledgerSeq)); - state_ = ShardState::finalizing; - progress_ = 0; - busy_ = false; - return false; - }; - - try - { - std::lock_guard lock(mutex_); - - state_ = ShardState::finalizing; - progress_ = 0; - - // Check if a final key has been stored - if (std::shared_ptr nodeObject; - backend_->fetch(finalKey.data(), &nodeObject) == Status::ok) - { - // Check final key's value - SerialIter sIt( - nodeObject->getData().data(), nodeObject->getData().size()); - if (sIt.get32() != version) - return fail("invalid version"); - - if (sIt.get32() != firstSeq_ || sIt.get32() != lastSeq_) - return fail("out of range ledger sequences"); - - if (hash = sIt.get256(); hash.isZero()) - return fail("invalid last ledger hash"); - } - else - { - // In the absence of a final key, an acquire SQLite database - // must be present in order to verify the shard - if (!acquireInfo_) - return fail("missing acquire SQLite database"); - - auto [res, seqshash] = selectAcquireDBLedgerSeqsHash( - *acquireInfo_->SQLiteDB->checkoutDb(), index_); - - if (!res) - return fail("missing or invalid ShardIndex"); - - if (!seqshash.hash) - return fail("missing LastLedgerHash"); - - if (!hash.parseHex(*seqshash.hash) || hash.isZero()) - return fail("invalid LastLedgerHash"); - - if (!seqshash.sequences) - return fail("missing StoredLedgerSeqs"); - - auto& storedSeqs{acquireInfo_->storedSeqs}; - if (!from_string(storedSeqs, *seqshash.sequences) || - boost::icl::first(storedSeqs) != firstSeq_ || - boost::icl::last(storedSeqs) != lastSeq_ || - storedSeqs.size() != maxLedgers_) - { - return fail("invalid StoredLedgerSeqs"); - } - } - } - catch (std::exception const& e) - { - return fail( - std::string(". Exception caught in function ") + __func__ + - ". Error: " + e.what()); - } - - // Verify the last ledger hash of a downloaded shard - // using a ledger hash obtained from the peer network - if (referenceHash && *referenceHash != hash) - return fail("invalid last ledger hash"); - - // Verify every ledger stored in the backend - Config const& config{app_.config()}; - std::shared_ptr ledger; - std::shared_ptr next; - auto const lastLedgerHash{hash}; - auto& shardFamily{*app_.getShardFamily()}; - auto const fullBelowCache{shardFamily.getFullBelowCache(lastSeq_)}; - auto const treeNodeCache{shardFamily.getTreeNodeCache(lastSeq_)}; - - // Reset caches to reduce memory usage - fullBelowCache->reset(); - treeNodeCache->reset(); - - Serializer s; - s.add32(version); - s.add32(firstSeq_); - s.add32(lastSeq_); - s.addBitString(lastLedgerHash); - - std::shared_ptr dShard{ - make_DeterministicShard(app_, dir_, index_, s, j_)}; - if (!dShard) - return fail("Failed to create deterministic shard"); - - // Start with the last ledger in the shard and walk backwards from - // child to parent until we reach the first ledger - ledgerSeq = lastSeq_; - while (ledgerSeq >= firstSeq_) - { - if (stop_) - return false; - - auto nodeObject{verifyFetch(hash)}; - if (!nodeObject) - return fail("invalid ledger"); - - ledger = std::make_shared( - deserializePrefixedHeader(makeSlice(nodeObject->getData())), - config, - shardFamily); - if (ledger->info().seq != ledgerSeq) - return fail("invalid ledger sequence"); - if (ledger->info().hash != hash) - return fail("invalid ledger hash"); - - ledger->stateMap().setLedgerSeq(ledgerSeq); - ledger->txMap().setLedgerSeq(ledgerSeq); - ledger->setImmutable(); - if (!ledger->stateMap().fetchRoot( - SHAMapHash{ledger->info().accountHash}, nullptr)) - { - return fail("missing root STATE node"); - } 
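// A minimal, self-contained sketch of the child-to-parent walk that the
// finalize() loop above performs. Header and fetchAndVerify() here are
// simplified, hypothetical stand-ins for the deserialized ledger header and
// verifyFetch(); the real code also recomputes each node object's hash and
// verifies the full Merkle trees, which this sketch omits.
#include <cstdint>
#include <optional>

struct Header
{
    std::uint32_t seq;
    std::uint64_t parentHash;  // uint256 in the real code
};

// Placeholder fetch: would look the object up in the backend by hash.
inline std::optional<Header>
fetchAndVerify(std::uint64_t hash)
{
    return std::nullopt;
}

// Walk from the last ledger in the shard back to the first, following
// parentHash links and requiring sequences to descend one at a time.
inline bool
verifyChain(
    std::uint64_t lastHash,
    std::uint32_t firstSeq,
    std::uint32_t lastSeq)
{
    std::uint64_t hash = lastHash;
    for (std::uint32_t seq = lastSeq; seq >= firstSeq && seq > 0; --seq)
    {
        auto const header = fetchAndVerify(hash);
        if (!header || header->seq != seq)
            return false;
        hash = header->parentHash;  // the next iteration checks the parent
    }
    return true;
}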
- if (ledger->info().txHash.isNonZero() && - !ledger->txMap().fetchRoot( - SHAMapHash{ledger->info().txHash}, nullptr)) - { - return fail("missing root TXN node"); - } - - if (!verifyLedger(ledger, next, dShard)) - return fail("failed to verify ledger"); - - if (!dShard->store(nodeObject)) - return fail("failed to store node object"); - - if (writeSQLite && !storeSQLite(ledger)) - return fail("failed storing to SQLite databases"); - - assert( - ledger->info().seq == ledgerSeq && - (ledger->info().seq < XRP_LEDGER_EARLIEST_FEES || - ledger->read(keylet::fees()))); - - hash = ledger->info().parentHash; - next = std::move(ledger); - - // Update progress - progress_ = maxLedgers_ - (ledgerSeq - firstSeq_); - - --ledgerSeq; - - fullBelowCache->reset(); - treeNodeCache->reset(); - } - - JLOG(j_.debug()) << "shard " << index_ << " is valid"; - - /* - TODO MP - SQLite VACUUM blocks all database access while processing. - Depending on the file size, that can take a while. Until we find - a non-blocking way of doing this, we cannot enable vacuum as - it can desync a server. - - try - { - // VACUUM the SQLite databases - auto const tmpDir {dir_ / "tmp_vacuum"}; - create_directory(tmpDir); - - auto vacuum = [&tmpDir](std::unique_ptr& sqliteDB) - { - auto session {sqliteDB->checkoutDb()}; - *session << "PRAGMA synchronous=OFF;"; - *session << "PRAGMA journal_mode=OFF;"; - *session << "PRAGMA temp_store_directory='" << - tmpDir.string() << "';"; - *session << "VACUUM;"; - }; - vacuum(lgrSQLiteDB_); - vacuum(txSQLiteDB_); - remove_all(tmpDir); - } - catch (std::exception const& e) - { - return fail( - std::string(". Exception caught in function ") + __func__ + - ". Error: " + e.what()); - } - */ - - auto const nodeObject{ - NodeObject::createObject(hotUNKNOWN, std::move(s.modData()), finalKey)}; - if (!dShard->store(nodeObject)) - return fail("failed to store node object"); - - try - { - { - // Store final key's value, may already be stored - std::lock_guard lock(mutex_); - backend_->store(nodeObject); - } - - // Do not allow all other threads work with the shard - busy_ = true; - - // Wait until all other threads leave the shard - while (backendCount_ > 1) - std::this_thread::yield(); - - std::lock_guard lock(mutex_); - - // Close original backend - backend_->close(); - - // Close SQL databases - lgrSQLiteDB_.reset(); - txSQLiteDB_.reset(); - - // Remove the acquire SQLite database - if (acquireInfo_) - { - acquireInfo_.reset(); - remove_all(dir_ / AcquireShardDBName); - } - - // Close deterministic backend - dShard->close(); - - // Replace original backend with deterministic backend - remove(dir_ / "nudb.key"); - remove(dir_ / "nudb.dat"); - rename(dShard->getDir() / "nudb.key", dir_ / "nudb.key"); - rename(dShard->getDir() / "nudb.dat", dir_ / "nudb.dat"); - - // Re-open deterministic shard - if (!open(lock)) - return fail("failed to open"); - - assert(state_ == ShardState::finalized); - - // Allow all other threads work with the shard - busy_ = false; - } - catch (std::exception const& e) - { - return fail( - std::string(". Exception caught in function ") + __func__ + - ". 
Error: " + e.what()); - } - - return true; -} - -bool -Shard::open(std::lock_guard const& lock) -{ - using namespace boost::filesystem; - Config const& config{app_.config()}; - auto preexist{false}; - auto fail = [this, &preexist](std::string const& msg) REQUIRES(mutex_) { - backend_->close(); - lgrSQLiteDB_.reset(); - txSQLiteDB_.reset(); - acquireInfo_.reset(); - - state_ = ShardState::acquire; - progress_ = 0; - - if (!preexist) - remove_all(dir_); - - if (!msg.empty()) - { - JLOG(j_.fatal()) << "shard " << index_ << " " << msg; - } - return false; - }; - auto createAcquireInfo = [this, &config]() REQUIRES(mutex_) { - DatabaseCon::Setup setup; - setup.startUp = config.standalone() ? config.LOAD : config.START_UP; - setup.standAlone = config.standalone(); - setup.dataDir = dir_; - setup.useGlobalPragma = true; - - acquireInfo_ = std::make_unique(); - acquireInfo_->SQLiteDB = makeAcquireDB( - setup, - DatabaseCon::CheckpointerSetup{&app_.getJobQueue(), &app_.logs()}); - - state_ = ShardState::acquire; - progress_ = 0; - }; - - try - { - // Open or create the NuDB key/value store - preexist = exists(dir_); - backend_->open(!preexist); - - if (!preexist) - { - // A new shard - createAcquireInfo(); - insertAcquireDBIndex(acquireInfo_->SQLiteDB->getSession(), index_); - } - else if (exists(dir_ / AcquireShardDBName)) - { - // A shard being acquired, backend is likely incomplete - createAcquireInfo(); - auto [res, s] = selectAcquireDBLedgerSeqs( - acquireInfo_->SQLiteDB->getSession(), index_); - - if (!res) - return fail("invalid acquire SQLite database"); - - if (s) - { - auto& storedSeqs{acquireInfo_->storedSeqs}; - if (!from_string(storedSeqs, *s)) - return fail("invalid StoredLedgerSeqs"); - - if (boost::icl::first(storedSeqs) < firstSeq_ || - boost::icl::last(storedSeqs) > lastSeq_) - { - return fail("invalid StoredLedgerSeqs"); - } - - // Check if backend is complete - progress_ = boost::icl::length(storedSeqs); - if (progress_ == maxLedgers_) - state_ = ShardState::complete; - } - } - else - { - // A shard with a finalized or complete state - std::shared_ptr nodeObject; - if (backend_->fetch(finalKey.data(), &nodeObject) != Status::ok) - { - legacy_ = true; - return fail("incompatible, missing backend final key"); - } - - // Check final key's value - SerialIter sIt( - nodeObject->getData().data(), nodeObject->getData().size()); - if (sIt.get32() != version) - return fail("invalid version"); - - if (sIt.get32() != firstSeq_ || sIt.get32() != lastSeq_) - return fail("out of range ledger sequences"); - - if (sIt.get256().isZero()) - return fail("invalid last ledger hash"); - - if (exists(dir_ / LgrDBName) && exists(dir_ / TxDBName)) - { - lastAccess_ = std::chrono::steady_clock::now(); - state_ = ShardState::finalized; - } - else - state_ = ShardState::complete; - - progress_ = maxLedgers_; - } - } - catch (std::exception const& e) - { - return fail( - std::string(". Exception caught in function ") + __func__ + - ". Error: " + e.what()); - } - - if (!initSQLite(lock)) - return fail({}); - - setFileStats(lock); - return true; -} - -bool -Shard::initSQLite(std::lock_guard const&) -{ - Config const& config{app_.config()}; - DatabaseCon::Setup const setup = [&]() { - DatabaseCon::Setup setup; - setup.startUp = config.standalone() ? 
config.LOAD : config.START_UP; - setup.standAlone = config.standalone(); - setup.dataDir = dir_; - setup.useGlobalPragma = (state_ != ShardState::complete); - return setup; - }(); - - try - { - if (lgrSQLiteDB_) - lgrSQLiteDB_.reset(); - - if (txSQLiteDB_) - txSQLiteDB_.reset(); - - switch (state_) - { - case ShardState::complete: - case ShardState::finalizing: - case ShardState::finalized: { - auto [lgr, tx] = makeShardCompleteLedgerDBs(config, setup); - - lgrSQLiteDB_ = std::move(lgr); - lgrSQLiteDB_->getSession() << boost::str( - boost::format("PRAGMA cache_size=-%d;") % - kilobytes(config.getValueFor( - SizedItem::lgrDBCache, std::nullopt))); - - txSQLiteDB_ = std::move(tx); - txSQLiteDB_->getSession() << boost::str( - boost::format("PRAGMA cache_size=-%d;") % - kilobytes(config.getValueFor( - SizedItem::txnDBCache, std::nullopt))); - break; - } - - // case ShardState::acquire: - // case ShardState::queued: - default: { - // Incomplete shards use a Write Ahead Log for performance - auto [lgr, tx] = makeShardIncompleteLedgerDBs( - config, - setup, - DatabaseCon::CheckpointerSetup{ - &app_.getJobQueue(), &app_.logs()}); - - lgrSQLiteDB_ = std::move(lgr); - lgrSQLiteDB_->getSession() << boost::str( - boost::format("PRAGMA cache_size=-%d;") % - kilobytes(config.getValueFor(SizedItem::lgrDBCache))); - - txSQLiteDB_ = std::move(tx); - txSQLiteDB_->getSession() << boost::str( - boost::format("PRAGMA cache_size=-%d;") % - kilobytes(config.getValueFor(SizedItem::txnDBCache))); - break; - } - } - } - catch (std::exception const& e) - { - JLOG(j_.fatal()) << "shard " << index_ - << ". Exception caught in function " << __func__ - << ". Error: " << e.what(); - return false; - } - - return true; -} - -bool -Shard::storeSQLite(std::shared_ptr const& ledger) -{ - if (stop_) - return false; - - try - { - std::lock_guard lock(mutex_); - - auto res = updateLedgerDBs( - *txSQLiteDB_->checkoutDb(), - *lgrSQLiteDB_->checkoutDb(), - ledger, - index_, - stop_, - j_); - - if (!res) - return false; - - // Update the acquire database if present - if (acquireInfo_) - { - std::optional s; - if (!acquireInfo_->storedSeqs.empty()) - s = to_string(acquireInfo_->storedSeqs); - - updateAcquireDB( - acquireInfo_->SQLiteDB->getSession(), - ledger, - index_, - lastSeq_, - s); - } - } - catch (std::exception const& e) - { - JLOG(j_.fatal()) << "shard " << index_ - << ". Exception caught in function " << __func__ - << ". Error: " << e.what(); - return false; - } - - return true; -} - -void -Shard::setFileStats(std::lock_guard const&) -{ - fileSz_ = 0; - fdRequired_ = 0; - try - { - using namespace boost::filesystem; - for (auto const& d : directory_iterator(dir_)) - { - if (is_regular_file(d)) - { - fileSz_ += file_size(d); - ++fdRequired_; - } - } - } - catch (std::exception const& e) - { - JLOG(j_.fatal()) << "shard " << index_ - << ". Exception caught in function " << __func__ - << ". Error: " << e.what(); - } -} - -bool -Shard::verifyLedger( - std::shared_ptr const& ledger, - std::shared_ptr const& next, - std::shared_ptr const& dShard) const -{ - auto fail = [j = j_, index = index_, &ledger](std::string const& msg) { - JLOG(j.error()) << "shard " << index << ". " << msg - << (ledger->info().hash.isZero() ? "" - : ". Ledger hash " + - to_string(ledger->info().hash)) - << (ledger->info().seq == 0 ? "" - : ". 
Ledger sequence " + - std::to_string(ledger->info().seq)); - return false; - }; - - if (ledger->info().hash.isZero()) - return fail("Invalid ledger hash"); - if (ledger->info().accountHash.isZero()) - return fail("Invalid ledger account hash"); - - bool error{false}; - auto visit = [this, &error, &dShard](SHAMapTreeNode const& node) { - if (stop_) - return false; - - auto nodeObject{verifyFetch(node.getHash().as_uint256())}; - if (!nodeObject || !dShard->store(nodeObject)) - error = true; - - return !error; - }; - - // Verify the state map - if (ledger->stateMap().getHash().isNonZero()) - { - if (!ledger->stateMap().isValid()) - return fail("Invalid state map"); - - try - { - if (next && next->info().parentHash == ledger->info().hash) - ledger->stateMap().visitDifferences(&next->stateMap(), visit); - else - ledger->stateMap().visitNodes(visit); - } - catch (std::exception const& e) - { - return fail( - std::string(". Exception caught in function ") + __func__ + - ". Error: " + e.what()); - } - - if (stop_) - return false; - if (error) - return fail("Invalid state map"); - } - - // Verify the transaction map - if (ledger->info().txHash.isNonZero()) - { - if (!ledger->txMap().isValid()) - return fail("Invalid transaction map"); - - try - { - ledger->txMap().visitNodes(visit); - } - catch (std::exception const& e) - { - return fail( - std::string(". Exception caught in function ") + __func__ + - ". Error: " + e.what()); - } - - if (stop_) - return false; - if (error) - return fail("Invalid transaction map"); - } - - return true; -} - -std::shared_ptr -Shard::verifyFetch(uint256 const& hash) const -{ - std::shared_ptr nodeObject; - auto fail = - [j = j_, index = index_, &hash, &nodeObject](std::string const& msg) { - JLOG(j.error()) << "shard " << index << ". " << msg - << ". Node object hash " << to_string(hash); - nodeObject.reset(); - return nodeObject; - }; - - try - { - std::lock_guard lock(mutex_); - switch (backend_->fetch(hash.data(), &nodeObject)) - { - case ok: - // Verify that the hash of node object matches the payload - if (nodeObject->getHash() != - sha512Half(makeSlice(nodeObject->getData()))) - return fail("Node object hash does not match payload"); - return nodeObject; - case notFound: - return fail("Missing node object"); - case dataCorrupt: - return fail("Corrupt node object"); - default: - return fail("Unknown error"); - } - } - catch (std::exception const& e) - { - return fail( - std::string(". Exception caught in function ") + __func__ + - ". 
Error: " + e.what()); - } -} - -Shard::Count -Shard::makeBackendCount() -{ - if (stop_ || busy_) - return Shard::Count{nullptr}; - - std::lock_guard lock(mutex_); - if (!backend_) - { - JLOG(j_.error()) << "shard " << index_ << " not initialized"; - return Shard::Count{nullptr}; - } - if (!backend_->isOpen()) - { - if (!open(lock)) - return Shard::Count{nullptr}; - } - else if (state_ == ShardState::finalized) - lastAccess_ = std::chrono::steady_clock::now(); - - return Shard::Count(&backendCount_); -} - -bool -Shard::doCallForSQL( - std::function const& callback, - LockedSociSession&& db) -{ - return callback(*db); -} - -bool -Shard::doCallForSQL( - std::function const& - callback, - LockedSociSession&& db) -{ - return callback(*db, index_); -} - -} // namespace NodeStore -} // namespace ripple diff --git a/src/xrpld/nodestore/detail/Shard.h b/src/xrpld/nodestore/detail/Shard.h deleted file mode 100644 index be11e538c77..00000000000 --- a/src/xrpld/nodestore/detail/Shard.h +++ /dev/null @@ -1,432 +0,0 @@ -//------------------------------------------------------------------------------ -/* - This file is part of rippled: https://github.com/ripple/rippled - Copyright (c) 2012, 2017 Ripple Labs Inc. - - Permission to use, copy, modify, and/or distribute this software for any - purpose with or without fee is hereby granted, provided that the above - copyright notice and this permission notice appear in all copies. - - THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. -*/ -//============================================================================== - -#ifndef RIPPLE_NODESTORE_SHARD_H_INCLUDED -#define RIPPLE_NODESTORE_SHARD_H_INCLUDED - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include - -#include - -namespace ripple { -namespace NodeStore { - -using PCache = TaggedCache; -using NCache = KeyCache; -class DatabaseShard; - -/* A range of historical ledgers backed by a node store. - Shards are indexed and store `ledgersPerShard`. - Shard `i` stores ledgers starting with sequence: `1 + (i * ledgersPerShard)` - and ending with sequence: `(i + 1) * ledgersPerShard`. - Once a shard has all its ledgers, it is never written to again. - - Public functions can be called concurrently from any thread. -*/ -class Shard final -{ -public: - /// Copy constructor (disallowed) - Shard(Shard const&) = delete; - - /// Move constructor (disallowed) - Shard(Shard&&) = delete; - - // Copy assignment (disallowed) - Shard& - operator=(Shard const&) = delete; - - // Move assignment (disallowed) - Shard& - operator=(Shard&&) = delete; - - Shard( - Application& app, - DatabaseShard const& db, - std::uint32_t index, - boost::filesystem::path const& dir, - beast::Journal j); - - Shard( - Application& app, - DatabaseShard const& db, - std::uint32_t index, - beast::Journal j); - - ~Shard(); - - /** Initialize shard. - - @param scheduler The scheduler to use for performing asynchronous tasks. - @param context The context to use for the backend. 
- */ - [[nodiscard]] bool - init(Scheduler& scheduler, nudb::context& context); - - /** Returns true if the database are open. - */ - [[nodiscard]] bool - isOpen() const; - - /** Try to close databases if not in use. - - @return true if databases were closed. - */ - bool - tryClose(); - - /** Notify shard to prepare for shutdown. - */ - void - stop() noexcept - { - stop_ = true; - } - - [[nodiscard]] std::optional - prepare(); - - [[nodiscard]] bool - storeNodeObject(std::shared_ptr const& nodeObject); - - [[nodiscard]] std::shared_ptr - fetchNodeObject(uint256 const& hash, FetchReport& fetchReport); - - /** Store a ledger. - - @param srcLedger The ledger to store. - @param next The ledger that immediately follows srcLedger, can be null. - @return StoreLedgerResult containing data about the store. - */ - struct StoreLedgerResult - { - std::uint64_t count{0}; // Number of storage calls - std::uint64_t size{0}; // Number of bytes stored - bool error{false}; - }; - - [[nodiscard]] StoreLedgerResult - storeLedger( - std::shared_ptr const& srcLedger, - std::shared_ptr const& next); - - [[nodiscard]] bool - setLedgerStored(std::shared_ptr const& ledger); - - [[nodiscard]] bool - containsLedger(std::uint32_t ledgerSeq) const; - - [[nodiscard]] std::uint32_t - index() const noexcept - { - return index_; - } - - [[nodiscard]] boost::filesystem::path const& - getDir() const noexcept - { - return dir_; - } - - [[nodiscard]] std::chrono::steady_clock::time_point - getLastUse() const; - - /** Returns a pair where the first item describes the storage space - utilized and the second item is the number of file descriptors required. - */ - [[nodiscard]] std::pair - getFileInfo() const; - - [[nodiscard]] ShardState - getState() const noexcept - { - return state_; - } - - /** Returns a percent signifying how complete - the current state of the shard is. - */ - [[nodiscard]] std::uint32_t - getPercentProgress() const noexcept - { - return calculatePercent(progress_, maxLedgers_); - } - - [[nodiscard]] std::int32_t - getWriteLoad(); - - /** Returns `true` if shard is older, without final key data - */ - [[nodiscard]] bool - isLegacy() const; - - /** Finalize shard by walking its ledgers, verifying each Merkle tree and - creating a deterministic backend. - - @param writeSQLite If true, SQLite entries will be rewritten using - verified backend data. - @param referenceHash If present, this hash must match the hash - of the last ledger in the shard. - */ - [[nodiscard]] bool - finalize(bool writeSQLite, std::optional const& referenceHash); - - /** Enables removal of the shard directory on destruction. - */ - void - removeOnDestroy() noexcept - { - removeOnDestroy_ = true; - } - - std::string - getStoredSeqs() - { - std::lock_guard lock(mutex_); - if (!acquireInfo_) - return ""; - - return to_string(acquireInfo_->storedSeqs); - } - - /** Invoke a callback on the ledger SQLite db - - @param callback Callback function to call. - @return Value returned by callback function. - */ - template - bool - callForLedgerSQL(std::function const& callback) - { - return callForSQL(callback, lgrSQLiteDB_->checkoutDb()); - } - - /** Invoke a callback on the transaction SQLite db - - @param callback Callback function to call. - @return Value returned by callback function. 
- */ - template - bool - callForTransactionSQL(std::function const& callback) - { - return callForSQL(callback, txSQLiteDB_->checkoutDb()); - } - - // Current shard version - static constexpr std::uint32_t version{2}; - - // The finalKey is a hard coded value of zero. It is used to store - // finalizing shard data to the backend. The data contains a version, - // last ledger's hash, and the first and last ledger sequences. - static uint256 const finalKey; - -private: - class Count final - { - public: - Count(Count const&) = delete; - Count& - operator=(Count const&) = delete; - Count& - operator=(Count&&) = delete; - - Count(Count&& other) noexcept : counter_(other.counter_) - { - other.counter_ = nullptr; - } - - explicit Count(std::atomic* counter) noexcept - : counter_(counter) - { - if (counter_) - ++(*counter_); - } - - ~Count() noexcept - { - if (counter_) - --(*counter_); - } - - explicit operator bool() const noexcept - { - return counter_ != nullptr; - } - - private: - std::atomic* counter_; - }; - - struct AcquireInfo - { - // SQLite database to track information about what has been acquired - std::unique_ptr SQLiteDB; - - // Tracks the sequences of ledgers acquired and stored in the backend - RangeSet storedSeqs; - }; - - Application& app_; - beast::Journal const j_; - mutable std::mutex mutex_; - mutable std::mutex storedMutex_; - - // Shard Index - std::uint32_t const index_; - - // First ledger sequence in the shard - std::uint32_t const firstSeq_; - - // Last ledger sequence in the shard - std::uint32_t const lastSeq_; - - // The maximum number of ledgers the shard can store - // The earliest shard may store fewer ledgers than subsequent shards - std::uint32_t const maxLedgers_; - - // Path to database files - boost::filesystem::path const dir_; - - // Storage space utilized by the shard - GUARDED_BY(mutex_) std::uint64_t fileSz_{0}; - - // Number of file descriptors required by the shard - GUARDED_BY(mutex_) std::uint32_t fdRequired_{0}; - - // NuDB key/value store for node objects - std::unique_ptr backend_ GUARDED_BY(mutex_); - - std::atomic backendCount_{0}; - - // Ledger SQLite database used for indexes - std::unique_ptr lgrSQLiteDB_ GUARDED_BY(mutex_); - - // Transaction SQLite database used for indexes - std::unique_ptr txSQLiteDB_ GUARDED_BY(mutex_); - - // Tracking information used only when acquiring a shard from the network. - // If the shard is finalized, this member will be null. 
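// Worked example of the indexing rule stated in the class comment above
// ("Shard i stores ledgers 1 + (i * ledgersPerShard) through
// (i + 1) * ledgersPerShard"), assuming 16384 ledgers per shard; the
// constant here is illustrative, not taken from this file.
#include <cstdint>

constexpr std::uint32_t ledgersPerShard = 16384;

constexpr std::uint32_t
firstLedgerSeq(std::uint32_t shardIndex)
{
    return 1 + shardIndex * ledgersPerShard;
}

constexpr std::uint32_t
lastLedgerSeq(std::uint32_t shardIndex)
{
    return (shardIndex + 1) * ledgersPerShard;
}

// Shard 0 holds ledgers 1-16384, shard 1 holds 16385-32768, and so on.
static_assert(firstLedgerSeq(1) == 16385);
static_assert(lastLedgerSeq(1) == 32768);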
- std::unique_ptr acquireInfo_ GUARDED_BY(mutex_); - ; - - // Older shard without an acquire database or final key - // Eventually there will be no need for this and should be removed - GUARDED_BY(mutex_) bool legacy_{false}; - - // Determines if the shard needs to stop processing for shutdown - std::atomic stop_{false}; - - // Determines if the shard busy with replacing by deterministic one - std::atomic busy_{false}; - - // State of the shard - std::atomic state_{ShardState::acquire}; - - // Number of ledgers processed for the current shard state - std::atomic progress_{0}; - - // Determines if the shard directory should be removed in the destructor - std::atomic removeOnDestroy_{false}; - - // The time of the last access of a shard with a finalized state - std::chrono::steady_clock::time_point lastAccess_ GUARDED_BY(mutex_); - ; - - // Open shard databases - [[nodiscard]] bool - open(std::lock_guard const& lock) REQUIRES(mutex_); - - // Open/Create SQLite databases - // Lock over mutex_ required - [[nodiscard]] bool - initSQLite(std::lock_guard const&) REQUIRES(mutex_); - - // Write SQLite entries for this ledger - [[nodiscard]] bool - storeSQLite(std::shared_ptr const& ledger); - - // Set storage and file descriptor usage stats - // Lock over mutex_ required - void - setFileStats(std::lock_guard const&) REQUIRES(mutex_); - - // Verify this ledger by walking its SHAMaps and verifying its Merkle trees - // Every node object verified will be stored in the deterministic shard - [[nodiscard]] bool - verifyLedger( - std::shared_ptr const& ledger, - std::shared_ptr const& next, - std::shared_ptr const& dShard) const; - - // Fetches from backend and log errors based on status codes - [[nodiscard]] std::shared_ptr - verifyFetch(uint256 const& hash) const; - - // Open databases if they are closed - [[nodiscard]] Shard::Count - makeBackendCount(); - - // Invoke a callback on the supplied session parameter - template - bool - callForSQL( - std::function const& callback, - LockedSociSession&& db) - { - auto const scopedCount{makeBackendCount()}; - if (!scopedCount) - return false; - - return doCallForSQL(callback, std::move(db)); - } - - // Invoke a callback that accepts a SQLite session parameter - bool - doCallForSQL( - std::function const& callback, - LockedSociSession&& db); - - // Invoke a callback that accepts a SQLite session and the - // shard index as parameters - bool - doCallForSQL( - std::function< - bool(soci::session& session, std::uint32_t shardIndex)> const& - callback, - LockedSociSession&& db); -}; - -} // namespace NodeStore -} // namespace ripple - -#endif diff --git a/src/xrpld/nodestore/detail/ShardInfo.cpp b/src/xrpld/nodestore/detail/ShardInfo.cpp deleted file mode 100644 index 43a9d484900..00000000000 --- a/src/xrpld/nodestore/detail/ShardInfo.cpp +++ /dev/null @@ -1,136 +0,0 @@ -//------------------------------------------------------------------------------ -/* - This file is part of rippled: https://github.com/ripple/rippled - Copyright (c) 2020 Ripple Labs Inc. - - Permission to use, copy, modify, and/or distribute this software for any - purpose with or without fee is hereby granted, provided that the above - copyright notice and this permission notice appear in all copies. - - THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. -*/ -//============================================================================== - -#include -#include -#include -#include -#include - -namespace ripple { -namespace NodeStore { - -std::string -ShardInfo::finalizedToString() const -{ - if (!finalized_.empty()) - return ripple::to_string(finalized_); - return {}; -} - -std::string -ShardInfo::incompleteToString() const -{ - std::string result; - if (!incomplete_.empty()) - { - for (auto const& [shardIndex, incomplete] : incomplete_) - { - result += std::to_string(shardIndex) + ":" + - std::to_string(incomplete.percentProgress()) + ","; - } - result.pop_back(); - } - - return result; -} - -bool -ShardInfo::update( - std::uint32_t shardIndex, - ShardState state, - std::uint32_t percentProgress) -{ - if (state == ShardState::finalized) - { - if (boost::icl::contains(finalized_, shardIndex)) - return false; - - finalized_.insert(shardIndex); - return true; - } - - return incomplete_.emplace(shardIndex, Incomplete(state, percentProgress)) - .second; -} - -protocol::TMPeerShardInfoV2 -ShardInfo::makeMessage(Application& app) -{ - protocol::TMPeerShardInfoV2 message; - Serializer s; - s.add32(HashPrefix::shardInfo); - - // Set the message creation time - msgTimestamp_ = app.timeKeeper().now(); - { - auto const timestamp{msgTimestamp_.time_since_epoch().count()}; - message.set_timestamp(timestamp); - s.add32(timestamp); - } - - if (!incomplete_.empty()) - { - message.mutable_incomplete()->Reserve(incomplete_.size()); - for (auto const& [shardIndex, incomplete] : incomplete_) - { - auto tmIncomplete{message.add_incomplete()}; - - tmIncomplete->set_shardindex(shardIndex); - s.add32(shardIndex); - - static_assert(std::is_same_v< - std::underlying_type_t, - std::uint32_t>); - auto const state{static_cast(incomplete.state())}; - tmIncomplete->set_state(state); - s.add32(state); - - // Set progress if greater than zero - auto const percentProgress{incomplete.percentProgress()}; - if (percentProgress > 0) - { - tmIncomplete->set_progress(percentProgress); - s.add32(percentProgress); - } - } - } - - if (!finalized_.empty()) - { - auto const str{ripple::to_string(finalized_)}; - message.set_finalized(str); - s.addRaw(str.data(), str.size()); - } - - // Set the public key - auto const& publicKey{app.nodeIdentity().first}; - message.set_publickey(publicKey.data(), publicKey.size()); - - // Create a digital signature using the node private key - auto const signature{sign(publicKey, app.nodeIdentity().second, s.slice())}; - - // Set the digital signature - message.set_signature(signature.data(), signature.size()); - - return message; -} - -} // namespace NodeStore -} // namespace ripple diff --git a/src/xrpld/nodestore/detail/TaskQueue.cpp b/src/xrpld/nodestore/detail/TaskQueue.cpp deleted file mode 100644 index 6062138c60f..00000000000 --- a/src/xrpld/nodestore/detail/TaskQueue.cpp +++ /dev/null @@ -1,76 +0,0 @@ -//------------------------------------------------------------------------------ -/* - This file is part of rippled: https://github.com/ripple/rippled - Copyright (c) 2012, 2019 Ripple Labs Inc. 
- - Permission to use, copy, modify, and/or distribute this software for any - purpose with or without fee is hereby granted, provided that the above - copyright notice and this permission notice appear in all copies. - - THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. -*/ -//============================================================================== - -#include - -#include - -namespace ripple { -namespace NodeStore { - -TaskQueue::TaskQueue() : workers_(*this, nullptr, "Shard store taskQueue", 1) -{ -} - -void -TaskQueue::stop() -{ - workers_.stop(); -} - -void -TaskQueue::addTask(std::function task) -{ - { - std::lock_guard lock{mutex_}; - tasks_.emplace(std::move(task)); - } - workers_.addTask(); -} - -size_t -TaskQueue::size() const -{ - std::lock_guard lock{mutex_}; - return tasks_.size() + processing_; -} - -void -TaskQueue::processTask(int instance) -{ - std::function task; - - { - std::lock_guard lock{mutex_}; - - assert(!tasks_.empty()); - task = std::move(tasks_.front()); - tasks_.pop(); - - ++processing_; - } - - task(); - - std::lock_guard lock{mutex_}; - --processing_; -} - -} // namespace NodeStore -} // namespace ripple diff --git a/src/xrpld/nodestore/detail/TaskQueue.h b/src/xrpld/nodestore/detail/TaskQueue.h deleted file mode 100644 index 8a743ff6016..00000000000 --- a/src/xrpld/nodestore/detail/TaskQueue.h +++ /dev/null @@ -1,64 +0,0 @@ -//------------------------------------------------------------------------------ -/* - This file is part of rippled: https://github.com/ripple/rippled - Copyright (c) 2012, 2019 Ripple Labs Inc. - - Permission to use, copy, modify, and/or distribute this software for any - purpose with or without fee is hereby granted, provided that the above - copyright notice and this permission notice appear in all copies. - - THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
-*/ -//============================================================================== - -#ifndef RIPPLE_NODESTORE_TASKQUEUE_H_INCLUDED -#define RIPPLE_NODESTORE_TASKQUEUE_H_INCLUDED - -#include - -#include -#include - -namespace ripple { -namespace NodeStore { - -class TaskQueue : private Workers::Callback -{ -public: - TaskQueue(); - - void - stop(); - - /** Adds a task to the queue - - @param task std::function with signature void() - */ - void - addTask(std::function task); - - /** Return the queue size - */ - [[nodiscard]] size_t - size() const; - -private: - mutable std::mutex mutex_; - Workers workers_; - std::queue> tasks_; - std::uint64_t processing_{0}; - - void - processTask(int instance) override; -}; - -} // namespace NodeStore -} // namespace ripple - -#endif diff --git a/src/xrpld/overlay/Overlay.h b/src/xrpld/overlay/Overlay.h index 1a3362d386f..b9550ba2ef4 100644 --- a/src/xrpld/overlay/Overlay.h +++ b/src/xrpld/overlay/Overlay.h @@ -219,15 +219,6 @@ class Overlay : public beast::PropertyStream::Source virtual std::uint64_t getPeerDisconnectCharges() const = 0; - /** Returns information reported to the crawl shard RPC command. - - @param includePublicKey include peer public keys in the result. - @param hops the maximum jumps the crawler will attempt. - The number of hops achieved is not guaranteed. - */ - virtual Json::Value - crawlShards(bool includePublicKey, std::uint32_t hops) = 0; - /** Returns the ID of the network this server is configured for, if any. The ID is just a numerical identifier, with the IDs 0, 1 and 2 used to diff --git a/src/xrpld/overlay/Peer.h b/src/xrpld/overlay/Peer.h index 81c04f7206c..82ed2c2481a 100644 --- a/src/xrpld/overlay/Peer.h +++ b/src/xrpld/overlay/Peer.h @@ -32,9 +32,6 @@ namespace Resource { class Charge; } -// Maximum hops to relay the peer shard info request -static constexpr std::uint32_t relayLimit = 3; - enum class ProtocolFeature { ValidatorListPropagation, ValidatorList2Propagation, diff --git a/src/xrpld/overlay/detail/Message.cpp b/src/xrpld/overlay/detail/Message.cpp index e19d718c73d..71917db0506 100644 --- a/src/xrpld/overlay/detail/Message.cpp +++ b/src/xrpld/overlay/detail/Message.cpp @@ -94,13 +94,9 @@ Message::compress() case protocol::mtSTATUS_CHANGE: case protocol::mtHAVE_SET: case protocol::mtVALIDATION: - case protocol::mtGET_PEER_SHARD_INFO: - case protocol::mtPEER_SHARD_INFO: case protocol::mtPROOF_PATH_REQ: case protocol::mtPROOF_PATH_RESPONSE: case protocol::mtREPLAY_DELTA_REQ: - case protocol::mtGET_PEER_SHARD_INFO_V2: - case protocol::mtPEER_SHARD_INFO_V2: case protocol::mtHAVE_TRANSACTIONS: break; } diff --git a/src/xrpld/overlay/detail/OverlayImpl.cpp b/src/xrpld/overlay/detail/OverlayImpl.cpp index 1978a2617aa..970873007c2 100644 --- a/src/xrpld/overlay/detail/OverlayImpl.cpp +++ b/src/xrpld/overlay/detail/OverlayImpl.cpp @@ -24,7 +24,6 @@ #include #include #include -#include #include #include #include @@ -679,103 +678,6 @@ OverlayImpl::reportTraffic( m_traffic.addCount(cat, isInbound, number); } -Json::Value -OverlayImpl::crawlShards(bool includePublicKey, std::uint32_t relays) -{ - using namespace std::chrono; - - Json::Value jv(Json::objectValue); - - // Add shard info from this server to json result - if (auto shardStore = app_.getShardStore()) - { - if (includePublicKey) - jv[jss::public_key] = - toBase58(TokenType::NodePublic, app_.nodeIdentity().first); - - auto const shardInfo{shardStore->getShardInfo()}; - if (!shardInfo->finalized().empty()) - jv[jss::complete_shards] = 
shardInfo->finalizedToString(); - if (!shardInfo->incomplete().empty()) - jv[jss::incomplete_shards] = shardInfo->incompleteToString(); - } - - if (relays == 0 || size() == 0) - return jv; - - { - protocol::TMGetPeerShardInfoV2 tmGPS; - tmGPS.set_relays(relays); - - // Wait if a request is in progress - std::unique_lock csLock{csMutex_}; - if (!csIDs_.empty()) - csCV_.wait(csLock); - - { - std::lock_guard lock{mutex_}; - for (auto const& id : ids_) - csIDs_.emplace(id.first); - } - - // Request peer shard info - foreach(send_always(std::make_shared( - tmGPS, protocol::mtGET_PEER_SHARD_INFO_V2))); - - if (csCV_.wait_for(csLock, seconds(60)) == std::cv_status::timeout) - { - csIDs_.clear(); - csCV_.notify_all(); - } - } - - // Combine shard info from peers - hash_map peerShardInfo; - for_each([&](std::shared_ptr&& peer) { - auto const psi{peer->getPeerShardInfos()}; - for (auto const& [publicKey, shardInfo] : psi) - { - auto const it{peerShardInfo.find(publicKey)}; - if (it == peerShardInfo.end()) - peerShardInfo.emplace(publicKey, shardInfo); - else if (shardInfo.msgTimestamp() > it->second.msgTimestamp()) - it->second = shardInfo; - } - }); - - // Add shard info to json result - if (!peerShardInfo.empty()) - { - auto& av = jv[jss::peers] = Json::Value(Json::arrayValue); - for (auto const& [publicKey, shardInfo] : peerShardInfo) - { - auto& pv{av.append(Json::Value(Json::objectValue))}; - if (includePublicKey) - { - pv[jss::public_key] = - toBase58(TokenType::NodePublic, publicKey); - } - - if (!shardInfo.finalized().empty()) - pv[jss::complete_shards] = shardInfo.finalizedToString(); - if (!shardInfo.incomplete().empty()) - pv[jss::incomplete_shards] = shardInfo.incompleteToString(); - } - } - - return jv; -} - -void -OverlayImpl::endOfPeerChain(std::uint32_t id) -{ - // Notify threads if all peers have received a reply from all peer chains - std::lock_guard csLock{csMutex_}; - csIDs_.erase(id); - if (csIDs_.empty()) - csCV_.notify_all(); -} - /** The number of active peers on the network Active peers are only those peers that have completed the handshake and are running the Ripple protocol. 
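// Sketch of the merge rule the removed crawlShards() applied when combining
// peer reports: keep one entry per node public key, preferring the report
// with the newest message timestamp. Key and Report are simplified stand-ins
// for PublicKey and NodeStore::ShardInfo.
#include <cstdint>
#include <map>
#include <string>

using Key = std::string;

struct Report
{
    std::int64_t timestamp;
};

inline void
merge(std::map<Key, Report>& combined, Key const& key, Report const& report)
{
    auto const [it, inserted] = combined.try_emplace(key, report);
    if (!inserted && report.timestamp > it->second.timestamp)
        it->second = report;  // a newer report replaces the older one
}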
@@ -833,17 +735,6 @@ OverlayImpl::getOverlayInfo() if (minSeq != 0 || maxSeq != 0) pv[jss::complete_ledgers] = std::to_string(minSeq) + "-" + std::to_string(maxSeq); - - auto const peerShardInfos{sp->getPeerShardInfos()}; - auto const it{peerShardInfos.find(sp->getNodePublic())}; - if (it != peerShardInfos.end()) - { - auto const& shardInfo{it->second}; - if (!shardInfo.finalized().empty()) - pv[jss::complete_shards] = shardInfo.finalizedToString(); - if (!shardInfo.incomplete().empty()) - pv[jss::incomplete_shards] = shardInfo.incompleteToString(); - } }); return jv; diff --git a/src/xrpld/overlay/detail/OverlayImpl.h b/src/xrpld/overlay/detail/OverlayImpl.h index 1934a7c94c8..a50dfc5e905 100644 --- a/src/xrpld/overlay/detail/OverlayImpl.h +++ b/src/xrpld/overlay/detail/OverlayImpl.h @@ -119,12 +119,6 @@ class OverlayImpl : public Overlay, public reduce_relay::SquelchHandler std::atomic peerDisconnects_{0}; std::atomic peerDisconnectsCharges_{0}; - // 'cs' = crawl shards - std::mutex csMutex_; - std::condition_variable csCV_; - // Peer IDs expecting to receive a last link notification - std::set csIDs_; - reduce_relay::Slots slots_; // Transaction reduce-relay metrics @@ -392,16 +386,6 @@ class OverlayImpl : public Overlay, public reduce_relay::SquelchHandler return setup_.networkID; } - Json::Value - crawlShards(bool includePublicKey, std::uint32_t relays) override; - - /** Called when the reply from the last peer in a peer chain is received. - - @param id peer id that received the shard info. - */ - void - endOfPeerChain(std::uint32_t id); - /** Updates message count for validator/peer. Sends TMSquelch if the number * of messages for N peers reaches threshold T. A message is counted * if a peer receives the message for the first time and if diff --git a/src/xrpld/overlay/detail/PeerImp.cpp b/src/xrpld/overlay/detail/PeerImp.cpp index 96f793b8d80..86e336f850b 100644 --- a/src/xrpld/overlay/detail/PeerImp.cpp +++ b/src/xrpld/overlay/detail/PeerImp.cpp @@ -28,7 +28,6 @@ #include #include #include -#include #include #include #include @@ -521,17 +520,6 @@ PeerImp::hasLedger(uint256 const& hash, std::uint32_t seq) const recentLedgers_.end()) return true; } - - if (seq >= app_.getNodeStore().earliestLedgerSeq()) - { - std::lock_guard lock{shardInfoMutex_}; - auto const it{shardInfos_.find(publicKey_)}; - if (it != shardInfos_.end()) - { - auto const shardIndex{app_.getNodeStore().seqToShardIndex(seq)}; - return boost::icl::contains(it->second.finalized(), shardIndex); - } - } return false; } @@ -626,13 +614,6 @@ PeerImp::fail(std::string const& name, error_code ec) close(); } -hash_map const -PeerImp::getPeerShardInfos() const -{ - std::lock_guard l{shardInfoMutex_}; - return shardInfos_; -} - void PeerImp::gracefulClose() { @@ -878,11 +859,6 @@ PeerImp::doProtocolStart() if (auto m = overlay_.getManifestsMessage()) send(m); - // Request shard info from peer - protocol::TMGetPeerShardInfoV2 tmGPS; - tmGPS.set_relays(0); - send(std::make_shared(tmGPS, protocol::mtGET_PEER_SHARD_INFO_V2)); - setTimer(); } @@ -1175,294 +1151,6 @@ PeerImp::onMessage(std::shared_ptr const& m) app_.getFeeTrack().setClusterFee(clusterFee); } -void -PeerImp::onMessage(std::shared_ptr const& m) -{ - // DEPRECATED -} - -void -PeerImp::onMessage(std::shared_ptr const& m) -{ - // DEPRECATED -} - -void -PeerImp::onMessage(std::shared_ptr const& m) -{ - auto badData = [&](std::string msg) { - fee_ = Resource::feeBadData; - JLOG(p_journal_.warn()) << msg; - }; - - // Verify relays - if (m->relays() > relayLimit) - return 
badData("Invalid relays"); - - // Verify peer chain - // The peer chain should not contain this node's public key - // nor the public key of the sending peer - std::set pubKeyChain; - pubKeyChain.insert(app_.nodeIdentity().first); - pubKeyChain.insert(publicKey_); - - auto const peerChainSz{m->peerchain_size()}; - if (peerChainSz > 0) - { - if (peerChainSz > relayLimit) - return badData("Invalid peer chain size"); - - if (peerChainSz + m->relays() > relayLimit) - return badData("Invalid relays and peer chain size"); - - for (int i = 0; i < peerChainSz; ++i) - { - auto const slice{makeSlice(m->peerchain(i).publickey())}; - - // Verify peer public key - if (!publicKeyType(slice)) - return badData("Invalid peer public key"); - - // Verify peer public key is unique in the peer chain - if (!pubKeyChain.emplace(slice).second) - return badData("Invalid peer public key"); - } - } - - // Reply with shard info this node may have - if (auto shardStore = app_.getShardStore()) - { - auto reply{shardStore->getShardInfo()->makeMessage(app_)}; - if (peerChainSz > 0) - *(reply.mutable_peerchain()) = m->peerchain(); - send(std::make_shared(reply, protocol::mtPEER_SHARD_INFO_V2)); - } - - if (m->relays() == 0) - return; - - // Charge originating peer a fee for requesting relays - if (peerChainSz == 0) - fee_ = Resource::feeMediumBurdenPeer; - - // Add peer to the peer chain - m->add_peerchain()->set_publickey(publicKey_.data(), publicKey_.size()); - - // Relay the request to peers, exclude the peer chain - m->set_relays(m->relays() - 1); - overlay_.foreach(send_if_not( - std::make_shared(*m, protocol::mtGET_PEER_SHARD_INFO_V2), - [&](std::shared_ptr const& peer) { - return pubKeyChain.find(peer->getNodePublic()) != pubKeyChain.end(); - })); -} - -void -PeerImp::onMessage(std::shared_ptr const& m) -{ - // Find the earliest and latest shard indexes - auto const& db{app_.getNodeStore()}; - auto const earliestShardIndex{db.earliestShardIndex()}; - auto const latestShardIndex{[&]() -> std::optional { - auto const curLedgerSeq{app_.getLedgerMaster().getCurrentLedgerIndex()}; - if (curLedgerSeq >= db.earliestLedgerSeq()) - return db.seqToShardIndex(curLedgerSeq); - return std::nullopt; - }()}; - - auto badData = [&](std::string msg) { - fee_ = Resource::feeBadData; - JLOG(p_journal_.warn()) << msg; - }; - - // Used to create a digest and verify the message signature - Serializer s; - s.add32(HashPrefix::shardInfo); - - // Verify message creation time - NodeStore::ShardInfo shardInfo; - { - auto const timestamp{ - NetClock::time_point{std::chrono::seconds{m->timestamp()}}}; - auto const now{app_.timeKeeper().now()}; - if (timestamp > (now + 5s)) - return badData("Invalid timestamp"); - - // Check if stale - using namespace std::chrono_literals; - if (timestamp < (now - 5min)) - return badData("Stale timestamp"); - - s.add32(m->timestamp()); - shardInfo.setMsgTimestamp(timestamp); - } - - // Verify incomplete shards - auto const numIncomplete{m->incomplete_size()}; - if (numIncomplete > 0) - { - if (latestShardIndex && numIncomplete > *latestShardIndex) - return badData("Invalid number of incomplete shards"); - - // Verify each incomplete shard - for (int i = 0; i < numIncomplete; ++i) - { - auto const& incomplete{m->incomplete(i)}; - auto const shardIndex{incomplete.shardindex()}; - - // Verify shard index - if (shardIndex < earliestShardIndex || - (latestShardIndex && shardIndex > latestShardIndex)) - { - return badData("Invalid incomplete shard index"); - } - s.add32(shardIndex); - - // Verify state - auto const 
state{static_cast(incomplete.state())}; - switch (state) - { - // Incomplete states - case ShardState::acquire: - case ShardState::complete: - case ShardState::finalizing: - case ShardState::queued: - break; - - // case ShardState::finalized: - default: - return badData("Invalid incomplete shard state"); - } - s.add32(incomplete.state()); - - // Verify progress - std::uint32_t progress{0}; - if (incomplete.has_progress()) - { - progress = incomplete.progress(); - if (progress < 1 || progress > 100) - return badData("Invalid incomplete shard progress"); - s.add32(progress); - } - - // Verify each incomplete shard is unique - if (!shardInfo.update(shardIndex, state, progress)) - return badData("Invalid duplicate incomplete shards"); - } - } - - // Verify finalized shards - if (m->has_finalized()) - { - auto const& str{m->finalized()}; - if (str.empty()) - return badData("Invalid finalized shards"); - - if (!shardInfo.setFinalizedFromString(str)) - return badData("Invalid finalized shard indexes"); - - auto const& finalized{shardInfo.finalized()}; - auto const numFinalized{boost::icl::length(finalized)}; - if (numFinalized == 0 || - boost::icl::first(finalized) < earliestShardIndex || - (latestShardIndex && - boost::icl::last(finalized) > latestShardIndex)) - { - return badData("Invalid finalized shard indexes"); - } - - if (latestShardIndex && - (numFinalized + numIncomplete) > *latestShardIndex) - { - return badData("Invalid number of finalized and incomplete shards"); - } - - s.addRaw(str.data(), str.size()); - } - - // Verify public key - auto slice{makeSlice(m->publickey())}; - if (!publicKeyType(slice)) - return badData("Invalid public key"); - - // Verify peer public key isn't this nodes's public key - PublicKey const publicKey(slice); - if (publicKey == app_.nodeIdentity().first) - return badData("Invalid public key"); - - // Verify signature - if (!verify(publicKey, s.slice(), makeSlice(m->signature()), false)) - return badData("Invalid signature"); - - // Forward the message if a peer chain exists - auto const peerChainSz{m->peerchain_size()}; - if (peerChainSz > 0) - { - // Verify peer chain - if (peerChainSz > relayLimit) - return badData("Invalid peer chain size"); - - // The peer chain should not contain this node's public key - // nor the public key of the sending peer - std::set pubKeyChain; - pubKeyChain.insert(app_.nodeIdentity().first); - pubKeyChain.insert(publicKey_); - - for (int i = 0; i < peerChainSz; ++i) - { - // Verify peer public key - slice = makeSlice(m->peerchain(i).publickey()); - if (!publicKeyType(slice)) - return badData("Invalid peer public key"); - - // Verify peer public key is unique in the peer chain - if (!pubKeyChain.emplace(slice).second) - return badData("Invalid peer public key"); - } - - // If last peer in the chain is connected, relay the message - PublicKey const peerPubKey( - makeSlice(m->peerchain(peerChainSz - 1).publickey())); - if (auto peer = overlay_.findPeerByPublicKey(peerPubKey)) - { - m->mutable_peerchain()->RemoveLast(); - peer->send( - std::make_shared(*m, protocol::mtPEER_SHARD_INFO_V2)); - JLOG(p_journal_.trace()) - << "Relayed TMPeerShardInfoV2 from peer IP " - << remote_address_.address().to_string() << " to peer IP " - << peer->getRemoteAddress().to_string(); - } - else - { - // Peer is no longer available so the relay ends - JLOG(p_journal_.info()) << "Unable to relay peer shard info"; - } - } - - JLOG(p_journal_.trace()) - << "Consumed TMPeerShardInfoV2 originating from public key " - << toBase58(TokenType::NodePublic, 
publicKey) << " finalized shards[" - << ripple::to_string(shardInfo.finalized()) << "] incomplete shards[" - << (shardInfo.incomplete().empty() ? "empty" - : shardInfo.incompleteToString()) - << "]"; - - // Consume the message - { - std::lock_guard lock{shardInfoMutex_}; - auto const it{shardInfos_.find(publicKey_)}; - if (it == shardInfos_.end()) - shardInfos_.emplace(publicKey, std::move(shardInfo)); - else if (shardInfo.msgTimestamp() > it->second.msgTimestamp()) - it->second = std::move(shardInfo); - } - - // Notify overlay a reply was received from the last peer in this chain - if (peerChainSz == 0) - overlay_.endOfPeerChain(id_); -} - void PeerImp::onMessage(std::shared_ptr const& m) { @@ -1659,13 +1347,6 @@ PeerImp::onMessage(std::shared_ptr const& m) if (m->has_ledgerseq()) { auto const ledgerSeq{m->ledgerseq()}; - // Verifying the network's earliest ledger only pertains to shards. - if (app_.getShardStore() && - ledgerSeq < app_.getNodeStore().earliestLedgerSeq()) - { - return badData( - "Invalid ledger sequence " + std::to_string(ledgerSeq)); - } // Check if within a reasonable range using namespace std::chrono_literals; @@ -1835,14 +1516,6 @@ PeerImp::onMessage(std::shared_ptr const& m) } else { - // Verifying the network's earliest ledger only pertains to shards. - if (app_.getShardStore() && - ledgerSeq < app_.getNodeStore().earliestLedgerSeq()) - { - return badData( - "Invalid ledger sequence " + std::to_string(ledgerSeq)); - } - // Check if within a reasonable range using namespace std::chrono_literals; if (app_.getLedgerMaster().getValidatedLedgerAge() <= 10s && @@ -2705,14 +2378,6 @@ PeerImp::onMessage(std::shared_ptr const& m) // need to inject the NodeStore interfaces. std::uint32_t seq{obj.has_ledgerseq() ? obj.ledgerseq() : 0}; auto nodeObject{app_.getNodeStore().fetchNodeObject(hash, seq)}; - if (!nodeObject) - { - if (auto shardStore = app_.getShardStore()) - { - if (seq >= shardStore->earliestLedgerSeq()) - nodeObject = shardStore->fetchNodeObject(hash, seq); - } - } if (nodeObject) { protocol::TMIndexedObject& newObj = *reply.add_objects(); @@ -3312,44 +2977,28 @@ PeerImp::getLedger(std::shared_ptr const& m) ledger = app_.getLedgerMaster().getLedgerByHash(ledgerHash); if (!ledger) { - if (m->has_ledgerseq()) + JLOG(p_journal_.trace()) + << "getLedger: Don't have ledger with hash " << ledgerHash; + + if (m->has_querytype() && !m->has_requestcookie()) { - // Attempt to find ledger by sequence in the shard store - if (auto shards = app_.getShardStore()) + // Attempt to relay the request to a peer + if (auto const peer = getPeerWithLedger( + overlay_, + ledgerHash, + m->has_ledgerseq() ? m->ledgerseq() : 0, + this)) { - if (m->ledgerseq() >= shards->earliestLedgerSeq()) - { - ledger = - shards->fetchLedger(ledgerHash, m->ledgerseq()); - } + m->set_requestcookie(id()); + peer->send( + std::make_shared(*m, protocol::mtGET_LEDGER)); + JLOG(p_journal_.debug()) + << "getLedger: Request relayed to peer"; + return ledger; } - } - if (!ledger) - { JLOG(p_journal_.trace()) - << "getLedger: Don't have ledger with hash " << ledgerHash; - - if (m->has_querytype() && !m->has_requestcookie()) - { - // Attempt to relay the request to a peer - if (auto const peer = getPeerWithLedger( - overlay_, - ledgerHash, - m->has_ledgerseq() ? 
m->ledgerseq() : 0, - this)) - { - m->set_requestcookie(id()); - peer->send(std::make_shared( - *m, protocol::mtGET_LEDGER)); - JLOG(p_journal_.debug()) - << "getLedger: Request relayed to peer"; - return ledger; - } - - JLOG(p_journal_.trace()) - << "getLedger: Failed to find peer to relay request"; - } + << "getLedger: Failed to find peer to relay request"; } } } diff --git a/src/xrpld/overlay/detail/PeerImp.h b/src/xrpld/overlay/detail/PeerImp.h index 1c25d8089b8..9c76ddb4db8 100644 --- a/src/xrpld/overlay/detail/PeerImp.h +++ b/src/xrpld/overlay/detail/PeerImp.h @@ -22,7 +22,6 @@ #include #include -#include #include #include #include @@ -163,10 +162,6 @@ class PeerImp : public Peer, // been sent to or received from this peer. hash_map publisherListSequences_; - // Any known shard info from this peer and its sub peers - hash_map shardInfos_; - std::mutex mutable shardInfoMutex_; - Compressed compressionEnabled_ = Compressed::Off; // Queue of transactions' hashes that have not been @@ -415,10 +410,6 @@ class PeerImp : public Peer, void fail(std::string const& reason); - // Return any known shard info from this peer and its sub peers - [[nodiscard]] hash_map const - getPeerShardInfos() const; - bool compressionEnabled() const override { @@ -541,14 +532,6 @@ class PeerImp : public Peer, void onMessage(std::shared_ptr const& m); void - onMessage(std::shared_ptr const& m); - void - onMessage(std::shared_ptr const& m); - void - onMessage(std::shared_ptr const& m); - void - onMessage(std::shared_ptr const& m); - void onMessage(std::shared_ptr const& m); void onMessage(std::shared_ptr const& m); diff --git a/src/xrpld/overlay/detail/ProtocolMessage.h b/src/xrpld/overlay/detail/ProtocolMessage.h index b8c6a2c1cf2..8a7512afb31 100644 --- a/src/xrpld/overlay/detail/ProtocolMessage.h +++ b/src/xrpld/overlay/detail/ProtocolMessage.h @@ -88,10 +88,6 @@ protocolMessageName(int type) return "validator_list_collection"; case protocol::mtVALIDATION: return "validation"; - case protocol::mtGET_PEER_SHARD_INFO: - return "get_peer_shard_info"; - case protocol::mtPEER_SHARD_INFO: - return "peer_shard_info"; case protocol::mtGET_OBJECTS: return "get_objects"; case protocol::mtHAVE_TRANSACTIONS: @@ -108,10 +104,6 @@ protocolMessageName(int type) return "replay_delta_request"; case protocol::mtREPLAY_DELTA_RESPONSE: return "replay_delta_response"; - case protocol::mtGET_PEER_SHARD_INFO_V2: - return "get_peer_shard_info_v2"; - case protocol::mtPEER_SHARD_INFO_V2: - return "peer_shard_info_v2"; default: break; } @@ -436,14 +428,6 @@ invokeProtocolMessage( success = detail::invoke( *header, buffers, handler); break; - case protocol::mtGET_PEER_SHARD_INFO: - success = detail::invoke( - *header, buffers, handler); - break; - case protocol::mtPEER_SHARD_INFO: - success = detail::invoke( - *header, buffers, handler); - break; case protocol::mtVALIDATORLIST: success = detail::invoke( *header, buffers, handler); @@ -484,14 +468,6 @@ invokeProtocolMessage( success = detail::invoke( *header, buffers, handler); break; - case protocol::mtGET_PEER_SHARD_INFO_V2: - success = detail::invoke( - *header, buffers, handler); - break; - case protocol::mtPEER_SHARD_INFO_V2: - success = detail::invoke( - *header, buffers, handler); - break; default: handler.onMessageUnknown(header->message_type); success = true; diff --git a/src/xrpld/overlay/detail/TrafficCount.cpp b/src/xrpld/overlay/detail/TrafficCount.cpp index f3e9c137fba..c64a033e3e3 100644 --- a/src/xrpld/overlay/detail/TrafficCount.cpp +++ 
b/src/xrpld/overlay/detail/TrafficCount.cpp @@ -39,12 +39,6 @@ TrafficCount::categorize( if (type == protocol::mtENDPOINTS) return TrafficCount::category::overlay; - if ((type == protocol::mtGET_PEER_SHARD_INFO) || - (type == protocol::mtPEER_SHARD_INFO) || - (type == protocol::mtGET_PEER_SHARD_INFO_V2) || - (type == protocol::mtPEER_SHARD_INFO_V2)) - return TrafficCount::category::shards; - if (type == protocol::mtTRANSACTION) return TrafficCount::category::transaction; diff --git a/src/xrpld/overlay/detail/TrafficCount.h b/src/xrpld/overlay/detail/TrafficCount.h index acd96695257..7dd5cbba901 100644 --- a/src/xrpld/overlay/detail/TrafficCount.h +++ b/src/xrpld/overlay/detail/TrafficCount.h @@ -74,7 +74,6 @@ class TrafficCount proposal, validation, validatorlist, - shards, // shard-related traffic // TMHaveSet message: get_set, // transaction sets we try to get @@ -208,7 +207,6 @@ class TrafficCount {"proposals"}, // category::proposal {"validations"}, // category::validation {"validator_lists"}, // category::validatorlist - {"shards"}, // category::shards {"set_get"}, // category::get_set {"set_share"}, // category::share_set {"ledger_data_Transaction_Set_candidate_get"}, // category::ld_tsc_get diff --git a/src/xrpld/perflog/detail/PerfLogImp.cpp b/src/xrpld/perflog/detail/PerfLogImp.cpp index b9691e05c23..a4773b33e10 100644 --- a/src/xrpld/perflog/detail/PerfLogImp.cpp +++ b/src/xrpld/perflog/detail/PerfLogImp.cpp @@ -20,7 +20,6 @@ #include #include -#include #include #include #include @@ -299,10 +298,7 @@ PerfLogImp::report() report[jss::hostid] = hostname_; report[jss::counters] = counters_.countersJson(); report[jss::nodestore] = Json::objectValue; - if (app_.getShardStore()) - app_.getShardStore()->getCountsJson(report[jss::nodestore]); - else - app_.getNodeStore().getCountsJson(report[jss::nodestore]); + app_.getNodeStore().getCountsJson(report[jss::nodestore]); report[jss::current_activities] = counters_.currentJson(); app_.getOPs().stateAccounting(report); diff --git a/src/xrpld/rpc/ShardArchiveHandler.h b/src/xrpld/rpc/ShardArchiveHandler.h deleted file mode 100644 index 3a407b37976..00000000000 --- a/src/xrpld/rpc/ShardArchiveHandler.h +++ /dev/null @@ -1,176 +0,0 @@ -//------------------------------------------------------------------------------ -/* - This file is part of rippled: https://github.com/ripple/rippled - Copyright (c) 2012, 2013 Ripple Labs Inc. - - Permission to use, copy, modify, and/or distribute this software for any - purpose with or without fee is hereby granted, provided that the above - copyright notice and this permission notice appear in all copies. - - THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
-*/ -//============================================================================== - -#ifndef RIPPLE_RPC_SHARDARCHIVEHANDLER_H_INCLUDED -#define RIPPLE_RPC_SHARDARCHIVEHANDLER_H_INCLUDED - -#include -#include -#include -#include -#include -#include - -#include -#include - -namespace ripple { -#ifdef ENABLE_TESTS -namespace test { -class ShardArchiveHandler_test; -} -#endif // ENABLE_TESTS -namespace RPC { - -/** Handles the download and import of one or more shard archives. */ -class ShardArchiveHandler -{ -public: - using TimerOpCounter = - ClosureCounter; -#ifdef ENABLE_TESTS - friend class test::ShardArchiveHandler_test; -#endif // ENABLE_TESTS - - static boost::filesystem::path - getDownloadDirectory(Config const& config); - - static std::unique_ptr - makeShardArchiveHandler(Application& app); - - // Create a ShardArchiveHandler only if - // the state database is present, indicating - // that recovery is needed. - static std::unique_ptr - tryMakeRecoveryHandler(Application& app); - - explicit ShardArchiveHandler(Application& app); - - virtual ~ShardArchiveHandler() = default; - - [[nodiscard]] bool - init(); - - bool - add(std::uint32_t shardIndex, std::pair&& url); - - /** Starts downloading and importing archives. */ - bool - start(); - - void - stop(); - - void - release(); - -private: - ShardArchiveHandler() = delete; - ShardArchiveHandler(ShardArchiveHandler const&) = delete; - ShardArchiveHandler& - operator=(ShardArchiveHandler&&) = delete; - ShardArchiveHandler& - operator=(ShardArchiveHandler const&) = delete; - - [[nodiscard]] bool - initFromDB(std::lock_guard const&); - - /** Add an archive to be downloaded and imported. - @param shardIndex the index of the shard to be imported. - @param url the location of the archive. - @return `true` if successfully added. - @note Returns false if called while downloading. - */ - bool - add(std::uint32_t shardIndex, - parsedURL&& url, - std::lock_guard const&); - - // Begins the download and import of the next archive. - bool - next(std::lock_guard const& l); - - // Callback used by the downloader to notify completion of a download. - void - complete(boost::filesystem::path dstPath); - - // Extract a downloaded archive and import it into the shard store. - void - process(boost::filesystem::path const& dstPath); - - // Remove the archive being processed. - void - remove(std::lock_guard const&); - - void - doRelease(std::lock_guard const&); - - bool - onClosureFailed( - std::string const& errorMsg, - std::lock_guard const& lock); - - bool - removeAndProceed(std::lock_guard const& lock); - - ///////////////////////////////////////////////// - // m_ is used to protect access to downloader_, - // archives_, process_ and to protect setting and - // destroying sqlDB_. 
- ///////////////////////////////////////////////// - std::mutex mutable m_; - std::atomic_bool stopping_{false}; - std::shared_ptr downloader_; - std::map archives_; - bool process_; - std::unique_ptr sqlDB_; - ///////////////////////////////////////////////// - - Application& app_; - beast::Journal const j_; - boost::filesystem::path const downloadDir_; - boost::asio::basic_waitable_timer timer_; - JobCounter jobCounter_; - TimerOpCounter timerCounter_; - ShardVerificationScheduler verificationScheduler_; -}; - -//////////////////////////////////////////////////////////////////// -// The RecoveryHandler is an empty class that is constructed by -// the application when the ShardArchiveHandler's state database -// is present at application start, indicating that the handler -// needs to perform recovery. However, if recovery isn't needed -// at application start, and the user subsequently submits a request -// to download shards, we construct a ShardArchiveHandler rather -// than a RecoveryHandler to process the request. With this approach, -// type verification can be employed to determine whether the -// ShardArchiveHandler was constructed in recovery mode by the -// application, or as a response to a user submitting a request to -// download shards. -//////////////////////////////////////////////////////////////////// -class RecoveryHandler : public ShardArchiveHandler -{ -public: - explicit RecoveryHandler(Application& app); -}; - -} // namespace RPC -} // namespace ripple - -#endif diff --git a/src/xrpld/rpc/ShardVerificationScheduler.h b/src/xrpld/rpc/ShardVerificationScheduler.h deleted file mode 100644 index bc561381b3e..00000000000 --- a/src/xrpld/rpc/ShardVerificationScheduler.h +++ /dev/null @@ -1,84 +0,0 @@ -//------------------------------------------------------------------------------ -/* - This file is part of rippled: https://github.com/ripple/rippled - Copyright (c) 2020 Ripple Labs Inc. - - Permission to use, copy, modify, and/or distribute this software for any - purpose with or without fee is hereby granted, provided that the above - copyright notice and this permission notice appear in all copies. - - THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. -*/ -//============================================================================== - -#ifndef RIPPLE_RPC_SHARDVERIFICATIONSCHEDULER_H_INCLUDED -#define RIPPLE_RPC_SHARDVERIFICATIONSCHEDULER_H_INCLUDED - -#include -#include - -namespace ripple { -namespace RPC { - -class ShardVerificationScheduler -{ -public: - // This is the signature of the function that the client - // wants to have invoked upon timer expiration. The function - // should check the error code 'ec' and abort the function - // if the timer was cancelled: - // (ec == boost::asio::error::operation_aborted). - // In the body of the function, the client should perform - // the necessary verification. 
- using retryFunction = - std::function; - - ShardVerificationScheduler() = default; - - ShardVerificationScheduler( - std::chrono::seconds retryInterval, - std::uint32_t maxAttempts); - - bool - retry(Application& app, bool shouldHaveHash, retryFunction f); - - void - reset(); - -private: - using waitable_timer = - boost::asio::basic_waitable_timer; - - ///////////////////////////////////////////////////// - // NOTE: retryInterval_ and maxAttempts_ were chosen - // semi-arbitrarily and experimenting with other - // values might prove useful. - ///////////////////////////////////////////////////// - - static constexpr std::chrono::seconds defaultRetryInterval_{60}; - - static constexpr std::uint32_t defaultmaxAttempts_{5}; - - // The number of seconds to wait before retrying - // retrieval of a shard's last ledger hash - const std::chrono::seconds retryInterval_{defaultRetryInterval_}; - - // Maximum attempts to retrieve a shard's last ledger hash - const std::uint32_t maxAttempts_{defaultmaxAttempts_}; - - std::unique_ptr timer_; - - // Number of attempts to retrieve a shard's last ledger hash - std::uint32_t numAttempts_{0}; -}; - -} // namespace RPC -} // namespace ripple - -#endif // RIPPLE_RPC_SHARDVERIFICATIONSCHEDULER_H_INCLUDED diff --git a/src/xrpld/rpc/detail/Handler.cpp b/src/xrpld/rpc/detail/Handler.cpp index 4bac4610229..d4a3fda380f 100644 --- a/src/xrpld/rpc/detail/Handler.cpp +++ b/src/xrpld/rpc/detail/Handler.cpp @@ -99,12 +99,10 @@ Handler const handlerArray[]{ {"channel_verify", byRef(&doChannelVerify), Role::USER, NO_CONDITION}, {"connect", byRef(&doConnect), Role::ADMIN, NO_CONDITION}, {"consensus_info", byRef(&doConsensusInfo), Role::ADMIN, NO_CONDITION}, - {"crawl_shards", byRef(&doCrawlShards), Role::ADMIN, NO_CONDITION}, {"deposit_authorized", byRef(&doDepositAuthorized), Role::USER, NO_CONDITION}, - {"download_shard", byRef(&doDownloadShard), Role::ADMIN, NO_CONDITION}, {"feature", byRef(&doFeature), Role::USER, NO_CONDITION}, {"fee", byRef(&doFee), Role::USER, NEEDS_CURRENT_LEDGER}, {"fetch_info", byRef(&doFetchInfo), Role::ADMIN, NO_CONDITION}, @@ -140,7 +138,6 @@ Handler const handlerArray[]{ {"manifest", byRef(&doManifest), Role::USER, NO_CONDITION}, {"nft_buy_offers", byRef(&doNFTBuyOffers), Role::USER, NO_CONDITION}, {"nft_sell_offers", byRef(&doNFTSellOffers), Role::USER, NO_CONDITION}, - {"node_to_shard", byRef(&doNodeToShard), Role::ADMIN, NO_CONDITION}, {"noripple_check", byRef(&doNoRippleCheck), Role::USER, NO_CONDITION}, {"owner_info", byRef(&doOwnerInfo), Role::USER, NEEDS_CURRENT_LEDGER}, {"peers", byRef(&doPeers), Role::ADMIN, NO_CONDITION}, diff --git a/src/xrpld/rpc/detail/ShardArchiveHandler.cpp b/src/xrpld/rpc/detail/ShardArchiveHandler.cpp deleted file mode 100644 index 1ab8f7767b5..00000000000 --- a/src/xrpld/rpc/detail/ShardArchiveHandler.cpp +++ /dev/null @@ -1,585 +0,0 @@ -//------------------------------------------------------------------------------ -/* - This file is part of rippled: https://github.com/ripple/rippled - Copyright (c) 2012-2014 Ripple Labs Inc. - - Permission to use, copy, modify, and/or distribute this software for any - purpose with or without fee is hereby granted, provided that the above - copyright notice and this permission notice appear in all copies. - - THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. -*/ -//============================================================================== - -#include -#include -#include -#include -#include -#include -#include -#include - -#include - -namespace ripple { -namespace RPC { - -using namespace boost::filesystem; -using namespace std::chrono_literals; - -boost::filesystem::path -ShardArchiveHandler::getDownloadDirectory(Config const& config) -{ - return boost::filesystem::path{ - get(config.section(ConfigSection::shardDatabase()), - "download_path", - get(config.section(ConfigSection::shardDatabase()), - "path", - ""))} / - "download"; -} - -std::unique_ptr -ShardArchiveHandler::makeShardArchiveHandler(Application& app) -{ - return std::make_unique(app); -} - -std::unique_ptr -ShardArchiveHandler::tryMakeRecoveryHandler(Application& app) -{ - auto const downloadDir(getDownloadDirectory(app.config())); - - // Create the handler iff the database - // is present. - if (exists(downloadDir / stateDBName) && - is_regular_file(downloadDir / stateDBName)) - { - return std::make_unique(app); - } - - return nullptr; -} - -ShardArchiveHandler::ShardArchiveHandler(Application& app) - : process_(false) - , app_(app) - , j_(app.journal("ShardArchiveHandler")) - , downloadDir_(getDownloadDirectory(app.config())) - , timer_(app_.getIOService()) - , verificationScheduler_( - std::chrono::seconds(get( - app.config().section(ConfigSection::shardDatabase()), - "shard_verification_retry_interval")), - - get( - app.config().section(ConfigSection::shardDatabase()), - "shard_verification_max_attempts")) -{ - assert(app_.getShardStore()); -} - -bool -ShardArchiveHandler::init() -{ - std::lock_guard lock(m_); - - if (process_ || downloader_ != nullptr || sqlDB_ != nullptr) - { - JLOG(j_.warn()) << "Archives already being processed"; - return false; - } - - // Initialize from pre-existing database - if (exists(downloadDir_ / stateDBName) && - is_regular_file(downloadDir_ / stateDBName)) - { - downloader_ = - make_DatabaseDownloader(app_.getIOService(), app_.config(), j_); - - return initFromDB(lock); - } - - // Fresh initialization - else - { - try - { - create_directories(downloadDir_); - - sqlDB_ = makeArchiveDB(downloadDir_, stateDBName); - } - catch (std::exception const& e) - { - JLOG(j_.error()) - << "exception: " << e.what() << " in function: " << __func__; - - return false; - } - } - - return true; -} - -bool -ShardArchiveHandler::initFromDB(std::lock_guard const& lock) -{ - try - { - using namespace boost::filesystem; - - assert( - exists(downloadDir_ / stateDBName) && - is_regular_file(downloadDir_ / stateDBName)); - - sqlDB_ = makeArchiveDB(downloadDir_, stateDBName); - - readArchiveDB(*sqlDB_, [&](std::string const& url_, int state) { - parsedURL url; - - if (!parseUrl(url, url_)) - { - JLOG(j_.error()) << "Failed to parse url: " << url_; - - return; - } - - add(state, std::move(url), lock); - }); - - // Failed to load anything - // from the state database. 
- if (archives_.empty()) - { - release(); - return false; - } - } - catch (std::exception const& e) - { - JLOG(j_.error()) << "exception: " << e.what() - << " in function: " << __func__; - - return false; - } - - return true; -} - -void -ShardArchiveHandler::stop() -{ - stopping_ = true; - { - std::lock_guard lock(m_); - - if (downloader_) - { - downloader_->stop(); - downloader_.reset(); - } - - timer_.cancel(); - } - - jobCounter_.join( - "ShardArchiveHandler", std::chrono::milliseconds(2000), j_); - - timerCounter_.join( - "ShardArchiveHandler", std::chrono::milliseconds(2000), j_); -} - -bool -ShardArchiveHandler::add( - std::uint32_t shardIndex, - std::pair&& url) -{ - std::lock_guard lock(m_); - - if (!add(shardIndex, std::forward(url.first), lock)) - return false; - - insertArchiveDB(*sqlDB_, shardIndex, url.second); - - return true; -} - -bool -ShardArchiveHandler::add( - std::uint32_t shardIndex, - parsedURL&& url, - std::lock_guard const&) -{ - if (process_) - { - JLOG(j_.error()) << "Download and import already in progress"; - return false; - } - - auto const it{archives_.find(shardIndex)}; - if (it != archives_.end()) - return url == it->second; - - archives_.emplace(shardIndex, std::move(url)); - - return true; -} - -bool -ShardArchiveHandler::start() -{ - std::lock_guard lock(m_); - if (!app_.getShardStore()) - { - JLOG(j_.error()) << "No shard store available"; - return false; - } - if (process_) - { - JLOG(j_.warn()) << "Archives already being processed"; - return false; - } - if (archives_.empty()) - { - JLOG(j_.warn()) << "No archives to process"; - return false; - } - - std::vector shardIndexes(archives_.size()); - std::transform( - archives_.begin(), - archives_.end(), - shardIndexes.begin(), - [](auto const& entry) { return entry.first; }); - - if (!app_.getShardStore()->prepareShards(shardIndexes)) - return false; - - try - { - // Create temp root download directory - create_directories(downloadDir_); - - if (!downloader_) - { - // will throw if can't initialize ssl context - downloader_ = - make_DatabaseDownloader(app_.getIOService(), app_.config(), j_); - } - } - catch (std::exception const& e) - { - JLOG(j_.error()) << "exception: " << e.what(); - return false; - } - - process_ = true; - return next(lock); -} - -void -ShardArchiveHandler::release() -{ - std::lock_guard lock(m_); - doRelease(lock); -} - -bool -ShardArchiveHandler::next(std::lock_guard const& l) -{ - if (stopping_) - return false; - - if (archives_.empty()) - { - doRelease(l); - return false; - } - - auto const shardIndex{archives_.begin()->first}; - - // We use the sequence of the last validated ledger - // to determine whether or not we have stored a ledger - // that comes after the last ledger in this shard. A - // later ledger must be present in order to reliably - // retrieve the hash of the shard's last ledger. 
- std::optional expectedHash; - bool shouldHaveHash = false; - if (auto const seq = app_.getShardStore()->lastLedgerSeq(shardIndex); - (shouldHaveHash = app_.getLedgerMaster().getValidLedgerIndex() > seq)) - { - expectedHash = app_.getLedgerMaster().walkHashBySeq( - seq, InboundLedger::Reason::GENERIC); - } - - if (!expectedHash) - { - auto wrapper = - timerCounter_.wrap([this](boost::system::error_code const& ec) { - if (ec != boost::asio::error::operation_aborted) - { - std::lock_guard lock(m_); - this->next(lock); - } - }); - - if (!wrapper) - return onClosureFailed( - "failed to wrap closure for last ledger confirmation timer", l); - - if (!verificationScheduler_.retry(app_, shouldHaveHash, *wrapper)) - { - JLOG(j_.error()) << "failed to find last ledger hash for shard " - << shardIndex << ", maximum attempts reached"; - - return removeAndProceed(l); - } - - return true; - } - - // Create a temp archive directory at the root - auto const dstDir{downloadDir_ / std::to_string(shardIndex)}; - try - { - create_directory(dstDir); - } - catch (std::exception const& e) - { - JLOG(j_.error()) << "exception: " << e.what(); - return removeAndProceed(l); - } - - // Download the archive. Process in another thread - // to prevent holding up the lock if the downloader - // sleeps. - auto const& url{archives_.begin()->second}; - auto wrapper = jobCounter_.wrap([this, url, dstDir]() { - auto const ssl = (url.scheme == "https"); - auto const defaultPort = ssl ? 443 : 80; - - if (!downloader_->download( - url.domain, - std::to_string(url.port.value_or(defaultPort)), - url.path, - 11, - dstDir / "archive.tar.lz4", - [this](path dstPath) { complete(dstPath); }, - ssl)) - { - std::lock_guard l(m_); - removeAndProceed(l); - } - }); - - if (!wrapper) - return onClosureFailed( - "failed to wrap closure for starting download", l); - - app_.getJobQueue().addJob(jtCLIENT_SHARD, "ShardArchiveHandler", *wrapper); - - return true; -} - -void -ShardArchiveHandler::complete(path dstPath) -{ - if (stopping_) - return; - - { - std::lock_guard lock(m_); - try - { - if (!is_regular_file(dstPath)) - { - auto ar{archives_.begin()}; - JLOG(j_.error()) - << "Downloading shard id " << ar->first << " from URL " - << ar->second.domain << ar->second.path; - removeAndProceed(lock); - return; - } - } - catch (std::exception const& e) - { - JLOG(j_.error()) << "exception: " << e.what(); - removeAndProceed(lock); - return; - } - } - - // Make lambdas mutable captured vars can be moved from - auto wrapper = - jobCounter_.wrap([=, this, dstPath = std::move(dstPath)]() mutable { - if (stopping_) - return; - - // If not synced then defer and retry - auto const mode{app_.getOPs().getOperatingMode()}; - if (mode != OperatingMode::FULL) - { - std::lock_guard lock(m_); - timer_.expires_from_now(static_cast( - (static_cast(OperatingMode::FULL) - - static_cast(mode)) * - 10)); - - auto wrapper = timerCounter_.wrap( - [=, this, dstPath = std::move(dstPath)]( - boost::system::error_code const& ec) mutable { - if (ec != boost::asio::error::operation_aborted) - complete(std::move(dstPath)); - }); - - if (!wrapper) - onClosureFailed( - "failed to wrap closure for operating mode timer", - lock); - else - timer_.async_wait(*wrapper); - } - else - { - process(dstPath); - std::lock_guard lock(m_); - removeAndProceed(lock); - } - }); - - if (!wrapper) - { - if (stopping_) - return; - - JLOG(j_.error()) << "failed to wrap closure for process()"; - - std::lock_guard lock(m_); - removeAndProceed(lock); - } - - // Process in another thread to not hold up 
the IO service - app_.getJobQueue().addJob(jtCLIENT_SHARD, "ShardArchiveHandler", *wrapper); -} - -void -ShardArchiveHandler::process(path const& dstPath) -{ - std::uint32_t shardIndex; - { - std::lock_guard lock(m_); - shardIndex = archives_.begin()->first; - } - - auto const shardDir{dstPath.parent_path() / std::to_string(shardIndex)}; - try - { - // Extract the downloaded archive - extractTarLz4(dstPath, dstPath.parent_path()); - - // The extracted root directory name must match the shard index - if (!is_directory(shardDir)) - { - JLOG(j_.error()) << "Shard " << shardIndex - << " mismatches archive shard directory"; - return; - } - } - catch (std::exception const& e) - { - JLOG(j_.error()) << "exception: " << e.what(); - return; - } - - // Import the shard into the shard store - if (!app_.getShardStore()->importShard(shardIndex, shardDir)) - { - JLOG(j_.error()) << "Importing shard " << shardIndex; - return; - } - - JLOG(j_.debug()) << "Shard " << shardIndex << " downloaded and imported"; -} - -void -ShardArchiveHandler::remove(std::lock_guard const&) -{ - verificationScheduler_.reset(); - - auto const shardIndex{archives_.begin()->first}; - app_.getShardStore()->removePreShard(shardIndex); - archives_.erase(shardIndex); - - deleteFromArchiveDB(*sqlDB_, shardIndex); - - auto const dstDir{downloadDir_ / std::to_string(shardIndex)}; - try - { - remove_all(dstDir); - } - catch (std::exception const& e) - { - JLOG(j_.error()) << "exception: " << e.what(); - } -} - -void -ShardArchiveHandler::doRelease(std::lock_guard const&) -{ - timer_.cancel(); - for (auto const& ar : archives_) - app_.getShardStore()->removePreShard(ar.first); - archives_.clear(); - - dropArchiveDB(*sqlDB_); - - sqlDB_.reset(); - - // Remove temp root download directory - try - { - remove_all(downloadDir_); - } - catch (std::exception const& e) - { - JLOG(j_.error()) << "exception: " << e.what() - << " in function: " << __func__; - } - - downloader_.reset(); - process_ = false; -} - -bool -ShardArchiveHandler::onClosureFailed( - std::string const& errorMsg, - std::lock_guard const& lock) -{ - if (stopping_) - return false; - - JLOG(j_.error()) << errorMsg; - - return removeAndProceed(lock); -} - -bool -ShardArchiveHandler::removeAndProceed(std::lock_guard const& lock) -{ - remove(lock); - return next(lock); -} - -RecoveryHandler::RecoveryHandler(Application& app) : ShardArchiveHandler(app) -{ -} - -} // namespace RPC -} // namespace ripple diff --git a/src/xrpld/rpc/detail/ShardVerificationScheduler.cpp b/src/xrpld/rpc/detail/ShardVerificationScheduler.cpp deleted file mode 100644 index f571e8b29cd..00000000000 --- a/src/xrpld/rpc/detail/ShardVerificationScheduler.cpp +++ /dev/null @@ -1,68 +0,0 @@ -//------------------------------------------------------------------------------ -/* - This file is part of rippled: https://github.com/ripple/rippled - Copyright (c) 2020 Ripple Labs Inc. - - Permission to use, copy, modify, and/or distribute this software for any - purpose with or without fee is hereby granted, provided that the above - copyright notice and this permission notice appear in all copies. - - THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. -*/ -//============================================================================== - -#include -#include - -namespace ripple { -namespace RPC { - -ShardVerificationScheduler::ShardVerificationScheduler( - std::chrono::seconds retryInterval, - std::uint32_t maxAttempts) - : retryInterval_( - (retryInterval == std::chrono::seconds(0) ? defaultRetryInterval_ - : retryInterval)) - , maxAttempts_(maxAttempts == 0 ? defaultmaxAttempts_ : maxAttempts) -{ -} - -bool -ShardVerificationScheduler::retry( - Application& app, - bool shouldHaveHash, - retryFunction f) -{ - if (numAttempts_ >= maxAttempts_) - return false; - - // Retry attempts only count when we - // have a validated ledger with a - // sequence later than the shard's - // last ledger. - if (shouldHaveHash) - ++numAttempts_; - - if (!timer_) - timer_ = std::make_unique(app.getIOService()); - - timer_->expires_from_now(retryInterval_); - timer_->async_wait(f); - - return true; -} - -void -ShardVerificationScheduler::reset() -{ - numAttempts_ = 0; -} - -} // namespace RPC -} // namespace ripple diff --git a/src/xrpld/rpc/handlers/CrawlShards.cpp b/src/xrpld/rpc/handlers/CrawlShards.cpp deleted file mode 100644 index f586d750439..00000000000 --- a/src/xrpld/rpc/handlers/CrawlShards.cpp +++ /dev/null @@ -1,73 +0,0 @@ -//------------------------------------------------------------------------------ -/* - This file is part of rippled: https://github.com/ripple/rippled - Copyright (c) 2018 Ripple Labs Inc. - - Permission to use, copy, modify, and/or distribute this software for any - purpose with or without fee is hereby granted, provided that the above - copyright notice and this permission notice appear in all copies. - - THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. -*/ -//============================================================================== - -#include -#include -#include -#include -#include -#include -#include -#include -#include - -namespace ripple { - -/** RPC command that reports stored shards by nodes. - { - // Determines if the result includes node public key. - // optional, default is false - public_key: - - // The maximum number of peer hops to attempt. 
- // optional, default is zero, maximum is 3 - limit: - } -*/ -Json::Value -doCrawlShards(RPC::JsonContext& context) -{ - if (context.app.config().reporting()) - return rpcError(rpcREPORTING_UNSUPPORTED); - - if (context.role != Role::ADMIN) - return rpcError(rpcNO_PERMISSION); - - std::uint32_t relays{0}; - if (auto const& jv = context.params[jss::limit]) - { - if (!(jv.isUInt() || (jv.isInt() && jv.asInt() >= 0))) - return RPC::expected_field_error(jss::limit, "unsigned integer"); - relays = std::min(jv.asUInt(), relayLimit); - context.loadType = Resource::feeHighBurdenRPC; - } - else - context.loadType = Resource::feeMediumBurdenRPC; - - // Collect shard info from server and peers - bool const includePublicKey{ - context.params.isMember(jss::public_key) && - context.params[jss::public_key].asBool()}; - Json::Value jvResult{ - context.app.overlay().crawlShards(includePublicKey, relays)}; - - return jvResult; -} - -} // namespace ripple diff --git a/src/xrpld/rpc/handlers/DownloadShard.cpp b/src/xrpld/rpc/handlers/DownloadShard.cpp deleted file mode 100644 index 1ec12e0fa66..00000000000 --- a/src/xrpld/rpc/handlers/DownloadShard.cpp +++ /dev/null @@ -1,176 +0,0 @@ -//------------------------------------------------------------------------------ -/* - This file is part of rippled: https://github.com/ripple/rippled - Copyright (c) 2012-2014 Ripple Labs Inc. - - Permission to use, copy, modify, and/or distribute this software for any - purpose with or without fee is hereby granted, provided that the above - copyright notice and this permission notice appear in all copies. - - THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. -*/ -//============================================================================== - -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include - -namespace ripple { - -/** RPC command that downloads and import shard archives. - { - shards: [{index: , url: }] - } - - example: - { - "command": "download_shard", - "shards": [ - {"index": 1, "url": "https://domain.com/1.tar.lz4"}, - {"index": 5, "url": "https://domain.com/5.tar.lz4"} - ] - } -*/ -Json::Value -doDownloadShard(RPC::JsonContext& context) -{ - if (context.app.config().reporting()) - return rpcError(rpcREPORTING_UNSUPPORTED); - - if (context.role != Role::ADMIN) - return rpcError(rpcNO_PERMISSION); - - // The shard store must be configured - auto shardStore{context.app.getShardStore()}; - if (!shardStore) - return rpcError(rpcNOT_ENABLED); - - // Return status update if already downloading - auto preShards{shardStore->getPreShards()}; - if (!preShards.empty()) - { - std::string s{"Download already in progress. 
Shard"}; - if (!std::all_of(preShards.begin(), preShards.end(), ::isdigit)) - s += "s"; - return RPC::makeObjectValue(s + " " + preShards); - } - - if (!context.params.isMember(jss::shards)) - return RPC::missing_field_error(jss::shards); - if (!context.params[jss::shards].isArray() || - context.params[jss::shards].size() == 0) - { - return RPC::expected_field_error(std::string(jss::shards), "an array"); - } - - // Validate shards - static const std::string ext{".tar.lz4"}; - std::map> archives; - for (auto& it : context.params[jss::shards]) - { - // Validate the index - if (!it.isMember(jss::index)) - return RPC::missing_field_error(jss::index); - auto& jv{it[jss::index]}; - if (!(jv.isUInt() || (jv.isInt() && jv.asInt() >= 0))) - { - return RPC::expected_field_error( - std::string(jss::index), "an unsigned integer"); - } - - // Validate the URL - if (!it.isMember(jss::url)) - return RPC::missing_field_error(jss::url); - parsedURL url; - auto unparsedURL = it[jss::url].asString(); - if (!parseUrl(url, unparsedURL) || url.domain.empty() || - url.path.empty()) - { - return RPC::invalid_field_error(jss::url); - } - if (url.scheme != "https" && url.scheme != "http") - return RPC::expected_field_error( - std::string(jss::url), "HTTPS or HTTP"); - - // URL must point to an lz4 compressed tar archive '.tar.lz4' - auto archiveName{url.path.substr(url.path.find_last_of("/\\") + 1)}; - if (archiveName.empty() || archiveName.size() <= ext.size()) - { - return RPC::make_param_error( - "Invalid field '" + std::string(jss::url) + - "', invalid archive name"); - } - if (!boost::iends_with(archiveName, ext)) - { - return RPC::make_param_error( - "Invalid field '" + std::string(jss::url) + - "', invalid archive extension"); - } - - // Check for duplicate indexes - if (!archives - .emplace( - jv.asUInt(), std::make_pair(std::move(url), unparsedURL)) - .second) - { - return RPC::make_param_error( - "Invalid field '" + std::string(jss::index) + - "', duplicate shard ids."); - } - } - - RPC::ShardArchiveHandler* handler = nullptr; - - try - { - handler = context.app.getShardArchiveHandler(); - - if (!handler) - return RPC::make_error( - rpcINTERNAL, "Failed to create ShardArchiveHandler."); - } - catch (std::exception const& e) - { - return RPC::make_error( - rpcINTERNAL, std::string("Failed to start download: ") + e.what()); - } - - for (auto& [index, url] : archives) - { - if (!handler->add(index, std::move(url))) - { - return RPC::make_param_error( - "Invalid field '" + std::string(jss::index) + "', shard id " + - std::to_string(index) + " exists or being acquired"); - } - } - - // Begin downloading. 
- if (!handler->start()) - { - handler->release(); - return rpcError(rpcINTERNAL); - } - - std::string s{"Downloading shard"}; - preShards = shardStore->getPreShards(); - if (!std::all_of(preShards.begin(), preShards.end(), ::isdigit)) - s += "s"; - return RPC::makeObjectValue(s + " " + preShards); -} - -} // namespace ripple diff --git a/src/xrpld/rpc/handlers/GetCounts.cpp b/src/xrpld/rpc/handlers/GetCounts.cpp index 0a2327e117a..035d698a5d4 100644 --- a/src/xrpld/rpc/handlers/GetCounts.cpp +++ b/src/xrpld/rpc/handlers/GetCounts.cpp @@ -25,9 +25,7 @@ #include #include #include -#include #include -#include #include #include #include @@ -113,11 +111,11 @@ getCountsJson(Application& app, int minObjectCount) ret[jss::AL_hit_rate] = app.getAcceptedLedgerCache().getHitRate(); ret[jss::fullbelow_size] = - static_cast(app.getNodeFamily().getFullBelowCache(0)->size()); + static_cast(app.getNodeFamily().getFullBelowCache()->size()); ret[jss::treenode_cache_size] = - app.getNodeFamily().getTreeNodeCache(0)->getCacheSize(); + app.getNodeFamily().getTreeNodeCache()->getCacheSize(); ret[jss::treenode_track_size] = - app.getNodeFamily().getTreeNodeCache(0)->getTrackSize(); + app.getNodeFamily().getTreeNodeCache()->getTrackSize(); std::string uptime; auto s = UptimeClock::now(); @@ -129,27 +127,7 @@ getCountsJson(Application& app, int minObjectCount) textTime(uptime, s, "second", 1s); ret[jss::uptime] = uptime; - if (auto shardStore = app.getShardStore()) - { - auto shardFamily{dynamic_cast(app.getShardFamily())}; - auto const [cacheSz, trackSz] = shardFamily->getTreeNodeCacheSize(); - Json::Value& jv = (ret[jss::shards] = Json::objectValue); - - jv[jss::fullbelow_size] = shardFamily->getFullBelowCacheSize(); - jv[jss::treenode_cache_size] = cacheSz; - jv[jss::treenode_track_size] = trackSz; - ret[jss::write_load] = shardStore->getWriteLoad(); - jv[jss::node_writes] = std::to_string(shardStore->getStoreCount()); - jv[jss::node_reads_total] = shardStore->getFetchTotalCount(); - jv[jss::node_reads_hit] = shardStore->getFetchHitCount(); - jv[jss::node_written_bytes] = - std::to_string(shardStore->getStoreSize()); - jv[jss::node_read_bytes] = shardStore->getFetchSize(); - } - else - { - app.getNodeStore().getCountsJson(ret); - } + app.getNodeStore().getCountsJson(ret); return ret; } diff --git a/src/xrpld/rpc/handlers/Handlers.h b/src/xrpld/rpc/handlers/Handlers.h index 917ad38a741..0085f51465a 100644 --- a/src/xrpld/rpc/handlers/Handlers.h +++ b/src/xrpld/rpc/handlers/Handlers.h @@ -61,8 +61,6 @@ doConsensusInfo(RPC::JsonContext&); Json::Value doDepositAuthorized(RPC::JsonContext&); Json::Value -doDownloadShard(RPC::JsonContext&); -Json::Value doFeature(RPC::JsonContext&); Json::Value doFee(RPC::JsonContext&); @@ -101,8 +99,6 @@ doNFTBuyOffers(RPC::JsonContext&); Json::Value doNFTSellOffers(RPC::JsonContext&); Json::Value -doNodeToShard(RPC::JsonContext&); -Json::Value doNoRippleCheck(RPC::JsonContext&); Json::Value doOwnerInfo(RPC::JsonContext&); @@ -139,8 +135,6 @@ doSign(RPC::JsonContext&); Json::Value doSignFor(RPC::JsonContext&); Json::Value -doCrawlShards(RPC::JsonContext&); -Json::Value doStop(RPC::JsonContext&); Json::Value doSubmit(RPC::JsonContext&); diff --git a/src/xrpld/rpc/handlers/NodeToShard.cpp b/src/xrpld/rpc/handlers/NodeToShard.cpp deleted file mode 100644 index 917086ab0f4..00000000000 --- a/src/xrpld/rpc/handlers/NodeToShard.cpp +++ /dev/null @@ -1,86 +0,0 @@ -//------------------------------------------------------------------------------ -/* - This file is part of rippled: 
https://github.com/ripple/rippled - Copyright (c) 2021 Ripple Labs Inc. - - Permission to use, copy, modify, and/or distribute this software for any - purpose with or without fee is hereby granted, provided that the above - copyright notice and this permission notice appear in all copies. - - THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. -*/ -//============================================================================== - -#include -#include -#include -#include -#include -#include -#include -#include - -namespace ripple { - -// node_to_shard [status|start|stop] -Json::Value -doNodeToShard(RPC::JsonContext& context) -{ - if (context.app.config().reporting()) - return rpcError(rpcREPORTING_UNSUPPORTED); - - // Shard store must be enabled - auto const shardStore = context.app.getShardStore(); - if (!shardStore) - return RPC::make_error(rpcNOT_ENABLED); - - if (!context.params.isMember(jss::action)) - return RPC::missing_field_error(jss::action); - - // Obtain and normalize the action to perform - auto const action = [&context] { - auto value = context.params[jss::action].asString(); - boost::to_lower(value); - - return value; - }(); - - // Vector of allowed actions - std::vector const allowedActions = {"status", "start", "stop"}; - - // Validate the action - if (std::find(allowedActions.begin(), allowedActions.end(), action) == - allowedActions.end()) - return RPC::invalid_field_error(jss::action); - - // Perform the action - if (action == "status") - { - // Get the status of the database import - return shardStore->getDatabaseImportStatus(); - } - else if (action == "start") - { - // Kick off an import - return shardStore->startNodeToShard(); - } - else if (action == "stop") - { - // Halt an import - return shardStore->stopNodeToShard(); - } - else - { - // Shouldn't happen - assert(false); - return rpcError(rpcINTERNAL); - } -} - -} // namespace ripple diff --git a/src/xrpld/rpc/handlers/Tx.cpp b/src/xrpld/rpc/handlers/Tx.cpp index de95044b71f..e32d926e566 100644 --- a/src/xrpld/rpc/handlers/Tx.cpp +++ b/src/xrpld/rpc/handlers/Tx.cpp @@ -100,7 +100,6 @@ doTxPostgres(RPC::Context& context, TxArgs const& args) if (locator.isFound()) { auto start = std::chrono::system_clock::now(); - // The second argument of fetch is ignored when not using shards if (auto obj = context.app.getNodeFamily().db().fetchNodeObject( locator.getNodestoreHash(), locator.getLedgerSequence())) { diff --git a/src/xrpld/shamap/Family.h b/src/xrpld/shamap/Family.h index 730f83483a6..6559ce5059b 100644 --- a/src/xrpld/shamap/Family.h +++ b/src/xrpld/shamap/Family.h @@ -53,28 +53,17 @@ class Family virtual beast::Journal const& journal() = 0; - /** Return a pointer to the Family Full Below Cache - - @param ledgerSeq ledger sequence determines a corresponding shard cache - @note ledgerSeq is used by ShardFamily and ignored by NodeFamily - */ + /** Return a pointer to the Family Full Below Cache */ virtual std::shared_ptr - getFullBelowCache(std::uint32_t ledgerSeq) = 0; - - /** Return a pointer to the Family Tree Node Cache + getFullBelowCache() = 0; - @param ledgerSeq ledger 
sequence determines a corresponding shard cache - @note ledgerSeq is used by ShardFamily and ignored by NodeFamily - */ + /** Return a pointer to the Family Tree Node Cache */ virtual std::shared_ptr - getTreeNodeCache(std::uint32_t ledgerSeq) = 0; + getTreeNodeCache() = 0; virtual void sweep() = 0; - virtual bool - isShardBacked() const = 0; - /** Acquire ledger that has a missing node by ledger sequence * * Throw if in reporting mode. diff --git a/src/xrpld/shamap/NodeFamily.h b/src/xrpld/shamap/NodeFamily.h index c540172c374..4062ea23897 100644 --- a/src/xrpld/shamap/NodeFamily.h +++ b/src/xrpld/shamap/NodeFamily.h @@ -60,18 +60,14 @@ class NodeFamily : public Family return j_; } - bool - isShardBacked() const override - { - return false; - } - - std::shared_ptr getFullBelowCache(std::uint32_t) override + std::shared_ptr + getFullBelowCache() override { return fbCache_; } - std::shared_ptr getTreeNodeCache(std::uint32_t) override + std::shared_ptr + getTreeNodeCache() override { return tnCache_; } diff --git a/src/xrpld/shamap/ShardFamily.h b/src/xrpld/shamap/ShardFamily.h deleted file mode 100644 index 2e8bece6dcf..00000000000 --- a/src/xrpld/shamap/ShardFamily.h +++ /dev/null @@ -1,125 +0,0 @@ -//------------------------------------------------------------------------------ -/* - This file is part of rippled: https://github.com/ripple/rippled - Copyright (c) 2020 Ripple Labs Inc. - - Permission to use, copy, modify, and/or distribute this software for any - purpose with or without fee is hereby granted, provided that the above - copyright notice and this permission notice appear in all copies. - - THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
-*/ -//============================================================================== - -#ifndef RIPPLE_SHAMAP_SHARDFAMILY_H_INCLUDED -#define RIPPLE_SHAMAP_SHARDFAMILY_H_INCLUDED - -#include -#include - -namespace ripple { - -class Application; - -class ShardFamily : public Family -{ -public: - ShardFamily() = delete; - ShardFamily(ShardFamily const&) = delete; - ShardFamily(ShardFamily&&) = delete; - - ShardFamily& - operator=(ShardFamily const&) = delete; - - ShardFamily& - operator=(ShardFamily&&) = delete; - - ShardFamily(Application& app, CollectorManager& cm); - - NodeStore::Database& - db() override - { - return db_; - } - - NodeStore::Database const& - db() const override - { - return db_; - } - - beast::Journal const& - journal() override - { - return j_; - } - - bool - isShardBacked() const override - { - return true; - } - - std::shared_ptr - getFullBelowCache(std::uint32_t ledgerSeq) override; - - /** Return the number of entries in the cache */ - int - getFullBelowCacheSize(); - - std::shared_ptr - getTreeNodeCache(std::uint32_t ledgerSeq) override; - - /** Return a pair where the first item is the number of items cached - and the second item is the number of entries in the cached - */ - std::pair - getTreeNodeCacheSize(); - - void - sweep() override; - - void - reset() override; - - void - missingNodeAcquireBySeq(std::uint32_t seq, uint256 const& nodeHash) - override; - - void - missingNodeAcquireByHash(uint256 const& hash, std::uint32_t seq) override - { - acquire(hash, seq); - } - -private: - Application& app_; - NodeStore::Database& db_; - CollectorManager& cm_; - beast::Journal const j_; - - std::unordered_map> fbCache_; - std::mutex fbCacheMutex_; - - std::unordered_map> tnCache_; - std::mutex tnCacheMutex_; - int const tnTargetSize_; - std::chrono::seconds const tnTargetAge_; - - // Missing node handler - LedgerIndex maxSeq_{0}; - std::mutex maxSeqMutex_; - - void - acquire(uint256 const& hash, std::uint32_t seq); -}; - -} // namespace ripple - -#endif diff --git a/src/xrpld/shamap/detail/SHAMap.cpp b/src/xrpld/shamap/detail/SHAMap.cpp index e17f9346b85..d06ba2a153a 100644 --- a/src/xrpld/shamap/detail/SHAMap.cpp +++ b/src/xrpld/shamap/detail/SHAMap.cpp @@ -1166,7 +1166,7 @@ SHAMap::dump(bool hash) const std::shared_ptr SHAMap::cacheLookup(SHAMapHash const& hash) const { - auto ret = f_.getTreeNodeCache(ledgerSeq_)->fetch(hash.as_uint256()); + auto ret = f_.getTreeNodeCache()->fetch(hash.as_uint256()); assert(!ret || !ret->cowid()); return ret; } @@ -1180,8 +1180,7 @@ SHAMap::canonicalize( assert(node->cowid() == 0); assert(node->getHash() == hash); - f_.getTreeNodeCache(ledgerSeq_) - ->canonicalize_replace_client(hash.as_uint256(), node); + f_.getTreeNodeCache()->canonicalize_replace_client(hash.as_uint256(), node); } void diff --git a/src/xrpld/shamap/detail/SHAMapSync.cpp b/src/xrpld/shamap/detail/SHAMapSync.cpp index 02d548be24e..7235e526560 100644 --- a/src/xrpld/shamap/detail/SHAMapSync.cpp +++ b/src/xrpld/shamap/detail/SHAMapSync.cpp @@ -192,8 +192,7 @@ SHAMap::gmn_ProcessNodes(MissingNodes& mn, MissingNodes::StackEntry& se) } else if ( !backed_ || - !f_.getFullBelowCache(ledgerSeq_) - ->touch_if_exists(childHash.as_uint256())) + !f_.getFullBelowCache()->touch_if_exists(childHash.as_uint256())) { bool pending = false; auto d = descendAsync( @@ -251,8 +250,7 @@ SHAMap::gmn_ProcessNodes(MissingNodes& mn, MissingNodes::StackEntry& se) node->setFullBelowGen(mn.generation_); if (backed_) { - f_.getFullBelowCache(ledgerSeq_) - ->insert(node->getHash().as_uint256()); + 
f_.getFullBelowCache()->insert(node->getHash().as_uint256()); } } @@ -323,7 +321,7 @@ SHAMap::getMissingNodes(int max, SHAMapSyncFilter* filter) max, filter, 512, // number of async reads per pass - f_.getFullBelowCache(ledgerSeq_)->getGeneration()); + f_.getFullBelowCache()->getGeneration()); if (!root_->isInner() || std::static_pointer_cast(root_)->isFullBelow( @@ -580,7 +578,7 @@ SHAMap::addKnownNode( return SHAMapAddNode::duplicate(); } - auto const generation = f_.getFullBelowCache(ledgerSeq_)->getGeneration(); + auto const generation = f_.getFullBelowCache()->getGeneration(); SHAMapNodeID iNodeID; auto iNode = root_.get(); @@ -598,8 +596,7 @@ SHAMap::addKnownNode( } auto childHash = inner->getChildHash(branch); - if (f_.getFullBelowCache(ledgerSeq_) - ->touch_if_exists(childHash.as_uint256())) + if (f_.getFullBelowCache()->touch_if_exists(childHash.as_uint256())) { return SHAMapAddNode::duplicate(); } diff --git a/src/xrpld/shamap/detail/ShardFamily.cpp b/src/xrpld/shamap/detail/ShardFamily.cpp deleted file mode 100644 index aef4c6cde0a..00000000000 --- a/src/xrpld/shamap/detail/ShardFamily.cpp +++ /dev/null @@ -1,198 +0,0 @@ -//------------------------------------------------------------------------------ -/* - This file is part of rippled: https://github.com/ripple/rippled - Copyright (c) 2020 Ripple Labs Inc. - - Permission to use, copy, modify, and/or distribute this software for any - purpose with or without fee is hereby granted, provided that the above - copyright notice and this permission notice appear in all copies. - - THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
-*/ -//============================================================================== - -#include -#include -#include -#include -#include -#include - -namespace ripple { - -static NodeStore::Database& -getShardStore(Application& app) -{ - auto const dbPtr = app.getShardStore(); - assert(dbPtr); - return *dbPtr; -} - -ShardFamily::ShardFamily(Application& app, CollectorManager& cm) - : app_(app) - , db_(getShardStore(app)) - , cm_(cm) - , j_(app.journal("ShardFamily")) - , tnTargetSize_(app.config().getValueFor(SizedItem::treeCacheSize, 0)) - , tnTargetAge_(app.config().getValueFor(SizedItem::treeCacheAge, 0)) -{ -} - -std::shared_ptr -ShardFamily::getFullBelowCache(std::uint32_t ledgerSeq) -{ - auto const shardIndex{app_.getShardStore()->seqToShardIndex(ledgerSeq)}; - std::lock_guard lock(fbCacheMutex_); - if (auto const it{fbCache_.find(shardIndex)}; it != fbCache_.end()) - return it->second; - - // Create a cache for the corresponding shard - auto fbCache{std::make_shared( - "Shard family full below cache shard " + std::to_string(shardIndex), - stopwatch(), - j_, - cm_.collector(), - fullBelowTargetSize, - fullBelowExpiration)}; - return fbCache_.emplace(shardIndex, std::move(fbCache)).first->second; -} - -int -ShardFamily::getFullBelowCacheSize() -{ - size_t sz{0}; - std::lock_guard lock(fbCacheMutex_); - for (auto const& e : fbCache_) - sz += e.second->size(); - return sz; -} - -std::shared_ptr -ShardFamily::getTreeNodeCache(std::uint32_t ledgerSeq) -{ - auto const shardIndex{app_.getShardStore()->seqToShardIndex(ledgerSeq)}; - std::lock_guard lock(tnCacheMutex_); - if (auto const it{tnCache_.find(shardIndex)}; it != tnCache_.end()) - return it->second; - - // Create a cache for the corresponding shard - auto tnCache{std::make_shared( - "Shard family tree node cache shard " + std::to_string(shardIndex), - tnTargetSize_, - tnTargetAge_, - stopwatch(), - j_)}; - return tnCache_.emplace(shardIndex, std::move(tnCache)).first->second; -} - -std::pair -ShardFamily::getTreeNodeCacheSize() -{ - int cacheSz{0}; - int trackSz{0}; - std::lock_guard lock(tnCacheMutex_); - for (auto const& e : tnCache_) - { - cacheSz += e.second->getCacheSize(); - trackSz += e.second->getTrackSize(); - } - return {cacheSz, trackSz}; -} - -void -ShardFamily::sweep() -{ - { - std::lock_guard lock(fbCacheMutex_); - for (auto it = fbCache_.cbegin(); it != fbCache_.cend();) - { - it->second->sweep(); - - // Remove cache if empty - if (it->second->size() == 0) - it = fbCache_.erase(it); - else - ++it; - } - } - - std::lock_guard lock(tnCacheMutex_); - for (auto it = tnCache_.cbegin(); it != tnCache_.cend();) - { - it->second->sweep(); - - // Remove cache if empty - if (it->second->getTrackSize() == 0) - it = tnCache_.erase(it); - else - ++it; - } -} - -void -ShardFamily::reset() -{ - { - std::lock_guard lock(maxSeqMutex_); - maxSeq_ = 0; - } - - { - std::lock_guard lock(fbCacheMutex_); - fbCache_.clear(); - } - - std::lock_guard lock(tnCacheMutex_); - tnCache_.clear(); -} - -void -ShardFamily::missingNodeAcquireBySeq(std::uint32_t seq, uint256 const& nodeHash) -{ - std::ignore = nodeHash; - JLOG(j_.error()) << "Missing node in ledger sequence " << seq; - - std::unique_lock lock(maxSeqMutex_); - if (maxSeq_ == 0) - { - maxSeq_ = seq; - - do - { - // Try to acquire the most recent missing ledger - seq = maxSeq_; - - lock.unlock(); - - // This can invoke the missing node handler - acquire(app_.getLedgerMaster().getHashBySeq(seq), seq); - - lock.lock(); - } while (maxSeq_ != seq); - } - else if (maxSeq_ < seq) - { - // We 
found a more recent ledger with a missing node
-        maxSeq_ = seq;
-    }
-}
-
-void
-ShardFamily::acquire(uint256 const& hash, std::uint32_t seq)
-{
-    if (hash.isNonZero())
-    {
-        JLOG(j_.error()) << "Missing node in " << to_string(hash);
-
-        app_.getInboundLedgers().acquire(
-            hash, seq, InboundLedger::Reason::SHARD);
-    }
-}
-
-}  // namespace ripple

From 0a331ea72e6239b6990cbea419d81af74c6c4895 Mon Sep 17 00:00:00 2001
From: Bronek Kozicki
Date: Mon, 5 Aug 2024 17:05:12 +0100
Subject: [PATCH 11/26] Factor out Transactor::trapTransaction (#5087)

---
 src/xrpld/app/tx/detail/Transactor.cpp | 10 +++++++++-
 src/xrpld/app/tx/detail/Transactor.h   |  2 ++
 2 files changed, 11 insertions(+), 1 deletion(-)

diff --git a/src/xrpld/app/tx/detail/Transactor.cpp b/src/xrpld/app/tx/detail/Transactor.cpp
index 42e9f0677ab..6ae8be8a67f 100644
--- a/src/xrpld/app/tx/detail/Transactor.cpp
+++ b/src/xrpld/app/tx/detail/Transactor.cpp
@@ -825,6 +825,14 @@ Transactor::reset(XRPAmount fee)
     return {ter, fee};
 }
 
+// The sole purpose of this function is to provide a convenient, named
+// location to set a breakpoint, to be used when replaying transactions.
+void
+Transactor::trapTransaction(uint256 txHash) const
+{
+    JLOG(j_.debug()) << "Transaction trapped: " << txHash;
+}
+
 //------------------------------------------------------------------------------
 std::pair<TER, bool>
 Transactor::operator()()
@@ -857,7 +865,7 @@ Transactor::operator()()
     if (auto const& trap = ctx_.app.trapTxID();
         trap && *trap == ctx_.tx.getTransactionID())
     {
-        JLOG(j_.debug()) << "Transaction trapped: " << *trap;
+        trapTransaction(*trap);
     }
 
     auto result = ctx_.preclaimResult;
diff --git a/src/xrpld/app/tx/detail/Transactor.h b/src/xrpld/app/tx/detail/Transactor.h
index 27f22a0eb2e..c587e5e1994 100644
--- a/src/xrpld/app/tx/detail/Transactor.h
+++ b/src/xrpld/app/tx/detail/Transactor.h
@@ -198,6 +198,8 @@ class Transactor
    checkSingleSign(PreclaimContext const& ctx);
    static NotTEC
    checkMultiSign(PreclaimContext const& ctx);
+
+    void trapTransaction(uint256) const;
 };
 
 /** Performs early sanity checks on the txid */

From c19a88fee9cd5b034a2fbcc013416afc10464e35 Mon Sep 17 00:00:00 2001
From: Scott Schurr
Date: Wed, 7 Aug 2024 15:14:19 -0700
Subject: [PATCH 12/26] Address rare corruption of NFTokenPage linked list
 (#4945)

* Add fixNFTokenPageLinks amendment:

It was discovered that under rare circumstances the links between
NFTokenPages could be removed.  If this happens, then the
account_objects and account_nfts RPC commands under-report the
NFTokens owned by an account.

The fixNFTokenPageLinks amendment does the following to address the
problem:

- It fixes the underlying problem so no further broken links should be
  created.
- It adds Invariants so, if such damage were introduced in the future,
  an invariant would stop it.
- It adds a new LedgerStateFix transaction that repairs directories
  that were damaged in this fashion.
- It adds unit tests for all of it.
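
For illustration, here is a minimal jtx-style sketch of how the repair
is expected to be submitted, mirroring the unit tests added below.
("alice" and "owner" are placeholder accounts, and the sketch assumes
owner's NFToken directory actually has broken links; a LedgerStateFix
transaction must also pay more than the base fee, so the tests use the
ledger's reserve increment.)

    using namespace test::jtx;

    Env env{*this, supported_amendments()};  // fixNFTokenPageLinks enabled
    Account const alice("alice");  // account submitting the repair
    Account const owner("owner");  // account whose directory is damaged
    env.fund(XRP(1000), alice, owner);

    // Repair the NFTokenPage links in owner's directory.  If there is
    // nothing to repair, the transaction returns tecFAILED_PROCESSING.
    auto const linkFixFee = drops(env.current()->fees().increment);
    env(ledgerStateFix::nftPageLinks(alice, owner), fee(linkFixFee));
    env.close();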
--- include/xrpl/protocol/Feature.h | 3 +- include/xrpl/protocol/SField.h | 1 + include/xrpl/protocol/TER.h | 1 + include/xrpl/protocol/TxFormats.h | 5 +- include/xrpl/protocol/jss.h | 1 + src/libxrpl/protocol/Feature.cpp | 1 + src/libxrpl/protocol/SField.cpp | 1 + src/libxrpl/protocol/TER.cpp | 1 + src/libxrpl/protocol/TxFormats.cpp | 8 + src/test/app/FixNFTokenPageLinks_test.cpp | 676 ++++++++++++++++ src/test/app/NFTokenBurn_test.cpp | 859 ++++++++++++++++++--- src/test/jtx.h | 1 + src/test/jtx/impl/ledgerStateFix.cpp | 49 ++ src/test/jtx/ledgerStateFix.h | 44 ++ src/test/ledger/Invariants_test.cpp | 157 +++- src/xrpld/app/tx/detail/InvariantCheck.cpp | 49 +- src/xrpld/app/tx/detail/InvariantCheck.h | 2 + src/xrpld/app/tx/detail/LedgerStateFix.cpp | 99 +++ src/xrpld/app/tx/detail/LedgerStateFix.h | 57 ++ src/xrpld/app/tx/detail/NFTokenUtils.cpp | 160 +++- src/xrpld/app/tx/detail/NFTokenUtils.h | 7 + src/xrpld/app/tx/detail/applySteps.cpp | 3 + 22 files changed, 2054 insertions(+), 131 deletions(-) create mode 100644 src/test/app/FixNFTokenPageLinks_test.cpp create mode 100644 src/test/jtx/impl/ledgerStateFix.cpp create mode 100644 src/test/jtx/ledgerStateFix.h create mode 100644 src/xrpld/app/tx/detail/LedgerStateFix.cpp create mode 100644 src/xrpld/app/tx/detail/LedgerStateFix.h diff --git a/include/xrpl/protocol/Feature.h b/include/xrpl/protocol/Feature.h index 7eec46e89eb..a00d6b85c1b 100644 --- a/include/xrpl/protocol/Feature.h +++ b/include/xrpl/protocol/Feature.h @@ -80,7 +80,7 @@ namespace detail { // Feature.cpp. Because it's only used to reserve storage, and determine how // large to make the FeatureBitset, it MAY be larger. It MUST NOT be less than // the actual number of amendments. A LogicError on startup will verify this. -static constexpr std::size_t numFeatures = 78; +static constexpr std::size_t numFeatures = 79; /** Amendments that this server supports and the default voting behavior. 
Whether they are enabled depends on the Rules defined in the validated @@ -371,6 +371,7 @@ extern uint256 const fixReducedOffersV2; extern uint256 const fixEnforceNFTokenTrustline; extern uint256 const fixInnerObjTemplate2; extern uint256 const featureInvariantsV1_1; +extern uint256 const fixNFTokenPageLinks; } // namespace ripple diff --git a/include/xrpl/protocol/SField.h b/include/xrpl/protocol/SField.h index 15aa2272d75..7f54201a4b8 100644 --- a/include/xrpl/protocol/SField.h +++ b/include/xrpl/protocol/SField.h @@ -388,6 +388,7 @@ extern SF_UINT16 const sfHookEmitCount; extern SF_UINT16 const sfHookExecutionIndex; extern SF_UINT16 const sfHookApiVersion; extern SF_UINT16 const sfDiscountedFee; +extern SF_UINT16 const sfLedgerFixType; // 32-bit integers (common) extern SF_UINT32 const sfNetworkID; diff --git a/include/xrpl/protocol/TER.h b/include/xrpl/protocol/TER.h index 335ef8de39a..aae3c7107bd 100644 --- a/include/xrpl/protocol/TER.h +++ b/include/xrpl/protocol/TER.h @@ -182,6 +182,7 @@ enum TEFcodes : TERUnderlyingType { tefTOO_BIG, tefNO_TICKET, tefNFTOKEN_IS_NOT_TRANSFERABLE, + tefINVALID_LEDGER_FIX_TYPE, }; //------------------------------------------------------------------------------ diff --git a/include/xrpl/protocol/TxFormats.h b/include/xrpl/protocol/TxFormats.h index bd5dffd94e9..a3f5cca108c 100644 --- a/include/xrpl/protocol/TxFormats.h +++ b/include/xrpl/protocol/TxFormats.h @@ -190,13 +190,16 @@ enum TxType : std::uint16_t /** This transaction type deletes a DID */ ttDID_DELETE = 50, - /** This transaction type creates an Oracle instance */ ttORACLE_SET = 51, /** This transaction type deletes an Oracle instance */ ttORACLE_DELETE = 52, + /** This transaction type fixes a problem in the ledger state */ + ttLEDGER_STATE_FIX = 53, + + /** This system-generated transaction type is used to update the status of the various amendments. For details, see: https://xrpl.org/amendments.html diff --git a/include/xrpl/protocol/jss.h b/include/xrpl/protocol/jss.h index 84628da286f..e3eda80b44f 100644 --- a/include/xrpl/protocol/jss.h +++ b/include/xrpl/protocol/jss.h @@ -104,6 +104,7 @@ JSS(NFTokenAcceptOffer); // transaction type. JSS(NFTokenCancelOffer); // transaction type. JSS(NFTokenCreateOffer); // transaction type. JSS(NFTokenPage); // ledger type. +JSS(LedgerStateFix); // transaction type. JSS(LPTokenOut); // in: AMM Liquidity Provider deposit tokens JSS(LPTokenIn); // in: AMM Liquidity Provider withdraw tokens JSS(LPToken); // out: AMM Liquidity Provider tokens info diff --git a/src/libxrpl/protocol/Feature.cpp b/src/libxrpl/protocol/Feature.cpp index 87395b7e189..078369bf20c 100644 --- a/src/libxrpl/protocol/Feature.cpp +++ b/src/libxrpl/protocol/Feature.cpp @@ -497,6 +497,7 @@ REGISTER_FEATURE(NFTokenMintOffer, Supported::yes, VoteBehavior::De REGISTER_FIX (fixReducedOffersV2, Supported::yes, VoteBehavior::DefaultNo); REGISTER_FIX (fixEnforceNFTokenTrustline, Supported::yes, VoteBehavior::DefaultNo); REGISTER_FIX (fixInnerObjTemplate2, Supported::yes, VoteBehavior::DefaultNo); +REGISTER_FIX (fixNFTokenPageLinks, Supported::yes, VoteBehavior::DefaultNo); // InvariantsV1_1 will be changes to Supported::yes when all the // invariants expected to be included under it are complete. 
REGISTER_FEATURE(InvariantsV1_1, Supported::no, VoteBehavior::DefaultNo); diff --git a/src/libxrpl/protocol/SField.cpp b/src/libxrpl/protocol/SField.cpp index d56f3983352..f8eb2d6f877 100644 --- a/src/libxrpl/protocol/SField.cpp +++ b/src/libxrpl/protocol/SField.cpp @@ -113,6 +113,7 @@ CONSTRUCT_TYPED_SFIELD(sfHookStateChangeCount, "HookStateChangeCount", UINT16, CONSTRUCT_TYPED_SFIELD(sfHookEmitCount, "HookEmitCount", UINT16, 18); CONSTRUCT_TYPED_SFIELD(sfHookExecutionIndex, "HookExecutionIndex", UINT16, 19); CONSTRUCT_TYPED_SFIELD(sfHookApiVersion, "HookApiVersion", UINT16, 20); +CONSTRUCT_TYPED_SFIELD(sfLedgerFixType, "LedgerFixType", UINT16, 21); // 32-bit integers (common) CONSTRUCT_TYPED_SFIELD(sfNetworkID, "NetworkID", UINT32, 1); diff --git a/src/libxrpl/protocol/TER.cpp b/src/libxrpl/protocol/TER.cpp index f452b05464e..917bbf26a9f 100644 --- a/src/libxrpl/protocol/TER.cpp +++ b/src/libxrpl/protocol/TER.cpp @@ -137,6 +137,7 @@ transResults() MAKE_ERROR(tefTOO_BIG, "Transaction affects too many items."), MAKE_ERROR(tefNO_TICKET, "Ticket is not in ledger."), MAKE_ERROR(tefNFTOKEN_IS_NOT_TRANSFERABLE, "The specified NFToken is not transferable."), + MAKE_ERROR(tefINVALID_LEDGER_FIX_TYPE, "The LedgerFixType field has an invalid value."), MAKE_ERROR(telLOCAL_ERROR, "Local failure."), MAKE_ERROR(telBAD_DOMAIN, "Domain too long."), diff --git a/src/libxrpl/protocol/TxFormats.cpp b/src/libxrpl/protocol/TxFormats.cpp index 71c333dc497..8a93232604e 100644 --- a/src/libxrpl/protocol/TxFormats.cpp +++ b/src/libxrpl/protocol/TxFormats.cpp @@ -505,6 +505,14 @@ TxFormats::TxFormats() {sfOracleDocumentID, soeREQUIRED}, }, commonFields); + + add(jss::LedgerStateFix, + ttLEDGER_STATE_FIX, + { + {sfLedgerFixType, soeREQUIRED}, + {sfOwner, soeOPTIONAL}, + }, + commonFields); } TxFormats const& diff --git a/src/test/app/FixNFTokenPageLinks_test.cpp b/src/test/app/FixNFTokenPageLinks_test.cpp new file mode 100644 index 00000000000..dea6d4569e0 --- /dev/null +++ b/src/test/app/FixNFTokenPageLinks_test.cpp @@ -0,0 +1,676 @@ +//------------------------------------------------------------------------------ +/* + This file is part of rippled: https://github.com/ripple/rippled + Copyright (c) 2024 Ripple Labs Inc. + + Permission to use, copy, modify, and/or distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. +*/ +//============================================================================== + +#include +#include +#include +#include +#include + +namespace ripple { + +class FixNFTokenPageLinks_test : public beast::unit_test::suite +{ + // Helper function that returns the owner count of an account root. + static std::uint32_t + ownerCount(test::jtx::Env const& env, test::jtx::Account const& acct) + { + std::uint32_t ret{0}; + if (auto const sleAcct = env.le(acct)) + ret = sleAcct->at(sfOwnerCount); + return ret; + } + + // Helper function that returns the number of nfts owned by an account. 
+ static std::uint32_t + nftCount(test::jtx::Env& env, test::jtx::Account const& acct) + { + Json::Value params; + params[jss::account] = acct.human(); + params[jss::type] = "state"; + Json::Value nfts = env.rpc("json", "account_nfts", to_string(params)); + return nfts[jss::result][jss::account_nfts].size(); + }; + + // A helper function that generates 96 nfts packed into three pages + // of 32 each. Returns a sorted vector of the NFTokenIDs packed into + // the pages. + std::vector + genPackedTokens(test::jtx::Env& env, test::jtx::Account const& owner) + { + using namespace test::jtx; + + std::vector nfts; + nfts.reserve(96); + + // We want to create fully packed NFT pages. This is a little + // tricky since the system currently in place is inclined to + // assign consecutive tokens to only 16 entries per page. + // + // By manipulating the internal form of the taxon we can force + // creation of NFT pages that are completely full. This lambda + // tells us the taxon value we should pass in in order for the + // internal representation to match the passed in value. + auto internalTaxon = [this, &env]( + Account const& acct, + std::uint32_t taxon) -> std::uint32_t { + std::uint32_t tokenSeq = [this, &env, &acct]() { + auto const le = env.le(acct); + if (BEAST_EXPECT(le)) + return le->at(~sfMintedNFTokens).value_or(0u); + return 0u; + }(); + + // If fixNFTokenRemint amendment is on, we must + // add FirstNFTokenSequence. + if (env.current()->rules().enabled(fixNFTokenRemint)) + tokenSeq += env.le(acct) + ->at(~sfFirstNFTokenSequence) + .value_or(env.seq(acct)); + + return toUInt32(nft::cipheredTaxon(tokenSeq, nft::toTaxon(taxon))); + }; + + for (std::uint32_t i = 0; i < 96; ++i) + { + // In order to fill the pages we use the taxon to break them + // into groups of 16 entries. By having the internal + // representation of the taxon go... + // 0, 3, 2, 5, 4, 7... + // in sets of 16 NFTs we can get each page to be fully + // populated. + std::uint32_t const intTaxon = (i / 16) + (i & 0b10000 ? 2 : 0); + uint32_t const extTaxon = internalTaxon(owner, intTaxon); + nfts.push_back( + token::getNextID(env, owner, extTaxon, tfTransferable)); + env(token::mint(owner, extTaxon), txflags(tfTransferable)); + env.close(); + } + + // Sort the NFTs so they are listed in storage order, not + // creation order. + std::sort(nfts.begin(), nfts.end()); + + // Verify that the owner does indeed have exactly three pages + // of NFTs with 32 entries in each page. + { + Json::Value params; + params[jss::account] = owner.human(); + auto resp = env.rpc("json", "account_objects", to_string(params)); + + Json::Value const& acctObjs = + resp[jss::result][jss::account_objects]; + + int pageCount = 0; + for (Json::UInt i = 0; i < acctObjs.size(); ++i) + { + if (BEAST_EXPECT( + acctObjs[i].isMember(sfNFTokens.jsonName) && + acctObjs[i][sfNFTokens.jsonName].isArray())) + { + BEAST_EXPECT(acctObjs[i][sfNFTokens.jsonName].size() == 32); + ++pageCount; + } + } + // If this check fails then the internal NFT directory logic + // has changed. + BEAST_EXPECT(pageCount == 3); + } + return nfts; + }; + + void + testLedgerStateFixErrors() + { + testcase("LedgerStateFix error cases"); + + using namespace test::jtx; + + Account const alice("alice"); + + { + // Verify that the LedgerStateFix transaction is disabled + // without the fixNFTokenPageLinks amendment. 
+            Env env{*this, supported_amendments() - fixNFTokenPageLinks};
+            env.fund(XRP(1000), alice);
+
+            auto const linkFixFee = drops(env.current()->fees().increment);
+            env(ledgerStateFix::nftPageLinks(alice, alice),
+                fee(linkFixFee),
+                ter(temDISABLED));
+        }
+
+        Env env{*this, supported_amendments()};
+        env.fund(XRP(1000), alice);
+        std::uint32_t const ticketSeq = env.seq(alice);
+        env(ticket::create(alice, 1));
+
+        // Preflight
+
+        {
+            // Fail preflight1.  Can't combine AccountTxnID and ticket.
+            Json::Value tx = ledgerStateFix::nftPageLinks(alice, alice);
+            tx[sfAccountTxnID.jsonName] =
+                "00000000000000000000000000000000"
+                "00000000000000000000000000000000";
+            env(tx, ticket::use(ticketSeq), ter(temINVALID));
+        }
+        // Fee too low.
+        env(ledgerStateFix::nftPageLinks(alice, alice), ter(telINSUF_FEE_P));
+
+        // Invalid flags.
+        auto const linkFixFee = drops(env.current()->fees().increment);
+        env(ledgerStateFix::nftPageLinks(alice, alice),
+            fee(linkFixFee),
+            txflags(tfPassive),
+            ter(temINVALID_FLAG));
+
+        {
+            // ledgerStateFix::nftPageLinks requires an Owner field.
+            Json::Value tx = ledgerStateFix::nftPageLinks(alice, alice);
+            tx.removeMember(sfOwner.jsonName);
+            env(tx, fee(linkFixFee), ter(temINVALID));
+        }
+        {
+            // Invalid LedgerFixType codes.
+            Json::Value tx = ledgerStateFix::nftPageLinks(alice, alice);
+            tx[sfLedgerFixType.jsonName] = 0;
+            env(tx, fee(linkFixFee), ter(tefINVALID_LEDGER_FIX_TYPE));
+
+            tx[sfLedgerFixType.jsonName] = 200;
+            env(tx, fee(linkFixFee), ter(tefINVALID_LEDGER_FIX_TYPE));
+        }
+
+        // Preclaim
+        Account const carol("carol");
+        env.memoize(carol);
+        env(ledgerStateFix::nftPageLinks(alice, carol),
+            fee(linkFixFee),
+            ter(tecOBJECT_NOT_FOUND));
+    }
+
+    void
+    testTokenPageLinkErrors()
+    {
+        testcase("NFTokenPageLinkFix error cases");
+
+        using namespace test::jtx;
+
+        Account const alice("alice");
+
+        Env env{*this, supported_amendments()};
+        env.fund(XRP(1000), alice);
+
+        // These cases all return the same TER code, but they exercise
+        // different cases where there is nothing to fix in an owner's
+        // NFToken pages.  So they increase test coverage.
+
+        // Owner has no pages to fix.
+        auto const linkFixFee = drops(env.current()->fees().increment);
+        env(ledgerStateFix::nftPageLinks(alice, alice),
+            fee(linkFixFee),
+            ter(tecFAILED_PROCESSING));
+
+        // Alice has only one page.
+        env(token::mint(alice), txflags(tfTransferable));
+        env.close();
+
+        env(ledgerStateFix::nftPageLinks(alice, alice),
+            fee(linkFixFee),
+            ter(tecFAILED_PROCESSING));
+
+        // Alice has at least three pages.
+        for (std::uint32_t i = 0; i < 64; ++i)
+        {
+            env(token::mint(alice), txflags(tfTransferable));
+            env.close();
+        }
+
+        env(ledgerStateFix::nftPageLinks(alice, alice),
+            fee(linkFixFee),
+            ter(tecFAILED_PROCESSING));
+    }
+
+    void
+    testFixNFTokenPageLinks()
+    {
+        // Steps:
+        //  1. Before the fixNFTokenPageLinks amendment is enabled, build the
+        //     three kinds of damaged NFToken directories we know about:
+        //     A. One where there is only one page, but without the final index.
+        //     B. One with multiple pages and a missing final page.
+        //     C. One with links missing in the middle of the chain.
+        //  2. Enable the fixNFTokenPageLinks amendment.
+        //  3. Invoke the LedgerStateFix transactor and repair the directories.
+ testcase("Fix links"); + + using namespace test::jtx; + + Account const alice("alice"); + Account const bob("bob"); + Account const carol("carol"); + Account const daria("daria"); + + Env env{*this, supported_amendments() - fixNFTokenPageLinks}; + env.fund(XRP(1000), alice, bob, carol, daria); + + //********************************************************************** + // Step 1A: Create damaged NFToken directories: + // o One where there is only one page, but without the final index. + //********************************************************************** + + // alice generates three packed pages. + std::vector aliceNFTs = genPackedTokens(env, alice); + BEAST_EXPECT(nftCount(env, alice) == 96); + BEAST_EXPECT(ownerCount(env, alice) == 3); + + // Get the index of the middle page. + uint256 const aliceMiddleNFTokenPageIndex = [&env, &alice]() { + auto lastNFTokenPage = env.le(keylet::nftpage_max(alice)); + return lastNFTokenPage->at(sfPreviousPageMin); + }(); + + // alice burns all the tokens in the first and last pages. + for (int i = 0; i < 32; ++i) + { + env(token::burn(alice, {aliceNFTs[i]})); + env.close(); + } + aliceNFTs.erase(aliceNFTs.begin(), aliceNFTs.begin() + 32); + for (int i = 0; i < 32; ++i) + { + env(token::burn(alice, {aliceNFTs.back()})); + aliceNFTs.pop_back(); + env.close(); + } + BEAST_EXPECT(ownerCount(env, alice) == 1); + BEAST_EXPECT(nftCount(env, alice) == 32); + + // Removing the last token from the last page deletes the last + // page. This is a bug. The contents of the next-to-last page + // should have been moved into the last page. + BEAST_EXPECT(!env.le(keylet::nftpage_max(alice))); + + // alice's "middle" page is still present, but has no links. + { + auto aliceMiddleNFTokenPage = env.le(keylet::nftpage( + keylet::nftpage_min(alice), aliceMiddleNFTokenPageIndex)); + if (!BEAST_EXPECT(aliceMiddleNFTokenPage)) + return; + + BEAST_EXPECT( + !aliceMiddleNFTokenPage->isFieldPresent(sfPreviousPageMin)); + BEAST_EXPECT( + !aliceMiddleNFTokenPage->isFieldPresent(sfNextPageMin)); + } + + //********************************************************************** + // Step 1B: Create damaged NFToken directories: + // o One with multiple pages and a missing final page. + //********************************************************************** + + // bob generates three packed pages. + std::vector bobNFTs = genPackedTokens(env, bob); + BEAST_EXPECT(nftCount(env, bob) == 96); + BEAST_EXPECT(ownerCount(env, bob) == 3); + + // Get the index of the middle page. + uint256 const bobMiddleNFTokenPageIndex = [&env, &bob]() { + auto lastNFTokenPage = env.le(keylet::nftpage_max(bob)); + return lastNFTokenPage->at(sfPreviousPageMin); + }(); + + // bob burns all the tokens in the very last page. + for (int i = 0; i < 32; ++i) + { + env(token::burn(bob, {bobNFTs.back()})); + bobNFTs.pop_back(); + env.close(); + } + BEAST_EXPECT(nftCount(env, bob) == 64); + BEAST_EXPECT(ownerCount(env, bob) == 2); + + // Removing the last token from the last page deletes the last + // page. This is a bug. The contents of the next-to-last page + // should have been moved into the last page. + BEAST_EXPECT(!env.le(keylet::nftpage_max(bob))); + + // bob's "middle" page is still present, but has lost the + // NextPageMin field. 
+ { + auto bobMiddleNFTokenPage = env.le(keylet::nftpage( + keylet::nftpage_min(bob), bobMiddleNFTokenPageIndex)); + if (!BEAST_EXPECT(bobMiddleNFTokenPage)) + return; + + BEAST_EXPECT( + bobMiddleNFTokenPage->isFieldPresent(sfPreviousPageMin)); + BEAST_EXPECT(!bobMiddleNFTokenPage->isFieldPresent(sfNextPageMin)); + } + + //********************************************************************** + // Step 1C: Create damaged NFToken directories: + // o One with links missing in the middle of the chain. + //********************************************************************** + + // carol generates three packed pages. + std::vector carolNFTs = genPackedTokens(env, carol); + BEAST_EXPECT(nftCount(env, carol) == 96); + BEAST_EXPECT(ownerCount(env, carol) == 3); + + // Get the index of the middle page. + uint256 const carolMiddleNFTokenPageIndex = [&env, &carol]() { + auto lastNFTokenPage = env.le(keylet::nftpage_max(carol)); + return lastNFTokenPage->at(sfPreviousPageMin); + }(); + + // carol sells all of the tokens in the very last page to daria. + std::vector dariaNFTs; + dariaNFTs.reserve(32); + for (int i = 0; i < 32; ++i) + { + uint256 const offerIndex = + keylet::nftoffer(carol, env.seq(carol)).key; + env(token::createOffer(carol, carolNFTs.back(), XRP(0)), + txflags(tfSellNFToken)); + env.close(); + + env(token::acceptSellOffer(daria, offerIndex)); + env.close(); + + dariaNFTs.push_back(carolNFTs.back()); + carolNFTs.pop_back(); + } + BEAST_EXPECT(nftCount(env, carol) == 64); + BEAST_EXPECT(ownerCount(env, carol) == 2); + + // Removing the last token from the last page deletes the last + // page. This is a bug. The contents of the next-to-last page + // should have been moved into the last page. + BEAST_EXPECT(!env.le(keylet::nftpage_max(carol))); + + // carol's "middle" page is still present, but has lost the + // NextPageMin field. + auto carolMiddleNFTokenPage = env.le(keylet::nftpage( + keylet::nftpage_min(carol), carolMiddleNFTokenPageIndex)); + if (!BEAST_EXPECT(carolMiddleNFTokenPage)) + return; + + BEAST_EXPECT(carolMiddleNFTokenPage->isFieldPresent(sfPreviousPageMin)); + BEAST_EXPECT(!carolMiddleNFTokenPage->isFieldPresent(sfNextPageMin)); + + // At this point carol's NFT directory has the same problem that + // bob's has: the last page is missing. Now we make things more + // complicated by putting the last page back. carol buys their NFTs + // back from daria. + for (uint256 const& nft : dariaNFTs) + { + uint256 const offerIndex = + keylet::nftoffer(carol, env.seq(carol)).key; + env(token::createOffer(carol, nft, drops(1)), token::owner(daria)); + env.close(); + + env(token::acceptBuyOffer(daria, offerIndex)); + env.close(); + + carolNFTs.push_back(nft); + } + + // Note that carol actually owns 96 NFTs, but only 64 are reported + // because the links are damaged. + BEAST_EXPECT(nftCount(env, carol) == 64); + BEAST_EXPECT(ownerCount(env, carol) == 3); + + // carol's "middle" page is present and still has no NextPageMin field. + { + auto carolMiddleNFTokenPage = env.le(keylet::nftpage( + keylet::nftpage_min(carol), carolMiddleNFTokenPageIndex)); + if (!BEAST_EXPECT(carolMiddleNFTokenPage)) + return; + + BEAST_EXPECT( + carolMiddleNFTokenPage->isFieldPresent(sfPreviousPageMin)); + BEAST_EXPECT( + !carolMiddleNFTokenPage->isFieldPresent(sfNextPageMin)); + } + // carol has a "last" page again, but it has no PreviousPageMin field. 
+ { + auto carolLastNFTokenPage = env.le(keylet::nftpage_max(carol)); + + BEAST_EXPECT( + !carolLastNFTokenPage->isFieldPresent(sfPreviousPageMin)); + BEAST_EXPECT(!carolLastNFTokenPage->isFieldPresent(sfNextPageMin)); + } + + //********************************************************************** + // Step 2: Enable the fixNFTokenPageLinks amendment. + //********************************************************************** + // Verify that the LedgerStateFix transaction is not enabled. + auto const linkFixFee = drops(env.current()->fees().increment); + env(ledgerStateFix::nftPageLinks(daria, alice), + fee(linkFixFee), + ter(temDISABLED)); + + // Wait 15 ledgers so the LedgerStateFix transaction is no longer + // retried. + for (int i = 0; i < 15; ++i) + env.close(); + + env.enableFeature(fixNFTokenPageLinks); + env.close(); + + //********************************************************************** + // Step 3A: Repair the one-page directory (alice's) + //********************************************************************** + + // Verify that alice's NFToken directory is still damaged. + + // alice's last page should still be missing. + BEAST_EXPECT(!env.le(keylet::nftpage_max(alice))); + + // alice's "middle" page is still present and has no links. + { + auto aliceMiddleNFTokenPage = env.le(keylet::nftpage( + keylet::nftpage_min(alice), aliceMiddleNFTokenPageIndex)); + if (!BEAST_EXPECT(aliceMiddleNFTokenPage)) + return; + + BEAST_EXPECT( + !aliceMiddleNFTokenPage->isFieldPresent(sfPreviousPageMin)); + BEAST_EXPECT( + !aliceMiddleNFTokenPage->isFieldPresent(sfNextPageMin)); + } + + // The server "remembers" daria's failed nftPageLinks transaction + // signature. So we need to advance daria's sequence number before + // daria can submit a similar transaction. + env(noop(daria)); + + // daria fixes the links in alice's NFToken directory. + env(ledgerStateFix::nftPageLinks(daria, alice), fee(linkFixFee)); + env.close(); + + // alices's last page should now be present and include no links. + { + auto aliceLastNFTokenPage = env.le(keylet::nftpage_max(alice)); + if (!BEAST_EXPECT(aliceLastNFTokenPage)) + return; + + BEAST_EXPECT( + !aliceLastNFTokenPage->isFieldPresent(sfPreviousPageMin)); + BEAST_EXPECT(!aliceLastNFTokenPage->isFieldPresent(sfNextPageMin)); + } + + // alice's middle page should be gone. + BEAST_EXPECT(!env.le(keylet::nftpage( + keylet::nftpage_min(alice), aliceMiddleNFTokenPageIndex))); + + BEAST_EXPECT(nftCount(env, alice) == 32); + BEAST_EXPECT(ownerCount(env, alice) == 1); + + //********************************************************************** + // Step 3B: Repair the two-page directory (bob's) + //********************************************************************** + + // Verify that bob's NFToken directory is still damaged. + + // bob's last page should still be missing. + BEAST_EXPECT(!env.le(keylet::nftpage_max(bob))); + + // bob's "middle" page is still present and missing NextPageMin. + { + auto bobMiddleNFTokenPage = env.le(keylet::nftpage( + keylet::nftpage_min(bob), bobMiddleNFTokenPageIndex)); + if (!BEAST_EXPECT(bobMiddleNFTokenPage)) + return; + + BEAST_EXPECT( + bobMiddleNFTokenPage->isFieldPresent(sfPreviousPageMin)); + BEAST_EXPECT(!bobMiddleNFTokenPage->isFieldPresent(sfNextPageMin)); + } + + // daria fixes the links in bob's NFToken directory. + env(ledgerStateFix::nftPageLinks(daria, bob), fee(linkFixFee)); + env.close(); + + // bob's last page should now be present and include a previous + // link but no next link. 
+ { + auto const lastPageKeylet = keylet::nftpage_max(bob); + auto const bobLastNFTokenPage = env.le(lastPageKeylet); + if (!BEAST_EXPECT(bobLastNFTokenPage)) + return; + + BEAST_EXPECT(bobLastNFTokenPage->isFieldPresent(sfPreviousPageMin)); + BEAST_EXPECT( + bobLastNFTokenPage->at(sfPreviousPageMin) != + bobMiddleNFTokenPageIndex); + BEAST_EXPECT(!bobLastNFTokenPage->isFieldPresent(sfNextPageMin)); + + auto const bobNewFirstNFTokenPage = env.le(keylet::nftpage( + keylet::nftpage_min(bob), + bobLastNFTokenPage->at(sfPreviousPageMin))); + if (!BEAST_EXPECT(bobNewFirstNFTokenPage)) + return; + + BEAST_EXPECT( + bobNewFirstNFTokenPage->isFieldPresent(sfNextPageMin) && + bobNewFirstNFTokenPage->at(sfNextPageMin) == + lastPageKeylet.key); + BEAST_EXPECT( + !bobNewFirstNFTokenPage->isFieldPresent(sfPreviousPageMin)); + } + + // bob's middle page should be gone. + BEAST_EXPECT(!env.le(keylet::nftpage( + keylet::nftpage_min(bob), bobMiddleNFTokenPageIndex))); + + BEAST_EXPECT(nftCount(env, bob) == 64); + BEAST_EXPECT(ownerCount(env, bob) == 2); + + //********************************************************************** + // Step 3C: Repair the three-page directory (carol's) + //********************************************************************** + + // Verify that carol's NFToken directory is still damaged. + + // carol's "middle" page is present and has no NextPageMin field. + { + auto carolMiddleNFTokenPage = env.le(keylet::nftpage( + keylet::nftpage_min(carol), carolMiddleNFTokenPageIndex)); + if (!BEAST_EXPECT(carolMiddleNFTokenPage)) + return; + + BEAST_EXPECT( + carolMiddleNFTokenPage->isFieldPresent(sfPreviousPageMin)); + BEAST_EXPECT( + !carolMiddleNFTokenPage->isFieldPresent(sfNextPageMin)); + } + // carol has a "last" page, but it has no PreviousPageMin field. + { + auto carolLastNFTokenPage = env.le(keylet::nftpage_max(carol)); + + BEAST_EXPECT( + !carolLastNFTokenPage->isFieldPresent(sfPreviousPageMin)); + BEAST_EXPECT(!carolLastNFTokenPage->isFieldPresent(sfNextPageMin)); + } + + // carol fixes the links in their own NFToken directory. + env(ledgerStateFix::nftPageLinks(carol, carol), fee(linkFixFee)); + env.close(); + + { + // carol's "middle" page is present and now has a NextPageMin field. + auto const lastPageKeylet = keylet::nftpage_max(carol); + auto carolMiddleNFTokenPage = env.le(keylet::nftpage( + keylet::nftpage_min(carol), carolMiddleNFTokenPageIndex)); + if (!BEAST_EXPECT(carolMiddleNFTokenPage)) + return; + + BEAST_EXPECT( + carolMiddleNFTokenPage->isFieldPresent(sfPreviousPageMin)); + BEAST_EXPECT( + carolMiddleNFTokenPage->isFieldPresent(sfNextPageMin) && + carolMiddleNFTokenPage->at(sfNextPageMin) == + lastPageKeylet.key); + + // carol has a "last" page that includes a PreviousPageMin field. + auto carolLastNFTokenPage = env.le(lastPageKeylet); + if (!BEAST_EXPECT(carolLastNFTokenPage)) + return; + + BEAST_EXPECT( + carolLastNFTokenPage->isFieldPresent(sfPreviousPageMin) && + carolLastNFTokenPage->at(sfPreviousPageMin) == + carolMiddleNFTokenPageIndex); + BEAST_EXPECT(!carolLastNFTokenPage->isFieldPresent(sfNextPageMin)); + + // carol also has a "first" page that includes a NextPageMin field. 
+            auto carolFirstNFTokenPage = env.le(keylet::nftpage(
+                keylet::nftpage_min(carol),
+                carolMiddleNFTokenPage->at(sfPreviousPageMin)));
+            if (!BEAST_EXPECT(carolFirstNFTokenPage))
+                return;
+
+            BEAST_EXPECT(
+                carolFirstNFTokenPage->isFieldPresent(sfNextPageMin) &&
+                carolFirstNFTokenPage->at(sfNextPageMin) ==
+                    carolMiddleNFTokenPageIndex);
+            BEAST_EXPECT(
+                !carolFirstNFTokenPage->isFieldPresent(sfPreviousPageMin));
+        }
+
+        // With the link repair, the server knows that carol has 96 NFTs.
+        BEAST_EXPECT(nftCount(env, carol) == 96);
+        BEAST_EXPECT(ownerCount(env, carol) == 3);
+    }
+
+public:
+    void
+    run() override
+    {
+        testLedgerStateFixErrors();
+        testTokenPageLinkErrors();
+        testFixNFTokenPageLinks();
+    }
+};
+
+BEAST_DEFINE_TESTSUITE(FixNFTokenPageLinks, tx, ripple);
+
+}  // namespace ripple
diff --git a/src/test/app/NFTokenBurn_test.cpp b/src/test/app/NFTokenBurn_test.cpp
index 8219889b4be..35a8858f868 100644
--- a/src/test/app/NFTokenBurn_test.cpp
+++ b/src/test/app/NFTokenBurn_test.cpp
@@ -80,6 +80,73 @@ class NFTokenBurnBaseUtil_test : public beast::unit_test::suite
         return nftokenID;
     };
 
+    // printNFTPages is a helper function that may be used for debugging.
+    //
+    // It uses the ledger_data RPC command to show the NFT pages in the
+    // ledger.  The Volume parameter controls how noisy the output is.
+    enum Volume : bool {
+        quiet = false,
+        noisy = true,
+    };
+
+    void
+    printNFTPages(test::jtx::Env& env, Volume vol)
+    {
+        Json::Value jvParams;
+        jvParams[jss::ledger_index] = "current";
+        jvParams[jss::binary] = false;
+        {
+            Json::Value jrr = env.rpc(
+                "json",
+                "ledger_data",
+                boost::lexical_cast<std::string>(jvParams));
+
+            // Iterate the state and print all NFTokenPages.
+            if (!jrr.isMember(jss::result) ||
+                !jrr[jss::result].isMember(jss::state))
+            {
+                std::cout << "No ledger state found!" << std::endl;
+                return;
+            }
+            Json::Value& state = jrr[jss::result][jss::state];
+            if (!state.isArray())
+            {
+                std::cout << "Ledger state is not array!" << std::endl;
+                return;
+            }
+            for (Json::UInt i = 0; i < state.size(); ++i)
+            {
+                if (state[i].isMember(sfNFTokens.jsonName) &&
+                    state[i][sfNFTokens.jsonName].isArray())
+                {
+                    std::uint32_t tokenCount =
+                        state[i][sfNFTokens.jsonName].size();
+                    std::cout << tokenCount << " NFTokens in page "
+                              << state[i][jss::index].asString() << std::endl;
+
+                    if (vol == noisy)
+                    {
+                        std::cout << state[i].toStyledString() << std::endl;
+                    }
+                    else
+                    {
+                        if (tokenCount > 0)
+                            std::cout << "first: "
+                                      << state[i][sfNFTokens.jsonName][0u]
+                                             .toStyledString()
+                                      << std::endl;
+                        if (tokenCount > 1)
+                            std::cout
+                                << "last: "
+                                << state[i][sfNFTokens.jsonName][tokenCount - 1]
+                                       .toStyledString()
+                                << std::endl;
+                    }
+                }
+            }
+        }
+    }
+
     void
     testBurnRandom(FeatureBitset features)
     {
@@ -297,76 +364,10 @@ class NFTokenBurnBaseUtil_test : public beast::unit_test::suite
         Env env{*this, features};
         env.fund(XRP(1000), alice);
 
-        // printNFTPages is a lambda that may be used for debugging.
-        //
-        // It uses the ledger RPC command to show the NFT pages in the ledger.
-        // This parameter controls how noisy the output is.
-        enum Volume : bool {
-            quiet = false,
-            noisy = true,
-        };
-
-        [[maybe_unused]] auto printNFTPages = [&env](Volume vol) {
-            Json::Value jvParams;
-            jvParams[jss::ledger_index] = "current";
-            jvParams[jss::binary] = false;
-            {
-                Json::Value jrr = env.rpc(
-                    "json",
-                    "ledger_data",
-                    boost::lexical_cast<std::string>(jvParams));
-
-                // Iterate the state and print all NFTokenPages.
-                if (!jrr.isMember(jss::result) ||
-                    !jrr[jss::result].isMember(jss::state))
-                {
-                    std::cout << "No ledger state found!" << std::endl;
-                    return;
-                }
-                Json::Value& state = jrr[jss::result][jss::state];
-                if (!state.isArray())
-                {
-                    std::cout << "Ledger state is not array!" << std::endl;
-                    return;
-                }
-                for (Json::UInt i = 0; i < state.size(); ++i)
-                {
-                    if (state[i].isMember(sfNFTokens.jsonName) &&
-                        state[i][sfNFTokens.jsonName].isArray())
-                    {
-                        std::uint32_t tokenCount =
-                            state[i][sfNFTokens.jsonName].size();
-                        std::cout << tokenCount << " NFTokens in page "
-                                  << state[i][jss::index].asString()
-                                  << std::endl;
-
-                        if (vol == noisy)
-                        {
-                            std::cout << state[i].toStyledString() << std::endl;
-                        }
-                        else
-                        {
-                            if (tokenCount > 0)
-                                std::cout << "first: "
-                                          << state[i][sfNFTokens.jsonName][0u]
-                                                 .toStyledString()
-                                          << std::endl;
-                            if (tokenCount > 1)
-                                std::cout << "last: "
-                                          << state[i][sfNFTokens.jsonName]
-                                                      [tokenCount - 1]
-                                                 .toStyledString()
-                                          << std::endl;
-                        }
-                    }
-                }
-            }
-        };
-
         // A lambda that generates 96 nfts packed into three pages of 32 each.
-        auto genPackedTokens = [this, &env, &alice](
-                                   std::vector<uint256>& nfts) {
-            nfts.clear();
+        // Returns a sorted vector of the NFTokenIDs packed into the pages.
+        auto genPackedTokens = [this, &env, &alice]() {
+            std::vector<uint256> nfts;
             nfts.reserve(96);
 
             // We want to create fully packed NFT pages.  This is a little
@@ -441,23 +442,24 @@ class NFTokenBurnBaseUtil_test : public beast::unit_test::suite
             // has changed.
             BEAST_EXPECT(pageCount == 3);
         }
+        return nfts;
     };
-
-    // Generate three packed pages.  Then burn the tokens in order from
-    // first to last.  This exercises specific cases where coalescing
-    // pages is not possible.
-    std::vector<uint256> nfts;
-    genPackedTokens(nfts);
-    BEAST_EXPECT(nftCount(env, alice) == 96);
-    BEAST_EXPECT(ownerCount(env, alice) == 3);
-
-    for (uint256 const& nft : nfts)
     {
-        env(token::burn(alice, {nft}));
-        env.close();
+        // Generate three packed pages.  Then burn the tokens in order from
+        // first to last.  This exercises specific cases where coalescing
+        // pages is not possible.
+        std::vector<uint256> nfts = genPackedTokens();
+        BEAST_EXPECT(nftCount(env, alice) == 96);
+        BEAST_EXPECT(ownerCount(env, alice) == 3);
+
+        for (uint256 const& nft : nfts)
+        {
+            env(token::burn(alice, {nft}));
+            env.close();
+        }
+        BEAST_EXPECT(nftCount(env, alice) == 0);
+        BEAST_EXPECT(ownerCount(env, alice) == 0);
     }
-    BEAST_EXPECT(nftCount(env, alice) == 0);
-    BEAST_EXPECT(ownerCount(env, alice) == 0);
 
     // A lambda verifies that the ledger no longer contains any NFT pages.
     auto checkNoTokenPages = [this, &env]() {
@@ -479,48 +481,421 @@ class NFTokenBurnBaseUtil_test : public beast::unit_test::suite
         }
     };
     checkNoTokenPages();
+        {
+            // Generate three packed pages.  Then burn the tokens in order from
+            // last to first.  This exercises different specific cases where
+            // coalescing pages is not possible.
+            std::vector<uint256> nfts = genPackedTokens();
+            BEAST_EXPECT(nftCount(env, alice) == 96);
+            BEAST_EXPECT(ownerCount(env, alice) == 3);
+
+            // Verify that all three pages are present and remember the
+            // indexes.
+            auto lastNFTokenPage = env.le(keylet::nftpage_max(alice));
+            if (!BEAST_EXPECT(lastNFTokenPage))
+                return;
+
+            uint256 const middleNFTokenPageIndex =
+                lastNFTokenPage->at(sfPreviousPageMin);
+            auto middleNFTokenPage = env.le(keylet::nftpage(
+                keylet::nftpage_min(alice), middleNFTokenPageIndex));
+            if (!BEAST_EXPECT(middleNFTokenPage))
+                return;
+
+            uint256 const firstNFTokenPageIndex =
+                middleNFTokenPage->at(sfPreviousPageMin);
+            auto firstNFTokenPage = env.le(keylet::nftpage(
+                keylet::nftpage_min(alice), firstNFTokenPageIndex));
+            if (!BEAST_EXPECT(firstNFTokenPage))
+                return;
+
+            // Burn almost all the tokens in the very last page.
+            for (int i = 0; i < 31; ++i)
+            {
+                env(token::burn(alice, {nfts.back()}));
+                nfts.pop_back();
+                env.close();
+            }
 
-        // Generate three packed pages.  Then burn the tokens in order from
-        // last to first.  This exercises different specific cases where
-        // coalescing pages is not possible.
-        genPackedTokens(nfts);
-        BEAST_EXPECT(nftCount(env, alice) == 96);
-        BEAST_EXPECT(ownerCount(env, alice) == 3);
+            // Verify that the last page is still present and contains just one
+            // NFT.
+            lastNFTokenPage = env.le(keylet::nftpage_max(alice));
+            if (!BEAST_EXPECT(lastNFTokenPage))
+                return;
 
-        std::reverse(nfts.begin(), nfts.end());
-        for (uint256 const& nft : nfts)
-        {
-            env(token::burn(alice, {nft}));
+            BEAST_EXPECT(
+                lastNFTokenPage->getFieldArray(sfNFTokens).size() == 1);
+            BEAST_EXPECT(lastNFTokenPage->isFieldPresent(sfPreviousPageMin));
+            BEAST_EXPECT(!lastNFTokenPage->isFieldPresent(sfNextPageMin));
+
+            // Delete the last token from the last page.
+            env(token::burn(alice, {nfts.back()}));
+            nfts.pop_back();
             env.close();
+
+            if (features[fixNFTokenPageLinks])
+            {
+                // Removing the last token from the last page deletes the
+                // _previous_ page because we need to preserve that last
+                // page as an anchor.  The contents of the next-to-last page
+                // are moved into the last page.
+                lastNFTokenPage = env.le(keylet::nftpage_max(alice));
+                BEAST_EXPECT(lastNFTokenPage);
+                BEAST_EXPECT(
+                    lastNFTokenPage->at(~sfPreviousPageMin) ==
+                    firstNFTokenPageIndex);
+                BEAST_EXPECT(!lastNFTokenPage->isFieldPresent(sfNextPageMin));
+                BEAST_EXPECT(
+                    lastNFTokenPage->getFieldArray(sfNFTokens).size() == 32);
+
+                // The "middle" page should be gone.
+                middleNFTokenPage = env.le(keylet::nftpage(
+                    keylet::nftpage_min(alice), middleNFTokenPageIndex));
+                BEAST_EXPECT(!middleNFTokenPage);
+
+                // The "first" page should still be present and linked to
+                // the last page.
+                firstNFTokenPage = env.le(keylet::nftpage(
+                    keylet::nftpage_min(alice), firstNFTokenPageIndex));
+                BEAST_EXPECT(firstNFTokenPage);
+                BEAST_EXPECT(
+                    !firstNFTokenPage->isFieldPresent(sfPreviousPageMin));
+                BEAST_EXPECT(
+                    firstNFTokenPage->at(~sfNextPageMin) ==
+                    lastNFTokenPage->key());
+                BEAST_EXPECT(
+                    lastNFTokenPage->getFieldArray(sfNFTokens).size() == 32);
+            }
+            else
+            {
+                // Removing the last token from the last page deletes the last
+                // page.  This is a bug.  The contents of the next-to-last page
+                // should have been moved into the last page.
+                lastNFTokenPage = env.le(keylet::nftpage_max(alice));
+                BEAST_EXPECT(!lastNFTokenPage);
+
+                // The "middle" page is still present, but has lost the
+                // NextPageMin field.
+                middleNFTokenPage = env.le(keylet::nftpage(
+                    keylet::nftpage_min(alice), middleNFTokenPageIndex));
+                if (!BEAST_EXPECT(middleNFTokenPage))
+                    return;
+                BEAST_EXPECT(
+                    middleNFTokenPage->isFieldPresent(sfPreviousPageMin));
+                BEAST_EXPECT(!middleNFTokenPage->isFieldPresent(sfNextPageMin));
+            }
+
+            // Delete the rest of the NFTokens.
+            while (!nfts.empty())
+            {
+                env(token::burn(alice, {nfts.back()}));
+                nfts.pop_back();
+                env.close();
+            }
+            BEAST_EXPECT(nftCount(env, alice) == 0);
+            BEAST_EXPECT(ownerCount(env, alice) == 0);
+        }
+        checkNoTokenPages();
+        {
+            // Generate three packed pages.  Then burn all tokens in the middle
+            // page.  This exercises the case where a page is removed between
+            // two fully populated pages.
+            std::vector<uint256> nfts = genPackedTokens();
+            BEAST_EXPECT(nftCount(env, alice) == 96);
+            BEAST_EXPECT(ownerCount(env, alice) == 3);
+
+            // Verify that all three pages are present and remember the
+            // indexes.
+            auto lastNFTokenPage = env.le(keylet::nftpage_max(alice));
+            if (!BEAST_EXPECT(lastNFTokenPage))
+                return;
+
+            uint256 const middleNFTokenPageIndex =
+                lastNFTokenPage->at(sfPreviousPageMin);
+            auto middleNFTokenPage = env.le(keylet::nftpage(
+                keylet::nftpage_min(alice), middleNFTokenPageIndex));
+            if (!BEAST_EXPECT(middleNFTokenPage))
+                return;
+
+            uint256 const firstNFTokenPageIndex =
+                middleNFTokenPage->at(sfPreviousPageMin);
+            auto firstNFTokenPage = env.le(keylet::nftpage(
+                keylet::nftpage_min(alice), firstNFTokenPageIndex));
+            if (!BEAST_EXPECT(firstNFTokenPage))
+                return;
+
+            for (std::size_t i = 32; i < 64; ++i)
+            {
+                env(token::burn(alice, nfts[i]));
+                env.close();
+            }
+            nfts.erase(nfts.begin() + 32, nfts.begin() + 64);
+            BEAST_EXPECT(nftCount(env, alice) == 64);
+            BEAST_EXPECT(ownerCount(env, alice) == 2);
+
+            // Verify that middle page is gone and the links in the two
+            // remaining pages are correct.
+            middleNFTokenPage = env.le(keylet::nftpage(
+                keylet::nftpage_min(alice), middleNFTokenPageIndex));
+            BEAST_EXPECT(!middleNFTokenPage);
+
+            lastNFTokenPage = env.le(keylet::nftpage_max(alice));
+            BEAST_EXPECT(!lastNFTokenPage->isFieldPresent(sfNextPageMin));
+            BEAST_EXPECT(
+                lastNFTokenPage->getFieldH256(sfPreviousPageMin) ==
+                firstNFTokenPageIndex);
+
+            firstNFTokenPage = env.le(keylet::nftpage(
+                keylet::nftpage_min(alice), firstNFTokenPageIndex));
+            BEAST_EXPECT(
+                firstNFTokenPage->getFieldH256(sfNextPageMin) ==
+                keylet::nftpage_max(alice).key);
+            BEAST_EXPECT(!firstNFTokenPage->isFieldPresent(sfPreviousPageMin));
+
+            // Burn the remaining nfts.
+            for (uint256 const& nft : nfts)
+            {
+                env(token::burn(alice, {nft}));
+                env.close();
+            }
+            BEAST_EXPECT(nftCount(env, alice) == 0);
+            BEAST_EXPECT(ownerCount(env, alice) == 0);
        }
        checkNoTokenPages();
+        {
+            // Generate three packed pages.  Then burn all the tokens in the
+            // first page followed by all the tokens in the last page.  This
+            // exercises a specific case where coalescing pages is not possible.
+            std::vector<uint256> nfts = genPackedTokens();
+            BEAST_EXPECT(nftCount(env, alice) == 96);
+            BEAST_EXPECT(ownerCount(env, alice) == 3);
+
+            // Verify that all three pages are present and remember the
+            // indexes.
+            auto lastNFTokenPage = env.le(keylet::nftpage_max(alice));
+            if (!BEAST_EXPECT(lastNFTokenPage))
+                return;
+
+            uint256 const middleNFTokenPageIndex =
+                lastNFTokenPage->at(sfPreviousPageMin);
+            auto middleNFTokenPage = env.le(keylet::nftpage(
+                keylet::nftpage_min(alice), middleNFTokenPageIndex));
+            if (!BEAST_EXPECT(middleNFTokenPage))
+                return;
+
+            uint256 const firstNFTokenPageIndex =
+                middleNFTokenPage->at(sfPreviousPageMin);
+            auto firstNFTokenPage = env.le(keylet::nftpage(
+                keylet::nftpage_min(alice), firstNFTokenPageIndex));
+            if (!BEAST_EXPECT(firstNFTokenPage))
+                return;
+
+            // Burn all the tokens in the first page.
+            std::reverse(nfts.begin(), nfts.end());
+            for (int i = 0; i < 32; ++i)
+            {
+                env(token::burn(alice, {nfts.back()}));
+                nfts.pop_back();
+                env.close();
+            }
 
+            // Verify the first page is gone.
+            firstNFTokenPage = env.le(keylet::nftpage(
+                keylet::nftpage_min(alice), firstNFTokenPageIndex));
+            BEAST_EXPECT(!firstNFTokenPage);
+
+            // Check the links in the other two pages.
+            middleNFTokenPage = env.le(keylet::nftpage(
+                keylet::nftpage_min(alice), middleNFTokenPageIndex));
+            if (!BEAST_EXPECT(middleNFTokenPage))
+                return;
+            BEAST_EXPECT(!middleNFTokenPage->isFieldPresent(sfPreviousPageMin));
+            BEAST_EXPECT(middleNFTokenPage->isFieldPresent(sfNextPageMin));
+
+            lastNFTokenPage = env.le(keylet::nftpage_max(alice));
+            if (!BEAST_EXPECT(lastNFTokenPage))
+                return;
+            BEAST_EXPECT(lastNFTokenPage->isFieldPresent(sfPreviousPageMin));
+            BEAST_EXPECT(!lastNFTokenPage->isFieldPresent(sfNextPageMin));
+
+            // Burn all the tokens in the last page.
+            std::reverse(nfts.begin(), nfts.end());
+            for (int i = 0; i < 32; ++i)
+            {
+                env(token::burn(alice, {nfts.back()}));
+                nfts.pop_back();
+                env.close();
+            }
 
+            if (features[fixNFTokenPageLinks])
+            {
+                // Removing the last token from the last page deletes the
+                // _previous_ page because we need to preserve that last
+                // page as an anchor.  The contents of the next-to-last page
+                // are moved into the last page.
+                lastNFTokenPage = env.le(keylet::nftpage_max(alice));
+                BEAST_EXPECT(lastNFTokenPage);
+                BEAST_EXPECT(
+                    !lastNFTokenPage->isFieldPresent(sfPreviousPageMin));
+                BEAST_EXPECT(!lastNFTokenPage->isFieldPresent(sfNextPageMin));
+                BEAST_EXPECT(
+                    lastNFTokenPage->getFieldArray(sfNFTokens).size() == 32);
+
+                // The "middle" page should be gone.
+                middleNFTokenPage = env.le(keylet::nftpage(
+                    keylet::nftpage_min(alice), middleNFTokenPageIndex));
+                BEAST_EXPECT(!middleNFTokenPage);
+
+                // The "first" page should still be gone.
+                firstNFTokenPage = env.le(keylet::nftpage(
+                    keylet::nftpage_min(alice), firstNFTokenPageIndex));
+                BEAST_EXPECT(!firstNFTokenPage);
+            }
+            else
+            {
+                // Removing the last token from the last page deletes the last
+                // page.  This is a bug.  The contents of the next-to-last page
+                // should have been moved into the last page.
+                lastNFTokenPage = env.le(keylet::nftpage_max(alice));
+                BEAST_EXPECT(!lastNFTokenPage);
+
+                // The "middle" page is still present, but has lost the
+                // NextPageMin field.
+ middleNFTokenPage = env.le(keylet::nftpage( + keylet::nftpage_min(alice), middleNFTokenPageIndex)); + if (!BEAST_EXPECT(middleNFTokenPage)) + return; + BEAST_EXPECT( + !middleNFTokenPage->isFieldPresent(sfPreviousPageMin)); + BEAST_EXPECT(!middleNFTokenPage->isFieldPresent(sfNextPageMin)); + } + + // Delete the rest of the NFTokens. + while (!nfts.empty()) + { + env(token::burn(alice, {nfts.back()})); + nfts.pop_back(); + env.close(); + } + BEAST_EXPECT(nftCount(env, alice) == 0); + BEAST_EXPECT(ownerCount(env, alice) == 0); } - nfts.erase(nfts.begin() + 32, nfts.begin() + 64); - BEAST_EXPECT(nftCount(env, alice) == 64); - BEAST_EXPECT(ownerCount(env, alice) == 2); + checkNoTokenPages(); - // Burn the remaining nfts. - for (uint256 const& nft : nfts) + if (features[fixNFTokenPageLinks]) { - env(token::burn(alice, {nft})); - env.close(); + // Exercise the invariant that the final NFTokenPage of a directory + // may not be removed if there are NFTokens in other pages of the + // directory. + // + // We're going to fire an Invariant failure that is difficult to + // cause. We do it here because the tools are here. + // + // See Invariants_test.cpp for examples of other invariant tests + // that this one is modeled after. + + // Generate three closely packed NFTokenPages. + std::vector nfts = genPackedTokens(); + BEAST_EXPECT(nftCount(env, alice) == 96); + BEAST_EXPECT(ownerCount(env, alice) == 3); + + // Burn almost all the tokens in the very last page. + for (int i = 0; i < 31; ++i) + { + env(token::burn(alice, {nfts.back()})); + nfts.pop_back(); + env.close(); + } + { + // Create an ApplyContext we can use to run the invariant + // checks. These variables must outlive the ApplyContext. + OpenView ov{*env.current()}; + STTx tx{ttACCOUNT_SET, [](STObject&) {}}; + test::StreamSink sink{beast::severities::kWarning}; + beast::Journal jlog{sink}; + ApplyContext ac{ + env.app(), + ov, + tx, + tesSUCCESS, + env.current()->fees().base, + tapNONE, + jlog}; + + // Verify that the last page is present and contains one NFT. + auto lastNFTokenPage = + ac.view().peek(keylet::nftpage_max(alice)); + if (!BEAST_EXPECT(lastNFTokenPage)) + return; + BEAST_EXPECT( + lastNFTokenPage->getFieldArray(sfNFTokens).size() == 1); + + // Erase that last page. + ac.view().erase(lastNFTokenPage); + + // Exercise the invariant. + TER terActual = tesSUCCESS; + for (TER const& terExpect : + {TER(tecINVARIANT_FAILED), TER(tefINVARIANT_FAILED)}) + { + terActual = ac.checkInvariants(terActual, XRPAmount{}); + BEAST_EXPECT(terExpect == terActual); + BEAST_EXPECT( + sink.messages().str().starts_with("Invariant failed:")); + // uncomment to log the invariant failure message + // log << " --> " << sink.messages().str() << std::endl; + BEAST_EXPECT( + sink.messages().str().find( + "Last NFT page deleted with non-empty directory") != + std::string::npos); + } + } + { + // Create an ApplyContext we can use to run the invariant + // checks. These variables must outlive the ApplyContext. + OpenView ov{*env.current()}; + STTx tx{ttACCOUNT_SET, [](STObject&) {}}; + test::StreamSink sink{beast::severities::kWarning}; + beast::Journal jlog{sink}; + ApplyContext ac{ + env.app(), + ov, + tx, + tesSUCCESS, + env.current()->fees().base, + tapNONE, + jlog}; + + // Verify that the middle page is present. 
+                auto lastNFTokenPage =
+                    ac.view().peek(keylet::nftpage_max(alice));
+                auto middleNFTokenPage = ac.view().peek(keylet::nftpage(
+                    keylet::nftpage_min(alice),
+                    lastNFTokenPage->getFieldH256(sfPreviousPageMin)));
+                BEAST_EXPECT(middleNFTokenPage);
+
+                // Remove the NextMinPage link from the middle page to fire
+                // the invariant.
+                middleNFTokenPage->makeFieldAbsent(sfNextPageMin);
+                ac.view().update(middleNFTokenPage);
+
+                // Exercise the invariant.
+                TER terActual = tesSUCCESS;
+                for (TER const& terExpect :
+                     {TER(tecINVARIANT_FAILED), TER(tefINVARIANT_FAILED)})
+                {
+                    terActual = ac.checkInvariants(terActual, XRPAmount{});
+                    BEAST_EXPECT(terExpect == terActual);
+                    BEAST_EXPECT(
+                        sink.messages().str().starts_with("Invariant failed:"));
+                    // uncomment to log the invariant failure message
+                    // log << " --> " << sink.messages().str() << std::endl;
+                    BEAST_EXPECT(
+                        sink.messages().str().find("Lost NextMinPage link") !=
+                        std::string::npos);
+                }
+            }
+        }
         }
-        BEAST_EXPECT(nftCount(env, alice) == 0);
-        checkNoTokenPages();
     }
 
     void
@@ -778,12 +1153,238 @@ class NFTokenBurnBaseUtil_test : public beast::unit_test::suite
         }
     }
 
+    void
+    exerciseBrokenLinks(FeatureBitset features)
+    {
+        // Amendment fixNFTokenPageLinks prevents the breakage we want
+        // to observe.
+        if (features[fixNFTokenPageLinks])
+            return;
+
+        // There are a couple of directory merging scenarios that can only
+        // be tested by inserting and deleting in an ordered fashion. We do
+        // that testing now.
+        testcase("Exercise broken links");
+
+        using namespace test::jtx;
+
+        Account const alice{"alice"};
+        Account const minter{"minter"};
+
+        Env env{*this, features};
+        env.fund(XRP(1000), alice, minter);
+
+        // A lambda that generates 96 nfts packed into three pages of 32 each.
+        // Returns a sorted vector of the NFTokenIDs packed into the pages.
+        auto genPackedTokens = [this, &env, &alice, &minter]() {
+            std::vector<uint256> nfts;
+            nfts.reserve(96);
+
+            // We want to create fully packed NFT pages. This is a little
+            // tricky since the system currently in place is inclined to
+            // assign consecutive tokens to only 16 entries per page.
+            //
+            // By manipulating the internal form of the taxon we can force
+            // creation of NFT pages that are completely full. This lambda
+            // tells us the taxon value we should pass in in order for the
+            // internal representation to match the passed in value.
+            auto internalTaxon = [&env](
+                                     Account const& acct,
+                                     std::uint32_t taxon) -> std::uint32_t {
+                std::uint32_t tokenSeq =
+                    env.le(acct)->at(~sfMintedNFTokens).value_or(0);
+
+                // If the fixNFTokenRemint amendment is on, we must
+                // add FirstNFTokenSequence.
+                if (env.current()->rules().enabled(fixNFTokenRemint))
+                    tokenSeq += env.le(acct)
+                                    ->at(~sfFirstNFTokenSequence)
+                                    .value_or(env.seq(acct));
+
+                return toUInt32(
+                    nft::cipheredTaxon(tokenSeq, nft::toTaxon(taxon)));
+            };
+
+            for (std::uint32_t i = 0; i < 96; ++i)
+            {
+                // In order to fill the pages we use the taxon to break them
+                // into groups of 16 entries. By having the internal
+                // representation of the taxon go...
+                // 0, 3, 2, 5, 4, 7...
+                // in sets of 16 NFTs we can get each page to be fully
+                // populated.
+                std::uint32_t const intTaxon = (i / 16) + (i & 0b10000 ? 2 : 0);
+                uint32_t const extTaxon = internalTaxon(minter, intTaxon);
+                nfts.push_back(
+                    token::getNextID(env, minter, extTaxon, tfTransferable));
+                env(token::mint(minter, extTaxon), txflags(tfTransferable));
+                env.close();
+
+                // Minter creates an offer for the NFToken.
+                uint256 const minterOfferIndex =
+                    keylet::nftoffer(minter, env.seq(minter)).key;
+                env(token::createOffer(minter, nfts.back(), XRP(0)),
+                    txflags(tfSellNFToken));
+                env.close();
+
+                // alice accepts the offer.
+                env(token::acceptSellOffer(alice, minterOfferIndex));
+                env.close();
+            }
+
+            // Sort the NFTs so they are listed in storage order, not
+            // creation order.
+            std::sort(nfts.begin(), nfts.end());
+
+            // Verify that the ledger does indeed contain exactly three pages
+            // of NFTs with 32 entries in each page.
+            Json::Value jvParams;
+            jvParams[jss::ledger_index] = "current";
+            jvParams[jss::binary] = false;
+            {
+                Json::Value jrr = env.rpc(
+                    "json",
+                    "ledger_data",
+                    boost::lexical_cast<std::string>(jvParams));
+
+                Json::Value& state = jrr[jss::result][jss::state];
+
+                int pageCount = 0;
+                for (Json::UInt i = 0; i < state.size(); ++i)
+                {
+                    if (state[i].isMember(sfNFTokens.jsonName) &&
+                        state[i][sfNFTokens.jsonName].isArray())
+                    {
+                        BEAST_EXPECT(
+                            state[i][sfNFTokens.jsonName].size() == 32);
+                        ++pageCount;
+                    }
+                }
+                // If this check fails then the internal NFT directory logic
+                // has changed.
+                BEAST_EXPECT(pageCount == 3);
+            }
+            return nfts;
+        };
+
+        // Generate three packed pages.
+        std::vector<uint256> nfts = genPackedTokens();
+        BEAST_EXPECT(nftCount(env, alice) == 96);
+        BEAST_EXPECT(ownerCount(env, alice) == 3);
+
+        // Verify that all three pages are present and remember the
+        // indexes.
+        auto lastNFTokenPage = env.le(keylet::nftpage_max(alice));
+        if (!BEAST_EXPECT(lastNFTokenPage))
+            return;
+
+        uint256 const middleNFTokenPageIndex =
+            lastNFTokenPage->at(sfPreviousPageMin);
+        auto middleNFTokenPage = env.le(keylet::nftpage(
+            keylet::nftpage_min(alice), middleNFTokenPageIndex));
+        if (!BEAST_EXPECT(middleNFTokenPage))
+            return;
+
+        uint256 const firstNFTokenPageIndex =
+            middleNFTokenPage->at(sfPreviousPageMin);
+        auto firstNFTokenPage = env.le(
+            keylet::nftpage(keylet::nftpage_min(alice), firstNFTokenPageIndex));
+        if (!BEAST_EXPECT(firstNFTokenPage))
+            return;
+
+        // Sell all the tokens in the very last page back to minter.
+        std::vector<uint256> last32NFTs;
+        for (int i = 0; i < 32; ++i)
+        {
+            last32NFTs.push_back(nfts.back());
+            nfts.pop_back();
+
+            // alice creates an offer for the NFToken.
+            uint256 const aliceOfferIndex =
+                keylet::nftoffer(alice, env.seq(alice)).key;
+            env(token::createOffer(alice, last32NFTs.back(), XRP(0)),
+                txflags(tfSellNFToken));
+            env.close();
+
+            // minter accepts the offer.
+            env(token::acceptSellOffer(minter, aliceOfferIndex));
+            env.close();
+        }
+
+        // Removing the last token from the last page deletes alice's last
+        // page. This is a bug. The contents of the next-to-last page
+        // should have been moved into the last page.
+        lastNFTokenPage = env.le(keylet::nftpage_max(alice));
+        BEAST_EXPECT(!lastNFTokenPage);
+        BEAST_EXPECT(ownerCount(env, alice) == 2);
+
+        // The "middle" page is still present, but has lost the
+        // NextPageMin field.
+        middleNFTokenPage = env.le(keylet::nftpage(
+            keylet::nftpage_min(alice), middleNFTokenPageIndex));
+        if (!BEAST_EXPECT(middleNFTokenPage))
+            return;
+        BEAST_EXPECT(middleNFTokenPage->isFieldPresent(sfPreviousPageMin));
+        BEAST_EXPECT(!middleNFTokenPage->isFieldPresent(sfNextPageMin));
+
+        // Attempt to delete alice's account, but fail because she owns NFTs.
+        auto const acctDelFee{drops(env.current()->fees().increment)};
+        env(acctdelete(alice, minter),
+            fee(acctDelFee),
+            ter(tecHAS_OBLIGATIONS));
+        env.close();
+
+        // minter sells the last 32 NFTs back to alice.
+ for (uint256 nftID : last32NFTs) + { + // minter creates an offer for the NFToken. + uint256 const minterOfferIndex = + keylet::nftoffer(minter, env.seq(minter)).key; + env(token::createOffer(minter, nftID, XRP(0)), + txflags(tfSellNFToken)); + env.close(); + + // alice accepts the offer. + env(token::acceptSellOffer(alice, minterOfferIndex)); + env.close(); + } + BEAST_EXPECT(ownerCount(env, alice) == 3); // Three NFTokenPages. + + // alice has an NFToken directory with a broken link in the middle. + { + // Try the account_objects RPC command. Alice's account only shows + // two NFT pages even though she owns more. + Json::Value acctObjs = [&env, &alice]() { + Json::Value params; + params[jss::account] = alice.human(); + return env.rpc("json", "account_objects", to_string(params)); + }(); + BEAST_EXPECT(!acctObjs.isMember(jss::marker)); + BEAST_EXPECT( + acctObjs[jss::result][jss::account_objects].size() == 2); + } + { + // Try the account_nfts RPC command. It only returns 64 NFTs + // although alice owns 96. + Json::Value aliceNFTs = [&env, &alice]() { + Json::Value params; + params[jss::account] = alice.human(); + params[jss::type] = "state"; + return env.rpc("json", "account_nfts", to_string(params)); + }(); + BEAST_EXPECT(!aliceNFTs.isMember(jss::marker)); + BEAST_EXPECT( + aliceNFTs[jss::result][jss::account_nfts].size() == 64); + } + } + void testWithFeats(FeatureBitset features) { testBurnRandom(features); testBurnSequential(features); testBurnTooManyOffers(features); + exerciseBrokenLinks(features); } protected: @@ -792,13 +1393,18 @@ class NFTokenBurnBaseUtil_test : public beast::unit_test::suite { using namespace test::jtx; static FeatureBitset const all{supported_amendments()}; + static FeatureBitset const fixNFTV1_2{fixNonFungibleTokensV1_2}; static FeatureBitset const fixNFTDir{fixNFTokenDirV1}; - - static std::array const feats{ - all - fixNonFungibleTokensV1_2 - fixNFTDir - fixNFTokenRemint, - all - fixNonFungibleTokensV1_2 - fixNFTokenRemint, - all - fixNFTokenRemint, - all}; + static FeatureBitset const fixNFTRemint{fixNFTokenRemint}; + static FeatureBitset const fixNFTPageLinks{fixNFTokenPageLinks}; + + static std::array const feats{ + all - fixNFTV1_2 - fixNFTDir - fixNFTRemint - fixNFTPageLinks, + all - fixNFTV1_2 - fixNFTRemint - fixNFTPageLinks, + all - fixNFTRemint - fixNFTPageLinks, + all - fixNFTPageLinks, + all, + }; if (BEAST_EXPECT(instance < feats.size())) { @@ -835,19 +1441,30 @@ class NFTokenBurnWOFixTokenRemint_test : public NFTokenBurnBaseUtil_test } }; +class NFTokenBurnWOFixNFTPageLinks_test : public NFTokenBurnBaseUtil_test +{ +public: + void + run() override + { + NFTokenBurnBaseUtil_test::run(3); + } +}; + class NFTokenBurnAllFeatures_test : public NFTokenBurnBaseUtil_test { public: void run() override { - NFTokenBurnBaseUtil_test::run(3, true); + NFTokenBurnBaseUtil_test::run(4, true); } }; BEAST_DEFINE_TESTSUITE_PRIO(NFTokenBurnBaseUtil, tx, ripple, 3); BEAST_DEFINE_TESTSUITE_PRIO(NFTokenBurnWOfixFungTokens, tx, ripple, 3); BEAST_DEFINE_TESTSUITE_PRIO(NFTokenBurnWOFixTokenRemint, tx, ripple, 3); +BEAST_DEFINE_TESTSUITE_PRIO(NFTokenBurnWOFixNFTPageLinks, tx, ripple, 3); BEAST_DEFINE_TESTSUITE_PRIO(NFTokenBurnAllFeatures, tx, ripple, 3); } // namespace ripple diff --git a/src/test/jtx.h b/src/test/jtx.h index a3255ef3af9..6de7cd480fa 100644 --- a/src/test/jtx.h +++ b/src/test/jtx.h @@ -40,6 +40,7 @@ #include #include #include +#include #include #include #include diff --git a/src/test/jtx/impl/ledgerStateFix.cpp b/src/test/jtx/impl/ledgerStateFix.cpp 
new file mode 100644 index 00000000000..2f121dc2671 --- /dev/null +++ b/src/test/jtx/impl/ledgerStateFix.cpp @@ -0,0 +1,49 @@ +//------------------------------------------------------------------------------ +/* + This file is part of rippled: https://github.com/ripple/rippled + Copyright (c) 2024 Ripple Labs Inc. + + Permission to use, copy, modify, and/or distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. +*/ +//============================================================================== + +#include + +#include +#include +#include + +namespace ripple { +namespace test { +namespace jtx { + +namespace ledgerStateFix { + +// Fix NFTokenPage links on owner's account. acct pays fee. +Json::Value +nftPageLinks(jtx::Account const& acct, jtx::Account const& owner) +{ + Json::Value jv; + jv[sfAccount.jsonName] = acct.human(); + jv[sfLedgerFixType.jsonName] = LedgerStateFix::nfTokenPageLink; + jv[sfOwner.jsonName] = owner.human(); + jv[sfTransactionType.jsonName] = jss::LedgerStateFix; + jv[sfFlags.jsonName] = tfUniversal; + return jv; +} + +} // namespace ledgerStateFix + +} // namespace jtx +} // namespace test +} // namespace ripple diff --git a/src/test/jtx/ledgerStateFix.h b/src/test/jtx/ledgerStateFix.h new file mode 100644 index 00000000000..bf0a56cabe3 --- /dev/null +++ b/src/test/jtx/ledgerStateFix.h @@ -0,0 +1,44 @@ +//------------------------------------------------------------------------------ +/* + This file is part of rippled: https://github.com/ripple/rippled + Copyright (c) 2024 Ripple Labs Inc. + + Permission to use, copy, modify, and/or distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. +*/ +//============================================================================== + +#ifndef RIPPLE_TEST_JTX_LEDGER_STATE_FIX_H_INCLUDED +#define RIPPLE_TEST_JTX_LEDGER_STATE_FIX_H_INCLUDED + +#include +#include + +namespace ripple { +namespace test { +namespace jtx { + +/** LedgerStateFix operations. */ +namespace ledgerStateFix { + +/** Repair the links in an NFToken directory. 
*/ +Json::Value +nftPageLinks(jtx::Account const& acct, jtx::Account const& owner); + +} // namespace ledgerStateFix + +} // namespace jtx + +} // namespace test +} // namespace ripple + +#endif diff --git a/src/test/ledger/Invariants_test.cpp b/src/test/ledger/Invariants_test.cpp index 66523700a88..8d7b08fa1ab 100644 --- a/src/test/ledger/Invariants_test.cpp +++ b/src/test/ledger/Invariants_test.cpp @@ -24,7 +24,9 @@ #include #include #include +#include #include + #include namespace ripple { @@ -110,10 +112,9 @@ class Invariants_test : public beast::unit_test::suite terActual = ac.checkInvariants(terActual, fee); BEAST_EXPECT(terExpect == terActual); BEAST_EXPECT( - boost::starts_with( - sink.messages().str(), "Invariant failed:") || - boost::starts_with( - sink.messages().str(), "Transaction caused an exception")); + sink.messages().str().starts_with("Invariant failed:") || + sink.messages().str().starts_with( + "Transaction caused an exception")); // uncomment if you want to log the invariant failure message // log << " --> " << sink.messages().str() << std::endl; for (auto const& m : expect_logs) @@ -650,6 +651,153 @@ class Invariants_test : public beast::unit_test::suite STTx{ttPAYMENT, [](STObject& tx) {}}); } + void + testNFTokenPageInvariants() + { + using namespace test::jtx; + testcase << "NFTokenPage"; + + // lambda that returns an STArray of NFTokenIDs. + uint256 const firstNFTID( + "0000000000000000000000000000000000000001FFFFFFFFFFFFFFFF00000000"); + auto makeNFTokenIDs = [&firstNFTID](unsigned int nftCount) { + SOTemplate const* nfTokenTemplate = + InnerObjectFormats::getInstance().findSOTemplateBySField( + sfNFToken); + + uint256 nftID(firstNFTID); + STArray ret; + for (int i = 0; i < nftCount; ++i) + { + STObject newNFToken( + *nfTokenTemplate, sfNFToken, [&nftID](STObject& object) { + object.setFieldH256(sfNFTokenID, nftID); + }); + ret.push_back(std::move(newNFToken)); + ++nftID; + } + return ret; + }; + + doInvariantCheck( + {{"NFT page has invalid size"}}, + [&makeNFTokenIDs]( + Account const& A1, Account const&, ApplyContext& ac) { + auto nftPage = std::make_shared(keylet::nftpage_max(A1)); + nftPage->setFieldArray(sfNFTokens, makeNFTokenIDs(0)); + + ac.view().insert(nftPage); + return true; + }); + + doInvariantCheck( + {{"NFT page has invalid size"}}, + [&makeNFTokenIDs]( + Account const& A1, Account const&, ApplyContext& ac) { + auto nftPage = std::make_shared(keylet::nftpage_max(A1)); + nftPage->setFieldArray(sfNFTokens, makeNFTokenIDs(33)); + + ac.view().insert(nftPage); + return true; + }); + + doInvariantCheck( + {{"NFTs on page are not sorted"}}, + [&makeNFTokenIDs]( + Account const& A1, Account const&, ApplyContext& ac) { + STArray nfTokens = makeNFTokenIDs(2); + std::iter_swap(nfTokens.begin(), nfTokens.begin() + 1); + + auto nftPage = std::make_shared(keylet::nftpage_max(A1)); + nftPage->setFieldArray(sfNFTokens, nfTokens); + + ac.view().insert(nftPage); + return true; + }); + + doInvariantCheck( + {{"NFT contains empty URI"}}, + [&makeNFTokenIDs]( + Account const& A1, Account const&, ApplyContext& ac) { + STArray nfTokens = makeNFTokenIDs(1); + nfTokens[0].setFieldVL(sfURI, Blob{}); + + auto nftPage = std::make_shared(keylet::nftpage_max(A1)); + nftPage->setFieldArray(sfNFTokens, nfTokens); + + ac.view().insert(nftPage); + return true; + }); + + doInvariantCheck( + {{"NFT page is improperly linked"}}, + [&makeNFTokenIDs]( + Account const& A1, Account const&, ApplyContext& ac) { + auto nftPage = std::make_shared(keylet::nftpage_max(A1)); + 
nftPage->setFieldArray(sfNFTokens, makeNFTokenIDs(1)); + nftPage->setFieldH256( + sfPreviousPageMin, keylet::nftpage_max(A1).key); + + ac.view().insert(nftPage); + return true; + }); + + doInvariantCheck( + {{"NFT page is improperly linked"}}, + [&makeNFTokenIDs]( + Account const& A1, Account const& A2, ApplyContext& ac) { + auto nftPage = std::make_shared(keylet::nftpage_max(A1)); + nftPage->setFieldArray(sfNFTokens, makeNFTokenIDs(1)); + nftPage->setFieldH256( + sfPreviousPageMin, keylet::nftpage_min(A2).key); + + ac.view().insert(nftPage); + return true; + }); + + doInvariantCheck( + {{"NFT page is improperly linked"}}, + [&makeNFTokenIDs]( + Account const& A1, Account const&, ApplyContext& ac) { + auto nftPage = std::make_shared(keylet::nftpage_max(A1)); + nftPage->setFieldArray(sfNFTokens, makeNFTokenIDs(1)); + nftPage->setFieldH256(sfNextPageMin, nftPage->key()); + + ac.view().insert(nftPage); + return true; + }); + + doInvariantCheck( + {{"NFT page is improperly linked"}}, + [&makeNFTokenIDs]( + Account const& A1, Account const& A2, ApplyContext& ac) { + STArray nfTokens = makeNFTokenIDs(1); + auto nftPage = std::make_shared(keylet::nftpage( + keylet::nftpage_max(A1), + ++(nfTokens[0].getFieldH256(sfNFTokenID)))); + nftPage->setFieldArray(sfNFTokens, std::move(nfTokens)); + nftPage->setFieldH256( + sfNextPageMin, keylet::nftpage_max(A2).key); + + ac.view().insert(nftPage); + return true; + }); + + doInvariantCheck( + {{"NFT found in incorrect page"}}, + [&makeNFTokenIDs]( + Account const& A1, Account const&, ApplyContext& ac) { + STArray nfTokens = makeNFTokenIDs(2); + auto nftPage = std::make_shared(keylet::nftpage( + keylet::nftpage_max(A1), + (nfTokens[1].getFieldH256(sfNFTokenID)))); + nftPage->setFieldArray(sfNFTokens, std::move(nfTokens)); + + ac.view().insert(nftPage); + return true; + }); + } + public: void run() override @@ -664,6 +812,7 @@ class Invariants_test : public beast::unit_test::suite testNoBadOffers(); testNoZeroEscrow(); testValidNewAccountRoot(); + testNFTokenPageInvariants(); } }; diff --git a/src/xrpld/app/tx/detail/InvariantCheck.cpp b/src/xrpld/app/tx/detail/InvariantCheck.cpp index 70210b90d75..f855ad8578c 100644 --- a/src/xrpld/app/tx/detail/InvariantCheck.cpp +++ b/src/xrpld/app/tx/detail/InvariantCheck.cpp @@ -612,6 +612,10 @@ ValidNFTokenPage::visitEntry( static constexpr uint256 const& pageBits = nft::pageMask; static constexpr uint256 const accountBits = ~pageBits; + if ((before && before->getType() != ltNFTOKEN_PAGE) || + (after && after->getType() != ltNFTOKEN_PAGE)) + return; + auto check = [this, isDelete](std::shared_ptr const& sle) { uint256 const account = sle->key() & accountBits; uint256 const hiLimit = sle->key() & pageBits; @@ -673,11 +677,37 @@ ValidNFTokenPage::visitEntry( } }; - if (before && before->getType() == ltNFTOKEN_PAGE) + if (before) + { check(before); - if (after && after->getType() == ltNFTOKEN_PAGE) + // While an account's NFToken directory contains any NFTokens, the last + // NFTokenPage (with 96 bits of 1 in the low part of the index) should + // never be deleted. + if (isDelete && (before->key() & nft::pageMask) == nft::pageMask && + before->isFieldPresent(sfPreviousPageMin)) + { + deletedFinalPage_ = true; + } + } + + if (after) check(after); + + if (!isDelete && before && after) + { + // If the NFTokenPage + // 1. Has a NextMinPage field in before, but loses it in after, and + // 2. 
This is not the last page in the directory + // Then we have identified a corruption in the links between the + // NFToken pages in the NFToken directory. + if ((before->key() & nft::pageMask) != nft::pageMask && + before->isFieldPresent(sfNextPageMin) && + !after->isFieldPresent(sfNextPageMin)) + { + deletedLink_ = true; + } + } } bool @@ -718,6 +748,21 @@ ValidNFTokenPage::finalize( return false; } + if (view.rules().enabled(fixNFTokenPageLinks)) + { + if (deletedFinalPage_) + { + JLOG(j.fatal()) << "Invariant failed: Last NFT page deleted with " + "non-empty directory."; + return false; + } + if (deletedLink_) + { + JLOG(j.fatal()) << "Invariant failed: Lost NextMinPage link."; + return false; + } + } + return true; } diff --git a/src/xrpld/app/tx/detail/InvariantCheck.h b/src/xrpld/app/tx/detail/InvariantCheck.h index 6a83f5c9b7b..1b3234bae69 100644 --- a/src/xrpld/app/tx/detail/InvariantCheck.h +++ b/src/xrpld/app/tx/detail/InvariantCheck.h @@ -367,6 +367,8 @@ class ValidNFTokenPage bool badSort_ = false; bool badURI_ = false; bool invalidSize_ = false; + bool deletedFinalPage_ = false; + bool deletedLink_ = false; public: void diff --git a/src/xrpld/app/tx/detail/LedgerStateFix.cpp b/src/xrpld/app/tx/detail/LedgerStateFix.cpp new file mode 100644 index 00000000000..568ed49304a --- /dev/null +++ b/src/xrpld/app/tx/detail/LedgerStateFix.cpp @@ -0,0 +1,99 @@ +//------------------------------------------------------------------------------ +/* + This file is part of rippled: https://github.com/ripple/rippled + Copyright (c) 2024 Ripple Labs Inc. + + Permission to use, copy, modify, and/or distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. +*/ +//============================================================================== + +#include + +#include +#include +#include +#include +#include +#include + +namespace ripple { + +NotTEC +LedgerStateFix::preflight(PreflightContext const& ctx) +{ + if (!ctx.rules.enabled(fixNFTokenPageLinks)) + return temDISABLED; + + if (ctx.tx.getFlags() & tfUniversalMask) + return temINVALID_FLAG; + + if (auto const ret = preflight1(ctx); !isTesSuccess(ret)) + return ret; + + switch (ctx.tx[sfLedgerFixType]) + { + case FixType::nfTokenPageLink: + if (!ctx.tx.isFieldPresent(sfOwner)) + return temINVALID; + break; + + default: + return tefINVALID_LEDGER_FIX_TYPE; + } + + return preflight2(ctx); +} + +XRPAmount +LedgerStateFix::calculateBaseFee(ReadView const& view, STTx const& tx) +{ + // The fee required for LedgerStateFix is one owner reserve, just like + // the fee for AccountDelete. 
+ return view.fees().increment; +} + +TER +LedgerStateFix::preclaim(PreclaimContext const& ctx) +{ + switch (ctx.tx[sfLedgerFixType]) + { + case FixType::nfTokenPageLink: { + AccountID const owner{ctx.tx[sfOwner]}; + if (!ctx.view.read(keylet::account(owner))) + return tecOBJECT_NOT_FOUND; + + return tesSUCCESS; + } + } + + // preflight is supposed to verify that only valid FixTypes get to preclaim. + return tecINTERNAL; +} + +TER +LedgerStateFix::doApply() +{ + switch (ctx_.tx[sfLedgerFixType]) + { + case FixType::nfTokenPageLink: + if (!nft::repairNFTokenDirectoryLinks(view(), ctx_.tx[sfOwner])) + return tecFAILED_PROCESSING; + + return tesSUCCESS; + } + + // preflight is supposed to verify that only valid FixTypes get to doApply. + return tecINTERNAL; +} + +} // namespace ripple diff --git a/src/xrpld/app/tx/detail/LedgerStateFix.h b/src/xrpld/app/tx/detail/LedgerStateFix.h new file mode 100644 index 00000000000..b480d239291 --- /dev/null +++ b/src/xrpld/app/tx/detail/LedgerStateFix.h @@ -0,0 +1,57 @@ +//------------------------------------------------------------------------------ +/* + This file is part of rippled: https://github.com/ripple/rippled + Copyright (c) 2024 Ripple Labs Inc. + + Permission to use, copy, modify, and/or distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. +*/ +//============================================================================== + +#ifndef RIPPLE_TX_LEDGER_STATE_FIX_H_INCLUDED +#define RIPPLE_TX_LEDGER_STATE_FIX_H_INCLUDED + +#include +#include +#include + +namespace ripple { + +class LedgerStateFix : public Transactor +{ +public: + enum FixType : std::uint16_t { + nfTokenPageLink = 1, + }; + + static constexpr ConsequencesFactoryType ConsequencesFactory{Normal}; + + explicit LedgerStateFix(ApplyContext& ctx) : Transactor(ctx) + { + } + + static NotTEC + preflight(PreflightContext const& ctx); + + static XRPAmount + calculateBaseFee(ReadView const& view, STTx const& tx); + + static TER + preclaim(PreclaimContext const& ctx); + + TER + doApply() override; +}; + +} // namespace ripple + +#endif diff --git a/src/xrpld/app/tx/detail/NFTokenUtils.cpp b/src/xrpld/app/tx/detail/NFTokenUtils.cpp index 279bf6b9816..61ff8e200b3 100644 --- a/src/xrpld/app/tx/detail/NFTokenUtils.cpp +++ b/src/xrpld/app/tx/detail/NFTokenUtils.cpp @@ -429,10 +429,48 @@ removeToken( return tesSUCCESS; } - // The page is empty, so we can just unlink it and then remove it. if (prev) { - // Make our previous page point to our next page: + // With fixNFTokenPageLinks... + // The page is empty and there is a prev. If the last page of the + // directory is empty then we need to: + // 1. Move the contents of the previous page into the last page. + // 2. Fix up the link from prev's previous page. + // 3. Fix up the owner count. + // 4. Erase the previous page. 
+ if (view.rules().enabled(fixNFTokenPageLinks) && + ((curr->key() & nft::pageMask) == pageMask)) + { + // Copy all relevant information from prev to curr. + curr->peekFieldArray(sfNFTokens) = prev->peekFieldArray(sfNFTokens); + + if (auto const prevLink = prev->at(~sfPreviousPageMin)) + { + curr->at(sfPreviousPageMin) = *prevLink; + + // Also fix up the NextPageMin link in the new Previous. + auto const newPrev = loadPage(curr, sfPreviousPageMin); + newPrev->at(sfNextPageMin) = curr->key(); + view.update(newPrev); + } + else + { + curr->makeFieldAbsent(sfPreviousPageMin); + } + + adjustOwnerCount( + view, + view.peek(keylet::account(owner)), + -1, + beast::Journal{beast::Journal::getNullSink()}); + + view.update(curr); + view.erase(prev); + return tesSUCCESS; + } + + // The page is empty and not the last page, so we can just unlink it + // and then remove it. if (next) prev->setFieldH256(sfNextPageMin, next->key()); else @@ -637,6 +675,124 @@ deleteTokenOffer(ApplyView& view, std::shared_ptr const& offer) return true; } +bool +repairNFTokenDirectoryLinks(ApplyView& view, AccountID const& owner) +{ + bool didRepair = false; + + auto const last = keylet::nftpage_max(owner); + + std::shared_ptr page = view.peek(Keylet( + ltNFTOKEN_PAGE, + view.succ(keylet::nftpage_min(owner).key, last.key.next()) + .value_or(last.key))); + + if (!page) + return didRepair; + + if (page->key() == last.key) + { + // There's only one page in this entire directory. There should be + // no links on that page. + bool const nextPresent = page->isFieldPresent(sfNextPageMin); + bool const prevPresent = page->isFieldPresent(sfPreviousPageMin); + if (nextPresent || prevPresent) + { + didRepair = true; + if (prevPresent) + page->makeFieldAbsent(sfPreviousPageMin); + if (nextPresent) + page->makeFieldAbsent(sfNextPageMin); + view.update(page); + } + return didRepair; + } + + // First page is not the same as last page. The first page should not + // contain a previous link. + if (page->isFieldPresent(sfPreviousPageMin)) + { + didRepair = true; + page->makeFieldAbsent(sfPreviousPageMin); + view.update(page); + } + + std::shared_ptr nextPage; + while ( + (nextPage = view.peek(Keylet( + ltNFTOKEN_PAGE, + view.succ(page->key().next(), last.key.next()) + .value_or(last.key))))) + { + if (!page->isFieldPresent(sfNextPageMin) || + page->getFieldH256(sfNextPageMin) != nextPage->key()) + { + didRepair = true; + page->setFieldH256(sfNextPageMin, nextPage->key()); + view.update(page); + } + + if (!nextPage->isFieldPresent(sfPreviousPageMin) || + nextPage->getFieldH256(sfPreviousPageMin) != page->key()) + { + didRepair = true; + nextPage->setFieldH256(sfPreviousPageMin, page->key()); + view.update(nextPage); + } + + if (nextPage->key() == last.key) + // We need special handling for the last page. + break; + + page = nextPage; + } + + // When we arrive here, nextPage should have the same index as last. + // If not, then that's something we need to fix. + if (!nextPage) + { + // It turns out that page is the last page for this owner, but + // that last page does not have the expected final index. We need + // to move the contents of the current last page into a page with the + // correct index. + // + // The owner count does not need to change because, even though + // we're adding a page, we'll also remove the page that used to be + // last. + didRepair = true; + nextPage = std::make_shared(last); + + // Copy all relevant information from prev to curr. 
+ nextPage->peekFieldArray(sfNFTokens) = page->peekFieldArray(sfNFTokens); + + if (auto const prevLink = page->at(~sfPreviousPageMin)) + { + nextPage->at(sfPreviousPageMin) = *prevLink; + + // Also fix up the NextPageMin link in the new Previous. + auto const newPrev = view.peek(Keylet(ltNFTOKEN_PAGE, *prevLink)); + if (!newPrev) + Throw( + "NFTokenPage directory for " + to_string(owner) + + " cannot be repaired. Unexpected link problem."); + newPrev->at(sfNextPageMin) = nextPage->key(); + view.update(newPrev); + } + view.erase(page); + view.insert(nextPage); + return didRepair; + } + + assert(nextPage); + if (nextPage->isFieldPresent(sfNextPageMin)) + { + didRepair = true; + nextPage->makeFieldAbsent(sfNextPageMin); + view.update(nextPage); + } + return didRepair; +} + NotTEC tokenOfferCreatePreflight( AccountID const& acctID, diff --git a/src/xrpld/app/tx/detail/NFTokenUtils.h b/src/xrpld/app/tx/detail/NFTokenUtils.h index 243c5273399..97d109b8318 100644 --- a/src/xrpld/app/tx/detail/NFTokenUtils.h +++ b/src/xrpld/app/tx/detail/NFTokenUtils.h @@ -95,6 +95,13 @@ removeToken( bool deleteTokenOffer(ApplyView& view, std::shared_ptr const& offer); +/** Repairs the links in an NFTokenPage directory. + + Returns true if a repair took place, otherwise false. +*/ +bool +repairNFTokenDirectoryLinks(ApplyView& view, AccountID const& owner); + bool compareTokens(uint256 const& a, uint256 const& b); diff --git a/src/xrpld/app/tx/detail/applySteps.cpp b/src/xrpld/app/tx/detail/applySteps.cpp index 9ddaa3051c4..cbeabb6fc9c 100644 --- a/src/xrpld/app/tx/detail/applySteps.cpp +++ b/src/xrpld/app/tx/detail/applySteps.cpp @@ -38,6 +38,7 @@ #include #include #include +#include #include #include #include @@ -97,6 +98,8 @@ with_txn_type(TxType txnType, F&& f) return f.template operator()(); case ttESCROW_CANCEL: return f.template operator()(); + case ttLEDGER_STATE_FIX: + return f.template operator()(); case ttPAYCHAN_CLAIM: return f.template operator()(); case ttPAYCHAN_CREATE: From 93d8bafb24a7846210b49376f6c0e78eea484a0f Mon Sep 17 00:00:00 2001 From: Ed Hennis Date: Thu, 15 Aug 2024 12:51:50 -0400 Subject: [PATCH 13/26] chore: libxrpl verification on CI (#5028) Implements a CI workflow that detects when a new version of libxrpl is proposed, uploads it to artifactory under the `clio` channel and notifies Clio's CI to check this newly proposed version. 
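Before moving on from the fixNFTokenPageLinks change set above: the repair path it adds is driven by submitting a LedgerStateFix transaction with LedgerFixType 1 (nfTokenPageLink) and the Owner of the damaged directory, paying one owner reserve. The following is a minimal sketch, not part of the patch, of how a unit test might drive that repair with the jtx helper introduced earlier; the account name, funding amount, and surrounding Env setup are illustrative assumptions:

    using namespace test::jtx;

    Env env{*this, supported_amendments()};
    Account const alice{"alice"};  // hypothetical owner of a broken directory
    env.fund(XRP(1000), alice);
    env.close();

    // alice pays one owner reserve (the "increment" fee, as in
    // LedgerStateFix::calculateBaseFee) to repair her own directory links.
    auto const fixFee = drops(env.current()->fees().increment);
    env(ledgerStateFix::nftPageLinks(alice, alice), fee(fixFee));
    env.close();

If the directory links were intact to begin with, repairNFTokenDirectoryLinks simply finds nothing to fix; the transaction only returns tecFAILED_PROCESSING when no repair took place.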
--- .github/workflows/libxrpl.yml | 88 +++++++++++++++++++++++++++++++++++ 1 file changed, 88 insertions(+) create mode 100644 .github/workflows/libxrpl.yml diff --git a/.github/workflows/libxrpl.yml b/.github/workflows/libxrpl.yml new file mode 100644 index 00000000000..fe4a2c3e220 --- /dev/null +++ b/.github/workflows/libxrpl.yml @@ -0,0 +1,88 @@ +name: Check libXRPL compatibility with Clio +env: + CONAN_URL: http://18.143.149.228:8081/artifactory/api/conan/conan-non-prod + CONAN_LOGIN_USERNAME_RIPPLE: ${{ secrets.CONAN_USERNAME }} + CONAN_PASSWORD_RIPPLE: ${{ secrets.CONAN_TOKEN }} +on: + pull_request: + paths: + - 'src/libxrpl/protocol/BuildInfo.cpp' + - '.github/workflows/libxrpl.yml' +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: true + +jobs: + publish: + name: Publish libXRPL + outputs: + outcome: ${{ steps.upload.outputs.outcome }} + version: ${{ steps.version.outputs.version }} + channel: ${{ steps.channel.outputs.channel }} + runs-on: [self-hosted, heavy] + container: rippleci/rippled-build-ubuntu:aaf5e3e + steps: + - name: Wait for essential checks to succeed + uses: lewagon/wait-on-check-action@v1.3.4 + with: + ref: ${{ github.event.pull_request.head.sha || github.sha }} + running-workflow-name: wait-for-check-regexp + check-regexp: '(dependencies|test).*linux.*' # Ignore windows and mac tests but make sure linux passes + repo-token: ${{ secrets.GITHUB_TOKEN }} + wait-interval: 10 + - name: Checkout + uses: actions/checkout@v4 + - name: Generate channel + id: channel + shell: bash + run: | + echo channel="clio/pr_${{ github.event.pull_request.number }}" | tee ${GITHUB_OUTPUT} + - name: Export new package + shell: bash + run: | + conan export . ${{ steps.channel.outputs.channel }} + - name: Add Ripple Conan remote + shell: bash + run: | + conan remote list + conan remote remove ripple || true + # Do not quote the URL. An empty string will be accepted (with a non-fatal warning), but a missing argument will not. + conan remote add ripple ${{ env.CONAN_URL }} --insert 0 + - name: Parse new version + id: version + shell: bash + run: | + echo version="$(cat src/libxrpl/protocol/BuildInfo.cpp | grep "versionString =" \ + | awk -F '"' '{print $2}')" | tee ${GITHUB_OUTPUT} + - name: Try to authenticate to Ripple Conan remote + id: remote + shell: bash + run: | + # `conan user` implicitly uses the environment variables CONAN_LOGIN_USERNAME_ and CONAN_PASSWORD_. 
+ # https://docs.conan.io/1/reference/commands/misc/user.html#using-environment-variables + # https://docs.conan.io/1/reference/env_vars.html#conan-login-username-conan-login-username-remote-name + # https://docs.conan.io/1/reference/env_vars.html#conan-password-conan-password-remote-name + echo outcome=$(conan user --remote ripple --password >&2 \ + && echo success || echo failure) | tee ${GITHUB_OUTPUT} + - name: Upload new package + id: upload + if: (steps.remote.outputs.outcome == 'success') + shell: bash + run: | + echo "conan upload version ${{ steps.version.outputs.version }} on channel ${{ steps.channel.outputs.channel }}" + echo outcome=$(conan upload xrpl/${{ steps.version.outputs.version }}@${{ steps.channel.outputs.channel }} --remote ripple --confirm >&2 \ + && echo success || echo failure) | tee ${GITHUB_OUTPUT} + notify_clio: + name: Notify Clio + runs-on: ubuntu-latest + needs: publish + env: + GH_TOKEN: ${{ secrets.CLIO_NOTIFY_TOKEN }} + steps: + - name: Notify Clio about new version + if: (needs.publish.outputs.outcome == 'success') + shell: bash + run: | + gh api --method POST -H "Accept: application/vnd.github+json" -H "X-GitHub-Api-Version: 2022-11-28" \ + /repos/xrplf/clio/dispatches -f "event_type=check_libxrpl" \ + -F "client_payload[version]=${{ needs.publish.outputs.version }}@${{ needs.publish.outputs.channel }}" From d9bd75e68326861fb38fd5b27d47da1054a7fc3b Mon Sep 17 00:00:00 2001 From: Ed Hennis Date: Thu, 15 Aug 2024 17:03:50 -0400 Subject: [PATCH 14/26] chore: Fix documentation generation job: (#5091) * Add "doxygen" to list of supported branches to allow for testing and development. * Add titles / H1 to some .md files that don't have them. --- .github/workflows/doxygen.yml | 1 + bin/ci/README.md | 2 ++ cmake/RippledDocs.cmake | 18 ++++++++++-------- docs/Doxyfile | 15 +++++++-------- external/README.md | 2 ++ include/xrpl/proto/org/xrpl/rpc/v1/README.md | 2 ++ src/xrpld/app/reporting/README.md | 2 ++ 7 files changed, 26 insertions(+), 16 deletions(-) diff --git a/.github/workflows/doxygen.yml b/.github/workflows/doxygen.yml index 10a1465192a..e2265d1b83b 100644 --- a/.github/workflows/doxygen.yml +++ b/.github/workflows/doxygen.yml @@ -4,6 +4,7 @@ on: push: branches: - develop + - doxygen concurrency: group: ${{ github.workflow }}-${{ github.ref }} cancel-in-progress: true diff --git a/bin/ci/README.md b/bin/ci/README.md index 36d4fc1d310..32ae24b3a20 100644 --- a/bin/ci/README.md +++ b/bin/ci/README.md @@ -1,3 +1,5 @@ +# Continuous Integration (CI) Scripts + In this directory are two scripts, `build.sh` and `test.sh` used for building and testing rippled. 
diff --git a/cmake/RippledDocs.cmake b/cmake/RippledDocs.cmake index a9b8b283bf0..d93bc119c0d 100644 --- a/cmake/RippledDocs.cmake +++ b/cmake/RippledDocs.cmake @@ -21,15 +21,17 @@ set(doxyfile "${CMAKE_CURRENT_SOURCE_DIR}/docs/Doxyfile") file(GLOB_RECURSE doxygen_input docs/*.md - src/ripple/*.h - src/ripple/*.cpp - src/ripple/*.md - src/test/*.h - src/test/*.md) + include/*.h + include/*.cpp + include/*.md + src/*.h + src/*.cpp + src/*.md + Builds/*.md + *.md) list(APPEND doxygen_input - README.md - RELEASENOTES.md - src/README.md) + external/README.md + ) set(dependencies "${doxygen_input}" "${doxyfile}") function(verbose_find_path variable name) diff --git a/docs/Doxyfile b/docs/Doxyfile index 48a0b5d1e1a..750ae0fb649 100644 --- a/docs/Doxyfile +++ b/docs/Doxyfile @@ -103,18 +103,17 @@ WARN_LOGFILE = # Configuration options related to the input files #--------------------------------------------------------------------------- INPUT = \ - docs \ - src/ripple \ - src/test \ - src/README.md \ - README.md \ - RELEASENOTES.md \ + . \ INPUT_ENCODING = UTF-8 FILE_PATTERNS = *.h *.cpp *.md RECURSIVE = YES -EXCLUDE = +EXCLUDE = \ + .github \ + external/ed25519-donna \ + external/secp256k1 \ + EXCLUDE_SYMLINKS = NO EXCLUDE_PATTERNS = EXCLUDE_SYMBOLS = @@ -130,7 +129,7 @@ INPUT_FILTER = FILTER_PATTERNS = FILTER_SOURCE_FILES = NO FILTER_SOURCE_PATTERNS = -USE_MDFILE_AS_MAINPAGE = src/README.md +USE_MDFILE_AS_MAINPAGE = ./README.md #--------------------------------------------------------------------------- # Configuration options related to source browsing diff --git a/external/README.md b/external/README.md index f45f80965a5..25ae577ba58 100644 --- a/external/README.md +++ b/external/README.md @@ -1,3 +1,5 @@ +# External Conan recipes + The subdirectories in this directory contain either copies or Conan recipes of external libraries used by rippled. The Conan recipes include patches we have not yet pushed upstream. diff --git a/include/xrpl/proto/org/xrpl/rpc/v1/README.md b/include/xrpl/proto/org/xrpl/rpc/v1/README.md index c5000104257..9268439847d 100644 --- a/include/xrpl/proto/org/xrpl/rpc/v1/README.md +++ b/include/xrpl/proto/org/xrpl/rpc/v1/README.md @@ -1,3 +1,5 @@ +# Protocol buffer definitions for gRPC + This folder contains the protocol buffer definitions used by the rippled gRPC API. The gRPC API attempts to mimic the JSON/Websocket API as much as possible. As of April 2020, the gRPC API supports a subset of the full rippled API: diff --git a/src/xrpld/app/reporting/README.md b/src/xrpld/app/reporting/README.md index 745cbb633a9..f55b2d8d60d 100644 --- a/src/xrpld/app/reporting/README.md +++ b/src/xrpld/app/reporting/README.md @@ -1,3 +1,5 @@ +# Reporting mode + Reporting mode is a special operating mode of rippled, designed to handle RPCs for validated data. 
A server running in reporting mode does not connect to the p2p network, but rather extracts validated data from a node that is connected From 00ed7c942436f02644a13169002b5123f4e2a116 Mon Sep 17 00:00:00 2001 From: Valentin Balaschenko <13349202+vlntb@users.noreply.github.com> Date: Fri, 23 Aug 2024 14:43:02 -0400 Subject: [PATCH 15/26] Track latencies of certain code blocks, and log if they take too long --- src/ripple/app/consensus/RCLValidations.cpp | 9 +- src/ripple/app/ledger/impl/InboundLedgers.cpp | 128 ++++++++++-------- src/ripple/app/main/Application.cpp | 2 +- src/ripple/app/main/Main.cpp | 2 +- src/ripple/app/rdb/Download.h | 4 +- src/ripple/app/rdb/ShardArchive.h | 6 +- src/ripple/app/rdb/UnitaryShard.h | 12 +- src/ripple/app/rdb/Vacuum.h | 3 +- src/ripple/app/rdb/Wallet.h | 9 +- src/ripple/app/rdb/backend/detail/Node.h | 4 +- src/ripple/app/rdb/backend/detail/Shard.h | 4 +- .../app/rdb/backend/detail/impl/Node.cpp | 7 +- .../app/rdb/backend/detail/impl/Shard.cpp | 13 +- .../app/rdb/backend/impl/SQLiteDatabase.cpp | 4 +- src/ripple/app/rdb/impl/Download.cpp | 5 +- src/ripple/app/rdb/impl/ShardArchive.cpp | 7 +- src/ripple/app/rdb/impl/UnitaryShard.cpp | 20 +-- src/ripple/app/rdb/impl/Vacuum.cpp | 4 +- src/ripple/app/rdb/impl/Wallet.cpp | 11 +- src/ripple/basics/PerfLog.h | 24 ++++ src/ripple/core/Config.h | 6 + src/ripple/core/DatabaseCon.h | 38 ++++-- src/ripple/net/DatabaseBody.h | 5 +- src/ripple/net/DatabaseDownloader.h | 3 +- src/ripple/net/HTTPDownloader.h | 3 +- src/ripple/net/impl/DatabaseBody.ipp | 5 +- src/ripple/net/impl/DatabaseDownloader.cpp | 5 +- src/ripple/net/impl/HTTPDownloader.cpp | 2 +- src/ripple/nodestore/impl/Shard.cpp | 8 +- src/ripple/overlay/impl/PeerImp.cpp | 13 +- src/ripple/rpc/impl/ShardArchiveHandler.cpp | 4 +- src/test/app/Manifest_test.cpp | 2 +- 32 files changed, 246 insertions(+), 126 deletions(-) diff --git a/src/ripple/app/consensus/RCLValidations.cpp b/src/ripple/app/consensus/RCLValidations.cpp index ab9391385dd..6b626569e69 100644 --- a/src/ripple/app/consensus/RCLValidations.cpp +++ b/src/ripple/app/consensus/RCLValidations.cpp @@ -25,6 +25,7 @@ #include #include #include +#include #include #include #include @@ -126,7 +127,13 @@ RCLValidationsAdaptor::now() const std::optional RCLValidationsAdaptor::acquire(LedgerHash const& hash) { - auto ledger = app_.getLedgerMaster().getLedgerByHash(hash); + using namespace std::chrono_literals; + auto ledger = perf::measureDurationAndLog( + [&]() { return app_.getLedgerMaster().getLedgerByHash(hash); }, + "getLedgerByHash", + 10ms, + j_); + if (!ledger) { JLOG(j_.debug()) diff --git a/src/ripple/app/ledger/impl/InboundLedgers.cpp b/src/ripple/app/ledger/impl/InboundLedgers.cpp index 0bff434edbc..b9b8b9fcfd2 100644 --- a/src/ripple/app/ledger/impl/InboundLedgers.cpp +++ b/src/ripple/app/ledger/impl/InboundLedgers.cpp @@ -23,6 +23,7 @@ #include #include #include +#include #include #include #include @@ -69,76 +70,83 @@ class InboundLedgersImp : public InboundLedgers std::uint32_t seq, InboundLedger::Reason reason) override { - assert(hash.isNonZero()); - assert( - reason != InboundLedger::Reason::SHARD || - (seq != 0 && app_.getShardStore())); - - // probably not the right rule - if (app_.getOPs().isNeedNetworkLedger() && - (reason != InboundLedger::Reason::GENERIC) && - (reason != InboundLedger::Reason::CONSENSUS)) - return {}; - - bool isNew = true; - std::shared_ptr inbound; - { - ScopedLockType sl(mLock); - if (stopping_) - { + auto doAcquire = [&, seq, reason]() -> std::shared_ptr { + 
assert(hash.isNonZero()); + assert( + reason != InboundLedger::Reason::SHARD || + (seq != 0 && app_.getShardStore())); + + // probably not the right rule + if (app_.getOPs().isNeedNetworkLedger() && + (reason != InboundLedger::Reason::GENERIC) && + (reason != InboundLedger::Reason::CONSENSUS)) return {}; - } - auto it = mLedgers.find(hash); - if (it != mLedgers.end()) + bool isNew = true; + std::shared_ptr inbound; { - isNew = false; - inbound = it->second; - } - else - { - inbound = std::make_shared( - app_, - hash, - seq, - reason, - std::ref(m_clock), - mPeerSetBuilder->build()); - mLedgers.emplace(hash, inbound); - inbound->init(sl); - ++mCounter; + ScopedLockType sl(mLock); + if (stopping_) + { + return {}; + } + + auto it = mLedgers.find(hash); + if (it != mLedgers.end()) + { + isNew = false; + inbound = it->second; + } + else + { + inbound = std::make_shared( + app_, + hash, + seq, + reason, + std::ref(m_clock), + mPeerSetBuilder->build()); + mLedgers.emplace(hash, inbound); + inbound->init(sl); + ++mCounter; + } } - } - if (inbound->isFailed()) - return {}; + if (inbound->isFailed()) + return {}; - if (!isNew) - inbound->update(seq); + if (!isNew) + inbound->update(seq); - if (!inbound->isComplete()) - return {}; + if (!inbound->isComplete()) + return {}; - if (reason == InboundLedger::Reason::HISTORY) - { - if (inbound->getLedger()->stateMap().family().isShardBacked()) - app_.getNodeStore().storeLedger(inbound->getLedger()); - } - else if (reason == InboundLedger::Reason::SHARD) - { - auto shardStore = app_.getShardStore(); - if (!shardStore) + if (reason == InboundLedger::Reason::HISTORY) { - JLOG(j_.error()) - << "Acquiring shard with no shard store available"; - return {}; + if (inbound->getLedger()->stateMap().family().isShardBacked()) + app_.getNodeStore().storeLedger(inbound->getLedger()); } - if (inbound->getLedger()->stateMap().family().isShardBacked()) - shardStore->setStored(inbound->getLedger()); - else - shardStore->storeLedger(inbound->getLedger()); - } - return inbound->getLedger(); + else if (reason == InboundLedger::Reason::SHARD) + { + auto shardStore = app_.getShardStore(); + if (!shardStore) + { + JLOG(j_.error()) + << "Acquiring shard with no shard store available"; + return {}; + } + if (inbound->getLedger()->stateMap().family().isShardBacked()) + shardStore->setStored(inbound->getLedger()); + else + shardStore->storeLedger(inbound->getLedger()); + } + return inbound->getLedger(); + }; + using namespace std::chrono_literals; + std::shared_ptr ledger = perf::measureDurationAndLog( + doAcquire, "InboundLedgersImp::acquire", 500ms, j_); + + return ledger; } std::shared_ptr diff --git a/src/ripple/app/main/Application.cpp b/src/ripple/app/main/Application.cpp index 55300a390c9..b1305e46672 100644 --- a/src/ripple/app/main/Application.cpp +++ b/src/ripple/app/main/Application.cpp @@ -942,7 +942,7 @@ class ApplicationImp : public Application, public BasicApp auto setup = setup_DatabaseCon(*config_, m_journal); setup.useGlobalPragma = false; - mWalletDB = makeWalletDB(setup); + mWalletDB = makeWalletDB(setup, m_journal); } catch (std::exception const& e) { diff --git a/src/ripple/app/main/Main.cpp b/src/ripple/app/main/Main.cpp index 710e4e9674f..02989b9ee6c 100644 --- a/src/ripple/app/main/Main.cpp +++ b/src/ripple/app/main/Main.cpp @@ -595,7 +595,7 @@ run(int argc, char** argv) try { auto setup = setup_DatabaseCon(*config); - if (!doVacuumDB(setup)) + if (!doVacuumDB(setup, config->journal())) return -1; } catch (std::exception const& e) diff --git 
a/src/ripple/app/rdb/Download.h b/src/ripple/app/rdb/Download.h index b72b5ec57e7..a16c9aa8788 100644 --- a/src/ripple/app/rdb/Download.h +++ b/src/ripple/app/rdb/Download.h @@ -36,13 +36,15 @@ namespace ripple { * download process or continues an existing one. * @param setup Path to the database and other opening parameters. * @param path Path of the new file to download. + * @param j Journal. * @return Pair containing a unique pointer to the database and the amount of * bytes already downloaded if a download is being continued. */ std::pair, std::optional> openDatabaseBodyDb( DatabaseCon::Setup const& setup, - boost::filesystem::path const& path); + boost::filesystem::path const& path, + beast::Journal j); /** * @brief databaseBodyDoPut Saves a new fragment of a downloaded file. diff --git a/src/ripple/app/rdb/ShardArchive.h b/src/ripple/app/rdb/ShardArchive.h index 20c4382b056..27db8279718 100644 --- a/src/ripple/app/rdb/ShardArchive.h +++ b/src/ripple/app/rdb/ShardArchive.h @@ -30,10 +30,14 @@ namespace ripple { * descriptor. * @param dir Path to the database to open. * @param dbName Name of the database. + * @param j Journal. * @return Unique pointer to the opened database. */ std::unique_ptr -makeArchiveDB(boost::filesystem::path const& dir, std::string const& dbName); +makeArchiveDB( + boost::filesystem::path const& dir, + std::string const& dbName, + beast::Journal j); /** * @brief readArchiveDB Reads entries from the shard archive database and diff --git a/src/ripple/app/rdb/UnitaryShard.h b/src/ripple/app/rdb/UnitaryShard.h index d2ac773dbd3..e9a2ac93d2d 100644 --- a/src/ripple/app/rdb/UnitaryShard.h +++ b/src/ripple/app/rdb/UnitaryShard.h @@ -39,13 +39,15 @@ struct DatabasePair * and returns their descriptors. * @param config Config object. * @param setup Path to the databases and other opening parameters. + * @param j Journal. * @return Pair of unique pointers to the opened ledger and transaction * databases. */ DatabasePair makeShardCompleteLedgerDBs( Config const& config, - DatabaseCon::Setup const& setup); + DatabaseCon::Setup const& setup, + beast::Journal j); /** * @brief makeShardIncompleteLedgerDBs Opens shard databases for partially @@ -53,6 +55,7 @@ makeShardCompleteLedgerDBs( * @param config Config object. * @param setup Path to the databases and other opening parameters. * @param checkpointerSetup Checkpointer parameters. + * @param j Journal. * @return Pair of unique pointers to the opened ledger and transaction * databases. */ @@ -60,7 +63,8 @@ DatabasePair makeShardIncompleteLedgerDBs( Config const& config, DatabaseCon::Setup const& setup, - DatabaseCon::CheckpointerSetup const& checkpointerSetup); + DatabaseCon::CheckpointerSetup const& checkpointerSetup, + beast::Journal j); /** * @brief updateLedgerDBs Saves the given ledger to shard databases. @@ -86,12 +90,14 @@ updateLedgerDBs( * descriptor. * @param setup Path to the database and other opening parameters. * @param checkpointerSetup Checkpointer parameters. + * @param j Journal. * @return Unique pointer to the opened database. 
 */
 std::unique_ptr<DatabaseCon>
 makeAcquireDB(
     DatabaseCon::Setup const& setup,
-    DatabaseCon::CheckpointerSetup const& checkpointerSetup);
+    DatabaseCon::CheckpointerSetup const& checkpointerSetup,
+    beast::Journal j);
 
 /**
  * @brief insertAcquireDBIndex Adds a new shard index to the shard acquire
diff --git a/src/ripple/app/rdb/Vacuum.h b/src/ripple/app/rdb/Vacuum.h
index 3db18da045a..463f046e0a5 100644
--- a/src/ripple/app/rdb/Vacuum.h
+++ b/src/ripple/app/rdb/Vacuum.h
@@ -27,10 +27,11 @@ namespace ripple {
 /**
  * @brief doVacuumDB Creates, initialises, and performs cleanup on a database.
  * @param setup Path to the database and other opening parameters.
+ * @param j Journal.
  * @return True if the vacuum process completed successfully.
  */
 bool
-doVacuumDB(DatabaseCon::Setup const& setup);
+doVacuumDB(DatabaseCon::Setup const& setup, beast::Journal j);
 
 } // namespace ripple
diff --git a/src/ripple/app/rdb/Wallet.h b/src/ripple/app/rdb/Wallet.h
index e9846714ece..6a15997ffe4 100644
--- a/src/ripple/app/rdb/Wallet.h
+++ b/src/ripple/app/rdb/Wallet.h
@@ -32,19 +32,24 @@ namespace ripple {
 /**
  * @brief makeWalletDB Opens the wallet database and returns it.
  * @param setup Path to the database and other opening parameters.
+ * @param j Journal.
  * @return Unique pointer to the database descriptor.
  */
 std::unique_ptr<DatabaseCon>
-makeWalletDB(DatabaseCon::Setup const& setup);
+makeWalletDB(DatabaseCon::Setup const& setup, beast::Journal j);
 
 /**
  * @brief makeTestWalletDB Opens a test wallet database with an arbitrary name.
  * @param setup Path to the database and other opening parameters.
  * @param dbname Name of the database.
+ * @param j Journal.
  * @return Unique pointer to the database descriptor.
  */
 std::unique_ptr<DatabaseCon>
-makeTestWalletDB(DatabaseCon::Setup const& setup, std::string const& dbname);
+makeTestWalletDB(
+    DatabaseCon::Setup const& setup,
+    std::string const& dbname,
+    beast::Journal j);
 
 /**
  * @brief getManifests Loads a manifest from the wallet database and stores it
diff --git a/src/ripple/app/rdb/backend/detail/Node.h b/src/ripple/app/rdb/backend/detail/Node.h
index ab3a99f5f95..2c3f264d7a9 100644
--- a/src/ripple/app/rdb/backend/detail/Node.h
+++ b/src/ripple/app/rdb/backend/detail/Node.h
@@ -47,6 +47,7 @@ struct DatabasePairValid
  * @param config Config object.
  * @param setup Path to database and opening parameters.
  * @param checkpointerSetup Database checkpointer setup.
+ * @param j Journal.
  * @return Struct DatabasePairValid which contains unique pointers to the
  * ledger and transaction databases and a flag indicating whether opening was
  * successful.
 */
 DatabasePairValid
 makeLedgerDBs(
     Config const& config,
     DatabaseCon::Setup const& setup,
-    DatabaseCon::CheckpointerSetup const& checkpointerSetup);
+    DatabaseCon::CheckpointerSetup const& checkpointerSetup,
+    beast::Journal j);
 
 /**
  * @brief getMinLedgerSeq Returns minimum ledger sequence in given table.
diff --git a/src/ripple/app/rdb/backend/detail/Shard.h b/src/ripple/app/rdb/backend/detail/Shard.h
index ac88c24bd78..02e75d2aa64 100644
--- a/src/ripple/app/rdb/backend/detail/Shard.h
+++ b/src/ripple/app/rdb/backend/detail/Shard.h
@@ -37,6 +37,7 @@ namespace detail {
  * @param config Config object.
  * @param setup Path to database and opening parameters.
  * @param checkpointerSetup Database checkpointer setup.
+ * @param j Journal.
  * @return Struct DatabasePair which contains unique pointers to the ledger
  * and transaction databases.
*/ @@ -44,7 +45,8 @@ DatabasePair makeMetaDBs( Config const& config, DatabaseCon::Setup const& setup, - DatabaseCon::CheckpointerSetup const& checkpointerSetup); + DatabaseCon::CheckpointerSetup const& checkpointerSetup, + beast::Journal j); /** * @brief saveLedgerMeta Stores (transaction ID -> shard index) and diff --git a/src/ripple/app/rdb/backend/detail/impl/Node.cpp b/src/ripple/app/rdb/backend/detail/impl/Node.cpp index 0905d6121ae..894cae1385c 100644 --- a/src/ripple/app/rdb/backend/detail/impl/Node.cpp +++ b/src/ripple/app/rdb/backend/detail/impl/Node.cpp @@ -67,11 +67,12 @@ DatabasePairValid makeLedgerDBs( Config const& config, DatabaseCon::Setup const& setup, - DatabaseCon::CheckpointerSetup const& checkpointerSetup) + DatabaseCon::CheckpointerSetup const& checkpointerSetup, + beast::Journal j) { // ledger database auto lgr{std::make_unique( - setup, LgrDBName, LgrDBPragma, LgrDBInit, checkpointerSetup)}; + setup, LgrDBName, LgrDBPragma, LgrDBInit, checkpointerSetup, j)}; lgr->getSession() << boost::str( boost::format("PRAGMA cache_size=-%d;") % kilobytes(config.getValueFor(SizedItem::lgrDBCache))); @@ -80,7 +81,7 @@ makeLedgerDBs( { // transaction database auto tx{std::make_unique( - setup, TxDBName, TxDBPragma, TxDBInit, checkpointerSetup)}; + setup, TxDBName, TxDBPragma, TxDBInit, checkpointerSetup, j)}; tx->getSession() << boost::str( boost::format("PRAGMA cache_size=-%d;") % kilobytes(config.getValueFor(SizedItem::txnDBCache))); diff --git a/src/ripple/app/rdb/backend/detail/impl/Shard.cpp b/src/ripple/app/rdb/backend/detail/impl/Shard.cpp index f7a0ce4571b..540e8d1d221 100644 --- a/src/ripple/app/rdb/backend/detail/impl/Shard.cpp +++ b/src/ripple/app/rdb/backend/detail/impl/Shard.cpp @@ -32,7 +32,8 @@ DatabasePair makeMetaDBs( Config const& config, DatabaseCon::Setup const& setup, - DatabaseCon::CheckpointerSetup const& checkpointerSetup) + DatabaseCon::CheckpointerSetup const& checkpointerSetup, + beast::Journal j) { // ledger meta database auto lgrMetaDB{std::make_unique( @@ -40,14 +41,20 @@ makeMetaDBs( LgrMetaDBName, LgrMetaDBPragma, LgrMetaDBInit, - checkpointerSetup)}; + checkpointerSetup, + j)}; if (!config.useTxTables()) return {std::move(lgrMetaDB), nullptr}; // transaction meta database auto txMetaDB{std::make_unique( - setup, TxMetaDBName, TxMetaDBPragma, TxMetaDBInit, checkpointerSetup)}; + setup, + TxMetaDBName, + TxMetaDBPragma, + TxMetaDBInit, + checkpointerSetup, + j)}; return {std::move(lgrMetaDB), std::move(txMetaDB)}; } diff --git a/src/ripple/app/rdb/backend/impl/SQLiteDatabase.cpp b/src/ripple/app/rdb/backend/impl/SQLiteDatabase.cpp index 547ab843b36..05a460819d0 100644 --- a/src/ripple/app/rdb/backend/impl/SQLiteDatabase.cpp +++ b/src/ripple/app/rdb/backend/impl/SQLiteDatabase.cpp @@ -447,7 +447,7 @@ SQLiteDatabaseImp::makeLedgerDBs( DatabaseCon::CheckpointerSetup const& checkpointerSetup) { auto [lgr, tx, res] = - detail::makeLedgerDBs(config, setup, checkpointerSetup); + detail::makeLedgerDBs(config, setup, checkpointerSetup, j_); txdb_ = std::move(tx); lgrdb_ = std::move(lgr); return res; @@ -460,7 +460,7 @@ SQLiteDatabaseImp::makeMetaDBs( DatabaseCon::CheckpointerSetup const& checkpointerSetup) { auto [lgrMetaDB, txMetaDB] = - detail::makeMetaDBs(config, setup, checkpointerSetup); + detail::makeMetaDBs(config, setup, checkpointerSetup, j_); txMetaDB_ = std::move(txMetaDB); lgrMetaDB_ = std::move(lgrMetaDB); diff --git a/src/ripple/app/rdb/impl/Download.cpp b/src/ripple/app/rdb/impl/Download.cpp index 0905ee577b1..6f6d6bf7577 100644 --- 
a/src/ripple/app/rdb/impl/Download.cpp +++ b/src/ripple/app/rdb/impl/Download.cpp @@ -25,14 +25,15 @@ namespace ripple { std::pair, std::optional> openDatabaseBodyDb( DatabaseCon::Setup const& setup, - boost::filesystem::path const& path) + boost::filesystem::path const& path, + beast::Journal j) { // SOCI requires boost::optional (not std::optional) as the parameter. boost::optional pathFromDb; boost::optional size; auto conn = std::make_unique( - setup, "Download", DownloaderDBPragma, DatabaseBodyDBInit); + setup, "Download", DownloaderDBPragma, DatabaseBodyDBInit, j); auto& session = *conn->checkoutDb(); diff --git a/src/ripple/app/rdb/impl/ShardArchive.cpp b/src/ripple/app/rdb/impl/ShardArchive.cpp index 6880aa00136..edad36f7099 100644 --- a/src/ripple/app/rdb/impl/ShardArchive.cpp +++ b/src/ripple/app/rdb/impl/ShardArchive.cpp @@ -22,10 +22,13 @@ namespace ripple { std::unique_ptr -makeArchiveDB(boost::filesystem::path const& dir, std::string const& dbName) +makeArchiveDB( + boost::filesystem::path const& dir, + std::string const& dbName, + beast::Journal j) { return std::make_unique( - dir, dbName, DownloaderDBPragma, ShardArchiveHandlerDBInit); + dir, dbName, DownloaderDBPragma, ShardArchiveHandlerDBInit, j); } void diff --git a/src/ripple/app/rdb/impl/UnitaryShard.cpp b/src/ripple/app/rdb/impl/UnitaryShard.cpp index ab1758b4852..786340f5955 100644 --- a/src/ripple/app/rdb/impl/UnitaryShard.cpp +++ b/src/ripple/app/rdb/impl/UnitaryShard.cpp @@ -27,16 +27,17 @@ namespace ripple { DatabasePair makeShardCompleteLedgerDBs( Config const& config, - DatabaseCon::Setup const& setup) + DatabaseCon::Setup const& setup, + beast::Journal j) { auto tx{std::make_unique( - setup, TxDBName, FinalShardDBPragma, TxDBInit)}; + setup, TxDBName, FinalShardDBPragma, TxDBInit, j)}; tx->getSession() << boost::str( boost::format("PRAGMA cache_size=-%d;") % kilobytes(config.getValueFor(SizedItem::txnDBCache, std::nullopt))); auto lgr{std::make_unique( - setup, LgrDBName, FinalShardDBPragma, LgrDBInit)}; + setup, LgrDBName, FinalShardDBPragma, LgrDBInit, j)}; lgr->getSession() << boost::str( boost::format("PRAGMA cache_size=-%d;") % kilobytes(config.getValueFor(SizedItem::lgrDBCache, std::nullopt))); @@ -48,18 +49,19 @@ DatabasePair makeShardIncompleteLedgerDBs( Config const& config, DatabaseCon::Setup const& setup, - DatabaseCon::CheckpointerSetup const& checkpointerSetup) + DatabaseCon::CheckpointerSetup const& checkpointerSetup, + beast::Journal j) { // transaction database auto tx{std::make_unique( - setup, TxDBName, TxDBPragma, TxDBInit, checkpointerSetup)}; + setup, TxDBName, TxDBPragma, TxDBInit, checkpointerSetup, j)}; tx->getSession() << boost::str( boost::format("PRAGMA cache_size=-%d;") % kilobytes(config.getValueFor(SizedItem::txnDBCache))); // ledger database auto lgr{std::make_unique( - setup, LgrDBName, LgrDBPragma, LgrDBInit, checkpointerSetup)}; + setup, LgrDBName, LgrDBPragma, LgrDBInit, checkpointerSetup, j)}; lgr->getSession() << boost::str( boost::format("PRAGMA cache_size=-%d;") % kilobytes(config.getValueFor(SizedItem::lgrDBCache))); @@ -209,14 +211,16 @@ updateLedgerDBs( std::unique_ptr makeAcquireDB( DatabaseCon::Setup const& setup, - DatabaseCon::CheckpointerSetup const& checkpointerSetup) + DatabaseCon::CheckpointerSetup const& checkpointerSetup, + beast::Journal j) { return std::make_unique( setup, AcquireShardDBName, AcquireShardDBPragma, AcquireShardDBInit, - checkpointerSetup); + checkpointerSetup, + j); } void diff --git a/src/ripple/app/rdb/impl/Vacuum.cpp 
b/src/ripple/app/rdb/impl/Vacuum.cpp
index aad456cc5a8..20386f07b18 100644
--- a/src/ripple/app/rdb/impl/Vacuum.cpp
+++ b/src/ripple/app/rdb/impl/Vacuum.cpp
@@ -23,7 +23,7 @@
 namespace ripple {
 
 bool
-doVacuumDB(DatabaseCon::Setup const& setup)
+doVacuumDB(DatabaseCon::Setup const& setup, beast::Journal j)
 {
     boost::filesystem::path dbPath = setup.dataDir / TxDBName;
 
@@ -41,7 +41,7 @@ doVacuumDB(DatabaseCon::Setup const& setup)
     }
 
     auto txnDB =
-        std::make_unique<DatabaseCon>(setup, TxDBName, TxDBPragma, TxDBInit);
+        std::make_unique<DatabaseCon>(setup, TxDBName, TxDBPragma, TxDBInit, j);
 
     auto& session = txnDB->getSession();
 
     std::uint32_t pageSize;
diff --git a/src/ripple/app/rdb/impl/Wallet.cpp b/src/ripple/app/rdb/impl/Wallet.cpp
index 3715c4c7458..79734ab67dc 100644
--- a/src/ripple/app/rdb/impl/Wallet.cpp
+++ b/src/ripple/app/rdb/impl/Wallet.cpp
@@ -23,19 +23,22 @@
 namespace ripple {
 
 std::unique_ptr<DatabaseCon>
-makeWalletDB(DatabaseCon::Setup const& setup)
+makeWalletDB(DatabaseCon::Setup const& setup, beast::Journal j)
 {
     // wallet database
     return std::make_unique<DatabaseCon>(
-        setup, WalletDBName, std::array<char const*, 0>(), WalletDBInit);
+        setup, WalletDBName, std::array<char const*, 0>(), WalletDBInit, j);
 }
 
 std::unique_ptr<DatabaseCon>
-makeTestWalletDB(DatabaseCon::Setup const& setup, std::string const& dbname)
+makeTestWalletDB(
+    DatabaseCon::Setup const& setup,
+    std::string const& dbname,
+    beast::Journal j)
 {
     // wallet database
     return std::make_unique<DatabaseCon>(
-        setup, dbname.data(), std::array<char const*, 0>(), WalletDBInit);
+        setup, dbname.data(), std::array<char const*, 0>(), WalletDBInit, j);
 }
 
 void
diff --git a/src/ripple/basics/PerfLog.h b/src/ripple/basics/PerfLog.h
index 3d1cb371715..49ed27338db 100644
--- a/src/ripple/basics/PerfLog.h
+++ b/src/ripple/basics/PerfLog.h
@@ -179,6 +179,30 @@ make_PerfLog(
     beast::Journal journal,
     std::function<void()>&& signalStop);
 
+template <typename Func>
+auto
+measureDurationAndLog(
+    Func&& func,
+    const std::string& actionDescription,
+    std::chrono::duration<double> maxDelay,
+    const beast::Journal& journal)
+{
+    auto start_time = std::chrono::high_resolution_clock::now();
+
+    auto result = func();
+
+    auto end_time = std::chrono::high_resolution_clock::now();
+    auto duration = std::chrono::duration_cast<std::chrono::milliseconds>(
+        end_time - start_time);
+    if (duration > maxDelay)
+    {
+        JLOG(journal.warn())
+            << actionDescription << " took " << duration.count() << " ms";
+    }
+
+    return result;
+}
+
 }  // namespace perf
 }  // namespace ripple
 
diff --git a/src/ripple/core/Config.h b/src/ripple/core/Config.h
index cf41678a16c..25852615185 100644
--- a/src/ripple/core/Config.h
+++ b/src/ripple/core/Config.h
@@ -396,6 +396,12 @@ class Config : public BasicConfig
     int
     getValueFor(SizedItem item, std::optional<std::size_t> node = std::nullopt)
         const;
+
+    beast::Journal
+    journal() const
+    {
+        return j_;
+    }
 };
 
 FeeSetup
diff --git a/src/ripple/core/DatabaseCon.h b/src/ripple/core/DatabaseCon.h
index 59eec3a32e9..91d2a446668 100644
--- a/src/ripple/core/DatabaseCon.h
+++ b/src/ripple/core/DatabaseCon.h
@@ -21,6 +21,7 @@
 #define RIPPLE_APP_DATA_DATABASECON_H_INCLUDED
 
 #include
+#include
 #include
 #include
 #include
@@ -115,7 +116,8 @@ class DatabaseCon
         Setup const& setup,
         std::string const& dbName,
         std::array<char const*, N> const& pragma,
-        std::array<char const*, M> const& initSQL)
+        std::array<char const*, M> const& initSQL,
+        beast::Journal journal)
         // Use temporary files or regular DB files?
        : DatabaseCon(
              setup.standAlone && !setup.reporting &&
@@ -126,7 +128,8 @@ class DatabaseCon
                  : (setup.dataDir / dbName),
              setup.commonPragma(),
              pragma,
-             initSQL)
+             initSQL,
+             journal)
     {
     }
 
@@ -137,8 +140,9 @@ class DatabaseCon
         std::string const& dbName,
         std::array<char const*, N> const& pragma,
         std::array<char const*, M> const& initSQL,
-        CheckpointerSetup const& checkpointerSetup)
-        : DatabaseCon(setup, dbName, pragma, initSQL)
+        CheckpointerSetup const& checkpointerSetup,
+        beast::Journal journal)
+        : DatabaseCon(setup, dbName, pragma, initSQL, journal)
     {
         setupCheckpointing(checkpointerSetup.jobQueue, *checkpointerSetup.logs);
     }
 
@@ -148,8 +152,9 @@ class DatabaseCon
         boost::filesystem::path const& dataDir,
         std::string const& dbName,
         std::array<char const*, N> const& pragma,
-        std::array<char const*, M> const& initSQL)
-        : DatabaseCon(dataDir / dbName, nullptr, pragma, initSQL)
+        std::array<char const*, M> const& initSQL,
+        beast::Journal journal)
+        : DatabaseCon(dataDir / dbName, nullptr, pragma, initSQL, journal)
     {
     }
 
@@ -160,8 +165,9 @@ class DatabaseCon
         std::string const& dbName,
         std::array<char const*, N> const& pragma,
         std::array<char const*, M> const& initSQL,
-        CheckpointerSetup const& checkpointerSetup)
-        : DatabaseCon(dataDir, dbName, pragma, initSQL)
+        CheckpointerSetup const& checkpointerSetup,
+        beast::Journal journal)
+        : DatabaseCon(dataDir, dbName, pragma, initSQL, journal)
     {
         setupCheckpointing(checkpointerSetup.jobQueue, *checkpointerSetup.logs);
     }
 
@@ -177,7 +183,14 @@ class DatabaseCon
     LockedSociSession
     checkoutDb()
     {
-        return LockedSociSession(session_, lock_);
+        using namespace std::chrono_literals;
+        LockedSociSession session = perf::measureDurationAndLog(
+            [&]() { return LockedSociSession(session_, lock_); },
+            "checkoutDb",
+            10ms,
+            j_);
+
+        return session;
     }
 
 private:
@@ -189,8 +202,9 @@ class DatabaseCon
         boost::filesystem::path const& pPath,
         std::vector<std::string> const* commonPragma,
         std::array<char const*, N> const& pragma,
-        std::array<char const*, M> const& initSQL)
-        : session_(std::make_shared<soci::session>())
+        std::array<char const*, M> const& initSQL,
+        beast::Journal journal)
+        : session_(std::make_shared<soci::session>()), j_(journal)
     {
         open(*session_, "sqlite", pPath.string());
 
@@ -224,6 +238,8 @@
     // shared_ptr in this class. session_ will never be null.
     std::shared_ptr<soci::session> const session_;
     std::shared_ptr<Checkpointer> checkpointer_;
+
+    beast::Journal const j_;
 };
 
 // Return the checkpointer from its id. If the checkpointer no longer exists, an
diff --git a/src/ripple/net/DatabaseBody.h b/src/ripple/net/DatabaseBody.h
index c828e5bf123..4c74b879a59 100644
--- a/src/ripple/net/DatabaseBody.h
+++ b/src/ripple/net/DatabaseBody.h
@@ -102,13 +102,16 @@ class DatabaseBody::value_type
         @param io_service The asio context for running a strand.
 
         @param ec Set to the error, if any occurred
+
+        @param j Journal.
     */
     void
     open(
         boost::filesystem::path const& path,
         Config const& config,
         boost::asio::io_service& io_service,
-        boost::system::error_code& ec);
+        boost::system::error_code& ec,
+        beast::Journal j);
 };
 
 /** Algorithm for storing buffers when parsing.
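The two call sites in this patch give the flavor of the new perf::measureDurationAndLog
helper: checkoutDb() above wraps the lock acquisition with a 10ms budget, and
PeerImp::onReadMessage further down wraps invokeProtocolMessage with a 350ms budget.
As a minimal sketch of the pattern for any other caller (slowComputation,
timedComputation, and the 10ms budget here are illustrative, not part of the patch):

    // A sketch only: slowComputation and timedComputation are hypothetical
    // names used to demonstrate the helper added to PerfLog.h above.
    #include <ripple/basics/PerfLog.h>

    #include <chrono>
    #include <thread>

    namespace ripple {

    int
    slowComputation()
    {
        // Stand-in for real work; sleeps long enough to exceed the budget.
        std::this_thread::sleep_for(std::chrono::milliseconds(20));
        return 42;
    }

    int
    timedComputation(beast::Journal j)
    {
        using namespace std::chrono_literals;
        // Runs the lambda, forwards its return value unchanged, and emits
        // "slowComputation took N ms" at warning severity through j,
        // because the elapsed time exceeds the 10ms budget.
        return perf::measureDurationAndLog(
            [] { return slowComputation(); }, "slowComputation", 10ms, j);
    }

    }  // namespace ripple

One consequence of the helper's shape is worth noting: because it stores the
result of func() in a local before returning it, the wrapped callable must
return a value (a void callable would not compile). Both call sites in this
patch satisfy that, returning a LockedSociSession and a consumed-bytes/error
pair respectively.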
diff --git a/src/ripple/net/DatabaseDownloader.h b/src/ripple/net/DatabaseDownloader.h index 3e920909417..476939e1ff4 100644 --- a/src/ripple/net/DatabaseDownloader.h +++ b/src/ripple/net/DatabaseDownloader.h @@ -43,7 +43,8 @@ class DatabaseDownloader : public HTTPDownloader getParser( boost::filesystem::path dstPath, std::function complete, - boost::system::error_code& ec) override; + boost::system::error_code& ec, + beast::Journal j) override; bool checkPath(boost::filesystem::path const& dstPath) override; diff --git a/src/ripple/net/HTTPDownloader.h b/src/ripple/net/HTTPDownloader.h index 39b9a904aa3..34e81ea1f06 100644 --- a/src/ripple/net/HTTPDownloader.h +++ b/src/ripple/net/HTTPDownloader.h @@ -113,7 +113,8 @@ class HTTPDownloader : public std::enable_shared_from_this getParser( boost::filesystem::path dstPath, std::function complete, - boost::system::error_code& ec) = 0; + boost::system::error_code& ec, + beast::Journal j) = 0; virtual bool checkPath(boost::filesystem::path const& dstPath) = 0; diff --git a/src/ripple/net/impl/DatabaseBody.ipp b/src/ripple/net/impl/DatabaseBody.ipp index cdc7da2bc41..875a883527f 100644 --- a/src/ripple/net/impl/DatabaseBody.ipp +++ b/src/ripple/net/impl/DatabaseBody.ipp @@ -46,7 +46,8 @@ DatabaseBody::value_type::open( boost::filesystem::path const& path, Config const& config, boost::asio::io_service& io_service, - boost::system::error_code& ec) + boost::system::error_code& ec, + beast::Journal j) { strand_.reset(new boost::asio::io_service::strand(io_service)); path_ = path; @@ -55,7 +56,7 @@ DatabaseBody::value_type::open( setup.dataDir = path.parent_path(); setup.useGlobalPragma = false; - auto [conn, size] = openDatabaseBodyDb(setup, path); + auto [conn, size] = openDatabaseBodyDb(setup, path, j); conn_ = std::move(conn); if (size) fileSize_ = *size; diff --git a/src/ripple/net/impl/DatabaseDownloader.cpp b/src/ripple/net/impl/DatabaseDownloader.cpp index eab0d74d7d9..52388cf0d1d 100644 --- a/src/ripple/net/impl/DatabaseDownloader.cpp +++ b/src/ripple/net/impl/DatabaseDownloader.cpp @@ -45,13 +45,14 @@ auto DatabaseDownloader::getParser( boost::filesystem::path dstPath, std::function complete, - boost::system::error_code& ec) -> std::shared_ptr + boost::system::error_code& ec, + beast::Journal j) -> std::shared_ptr { using namespace boost::beast; auto p = std::make_shared>(); p->body_limit(std::numeric_limits::max()); - p->get().body().open(dstPath, config_, io_service_, ec); + p->get().body().open(dstPath, config_, io_service_, ec, j); if (ec) p->get().body().close(); diff --git a/src/ripple/net/impl/HTTPDownloader.cpp b/src/ripple/net/impl/HTTPDownloader.cpp index 44d27466224..43bf8d92589 100644 --- a/src/ripple/net/impl/HTTPDownloader.cpp +++ b/src/ripple/net/impl/HTTPDownloader.cpp @@ -146,7 +146,7 @@ HTTPDownloader::do_session( if (stop_.load()) return exit(); - auto p = this->getParser(dstPath, complete, ec); + auto p = this->getParser(dstPath, complete, ec, j_); if (ec) return failAndExit("getParser", p); diff --git a/src/ripple/nodestore/impl/Shard.cpp b/src/ripple/nodestore/impl/Shard.cpp index 10adf298361..f05f56903d2 100644 --- a/src/ripple/nodestore/impl/Shard.cpp +++ b/src/ripple/nodestore/impl/Shard.cpp @@ -862,7 +862,8 @@ Shard::open(std::lock_guard const& lock) acquireInfo_ = std::make_unique(); acquireInfo_->SQLiteDB = makeAcquireDB( setup, - DatabaseCon::CheckpointerSetup{&app_.getJobQueue(), &app_.logs()}); + DatabaseCon::CheckpointerSetup{&app_.getJobQueue(), &app_.logs()}, + j_); state_ = ShardState::acquire; progress_ = 
0; @@ -981,7 +982,7 @@ Shard::initSQLite(std::lock_guard const&) case ShardState::complete: case ShardState::finalizing: case ShardState::finalized: { - auto [lgr, tx] = makeShardCompleteLedgerDBs(config, setup); + auto [lgr, tx] = makeShardCompleteLedgerDBs(config, setup, j_); lgrSQLiteDB_ = std::move(lgr); lgrSQLiteDB_->getSession() << boost::str( @@ -1005,7 +1006,8 @@ Shard::initSQLite(std::lock_guard const&) config, setup, DatabaseCon::CheckpointerSetup{ - &app_.getJobQueue(), &app_.logs()}); + &app_.getJobQueue(), &app_.logs()}, + j_); lgrSQLiteDB_ = std::move(lgr); lgrSQLiteDB_->getSession() << boost::str( diff --git a/src/ripple/overlay/impl/PeerImp.cpp b/src/ripple/overlay/impl/PeerImp.cpp index f93c9f135ad..69ef6061d59 100644 --- a/src/ripple/overlay/impl/PeerImp.cpp +++ b/src/ripple/overlay/impl/PeerImp.cpp @@ -28,6 +28,7 @@ #include #include #include +#include #include #include #include @@ -920,8 +921,16 @@ PeerImp::onReadMessage(error_code ec, std::size_t bytes_transferred) while (read_buffer_.size() > 0) { std::size_t bytes_consumed; - std::tie(bytes_consumed, ec) = - invokeProtocolMessage(read_buffer_.data(), *this, hint); + + using namespace std::chrono_literals; + std::tie(bytes_consumed, ec) = perf::measureDurationAndLog( + [&]() { + return invokeProtocolMessage(read_buffer_.data(), *this, hint); + }, + "invokeProtocolMessage", + 350ms, + journal_); + if (ec) return fail("onReadMessage", ec); if (!socket_.is_open()) diff --git a/src/ripple/rpc/impl/ShardArchiveHandler.cpp b/src/ripple/rpc/impl/ShardArchiveHandler.cpp index d05744f483a..90bd5edc361 100644 --- a/src/ripple/rpc/impl/ShardArchiveHandler.cpp +++ b/src/ripple/rpc/impl/ShardArchiveHandler.cpp @@ -114,7 +114,7 @@ ShardArchiveHandler::init() { create_directories(downloadDir_); - sqlDB_ = makeArchiveDB(downloadDir_, stateDBName); + sqlDB_ = makeArchiveDB(downloadDir_, stateDBName, j_); } catch (std::exception const& e) { @@ -139,7 +139,7 @@ ShardArchiveHandler::initFromDB(std::lock_guard const& lock) exists(downloadDir_ / stateDBName) && is_regular_file(downloadDir_ / stateDBName)); - sqlDB_ = makeArchiveDB(downloadDir_, stateDBName); + sqlDB_ = makeArchiveDB(downloadDir_, stateDBName, j_); readArchiveDB(*sqlDB_, [&](std::string const& url_, int state) { parsedURL url; diff --git a/src/test/app/Manifest_test.cpp b/src/test/app/Manifest_test.cpp index b72623309e9..0f21cab0d63 100644 --- a/src/test/app/Manifest_test.cpp +++ b/src/test/app/Manifest_test.cpp @@ -255,7 +255,7 @@ class Manifest_test : public beast::unit_test::suite setup.dataDir = getDatabasePath(); assert(!setup.useGlobalPragma); - auto dbCon = makeTestWalletDB(setup, dbName); + auto dbCon = makeTestWalletDB(setup, dbName, env.journal); auto getPopulatedManifests = [](ManifestCache const& cache) -> std::vector { From 4d7aed84ec1285397c44ee9f0beb529218068706 Mon Sep 17 00:00:00 2001 From: John Freeman Date: Wed, 28 Aug 2024 13:00:50 -0500 Subject: [PATCH 16/26] refactor: Remove reporting mode (#5092) --- BUILD.md | 1 - CMakeLists.txt | 9 - README.md | 2 +- cfg/initdb.sh | 10 - cfg/rippled-example.cfg | 228 +-- cfg/rippled-reporting.cfg | 1638 ----------------- cmake/RippledCore.cmake | 8 - cmake/RippledSettings.cmake | 2 - conanfile.py | 9 - .../proto/org/xrpl/rpc/v1/xrp_ledger.proto | 2 +- include/xrpl/protocol/ErrorCodes.h | 9 +- src/libxrpl/protocol/ErrorCodes.cpp | 2 - src/test/rpc/ReportingETL_test.cpp | 1144 ------------ src/xrpld/app/ledger/AcceptedLedger.cpp | 13 +- src/xrpld/app/ledger/Ledger.cpp | 102 +- src/xrpld/app/ledger/Ledger.h | 26 - 
src/xrpld/app/ledger/LedgerMaster.h | 22 - src/xrpld/app/ledger/detail/LedgerMaster.cpp | 62 +- src/xrpld/app/ledger/detail/LedgerToJson.cpp | 10 +- src/xrpld/app/main/Application.cpp | 218 +-- src/xrpld/app/main/Application.h | 5 - src/xrpld/app/main/GRPCServer.cpp | 118 +- src/xrpld/app/main/Main.cpp | 15 - src/xrpld/app/misc/NetworkOPs.cpp | 384 +--- src/xrpld/app/misc/NetworkOPs.h | 9 - src/xrpld/app/misc/SHAMapStoreImp.cpp | 28 - src/xrpld/app/misc/detail/Transaction.cpp | 16 - src/xrpld/app/rdb/README.md | 3 - src/xrpld/app/rdb/RelationalDatabase.h | 23 - src/xrpld/app/rdb/backend/PostgresDatabase.h | 113 -- .../rdb/backend/detail/PostgresDatabase.cpp | 1072 ----------- .../app/rdb/detail/RelationalDatabase.cpp | 37 +- src/xrpld/app/reporting/ETLHelpers.h | 195 -- src/xrpld/app/reporting/ETLSource.cpp | 982 ---------- src/xrpld/app/reporting/ETLSource.h | 435 ----- src/xrpld/app/reporting/P2pProxy.cpp | 84 - src/xrpld/app/reporting/P2pProxy.h | 113 -- src/xrpld/app/reporting/README.md | 110 -- src/xrpld/app/reporting/ReportingETL.cpp | 960 ---------- src/xrpld/app/reporting/ReportingETL.h | 367 ---- src/xrpld/core/Config.h | 21 - src/xrpld/core/DatabaseCon.h | 4 +- src/xrpld/core/Pg.cpp | 1415 -------------- src/xrpld/core/Pg.h | 520 ------ src/xrpld/core/detail/Config.cpp | 5 - src/xrpld/core/detail/DatabaseCon.cpp | 1 - src/xrpld/nodestore/Backend.h | 34 - src/xrpld/nodestore/Database.h | 11 - .../nodestore/backend/CassandraFactory.cpp | 983 ---------- src/xrpld/nodestore/detail/Database.cpp | 9 - src/xrpld/nodestore/detail/DatabaseNodeImp.h | 6 - src/xrpld/nodestore/detail/ManagerImp.cpp | 6 - src/xrpld/rpc/detail/DeliveredAmount.cpp | 18 +- src/xrpld/rpc/detail/Handler.cpp | 4 - src/xrpld/rpc/detail/Handler.h | 16 - src/xrpld/rpc/detail/RPCHandler.cpp | 34 - src/xrpld/rpc/detail/RPCHelpers.cpp | 34 +- src/xrpld/rpc/detail/TransactionSign.cpp | 6 +- src/xrpld/rpc/handlers/AccountTx.cpp | 14 - src/xrpld/rpc/handlers/CanDelete.cpp | 3 - src/xrpld/rpc/handlers/Connect.cpp | 3 - src/xrpld/rpc/handlers/ConsensusInfo.cpp | 3 - src/xrpld/rpc/handlers/Feature1.cpp | 3 - src/xrpld/rpc/handlers/FetchInfo.cpp | 3 - src/xrpld/rpc/handlers/GetCounts.cpp | 2 +- src/xrpld/rpc/handlers/LedgerAccept.cpp | 2 +- src/xrpld/rpc/handlers/LedgerHandler.cpp | 3 +- src/xrpld/rpc/handlers/Manifest.cpp | 3 - src/xrpld/rpc/handlers/Peers.cpp | 3 - src/xrpld/rpc/handlers/Reservations.cpp | 9 - src/xrpld/rpc/handlers/ServerInfo.cpp | 9 - src/xrpld/rpc/handlers/Subscribe.cpp | 6 - src/xrpld/rpc/handlers/Tx.cpp | 121 +- src/xrpld/rpc/handlers/TxHistory.cpp | 3 - src/xrpld/rpc/handlers/UnlList.cpp | 2 - src/xrpld/rpc/handlers/ValidatorListSites.cpp | 3 - src/xrpld/rpc/handlers/Validators.cpp | 3 - src/xrpld/shamap/Family.h | 2 - src/xrpld/shamap/detail/NodeFamily.cpp | 8 - 79 files changed, 217 insertions(+), 11704 deletions(-) delete mode 100755 cfg/initdb.sh delete mode 100644 cfg/rippled-reporting.cfg delete mode 100644 src/test/rpc/ReportingETL_test.cpp delete mode 100644 src/xrpld/app/rdb/backend/PostgresDatabase.h delete mode 100644 src/xrpld/app/rdb/backend/detail/PostgresDatabase.cpp delete mode 100644 src/xrpld/app/reporting/ETLHelpers.h delete mode 100644 src/xrpld/app/reporting/ETLSource.cpp delete mode 100644 src/xrpld/app/reporting/ETLSource.h delete mode 100644 src/xrpld/app/reporting/P2pProxy.cpp delete mode 100644 src/xrpld/app/reporting/P2pProxy.h delete mode 100644 src/xrpld/app/reporting/README.md delete mode 100644 src/xrpld/app/reporting/ReportingETL.cpp delete mode 100644 
src/xrpld/app/reporting/ReportingETL.h delete mode 100644 src/xrpld/core/Pg.cpp delete mode 100644 src/xrpld/core/Pg.h delete mode 100644 src/xrpld/nodestore/backend/CassandraFactory.cpp diff --git a/BUILD.md b/BUILD.md index b4201ef0437..a39df98a5a6 100644 --- a/BUILD.md +++ b/BUILD.md @@ -376,7 +376,6 @@ stored inside the build directory, as either of: | Option | Default Value | Description | | --- | ---| ---| | `assert` | OFF | Enable assertions. -| `reporting` | OFF | Build the reporting mode feature. | | `coverage` | OFF | Prepare the coverage report. | | `tests` | ON | Build tests. | | `unity` | ON | Configure a unity build. | diff --git a/CMakeLists.txt b/CMakeLists.txt index a69583f9cbf..0c34f89397d 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -115,15 +115,6 @@ else() endif() target_link_libraries(ripple_libs INTERFACE ${nudb}) -if(reporting) - find_package(cassandra-cpp-driver REQUIRED) - find_package(PostgreSQL REQUIRED) - target_link_libraries(ripple_libs INTERFACE - cassandra-cpp-driver::cassandra-cpp-driver - PostgreSQL::PostgreSQL - ) -endif() - if(coverage) include(RippledCov) endif() diff --git a/README.md b/README.md index 45dc2005ea2..cc002a2dd82 100644 --- a/README.md +++ b/README.md @@ -8,7 +8,7 @@ The [XRP Ledger](https://xrpl.org/) is a decentralized cryptographic ledger powe ## rippled The server software that powers the XRP Ledger is called `rippled` and is available in this repository under the permissive [ISC open-source license](LICENSE.md). The `rippled` server software is written primarily in C++ and runs on a variety of platforms. The `rippled` server software can run in several modes depending on its [configuration](https://xrpl.org/rippled-server-modes.html). -If you are interested in running an **API Server** (including a **Full History Server**) or a **Reporting Mode** server, take a look at [Clio](https://github.com/XRPLF/clio). rippled Reporting Mode is expected to be replaced by Clio. +If you are interested in running an **API Server** (including a **Full History Server**), take a look at [Clio](https://github.com/XRPLF/clio). (rippled Reporting Mode has been replaced by Clio.) ### Build from Source diff --git a/cfg/initdb.sh b/cfg/initdb.sh deleted file mode 100755 index 9ca02ed5632..00000000000 --- a/cfg/initdb.sh +++ /dev/null @@ -1,10 +0,0 @@ -#!/bin/sh - -# Execute this script with a running Postgres server on the current host. -# It should work with the most generic installation of Postgres, -# and is necessary for rippled to store data in Postgres. - -# usage: sudo -u postgres ./initdb.sh -psql -c "CREATE USER rippled" -psql -c "CREATE DATABASE rippled WITH OWNER = rippled" - diff --git a/cfg/rippled-example.cfg b/cfg/rippled-example.cfg index b283900d013..673ab3213e8 100644 --- a/cfg/rippled-example.cfg +++ b/cfg/rippled-example.cfg @@ -13,7 +13,7 @@ # # 4. HTTPS Client # -# 5. Reporting Mode +# 5. # # 6. Database # @@ -883,119 +883,6 @@ # #------------------------------------------------------------------------------- # -# 5. Reporting Mode -# -#------------ -# -# rippled has an optional operating mode called Reporting Mode. In Reporting -# Mode, rippled does not connect to the peer to peer network. Instead, rippled -# will continuously extract data from one or more rippled servers that are -# connected to the peer to peer network (referred to as an ETL source). -# Reporting mode servers will forward RPC requests that require access to the -# peer to peer network (submit, fee, etc) to an ETL source. 
-# -# [reporting] Settings for Reporting Mode. If and only if this section is -# present, rippled will start in reporting mode. This section -# contains a list of ETL source names, and key-value pairs. The -# ETL source names each correspond to a configuration file -# section; the names must match exactly. The key-value pairs are -# optional. -# -# -# [] -# -# A series of key/value pairs that specify an ETL source. -# -# source_ip = -# -# Required. IP address of the ETL source. Can also be a DNS record. -# -# source_ws_port = -# -# Required. Port on which ETL source is accepting unencrypted websocket -# connections. -# -# source_grpc_port = -# -# Required for ETL. Port on which ETL source is accepting gRPC requests. -# If this option is ommitted, this ETL source cannot actually be used for -# ETL; the Reporting Mode server can still forward RPCs to this ETL -# source, but cannot extract data from this ETL source. -# -# -# Key-value pairs (all optional): -# -# read_only Valid values: 0, 1. Default is 0. If set to 1, the server -# will start in strict read-only mode, and will not perform -# ETL. The server will still handle RPC requests, and will -# still forward RPC requests that require access to the p2p -# network. -# -# start_sequence -# Sequence of first ledger to extract if the database is empty. -# ETL extracts ledgers in order. If this setting is absent and -# the database is empty, ETL will start with the next ledger -# validated by the network. If this setting is present and the -# database is not empty, an exception is thrown. -# -# num_markers Degree of parallelism used during the initial ledger -# download. Only used if the database is empty. Valid values -# are 1-256. A higher degree of parallelism results in a -# faster download, but puts more load on the ETL source. -# Default is 2. -# -# Example: -# -# [reporting] -# etl_source1 -# etl_source2 -# read_only=0 -# start_sequence=32570 -# num_markers=8 -# -# [etl_source1] -# source_ip=1.2.3.4 -# source_ws_port=6005 -# source_grpc_port=50051 -# -# [etl_source2] -# source_ip=5.6.7.8 -# source_ws_port=6005 -# source_grpc_port=50051 -# -# Minimal Example: -# -# [reporting] -# etl_source1 -# -# [etl_source1] -# source_ip=1.2.3.4 -# source_ws_port=6005 -# source_grpc_port=50051 -# -# -# Notes: -# -# Reporting Mode requires Postgres (instead of SQLite). The Postgres -# connection info is specified under the [ledger_tx_tables] config section; -# see the Database section for further documentation. -# -# Each ETL source specified must have gRPC enabled (by adding a [port_grpc] -# section to the config). It is recommended to add a secure_gateway entry to -# the gRPC section, in order to bypass the server's rate limiting. -# This section needs to be added to the config of the ETL source, not -# the config of the reporting node. In the example below, the -# reporting server is running at 127.0.0.1. Multiple IPs can be -# specified in secure_gateway via a comma separated list. -# -# [port_grpc] -# ip = 0.0.0.0 -# port = 50051 -# secure_gateway = 127.0.0.1 -# -# -#------------------------------------------------------------------------------- -# # 6. Database # #------------ @@ -1003,13 +890,7 @@ # rippled creates 4 SQLite database to hold bookkeeping information # about transactions, local credentials, and various other things. # It also creates the NodeDB, which holds all the objects that -# make up the current and historical ledgers. In Reporting Mode, rippled -# uses a Postgres database instead of SQLite. 
-# -# The simplest way to work with Postgres is to install it locally. -# When it is running, execute the initdb.sh script in the current -# directory as: sudo -u postgres ./initdb.sh -# This will create the rippled user and an empty database of the same name. +# make up the current and historical ledgers. # # The size of the NodeDB grows in proportion to the amount of new data and the # amount of historical data (a configurable setting) so the performance of the @@ -1051,33 +932,10 @@ # keeping full history is not advised, and using online delete is # recommended. # -# type = Cassandra -# -# Apache Cassandra is an open-source, distributed key-value store - see -# https://cassandra.apache.org/ for more details. -# -# Cassandra is an alternative backend to be used only with Reporting Mode. -# See the Reporting Mode section for more details about Reporting Mode. -# # Required keys for NuDB and RocksDB: # # path Location to store the database # -# Required keys for Cassandra: -# -# contact_points IP of a node in the Cassandra cluster -# -# port CQL Native Transport Port -# -# secure_connect_bundle -# Absolute path to a secure connect bundle. When using -# a secure connect bundle, contact_points and port are -# not required. -# -# keyspace Name of Cassandra keyspace to use -# -# table_name Name of table in above keyspace to use -# # Optional keys # # cache_size Size of cache for database records. Default is 16384. @@ -1153,25 +1011,6 @@ # checking until healthy. # Default is 5. # -# Optional keys for Cassandra: -# -# username Username to use if Cassandra cluster requires -# authentication -# -# password Password to use if Cassandra cluster requires -# authentication -# -# max_requests_outstanding -# Limits the maximum number of concurrent database -# writes. Default is 10 million. For slower clusters, -# large numbers of concurrent writes can overload the -# cluster. Setting this option can help eliminate -# write timeouts and other write errors due to the -# cluster being overloaded. -# io_threads -# Set the number of IO threads used by the -# Cassandra driver. Defaults to 4. -# # Notes: # The 'node_db' entry configures the primary, persistent storage. # @@ -1267,42 +1106,6 @@ # This setting may not be combined with the # "safety_level" setting. # -# [ledger_tx_tables] (optional) -# -# conninfo Info for connecting to Postgres. Format is -# postgres://[username]:[password]@[ip]/[database]. -# The database and user must already exist. If this -# section is missing and rippled is running in -# Reporting Mode, rippled will connect as the -# user running rippled to a database with the -# same name. On Linux and Mac OS X, the connection -# will take place using the server's UNIX domain -# socket. On Windows, through the localhost IP -# address. Default is empty. -# -# use_tx_tables Valid values: 1, 0 -# The default is 1 (true). Determines whether to use -# the SQLite transaction database. If set to 0, -# rippled will not write to the transaction database, -# and will reject tx, account_tx and tx_history RPCs. -# In Reporting Mode, this setting is ignored. -# -# max_connections Valid values: any positive integer up to 64 bit -# storage length. This configures the maximum -# number of concurrent connections to postgres. -# Default is the maximum possible value to -# fit in a 64 bit integer. -# -# timeout Number of seconds after which idle postgres -# connections are discconnected. If set to 0, -# connections never timeout. Default is 600. 
-# -# -# remember_ip Value values: 1, 0 -# Default is 1 (true). Whether to cache host and -# port connection settings. -# -# #------------------------------------------------------------------------------- # # 7. Diagnostics @@ -1566,6 +1369,12 @@ # Admin level API commands over Secure Websockets, when originating # from the same machine (via the loopback adapter at 127.0.0.1). # +# "grpc" +# +# ETL commands for Clio. We recommend setting secure_gateway +# in this section to a comma-separated list of the addresses +# of your Clio servers, in order to bypass rippled's rate limiting. +# # This port is commented out but can be enabled by removing # the '#' from each corresponding line including the entry under [server] # @@ -1648,15 +1457,6 @@ advisory_delete=0 /var/lib/rippled/db -# To use Postgres, uncomment this section and fill in the appropriate connection -# info. Postgres can only be used in Reporting Mode. -# To disable writing to the transaction database, uncomment this section, and -# set use_tx_tables=0 -# [ledger_tx_tables] -# conninfo = postgres://[username]:[password]@[ip]/[database] -# use_tx_tables=1 - - # This needs to be an absolute directory reference, not a relative one. # Modify this value as required. [debug_logfile] @@ -1684,15 +1484,3 @@ validators.txt # set to ssl_verify to 0. [ssl_verify] 1 - - -# To run in Reporting Mode, uncomment this section and fill in the appropriate -# connection info for one or more ETL sources. -# [reporting] -# etl_source -# -# -# [etl_source] -# source_grpc_port=50051 -# source_ws_port=6005 -# source_ip=127.0.0.1 diff --git a/cfg/rippled-reporting.cfg b/cfg/rippled-reporting.cfg deleted file mode 100644 index 9776ef5ee45..00000000000 --- a/cfg/rippled-reporting.cfg +++ /dev/null @@ -1,1638 +0,0 @@ -#------------------------------------------------------------------------------- -# -# -#------------------------------------------------------------------------------- -# -# Contents -# -# 1. Server -# -# 2. Peer Protocol -# -# 3. Ripple Protocol -# -# 4. HTTPS Client -# -# 5. Reporting Mode -# -# 6. Database -# -# 7. Diagnostics -# -# 8. Voting -# -# 9. Misc Settings -# -# 10. Example Settings -# -#------------------------------------------------------------------------------- -# -# Purpose -# -# This file documents and provides examples of all rippled server process -# configuration options. When the rippled server instance is launched, it -# looks for a file with the following name: -# -# rippled.cfg -# -# For more information on where the rippled server instance searches for the -# file, visit: -# -# https://xrpl.org/commandline-usage.html#generic-options -# -# This file should be named rippled.cfg. This file is UTF-8 with DOS, UNIX, -# or Mac style end of lines. Blank lines and lines beginning with '#' are -# ignored. Undefined sections are reserved. No escapes are currently defined. -# -# Notation -# -# In this document a simple BNF notation is used. Angle brackets denote -# required elements, square brackets denote optional elements, and single -# quotes indicate string literals. A vertical bar separating 1 or more -# elements is a logical "or"; any one of the elements may be chosen. -# Parentheses are notational only, and used to group elements; they are not -# part of the syntax unless they appear in quotes. White space may always -# appear between elements, it has no effect on values. 
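For context on the "grpc" port note added to the example config above: the
stanza it refers to has the same shape as the [port_grpc] example in the
reporting documentation being removed here. A sketch with illustrative values,
where secure_gateway would carry a comma-separated list of the addresses of
your Clio servers:

    [port_grpc]
    ip = 0.0.0.0
    port = 50051
    secure_gateway = 127.0.0.1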
-# -# A required identifier -# '=' The equals sign character -# | Logical "or" -# ( ) Used for grouping -# -# -# An identifier is a string of upper or lower case letters, digits, or -# underscores subject to the requirement that the first character of an -# identifier must be a letter. Identifiers are not case sensitive (but -# values may be). -# -# Some configuration sections contain key/value pairs. A line containing -# a key/value pair has this syntax: -# -# '=' -# -# Depending on the section and key, different value types are possible: -# -# A signed integer -# An unsigned integer -# A boolean. 1 = true/yes/on, 0 = false/no/off. -# -# Consult the documentation on the key in question to determine the possible -# value types. -# -# -# -#------------------------------------------------------------------------------- -# -# 1. Server -# -#---------- -# -# -# -# rippled offers various server protocols to clients making inbound -# connections. The listening ports rippled uses are "universal" ports -# which may be configured to handshake in one or more of the available -# supported protocols. These universal ports simplify administration: -# A single open port can be used for multiple protocols. -# -# NOTE At least one server port must be defined in order -# to accept incoming network connections. -# -# -# [server] -# -# A list of port names and key/value pairs. A port name must start with a -# letter and contain only letters and numbers. The name is not case-sensitive. -# For each name in this list, rippled will look for a configuration file -# section with the same name and use it to create a listening port. The -# name is informational only; the choice of name does not affect the function -# of the listening port. -# -# Key/value pairs specified in this section are optional, and apply to all -# listening ports unless the port overrides the value in its section. They -# may be considered default values. -# -# Suggestion: -# -# To avoid a conflict with port names and future configuration sections, -# we recommend prepending "port_" to the port name. This prefix is not -# required, but suggested. -# -# This example defines two ports with different port numbers and settings: -# -# [server] -# port_public -# port_private -# port = 80 -# -# [port_public] -# ip = 0.0.0.0 -# port = 443 -# protocol = peer,https -# -# [port_private] -# ip = 127.0.0.1 -# protocol = http -# -# When rippled is used as a command line client (for example, issuing a -# server stop command), the first port advertising the http or https -# protocol will be used to make the connection. -# -# -# -# [] -# -# A series of key/value pairs that define the settings for the port with -# the corresponding name. These keys are possible: -# -# ip = -# -# Required. Determines the IP address of the network interface to bind -# to. To bind to all available IPv4 interfaces, use 0.0.0.0 -# To binding to all IPv4 and IPv6 interfaces, use :: -# -# NOTE if the ip value is ::, then any incoming IPv4 connections will -# be made as mapped IPv4 addresses. -# -# port = -# -# Required. Sets the port number to use for this port. -# -# protocol = [ http, https, peer ] -# -# Required. A comma-separated list of protocols to support: -# -# http JSON-RPC over HTTP -# https JSON-RPC over HTTPS -# ws Websockets -# wss Secure Websockets -# peer Peer Protocol -# -# Restrictions: -# -# Only one port may be configured to support the peer protocol. -# A port cannot have websocket and non websocket protocols at the -# same time. 
It is possible have both Websockets and Secure Websockets -# together in one port. -# -# NOTE If no ports support the peer protocol, rippled cannot -# receive incoming peer connections or become a superpeer. -# -# limit = -# -# Optional. An integer value that will limit the number of connected -# clients that the port will accept. Once the limit is reached, new -# connections will be refused until other clients disconnect. -# Omit or set to 0 to allow unlimited numbers of clients. -# -# user = -# password = -# -# When set, these credentials will be required on HTTP/S requests. -# The credentials must be provided using HTTP's Basic Authentication -# headers. If either or both fields are empty, then no credentials are -# required. IP address restrictions, if any, will be checked in addition -# to the credentials specified here. -# -# When acting in the client role, rippled will supply these credentials -# using HTTP's Basic Authentication headers when making outbound HTTP/S -# requests. -# -# admin = [ IP, IP, IP, ... ] -# -# A comma-separated list of IP addresses. -# -# When set, grants administrative command access to the specified IP -# addresses. These commands may be issued over http, https, ws, or wss -# if configured on the port. If not provided, the default is to not allow -# administrative commands. -# -# NOTE A common configuration value for the admin field is "localhost". -# If you are listening on all IPv4/IPv6 addresses by specifing -# ip = :: then you can use admin = ::ffff:127.0.0.1,::1 to allow -# administrative access from both IPv4 and IPv6 localhost -# connections. -# -# *SECURITY WARNING* -# 0.0.0.0 or :: may be used to allow access from any IP address. It must -# be the only address specified and cannot be combined with other IPs. -# Use of this address can compromise server security, please consider its -# use carefully. -# -# admin_user = -# admin_password = -# -# When set, clients must provide these credentials in the submitted -# JSON for any administrative command requests submitted to the HTTP/S, -# WS, or WSS protocol interfaces. If administrative commands are -# disabled for a port, these credentials have no effect. -# -# When acting in the client role, rippled will supply these credentials -# in the submitted JSON for any administrative command requests when -# invoking JSON-RPC commands on remote servers. -# -# secure_gateway = [ IP, IP, IP, ... ] -# -# A comma-separated list of IP addresses. -# -# When set, allows the specified IP addresses to pass HTTP headers -# containing username and remote IP address for each session. If a -# non-empty username is passed in this way, then resource controls -# such as often resulting in "tooBusy" errors will be lifted. However, -# administrative RPC commands such as "stop" will not be allowed. -# The HTTP headers that secure_gateway hosts can set are X-User and -# X-Forwarded-For. Only the X-User header affects resource controls. -# However, both header values are logged to help identify user activity. -# If no X-User header is passed, or if its value is empty, then -# resource controls will default to those for non-administrative users. -# -# The secure_gateway IP addresses are intended to represent -# proxies. Since rippled trusts these hosts, they must be -# responsible for properly authenticating the remote user. -# -# The same IP address cannot be used in both "admin" and "secure_gateway" -# lists for the same port. 
In this case, rippled will abort with an error -# message to the console shortly after startup -# -# ssl_key = -# ssl_cert = -# ssl_chain = -# -# Use the specified files when configuring SSL on the port. -# -# NOTE If no files are specified and secure protocols are selected, -# rippled will generate an internal self-signed certificate. -# -# The files have these meanings: -# -# ssl_key -# -# Specifies the filename holding the SSL key in PEM format. -# -# ssl_cert -# -# Specifies the path to the SSL certificate file in PEM format. -# This is not needed if the chain includes it. -# -# ssl_chain -# -# If you need a certificate chain, specify the path to the -# certificate chain here. The chain may include the end certificate. -# -# ssl_ciphers = -# -# Control the ciphers which the server will support over SSL on the port, -# specified using the OpenSSL "cipher list format". -# -# NOTE If unspecified, rippled will automatically configure a modern -# cipher suite. This default suite should be widely supported. -# -# You should not modify this string unless you have a specific -# reason and cryptographic expertise. Incorrect modification may -# keep rippled from connecting to other instances of rippled or -# prevent RPC and WebSocket clients from connecting. -# -# send_queue_limit = [1..65535] -# -# A Websocket will disconnect when its send queue exceeds this limit. -# The default is 100. A larger value may help with erratic disconnects but -# may adversely affect server performance. -# -# WebSocket permessage-deflate extension options -# -# These settings configure the optional permessage-deflate extension -# options and may appear on any port configuration entry. They are meaningful -# only to ports which have enabled a WebSocket protocol. -# -# permessage_deflate = -# -# Determines if permessage_deflate extension negotiations are enabled. -# When enabled, clients may request the extension and the server will -# offer the enabled extension in response. -# -# client_max_window_bits = [9..15] -# server_max_window_bits = [9..15] -# client_no_context_takeover = -# server_no_context_takeover = -# -# These optional settings control options related to the permessage-deflate -# extension negotiation. For precise definitions of these fields please see -# the RFC 7692, "Compression Extensions for WebSocket": -# https://tools.ietf.org/html/rfc7692 -# -# compress_level = [0..9] -# -# When set, determines the amount of compression attempted, where 0 is -# the least amount and 9 is the most amount. Higher levels require more -# CPU resources. Levels 1 through 3 use a fast compression algorithm, -# while levels 4 through 9 use a more compact algorithm which uses more -# CPU resources. If unspecified, a default of 3 is used. -# -# memory_level = [1..9] -# -# When set, determines the relative amount of memory used to hold -# intermediate compression data. Higher numbers can give better compression -# ratios at the cost of higher memory and CPU resources. -# -# [rpc_startup] -# -# Specify a list of RPC commands to run at startup. -# -# Examples: -# { "command" : "server_info" } -# { "command" : "log_level", "partition" : "ripplecalc", "severity" : "trace" } -# -# -# -# [websocket_ping_frequency] -# -# -# -# The amount of time to wait in seconds, before sending a websocket 'ping' -# message. Ping messages are used to determine if the remote end of the -# connection is no longer available. 
-# -# -# [server_domain] -# -# domain name -# -# The domain under which a TOML file applicable to this server can be -# found. A server may lie about its domain so the TOML should contain -# a reference to this server by pubkey in the [nodes] array. -# -# -#------------------------------------------------------------------------------- -# -# 2. Peer Protocol -# -#----------------- -# -# These settings control security and access attributes of the Peer to Peer -# server section of the rippled process. Peer Protocol implements the -# Ripple Payment protocol. It is over peer connections that transactions -# and validations are passed from to machine to machine, to determine the -# contents of validated ledgers. -# -# -# -# [ips] -# -# List of hostnames or ips where the Ripple protocol is served. A default -# starter list is included in the code and used if no other hostnames are -# available. -# -# One address or domain name per line is allowed. A port may must be -# specified after adding a space to the address. The ordering of entries -# does not generally matter. -# -# The default list of entries is: -# - r.ripple.com 51235 -# - sahyadri.isrdc.in 51235 -# -# Examples: -# -# [ips] -# 192.168.0.1 -# 192.168.0.1 2459 -# r.ripple.com 51235 -# -# -# [ips_fixed] -# -# List of IP addresses or hostnames to which rippled should always attempt to -# maintain peer connections with. This is useful for manually forming private -# networks, for example to configure a validation server that connects to the -# Ripple network through a public-facing server, or for building a set -# of cluster peers. -# -# One address or domain names per line is allowed. A port must be specified -# after adding a space to the address. -# -# -# -# [peer_private] -# -# 0 or 1. -# -# 0: Request peers to broadcast your address. Normal outbound peer connections [default] -# 1: Request peers not broadcast your address. Only connect to configured peers. -# -# -# -# [peers_max] -# -# The largest number of desired peer connections (incoming or outgoing). -# Cluster and fixed peers do not count towards this total. There are -# implementation-defined lower limits imposed on this value for security -# purposes. -# -# -# -# [node_seed] -# -# This is used for clustering. To force a particular node seed or key, the -# key can be set here. The format is the same as the validation_seed field. -# To obtain a validation seed, use the validation_create command. -# -# Examples: RASH BUSH MILK LOOK BAD BRIM AVID GAFF BAIT ROT POD LOVE -# shfArahZT9Q9ckTf3s1psJ7C7qzVN -# -# -# -# [cluster_nodes] -# -# To extend full trust to other nodes, place their node public keys here. -# Generally, you should only do this for nodes under common administration. -# Node public keys start with an 'n'. To give a node a name for identification -# place a space after the public key and then the name. -# -# -# -# [max_transactions] -# -# Configure the maximum number of transactions to have in the job queue -# -# Must be a number between 100 and 1000, defaults to 250 -# -# -# [overlay] -# -# Controls settings related to the peer to peer overlay. -# -# A set of key/value pair parameters to configure the overlay. -# -# public_ip = -# -# If the server has a known, fixed public IPv4 address, -# specify that IP address here in dotted decimal notation. -# Peers will use this information to reject attempt to proxy -# connections to or from this server. 
-# -# ip_limit = -# -# The maximum number of incoming peer connections allowed by a single -# IP that isn't classified as "private" in RFC1918. The implementation -# imposes some hard and soft upper limits on this value to prevent a -# single host from consuming all inbound slots. If the value is not -# present the server will autoconfigure an appropriate limit. -# -# max_unknown_time = -# -# The maximum amount of time, in seconds, that an outbound connection -# is allowed to stay in the "unknown" tracking state. This option can -# take any value between 300 and 1800 seconds, inclusive. If the option -# is not present the server will autoconfigure an appropriate limit. -# -# The current default (which is subject to change) is 600 seconds. -# -# max_diverged_time = -# -# The maximum amount of time, in seconds, that an outbound connection -# is allowed to stay in the "diverged" tracking state. The option can -# take any value between 60 and 900 seconds, inclusive. If the option -# is not present the server will autoconfigure an appropriate limit. -# -# The current default (which is subject to change) is 300 seconds. -# -# -# [transaction_queue] EXPERIMENTAL -# -# This section is EXPERIMENTAL, and should not be -# present for production configuration settings. -# -# A set of key/value pair parameters to tune the performance of the -# transaction queue. -# -# ledgers_in_queue = -# -# The queue will be limited to this of average ledgers' -# worth of transactions. If the queue fills up, the transactions -# with the lowest fee levels will be dropped from the queue any -# time a transaction with a higher fee level is added. -# Default: 20. -# -# minimum_queue_size = -# -# The queue will always be able to hold at least this of -# transactions, regardless of recent ledger sizes or the value of -# ledgers_in_queue. Default: 2000. -# -# retry_sequence_percent = -# -# If a client replaces a transaction in the queue (same sequence -# number as a transaction already in the queue), the new -# transaction's fee must be more than percent higher -# than the original transaction's fee, or meet the current open -# ledger fee to be considered. Default: 25. -# -# minimum_escalation_multiplier = -# -# At ledger close time, the median fee level of the transactions -# in that ledger is used as a multiplier in escalation -# calculations of the next ledger. This minimum value ensures that -# the escalation is significant. Default: 500. -# -# minimum_txn_in_ledger = -# -# Minimum number of transactions that must be allowed into the -# ledger at the minimum required fee before the required fee -# escalates. Default: 5. -# -# minimum_txn_in_ledger_standalone = -# -# Like minimum_txn_in_ledger when rippled is running in standalone -# mode. Default: 1000. -# -# target_txn_in_ledger = -# -# Number of transactions allowed into the ledger at the minimum -# required fee that the queue will "work toward" as long as -# consensus stays healthy. The limit will grow quickly until it -# reaches or exceeds this number. After that the limit may still -# change, but will stay above the target. If consensus is not -# healthy, the limit will be clamped to this value or lower. -# Default: 50. -# -# maximum_txn_in_ledger = -# -# (Optional) Maximum number of transactions that will be allowed -# into the ledger at the minimum required fee before the required -# fee escalates. Default: no maximum. 
-# -# normal_consensus_increase_percent = -# -# (Optional) When the ledger has more transactions than "expected", -# and performance is humming along nicely, the expected ledger size -# is updated to the previous ledger size plus this percentage. -# Default: 20 -# -# slow_consensus_decrease_percent = -# -# (Optional) When consensus takes longer than appropriate, the -# expected ledger size is updated to the minimum of the previous -# ledger size or the "expected" ledger size minus this percentage. -# Default: 50 -# -# maximum_txn_per_account = -# -# Maximum number of transactions that one account can have in the -# queue at any given time. Default: 10. -# -# minimum_last_ledger_buffer = -# -# If a transaction has a LastLedgerSequence, it must be at least -# this much larger than the current open ledger sequence number. -# Default: 2. -# -# zero_basefee_transaction_feelevel = -# -# So we don't deal with infinite fee levels, treat any transaction -# with a 0 base fee (ie. SetRegularKey password recovery) as -# having this fee level. -# Default: 256000. -# -# -#------------------------------------------------------------------------------- -# -# 3. Protocol -# -#------------------- -# -# These settings affect the behavior of the server instance with respect -# to protocol level activities such as validating and closing ledgers -# adjusting fees in response to server overloads. -# -# -# -# -# [relay_proposals] -# -# Controls the relaying behavior for proposals received by this server that -# are issued by validators that are not on the server's UNL. -# -# Legal values are: "trusted" and "all". The default is "trusted". -# -# -# [relay_validations] -# -# Controls the relaying behavior for validations received by this server that -# are issued by validators that are not on the server's UNL. -# -# Legal values are: "trusted" and "all". The default is "all". -# -# -# -# -# -# [ledger_history] -# -# The number of past ledgers to acquire on server startup and the minimum to -# maintain while running. -# -# To serve clients, servers need historical ledger data. Servers that don't -# need to serve clients can set this to "none". Servers that want complete -# history can set this to "full". -# -# This must be less than or equal to online_delete (if online_delete is used) -# -# The default is: 256 -# -# -# -# [fetch_depth] -# -# The number of past ledgers to serve to other peers that request historical -# ledger data (or "full" for no limit). -# -# Servers that require low latency and high local performance may wish to -# restrict the historical ledgers they are willing to serve. Setting this -# below 32 can harm network stability as servers require easy access to -# recent history to stay in sync. Values below 128 are not recommended. -# -# The default is: full -# -# -# -# [validation_seed] -# -# To perform validation, this section should contain either a validation seed -# or key. The validation seed is used to generate the validation -# public/private key pair. To obtain a validation seed, use the -# validation_create command. -# -# Examples: RASH BUSH MILK LOOK BAD BRIM AVID GAFF BAIT ROT POD LOVE -# shfArahZT9Q9ckTf3s1psJ7C7qzVN -# -# -# -# [validator_token] -# -# This is an alternative to [validation_seed] that allows rippled to perform -# validation without having to store the validator keys on the network -# connected server. The field should contain a single token in the form of a -# base64-encoded blob. -# An external tool is available for generating validator keys and tokens. 
-#
-#
-#
-# [validator_key_revocation]
-#
-#   If a validator's secret key has been compromised, a revocation must be
-#   generated and added to this field. The revocation notifies peers that it is
-#   no longer safe to trust the revoked key. The field should contain a single
-#   revocation in the form of a base64-encoded blob.
-#   An external tool is available for generating and revoking validator keys.
-#
-#
-#
-# [validators_file]
-#
-#   Path or name of a file that determines the nodes to always accept as validators.
-#
-#   The contents of the file should include [validators] and/or
-#   [validator_list_sites] and [validator_list_keys] entries.
-#   [validators] should be followed by a list of validation public keys of
-#   nodes, one per line.
-#   [validator_list_sites] should be followed by a list of URIs each serving a
-#   list of recommended validators.
-#   [validator_list_keys] should be followed by a list of keys belonging to
-#   trusted validator list publishers. Validator lists fetched from configured
-#   sites will only be considered if the list is accompanied by a valid
-#   signature from a trusted publisher key.
-#
-#   Specify the file by its name or path.
-#   Unless an absolute path is specified, it will be considered relative to
-#   the folder in which the rippled.cfg file is located.
-#
-#   Examples:
-#    /home/ripple/validators.txt
-#    C:/home/ripple/validators.txt
-#
-#   Example content:
-#    [validators]
-#    n949f75evCHwgyP4fPVgaHqNHxUVN15PsJEZ3B3HnXPcPjcZAoy7
-#    n9MD5h24qrQqiyBC8aeqqCWvpiBiYQ3jxSr91uiDvmrkyHRdYLUj
-#    n9L81uNCaPgtUJfaHh89gmdvXKAmSt5Gdsw2g1iPWaPkAHW5Nm4C
-#    n9KiYM9CgngLvtRCQHZwgC2gjpdaZcCcbt3VboxiNFcKuwFVujzS
-#    n9LdgEtkmGB9E2h3K4Vp7iGUaKuq23Zr32ehxiU8FWY7xoxbWTSA
-#
-#
-#
-# [path_search]
-#   When searching for paths, the default search aggressiveness. This can take
-#   exponentially more resources as the size is increased.
-#
-#   The default is: 7
-#
-# [path_search_fast]
-# [path_search_max]
-#   When searching for paths, the minimum and maximum search aggressiveness.
-#
-#   If you do not need pathfinding, you can set path_search_max to zero to
-#   disable it and avoid some expensive bookkeeping.
-#
-#   The default for 'path_search_fast' is 2. The default for 'path_search_max' is 10.
-#
-# [path_search_old]
-#
-#   For clients that use the legacy path finding interfaces, the search
-#   aggressiveness to use. The default is 7.
-#
-#
-#
-# [fee_default]
-#
-#   Sets the base cost of a transaction in drops. Used when the server has
-#   no other source of fee information, such as signing transactions offline.
-#
-#
-#
-# [workers]
-#
-#   Configures the number of threads for processing work submitted by peers
-#   and clients. If not specified, then the value is automatically set to the
-#   number of processor threads plus 2 for networked nodes. Nodes running in
-#   standalone mode default to 1 worker.
-#
-#
-#
-# [network_id]
-#
-#   Specify the network which this server is configured to connect to and
-#   track. If set, the server will not establish connections with servers
-#   that are explicitly configured to track another network.
-#
-#   Network identifiers are usually unsigned integers in the range 0 to
-#   4294967295 inclusive. The server also maps the following well-known
-#   names to the corresponding numerical identifier:
-#
-#   main -> 0
-#   testnet -> 1
-#   devnet -> 2
-#
-#   If this value is not specified, the server is not explicitly configured
-#   to track a particular network.
-#
-#
-# [ledger_replay]
-#
-#   0 or 1.
-#
-#   0: Disable the ledger replay feature [default]
-#   1: Enable the ledger replay feature. With this feature enabled, when
-#      acquiring a ledger from the network, a rippled node only downloads
-#      the ledger header and the transactions instead of the whole ledger.
-#      The ledger is then built by applying the transactions to the parent
-#      ledger.
-#
-#-------------------------------------------------------------------------------
-#
-# 4. HTTPS Client
-#
-#----------------
-#
-#   The rippled server instance uses HTTPS GET requests in a variety of
-#   circumstances, including but not limited to contacting trusted domains to
-#   fetch information such as mapping an email address to a Ripple Payment
-#   Network address.
-#
-# [ssl_verify]
-#
-#   0 or 1.
-#
-#   0. HTTPS client connections will not verify certificates.
-#   1. Certificates will be checked for HTTPS client connections.
-#
-#   If not specified, this parameter defaults to 1.
-#
-#
-#
-# [ssl_verify_file] <pathname>
-#
-#   A file system path leading to the certificate verification file for
-#   HTTPS client requests.
-#
-#
-#
-# [ssl_verify_dir] <pathname>
-#
-#   A file system path leading to a file or directory containing the root
-#   certificates that the server will accept for verifying HTTP servers.
-#   Used only for outbound HTTPS client connections.
-#
-#-------------------------------------------------------------------------------
-#
-# 5. Reporting Mode
-#
-#------------
-#
-#   rippled has an optional operating mode called Reporting Mode. In Reporting
-#   Mode, rippled does not connect to the peer to peer network. Instead, rippled
-#   will continuously extract data from one or more rippled servers that are
-#   connected to the peer to peer network (referred to as an ETL source).
-#   Reporting Mode servers will forward RPC requests that require access to the
-#   peer to peer network (submit, fee, etc.) to an ETL source.
-#
-# [reporting]   Settings for Reporting Mode. If and only if this section is
-#               present, rippled will start in reporting mode. This section
-#               contains a list of ETL source names, and key-value pairs. The
-#               ETL source names each correspond to a configuration file
-#               section; the names must match exactly. The key-value pairs are
-#               optional.
-#
-#
-# [<name>]
-#
-#   A series of key/value pairs that specify an ETL source.
-#
-#   source_ip = <ip>
-#
-#       Required. IP address of the ETL source. Can also be a DNS record.
-#
-#   source_ws_port = <port>
-#
-#       Required. Port on which ETL source is accepting unencrypted websocket
-#       connections.
-#
-#   source_grpc_port = <port>
-#
-#       Required for ETL. Port on which ETL source is accepting gRPC requests.
-#       If this option is omitted, this ETL source cannot actually be used for
-#       ETL; the Reporting Mode server can still forward RPCs to this ETL
-#       source, but cannot extract data from this ETL source.
-#
-#
-#   Key-value pairs (all optional):
-#
-#   read_only       Valid values: 0, 1. Default is 0. If set to 1, the server
-#                   will start in strict read-only mode, and will not perform
-#                   ETL. The server will still handle RPC requests, and will
-#                   still forward RPC requests that require access to the p2p
-#                   network.
-#
-#   start_sequence
-#                   Sequence of first ledger to extract if the database is empty.
-#                   ETL extracts ledgers in order. If this setting is absent and
-#                   the database is empty, ETL will start with the next ledger
-#                   validated by the network. If this setting is present and the
-#                   database is not empty, an exception is thrown.
-#
-#   num_markers     Degree of parallelism used during the initial ledger
-#                   download.
-#                   Only used if the database is empty. Valid values
-#                   are 1-256. A higher degree of parallelism results in a
-#                   faster download, but puts more load on the ETL source.
-#                   Default is 2.
-#
-#   Example:
-#
-#   [reporting]
-#   etl_source1
-#   etl_source2
-#   read_only=0
-#   start_sequence=32570
-#   num_markers=8
-#
-#   [etl_source1]
-#   source_ip=1.2.3.4
-#   source_ws_port=6005
-#   source_grpc_port=50051
-#
-#   [etl_source2]
-#   source_ip=5.6.7.8
-#   source_ws_port=6005
-#   source_grpc_port=50051
-#
-#   Minimal Example:
-#
-#   [reporting]
-#   etl_source1
-#
-#   [etl_source1]
-#   source_ip=1.2.3.4
-#   source_ws_port=6005
-#   source_grpc_port=50051
-#
-#
-#   Notes:
-#
-#   Reporting Mode requires Postgres (instead of SQLite). The Postgres
-#   connection info is specified under the [ledger_tx_tables] config section;
-#   see the Database section for further documentation.
-#
-#   Each ETL source specified must have gRPC enabled (by adding a [port_grpc]
-#   section to the config). It is recommended to add a secure_gateway entry to
-#   the gRPC section, in order to bypass the server's rate limiting.
-#   This section needs to be added to the config of the ETL source, not
-#   the config of the reporting node. In the example below, the
-#   reporting server is running at 127.0.0.1. Multiple IPs can be
-#   specified in secure_gateway via a comma-separated list.
-#
-#   [port_grpc]
-#   ip = 0.0.0.0
-#   port = 50051
-#   secure_gateway = 127.0.0.1
-#
-#
-#-------------------------------------------------------------------------------
-#
-# 6. Database
-#
-#------------
-#
-#   rippled creates 4 SQLite databases to hold bookkeeping information
-#   about transactions, local credentials, and various other things.
-#   It also creates the NodeDB, which holds all the objects that
-#   make up the current and historical ledgers. In Reporting Mode, rippled
-#   uses a Postgres database instead of SQLite.
-#
-#   The simplest way to work with Postgres is to install it locally.
-#   When it is running, execute the initdb.sh script in the current
-#   directory as: sudo -u postgres ./initdb.sh
-#   This will create the rippled user and an empty database of the same name.
-#
-#   The size of the NodeDB grows in proportion to the amount of new data and the
-#   amount of historical data (a configurable setting) so the performance of the
-#   underlying storage media where the NodeDB is placed can significantly affect
-#   the performance of the server.
-#
-#   Partial pathnames will be considered relative to the location of
-#   the rippled.cfg file.
-#
-#   [node_db]   Settings for the Node Database (required)
-#
-#   Format (without spaces):
-#       One or more lines of case-insensitive key / value pairs:
-#       <key> '=' <value>
-#       ...
-#
-#   Example:
-#       type=nudb
-#       path=db/nudb
-#
-#   The "type" field must be present and controls the choice of backend:
-#
-#   type = NuDB
-#
-#       NuDB is a high-performance database written by Ripple Labs and optimized
-#       for rippled and solid-state drives.
-#
-#       NuDB maintains its high speed regardless of the amount of history
-#       stored. Online delete may be selected, but is not required. NuDB is
-#       available on all platforms that rippled runs on.
-#
-#   type = RocksDB
-#
-#       RocksDB is an open-source, general-purpose key/value store - see
-#       http://rocksdb.org/ for more details.
-#
-#       RocksDB is an alternative backend for systems that don't use solid-state
-#       drives. Because RocksDB's performance degrades as it stores more data,
-#       keeping full history is not advised, and using online delete is
-#       recommended.
-# -# type = Cassandra -# -# Apache Cassandra is an open-source, distributed key-value store - see -# https://cassandra.apache.org/ for more details. -# -# Cassandra is an alternative backend to be used only with Reporting Mode. -# See the Reporting Mode section for more details about Reporting Mode. -# -# Required keys for NuDB and RocksDB: -# -# path Location to store the database -# -# Required keys for Cassandra: -# -# contact_points IP of a node in the Cassandra cluster -# -# port CQL Native Transport Port -# -# secure_connect_bundle -# Absolute path to a secure connect bundle. When using -# a secure connect bundle, contact_points and port are -# not required. -# -# keyspace Name of Cassandra keyspace to use -# -# table_name Name of table in above keyspace to use -# -# Optional keys -# -# cache_size Size of cache for database records. Default is 16384. -# Setting this value to 0 will use the default value. -# -# cache_age Length of time in minutes to keep database records -# cached. Default is 5 minutes. Setting this value to -# 0 will use the default value. -# -# Note: if neither cache_size nor cache_age is -# specified, the cache for database records will not -# be created. If only one of cache_size or cache_age -# is specified, the cache will be created using the -# default value for the unspecified parameter. -# -# Note: the cache will not be created if online_delete -# is specified. -# -# Optional keys for NuDB or RocksDB: -# -# earliest_seq The default is 32570 to match the XRP ledger -# network's earliest allowed sequence. Alternate -# networks may set this value. Minimum value of 1. -# -# online_delete Minimum value of 256. Enable automatic purging -# of older ledger information. Maintain at least this -# number of ledger records online. Must be greater -# than or equal to ledger_history. -# -# These keys modify the behavior of online_delete, and thus are only -# relevant if online_delete is defined and non-zero: -# -# advisory_delete 0 for disabled, 1 for enabled. If set, the -# administrative RPC call "can_delete" is required -# to enable online deletion of ledger records. -# Online deletion does not run automatically if -# non-zero and the last deletion was on a ledger -# greater than the current "can_delete" setting. -# Default is 0. -# -# delete_batch When automatically purging, SQLite database -# records are deleted in batches. This value -# controls the maximum size of each batch. Larger -# batches keep the databases locked for more time, -# which may cause other functions to fall behind, -# and thus cause the node to lose sync. -# Default is 100. -# -# back_off_milliseconds -# Number of milliseconds to wait between -# online_delete batches to allow other functions -# to catch up. -# Default is 100. -# -# age_threshold_seconds -# The online delete process will only run if the -# latest validated ledger is younger than this -# number of seconds. -# Default is 60. -# -# recovery_wait_seconds -# The online delete process checks periodically -# that rippled is still in sync with the network, -# and that the validated ledger is less than -# 'age_threshold_seconds' old. By default, if it -# is not the online delete process aborts and -# tries again later. If 'recovery_wait_seconds' -# is set and rippled is out of sync, but likely to -# recover quickly, then online delete will wait -# this number of seconds for rippled to get back -# into sync before it aborts. 
-#                       Set this value if the node is otherwise staying
-#                       in sync, or recovering quickly, but the online
-#                       delete process is unable to finish.
-#                       Default is unset.
-#
-#   Optional keys for Cassandra:
-#
-#       username        Username to use if Cassandra cluster requires
-#                       authentication
-#
-#       password        Password to use if Cassandra cluster requires
-#                       authentication
-#
-#       max_requests_outstanding
-#                       Limits the maximum number of concurrent database
-#                       writes. Default is 10 million. For slower clusters,
-#                       large numbers of concurrent writes can overload the
-#                       cluster. Setting this option can help eliminate
-#                       write timeouts and other write errors due to the
-#                       cluster being overloaded.
-#
-#   Notes:
-#       The 'node_db' entry configures the primary, persistent storage.
-#
-#       The 'import_db' is used with the '--import' command line option to
-#       migrate the specified database into the current database given
-#       in the [node_db] section.
-#
-#   [import_db]     Settings for performing a one-time import (optional)
-#   [database_path] Path to the book-keeping databases.
-#
-#   The server creates and maintains 4 to 5 bookkeeping SQLite databases in
-#   the 'database_path' location. If you omit this configuration setting,
-#   the server creates a directory called "db" located in the same place as
-#   your rippled.cfg file.
-#   Partial pathnames are relative to the location of the rippled executable.
-#
-#   [sqlite]        Tuning settings for the SQLite databases (optional)
-#
-#   Format (without spaces):
-#       One or more lines of case-insensitive key / value pairs:
-#       <key> '=' <value>
-#       ...
-#
-#   Example 1:
-#       safety_level=low
-#
-#   Example 2:
-#       journal_mode=off
-#       synchronous=off
-#
-#   WARNING: These settings can have significant effects on data integrity,
-#   particularly in systemic failure scenarios. It is strongly recommended
-#   that they be left at their defaults unless the server is having
-#   performance issues during normal operation or during automatic purging
-#   (online_delete) operations. A warning will be logged on startup if
-#   'ledger_history' is configured to store more than 10,000,000 ledgers and
-#   any of these settings are less safe than the default. This is due to the
-#   inordinate amount of time and bandwidth it will take to safely rebuild a
-#   corrupted database of that size from other peers.
-#
-#   Optional keys:
-#
-#       safety_level    Valid values: high, low
-#                       The default is "high", which tunes the SQLite
-#                       databases in the most reliable mode, and is
-#                       equivalent to:
-#                           journal_mode=wal
-#                           synchronous=normal
-#                           temp_store=file
-#                       "low" is equivalent to:
-#                           journal_mode=memory
-#                           synchronous=off
-#                           temp_store=memory
-#                       These "low" settings trade speed and reduced I/O
-#                       for a higher risk of data loss. See the
-#                       individual settings below for more information.
-#                       This setting may not be combined with any of the
-#                       other tuning settings: "journal_mode",
-#                       "synchronous", or "temp_store".
-#
-#       journal_mode    Valid values: delete, truncate, persist, memory, wal, off
-#                       The default is "wal", which uses a write-ahead
-#                       log to implement database transactions.
-#                       Alternately, "memory" saves disk I/O, but if
-#                       rippled crashes during a transaction, the
-#                       database is likely to be corrupted.
-#                       See https://www.sqlite.org/pragma.html#pragma_journal_mode
-#                       for more details about the available options.
-#                       This setting may not be combined with the
-#                       "safety_level" setting.
-#
-#       synchronous     Valid values: off, normal, full, extra
-#                       The default is "normal", which works well with
-#                       the "wal" journal mode.
-#                       Alternatively, "off"
-#                       allows rippled to continue as soon as data is
-#                       passed to the OS, which can significantly
-#                       increase speed, but risks data corruption if
-#                       the host computer crashes before writing that
-#                       data to disk.
-#                       See https://www.sqlite.org/pragma.html#pragma_synchronous
-#                       for more details about the available options.
-#                       This setting may not be combined with the
-#                       "safety_level" setting.
-#
-#       temp_store      Valid values: default, file, memory
-#                       The default is "file", which will use files
-#                       for temporary database tables and indices.
-#                       Alternatively, "memory" may save I/O, but
-#                       rippled does not currently use many, if any,
-#                       of these temporary objects.
-#                       See https://www.sqlite.org/pragma.html#pragma_temp_store
-#                       for more details about the available options.
-#                       This setting may not be combined with the
-#                       "safety_level" setting.
-#
-#   [ledger_tx_tables] (optional)
-#
-#       conninfo        Info for connecting to Postgres. Format is
-#                       postgres://[username]:[password]@[ip]/[database].
-#                       The database and user must already exist. If this
-#                       section is missing and rippled is running in
-#                       Reporting Mode, rippled will connect as the
-#                       user running rippled to a database with the
-#                       same name. On Linux and Mac OS X, the connection
-#                       will take place using the server's UNIX domain
-#                       socket. On Windows, through the localhost IP
-#                       address. Default is empty.
-#
-#       use_tx_tables   Valid values: 1, 0
-#                       The default is 1 (true). Determines whether to use
-#                       the SQLite transaction database. If set to 0,
-#                       rippled will not write to the transaction database,
-#                       and will reject tx, account_tx and tx_history RPCs.
-#                       In Reporting Mode, this setting is ignored.
-#
-#       max_connections Valid values: any positive integer up to 64-bit
-#                       storage length. This configures the maximum
-#                       number of concurrent connections to Postgres.
-#                       Default is the maximum possible value to
-#                       fit in a 64-bit integer.
-#
-#       timeout         Number of seconds after which idle postgres
-#                       connections are disconnected. If set to 0,
-#                       connections never time out. Default is 600.
-#
-#
-#       remember_ip     Valid values: 1, 0
-#                       Default is 1 (true). Whether to cache host and
-#                       port connection settings.
-#
-#
-#-------------------------------------------------------------------------------
-#
-# 7. Diagnostics
-#
-#---------------
-#
-#   These settings are designed to help server administrators diagnose
-#   problems, and obtain detailed information about the activities being
-#   performed by the rippled process.
-#
-#
-#
-# [debug_logfile]
-#
-#   Specifies where a debug logfile is kept. By default, no debug log is kept.
-#   Unless absolute, the path is relative to the directory containing this file.
-#
-#   Example: debug.log
-#
-#
-#
-# [insight]
-#
-#   Configuration parameters for the Beast.Insight stats collection module.
-#
-#   Insight is a module that collects information from the areas of rippled
-#   that have instrumentation. The configuration parameters control where the
-#   collection metrics are sent. The parameters are expressed as key = value
-#   pairs with no white space. The main parameter is the choice of server:
-#
-#     "server"
-#
-#       Choice of server to send metrics to. Currently the only choice is
-#       "statsd" which sends UDP packets to a StatsD daemon, which must be
-#       running while rippled is running.
More information on StatsD is -# available here: -# https://github.com/b/statsd_spec -# -# When server=statsd, these additional keys are used: -# -# "address" The UDP address and port of the listening StatsD server, -# in the format, n.n.n.n:port. -# -# "prefix" A string prepended to each collected metric. This is used -# to distinguish between different running instances of rippled. -# -# If this section is missing, or the server type is unspecified or unknown, -# statistics are not collected or reported. -# -# Example: -# -# [insight] -# server=statsd -# address=192.168.0.95:4201 -# prefix=my_validator -# -# [perf] -# -# Configuration of performance logging. If enabled, write Json-formatted -# performance-oriented data periodically to a distinct log file. -# -# "perf_log" A string specifying the pathname of the performance log -# file. A relative pathname will log relative to the -# configuration directory. Required to enable -# performance logging. -# -# "log_interval" Integer value for number of seconds between writing -# to performance log. Default 1. -# -# Example: -# [perf] -# perf_log=/var/log/rippled/perf.log -# log_interval=2 -# -#------------------------------------------------------------------------------- -# -# 8. Voting -# -#---------- -# -# The vote settings configure settings for the entire Ripple network. -# While a single instance of rippled cannot unilaterally enforce network-wide -# settings, these choices become part of the instance's vote during the -# consensus process for each voting ledger. -# -# [voting] -# -# A set of key/value pair parameters used during voting ledgers. -# -# reference_fee = -# -# The cost of the reference transaction fee, specified in drops. -# The reference transaction is the simplest form of transaction. -# It represents an XRP payment between two parties. -# -# If this parameter is unspecified, rippled will use an internal -# default. Don't change this without understanding the consequences. -# -# Example: -# reference_fee = 10 # 10 drops -# -# account_reserve = -# -# The account reserve requirement is specified in drops. The portion of an -# account's XRP balance that is at or below the reserve may only be -# spent on transaction fees, and not transferred out of the account. -# -# If this parameter is unspecified, rippled will use an internal -# default. Don't change this without understanding the consequences. -# -# Example: -# account_reserve = 10000000 # 10 XRP -# -# owner_reserve = -# -# The owner reserve is the amount of XRP reserved in the account for -# each ledger item owned by the account. Ledger items an account may -# own include trust lines, open orders, and tickets. -# -# If this parameter is unspecified, rippled will use an internal -# default. Don't change this without understanding the consequences. -# -# Example: -# owner_reserve = 2000000 # 2 XRP -# -#------------------------------------------------------------------------------- -# -# 9. Misc Settings -# -#----------------- -# -# [node_size] -# -# Tunes the servers based on the expected load and available memory. Legal -# sizes are "tiny", "small", "medium", "large", and "huge". We recommend -# you start at the default and raise the setting if you have extra memory. -# -# The code attempts to automatically determine the appropriate size for -# this parameter based on the amount of RAM and the number of execution -# cores available to the server. 
The current decision matrix is:
-#
-#   |         |         Cores          |
-#   |---------|------------------------|
-#   | RAM     |  1   | 2 or 3 |  >= 4  |
-#   |---------|------|--------|--------|
-#   | < ~8GB  | tiny | tiny   | tiny   |
-#   | < ~12GB | tiny | small  | small  |
-#   | < ~16GB | tiny | small  | medium |
-#   | < ~24GB | tiny | small  | large  |
-#   | < ~32GB | tiny | small  | huge   |
-#
-# [signing_support]
-#
-#   Specifies whether the server will accept "sign" and "sign_for" commands
-#   from remote users. Even if the commands are sent over a secure protocol
-#   like secure websocket, this should generally be discouraged, because it
-#   requires sending the secret to use for signing to the server. In order
-#   to sign transactions, users should prefer to use a standalone signing
-#   tool instead.
-#
-#   This flag has no effect on the "sign" and "sign_for" command line options
-#   that rippled makes available.
-#
-#   The default value of this field is "false".
-#
-#   Example:
-#
-#   [signing_support]
-#   true
-#
-# [crawl]
-#
-#   List of options to control what data is reported through the /crawl endpoint.
-#   See https://xrpl.org/peer-crawler.html
-#
-#   <0|1>
-#
-#       Enable or disable access to /crawl requests. Default is '1' which
-#       enables access.
-#
-#   overlay = <0|1>
-#
-#       Report information about peers this server is connected to, similar
-#       to the "peers" RPC API. Default is '1' which means to report peer
-#       overlay info.
-#
-#   server = <0|1>
-#
-#       Report information about the local server, similar to the "server_state"
-#       RPC API. Default is '1' which means to report local server info.
-#
-#   counts = <0|1>
-#
-#       Report information about the local server health counters, similar to
-#       the "get_counts" RPC API. Default is '0' which means not to report
-#       server counts.
-#
-#   unl = <0|1>
-#
-#       Report information about the local server's validator lists, similar to
-#       the "validators" and "validator_list_sites" RPC APIs. Default is '1'
-#       which means to report server validator lists.
-#
-#   Examples:
-#
-#   [crawl]
-#   0
-#
-#   [crawl]
-#   overlay = 1
-#   server = 1
-#   counts = 0
-#   unl = 1
-#
-# [vl]
-#
-#   Options to control what data is reported through the /vl endpoint
-#   See [...]
-#
-#   enable = <0|1>
-#
-#       Enable or disable access to /vl requests. Default is '1' which
-#       enables access.
-#
-# [beta_rpc_api]
-#
-#   0 or 1.
-#
-#   0: Disable the beta API version for JSON-RPC and WebSocket [default]
-#   1: Enable the beta API version for testing. The beta API version
-#      contains breaking changes that require a new API version number.
-#      They are not ready for public consumption.
-#
-#-------------------------------------------------------------------------------
-#
-# 10. Example Settings
-#
-#--------------------
-#
-#   Administrators can use these values as a starting point for configuring
-#   their instance of rippled, but each value should be checked to make sure
-#   it meets the business requirements for the organization.
-#
-# Server
-#
-#   These example configuration settings create these ports:
-#
-#   "peer"
-#
-#       Peer protocol open to everyone. This is required to accept
-#       incoming rippled connections. This does not affect automatic
-#       or manual outgoing Peer protocol connections.
-#
-#   "rpc"
-#
-#       Administrative RPC commands over HTTPS, when originating from
-#       the same machine (via the loopback adapter at 127.0.0.1).
-#
-#   "wss_admin"
-#
-#       Admin level API commands over Secure Websockets, when originating
-#       from the same machine (via the loopback adapter at 127.0.0.1).
-#
-#       This port is commented out but can be enabled by removing
-#       the '#' from each corresponding line including the entry under [server]
-#
-#   "wss_public"
-#
-#       Guest level API commands over Secure Websockets, open to everyone.
-#
-#   For HTTPS and Secure Websockets ports, if no certificate and key file
-#   are specified then a self-signed certificate will be generated on startup.
-#   If you have a certificate and key file, uncomment the corresponding lines
-#   and ensure the paths to the files are correct.
-#
-#   NOTE
-#
-#       To accept connections on well known ports such as 80 (HTTP) or
-#       443 (HTTPS), most operating systems will require rippled to
-#       run with administrator privileges, or else rippled will not start.
-
-[server]
-port_rpc_admin_local
-port_peer
-port_ws_admin_local
-port_ws_public
-#port_grpc
-#ssl_key = /etc/ssl/private/server.key
-#ssl_cert = /etc/ssl/certs/server.crt
-
-[port_rpc_admin_local]
-port = 5006
-ip = 127.0.0.1
-admin = 127.0.0.1
-protocol = http
-
-[port_peer]
-port = 51235
-ip = 0.0.0.0
-# alternatively, to accept connections on IPv4 + IPv6, use:
-#ip = ::
-protocol = peer
-
-[port_ws_admin_local]
-port = 6007
-ip = 127.0.0.1
-admin = 127.0.0.1
-protocol = ws
-
-#[port_grpc]
-#port = 50051
-#ip = 0.0.0.0
-#secure_gateway = 127.0.0.1
-
-[port_ws_public]
-port = 6008
-ip = 127.0.0.1
-protocol = ws
-
-#-------------------------------------------------------------------------------
-
-# This is the primary persistent datastore for rippled. This includes transaction
-# metadata, account states, and ledger headers. Helpful information can be
-# found at https://xrpl.org/capacity-planning.html#node-db-type
-# type=NuDB is recommended for non-validators with fast SSDs. Validators or
-# slow / spinning disks should use RocksDB. Caution: Spinning disks are
-# not recommended. They do not perform well enough to consistently remain
-# synced to the network.
-# online_delete=512 is recommended to delete old ledgers while maintaining at
-# least 512.
-# advisory_delete=0 allows the online delete process to run automatically
-# when the node has approximately two times the "online_delete" value of
-# ledgers. No external administrative command is required to initiate
-# deletion.
-[node_db]
-type=NuDB
-path=/var/lib/rippled-reporting/db/nudb
-# online_delete=512
-advisory_delete=0
-
-[database_path]
-/var/lib/rippled-reporting/db
-
-# To use Postgres, uncomment this section and fill in the appropriate connection
-# info. Postgres can only be used in Reporting Mode.
-# To disable writing to the transaction database, uncomment this section, and
-# set use_tx_tables=0
-# [ledger_tx_tables]
-# conninfo = postgres://<username>:<password>@localhost/<database>
-# use_tx_tables=1
-
-
-# This needs to be an absolute directory reference, not a relative one.
-# Modify this value as required.
-[debug_logfile]
-/var/log/rippled-reporting/debug.log
-
-# To use the XRP test network
-# (see https://xrpl.org/connect-your-rippled-to-the-xrp-test-net.html),
-# use the following [ips] section:
-# [ips]
-# r.altnet.rippletest.net 51235
-
-# File containing trusted validator keys or validator list publishers.
-# Unless an absolute path is specified, it will be considered relative to the
-# folder in which the rippled.cfg file is located.
-[validators_file]
-/opt/rippled-reporting/etc/validators.txt
-
-# Turn down default logging to save disk space in the long run.
-# Valid values here are trace, debug, info, warning, error, and fatal
-[rpc_startup]
-{ "command": "log_level", "severity": "info" }
-
-# If ssl_verify is 1, certificates will be validated.
-# To allow the use of self-signed certificates for development or internal use,
-# set ssl_verify to 0.
-[ssl_verify]
-1
-
-
-# To run in Reporting Mode, uncomment this section and fill in the appropriate
-# connection info for one or more ETL sources.
-[reporting]
-etl_source
-
-[etl_source]
-source_grpc_port=50051
-source_ws_port=6005
-source_ip=127.0.0.1
diff --git a/cmake/RippledCore.cmake b/cmake/RippledCore.cmake
index 18a424c484b..3b850354ed4 100644
--- a/cmake/RippledCore.cmake
+++ b/cmake/RippledCore.cmake
@@ -129,14 +129,6 @@ if(xrpld)
     target_compile_definitions(rippled PRIVATE RIPPLED_RUNNING_IN_CI)
   endif ()
 
-  if(reporting)
-    set(suffix -reporting)
-    set_target_properties(rippled PROPERTIES OUTPUT_NAME rippled-reporting)
-    get_target_property(BIN_NAME rippled OUTPUT_NAME)
-    message(STATUS "Reporting mode build: rippled renamed ${BIN_NAME}")
-    target_compile_definitions(rippled PRIVATE RIPPLED_REPORTING)
-  endif()
-
   # any files that don't play well with unity should be added here
   if(tests)
     set_source_files_properties(
diff --git a/cmake/RippledSettings.cmake b/cmake/RippledSettings.cmake
index a431bb61389..b81843cd5b5 100644
--- a/cmake/RippledSettings.cmake
+++ b/cmake/RippledSettings.cmake
@@ -10,8 +10,6 @@ option(assert "Enables asserts, even in release builds" OFF)
 
 option(xrpld "Build xrpld" ON)
 
-option(reporting "Build rippled with reporting mode enabled" OFF)
-
 option(tests "Build tests" ON)
 
 option(unity "Creates a build using UNITY support in cmake. This is the default" ON)
diff --git a/conanfile.py b/conanfile.py
index 425fee8b682..14fc49a1946 100644
--- a/conanfile.py
+++ b/conanfile.py
@@ -15,7 +15,6 @@ class Xrpl(ConanFile):
         'coverage': [True, False],
         'fPIC': [True, False],
         'jemalloc': [True, False],
-        'reporting': [True, False],
         'rocksdb': [True, False],
         'shared': [True, False],
         'static': [True, False],
@@ -44,7 +43,6 @@
         'coverage': False,
         'fPIC': True,
         'jemalloc': False,
-        'reporting': False,
         'rocksdb': True,
         'shared': False,
         'static': True,
@@ -52,8 +50,6 @@
         'unity': False,
         'xrpld': False,
 
-        'cassandra-cpp-driver/*:shared': False,
-        'cassandra-cpp-driver/*:use_atomic': None,
         'date/*:header_only': True,
         'grpc/*:shared': False,
         'grpc/*:secure': True,
@@ -72,7 +68,6 @@
         'libarchive/*:with_pcreposix': False,
         'libarchive/*:with_xattr': False,
         'libarchive/*:with_zlib': False,
-        'libpq/*:shared': False,
         'lz4/*:shared': False,
         'openssl/*:shared': False,
         'protobuf/*:shared': False,
@@ -110,9 +105,6 @@ def requirements(self):
             self.requires('sqlite3/3.42.0', force=True)
         if self.options.jemalloc:
             self.requires('jemalloc/5.3.0')
-        if self.options.reporting:
-            self.requires('cassandra-cpp-driver/2.15.3')
-            self.requires('libpq/14.7')
         if self.options.rocksdb:
             self.requires('rocksdb/6.29.5')
 
@@ -139,7 +131,6 @@ def generate(self):
         tc.variables['assert'] = self.options.assertions
         tc.variables['coverage'] = self.options.coverage
         tc.variables['jemalloc'] = self.options.jemalloc
-        tc.variables['reporting'] = self.options.reporting
        tc.variables['rocksdb'] = self.options.rocksdb
         tc.variables['BUILD_SHARED_LIBS'] = self.options.shared
         tc.variables['static'] = self.options.static
diff --git a/include/xrpl/proto/org/xrpl/rpc/v1/xrp_ledger.proto b/include/xrpl/proto/org/xrpl/rpc/v1/xrp_ledger.proto
index 995edba48a1..01a23fbe375
100644 --- a/include/xrpl/proto/org/xrpl/rpc/v1/xrp_ledger.proto +++ b/include/xrpl/proto/org/xrpl/rpc/v1/xrp_ledger.proto @@ -11,7 +11,7 @@ import "org/xrpl/rpc/v1/get_ledger_diff.proto"; // These methods are binary only methods for retrieiving arbitrary ledger state -// via gRPC. These methods are used by clio and reporting mode, but can also be +// via gRPC. These methods are used by clio, but can also be // used by any client that wants to extract ledger state in an efficient manner. // They do not directly mimic the JSON equivalent methods. service XRPLedgerAPIService { diff --git a/include/xrpl/protocol/ErrorCodes.h b/include/xrpl/protocol/ErrorCodes.h index 6d5590ec605..d8ec3052b7b 100644 --- a/include/xrpl/protocol/ErrorCodes.h +++ b/include/xrpl/protocol/ErrorCodes.h @@ -136,8 +136,8 @@ enum error_code_i { rpcINVALID_LGR_RANGE = 79, rpcEXPIRED_VALIDATOR_LIST = 80, - // Reporting - rpcFAILED_TO_FORWARD = 90, + // unused = 90, + // DEPRECATED. New code must not use this value. rpcREPORTING_UNSUPPORTED = 91, rpcOBJECT_NOT_FOUND = 92, @@ -148,8 +148,7 @@ enum error_code_i { // Oracle rpcORACLE_MALFORMED = 94, - rpcLAST = - rpcORACLE_MALFORMED // rpcLAST should always equal the last code.= + rpcLAST = rpcORACLE_MALFORMED // rpcLAST should always equal the last code. }; /** Codes returned in the `warnings` array of certain RPC commands. @@ -160,7 +159,7 @@ enum warning_code_i { warnRPC_UNSUPPORTED_MAJORITY = 1001, warnRPC_AMENDMENT_BLOCKED = 1002, warnRPC_EXPIRED_VALIDATOR_LIST = 1003, - warnRPC_REPORTING = 1004 + // unused = 1004 }; //------------------------------------------------------------------------------ diff --git a/src/libxrpl/protocol/ErrorCodes.cpp b/src/libxrpl/protocol/ErrorCodes.cpp index 28024fab093..4c934f4fd53 100644 --- a/src/libxrpl/protocol/ErrorCodes.cpp +++ b/src/libxrpl/protocol/ErrorCodes.cpp @@ -71,7 +71,6 @@ constexpr static ErrorInfo unorderedErrorInfos[]{ {rpcDST_ISR_MALFORMED, "dstIsrMalformed", "Destination issuer is malformed.", 400}, {rpcEXCESSIVE_LGR_RANGE, "excessiveLgrRange", "Ledger range exceeds 1000.", 400}, {rpcFORBIDDEN, "forbidden", "Bad credentials.", 403}, - {rpcFAILED_TO_FORWARD, "failedToForward", "Failed to forward request to p2p node", 503}, {rpcHIGH_FEE, "highFee", "Current transaction fee exceeds your limit.", 402}, {rpcINTERNAL, "internal", "Internal error.", 500}, {rpcINVALID_LGR_RANGE, "invalidLgrRange", "Ledger range is invalid.", 400}, @@ -97,7 +96,6 @@ constexpr static ErrorInfo unorderedErrorInfos[]{ {rpcNO_PF_REQUEST, "noPathRequest", "No pathfinding request in progress.", 404}, {rpcOBJECT_NOT_FOUND, "objectNotFound", "The requested object was not found.", 404}, {rpcPUBLIC_MALFORMED, "publicMalformed", "Public key is malformed.", 400}, - {rpcREPORTING_UNSUPPORTED, "reportingUnsupported", "Requested operation not supported by reporting mode server", 405}, {rpcSENDMAX_MALFORMED, "sendMaxMalformed", "SendMax amount malformed.", 400}, {rpcSIGNING_MALFORMED, "signingMalformed", "Signing of transaction is malformed.", 400}, {rpcSLOW_DOWN, "slowDown", "You are placing too much load on the server.", 429}, diff --git a/src/test/rpc/ReportingETL_test.cpp b/src/test/rpc/ReportingETL_test.cpp deleted file mode 100644 index 8a030938832..00000000000 --- a/src/test/rpc/ReportingETL_test.cpp +++ /dev/null @@ -1,1144 +0,0 @@ - -//------------------------------------------------------------------------------ -/* - This file is part of rippled: https://github.com/ripple/rippled - Copyright (c) 2020 Ripple Labs Inc. 
- - Permission to use, copy, modify, and/or distribute this software for any - purpose with or without fee is hereby granted, provided that the above - copyright notice and this permission notice appear in all copies. - - THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. -*/ -//============================================================================== - -#include -#include -#include - -#include -#include -#include -#include -#include - -namespace ripple { -namespace test { - -class ReportingETL_test : public beast::unit_test::suite -{ - // gRPC stuff - class GrpcLedgerClient : public GRPCTestClientBase - { - public: - org::xrpl::rpc::v1::GetLedgerRequest request; - org::xrpl::rpc::v1::GetLedgerResponse reply; - - explicit GrpcLedgerClient(std::string const& port) - : GRPCTestClientBase(port) - { - } - - void - GetLedger() - { - status = stub_->GetLedger(&context, request, &reply); - } - }; - void - testGetLedger() - { - testcase("GetLedger"); - using namespace test::jtx; - std::unique_ptr config = envconfig(addGrpcConfig); - std::string grpcPort = - *(*config)[SECTION_PORT_GRPC].get("port"); - Env env(*this, std::move(config)); - - env.close(); - - auto ledger = env.app().getLedgerMaster().getLedgerBySeq(3); - - BEAST_EXPECT(env.current()->info().seq == 4); - - auto grpcLedger = [&grpcPort]( - auto sequence, - bool transactions, - bool expand, - bool get_objects, - bool get_object_neighbors) { - GrpcLedgerClient grpcClient{grpcPort}; - - grpcClient.request.mutable_ledger()->set_sequence(sequence); - grpcClient.request.set_transactions(transactions); - grpcClient.request.set_expand(expand); - grpcClient.request.set_get_objects(get_objects); - grpcClient.request.set_get_object_neighbors(get_object_neighbors); - - grpcClient.GetLedger(); - return std::make_pair(grpcClient.status, grpcClient.reply); - }; - - { - auto [status, reply] = grpcLedger(3, false, false, false, false); - - BEAST_EXPECT(status.ok()); - BEAST_EXPECT(reply.validated()); - BEAST_EXPECT(!reply.has_hashes_list()); - BEAST_EXPECT(!reply.has_transactions_list()); - BEAST_EXPECT(!reply.skiplist_included()); - BEAST_EXPECT(reply.ledger_objects().objects_size() == 0); - - Serializer s; - addRaw(ledger->info(), s, true); - BEAST_EXPECT(s.slice() == makeSlice(reply.ledger_header())); - } - - Account const alice{"alice"}; - Account const bob{"bob"}; - env.fund(XRP(10000), alice); - env.fund(XRP(10000), bob); - env.close(); - - ledger = env.app().getLedgerMaster().getLedgerBySeq(4); - - std::vector hashes; - std::vector> transactions; - std::vector> metas; - for (auto& [sttx, meta] : ledger->txs) - { - hashes.push_back(sttx->getTransactionID()); - transactions.push_back(sttx); - metas.push_back(meta); - } - - Serializer s; - addRaw(ledger->info(), s, true); - - { - auto [status, reply] = grpcLedger(4, true, false, false, false); - BEAST_EXPECT(status.ok()); - BEAST_EXPECT(reply.validated()); - BEAST_EXPECT(reply.has_hashes_list()); - BEAST_EXPECT(reply.hashes_list().hashes_size() == hashes.size()); - BEAST_EXPECT( - uint256::fromVoid(reply.hashes_list().hashes(0).data()) == - hashes[0]); - 
BEAST_EXPECT( - uint256::fromVoid(reply.hashes_list().hashes(1).data()) == - hashes[1]); - BEAST_EXPECT( - uint256::fromVoid(reply.hashes_list().hashes(2).data()) == - hashes[2]); - BEAST_EXPECT( - uint256::fromVoid(reply.hashes_list().hashes(3).data()) == - hashes[3]); - - BEAST_EXPECT(!reply.has_transactions_list()); - BEAST_EXPECT(!reply.skiplist_included()); - BEAST_EXPECT(reply.ledger_objects().objects_size() == 0); - - BEAST_EXPECT(s.slice() == makeSlice(reply.ledger_header())); - } - - { - auto [status, reply] = grpcLedger(4, true, true, false, false); - - BEAST_EXPECT(status.ok()); - BEAST_EXPECT(reply.validated()); - BEAST_EXPECT(!reply.has_hashes_list()); - - BEAST_EXPECT(reply.has_transactions_list()); - BEAST_EXPECT(reply.transactions_list().transactions_size() == 4); - - BEAST_EXPECT( - makeSlice(reply.transactions_list() - .transactions(0) - .transaction_blob()) == - transactions[0]->getSerializer().slice()); - - BEAST_EXPECT( - makeSlice(reply.transactions_list() - .transactions(0) - .metadata_blob()) == - metas[0]->getSerializer().slice()); - - BEAST_EXPECT( - makeSlice(reply.transactions_list() - .transactions(1) - .transaction_blob()) == - transactions[1]->getSerializer().slice()); - - BEAST_EXPECT( - makeSlice(reply.transactions_list() - .transactions(1) - .metadata_blob()) == - metas[1]->getSerializer().slice()); - - BEAST_EXPECT( - makeSlice(reply.transactions_list() - .transactions(2) - .transaction_blob()) == - transactions[2]->getSerializer().slice()); - - BEAST_EXPECT( - makeSlice(reply.transactions_list() - .transactions(2) - .metadata_blob()) == - metas[2]->getSerializer().slice()); - - BEAST_EXPECT( - makeSlice(reply.transactions_list() - .transactions(3) - .transaction_blob()) == - transactions[3]->getSerializer().slice()); - - BEAST_EXPECT( - makeSlice(reply.transactions_list() - .transactions(3) - .metadata_blob()) == - metas[3]->getSerializer().slice()); - - BEAST_EXPECT(!reply.skiplist_included()); - BEAST_EXPECT(reply.ledger_objects().objects_size() == 0); - - BEAST_EXPECT(s.slice() == makeSlice(reply.ledger_header())); - } - - { - auto [status, reply] = grpcLedger(4, true, true, true, false); - - BEAST_EXPECT(status.ok()); - BEAST_EXPECT(reply.validated()); - BEAST_EXPECT(!reply.has_hashes_list()); - - BEAST_EXPECT(reply.has_transactions_list()); - BEAST_EXPECT(reply.transactions_list().transactions_size() == 4); - - BEAST_EXPECT( - makeSlice(reply.transactions_list() - .transactions(0) - .transaction_blob()) == - transactions[0]->getSerializer().slice()); - - BEAST_EXPECT( - makeSlice(reply.transactions_list() - .transactions(0) - .metadata_blob()) == - metas[0]->getSerializer().slice()); - - BEAST_EXPECT( - makeSlice(reply.transactions_list() - .transactions(1) - .transaction_blob()) == - transactions[1]->getSerializer().slice()); - - BEAST_EXPECT( - makeSlice(reply.transactions_list() - .transactions(1) - .metadata_blob()) == - metas[1]->getSerializer().slice()); - - BEAST_EXPECT( - makeSlice(reply.transactions_list() - .transactions(2) - .transaction_blob()) == - transactions[2]->getSerializer().slice()); - - BEAST_EXPECT( - makeSlice(reply.transactions_list() - .transactions(2) - .metadata_blob()) == - metas[2]->getSerializer().slice()); - - BEAST_EXPECT( - makeSlice(reply.transactions_list() - .transactions(3) - .transaction_blob()) == - transactions[3]->getSerializer().slice()); - - BEAST_EXPECT( - makeSlice(reply.transactions_list() - .transactions(3) - .metadata_blob()) == - metas[3]->getSerializer().slice()); - 
BEAST_EXPECT(reply.skiplist_included()); - - BEAST_EXPECT(s.slice() == makeSlice(reply.ledger_header())); - - auto parent = env.app().getLedgerMaster().getLedgerBySeq(3); - - SHAMap::Delta differences; - - int maxDifferences = std::numeric_limits::max(); - - bool res = parent->stateMap().compare( - ledger->stateMap(), differences, maxDifferences); - BEAST_EXPECT(res); - - size_t idx = 0; - for (auto& [k, v] : differences) - { - BEAST_EXPECT( - k == - uint256::fromVoid( - reply.ledger_objects().objects(idx).key().data())); - if (v.second) - { - BEAST_EXPECT( - v.second->slice() == - makeSlice(reply.ledger_objects().objects(idx).data())); - } - ++idx; - } - } - { - auto [status, reply] = grpcLedger(4, true, true, true, true); - - BEAST_EXPECT(status.ok()); - BEAST_EXPECT(reply.validated()); - BEAST_EXPECT(!reply.has_hashes_list()); - BEAST_EXPECT(reply.object_neighbors_included()); - - BEAST_EXPECT(reply.has_transactions_list()); - BEAST_EXPECT(reply.transactions_list().transactions_size() == 4); - - BEAST_EXPECT( - makeSlice(reply.transactions_list() - .transactions(0) - .transaction_blob()) == - transactions[0]->getSerializer().slice()); - - BEAST_EXPECT( - makeSlice(reply.transactions_list() - .transactions(0) - .metadata_blob()) == - metas[0]->getSerializer().slice()); - - BEAST_EXPECT( - makeSlice(reply.transactions_list() - .transactions(1) - .transaction_blob()) == - transactions[1]->getSerializer().slice()); - - BEAST_EXPECT( - makeSlice(reply.transactions_list() - .transactions(1) - .metadata_blob()) == - metas[1]->getSerializer().slice()); - - BEAST_EXPECT( - makeSlice(reply.transactions_list() - .transactions(2) - .transaction_blob()) == - transactions[2]->getSerializer().slice()); - - BEAST_EXPECT( - makeSlice(reply.transactions_list() - .transactions(2) - .metadata_blob()) == - metas[2]->getSerializer().slice()); - - BEAST_EXPECT( - makeSlice(reply.transactions_list() - .transactions(3) - .transaction_blob()) == - transactions[3]->getSerializer().slice()); - - BEAST_EXPECT( - makeSlice(reply.transactions_list() - .transactions(3) - .metadata_blob()) == - metas[3]->getSerializer().slice()); - BEAST_EXPECT(reply.skiplist_included()); - - BEAST_EXPECT(s.slice() == makeSlice(reply.ledger_header())); - - auto parent = env.app().getLedgerMaster().getLedgerBySeq(3); - - SHAMap::Delta differences; - - int maxDifferences = std::numeric_limits::max(); - - bool res = parent->stateMap().compare( - ledger->stateMap(), differences, maxDifferences); - BEAST_EXPECT(res); - - size_t idx = 0; - - for (auto& [k, v] : differences) - { - auto obj = reply.ledger_objects().objects(idx); - BEAST_EXPECT(k == uint256::fromVoid(obj.key().data())); - if (v.second) - { - BEAST_EXPECT(v.second->slice() == makeSlice(obj.data())); - } - else - BEAST_EXPECT(obj.data().size() == 0); - - if (!(v.first && v.second)) - { - auto succ = ledger->stateMap().upper_bound(k); - auto pred = ledger->stateMap().lower_bound(k); - - if (succ != ledger->stateMap().end()) - BEAST_EXPECT( - succ->key() == - uint256::fromVoid(obj.successor().data())); - else - BEAST_EXPECT(obj.successor().size() == 0); - if (pred != ledger->stateMap().end()) - BEAST_EXPECT( - pred->key() == - uint256::fromVoid(obj.predecessor().data())); - else - BEAST_EXPECT(obj.predecessor().size() == 0); - } - ++idx; - } - } - - // Delete an account - - env(noop(alice)); - - std::uint32_t const ledgerCount{ - env.current()->seq() + 257 - env.seq(alice)}; - - for (std::uint32_t i = 0; i < ledgerCount; ++i) - env.close(); - - auto const 
acctDelFee{drops(env.current()->fees().increment)}; - env(acctdelete(alice, bob), fee(acctDelFee)); - env.close(); - - { - auto [status, reply] = - grpcLedger(env.closed()->seq(), true, true, true, true); - - BEAST_EXPECT(status.ok()); - BEAST_EXPECT(reply.validated()); - auto base = - env.app().getLedgerMaster().getLedgerBySeq(env.closed()->seq()); - - auto parent = env.app().getLedgerMaster().getLedgerBySeq( - env.closed()->seq() - 1); - - SHAMap::Delta differences; - - int maxDifferences = std::numeric_limits::max(); - - bool res = parent->stateMap().compare( - base->stateMap(), differences, maxDifferences); - BEAST_EXPECT(res); - - size_t idx = 0; - for (auto& [k, v] : differences) - { - auto obj = reply.ledger_objects().objects(idx); - BEAST_EXPECT(k == uint256::fromVoid(obj.key().data())); - if (v.second) - { - BEAST_EXPECT( - v.second->slice() == - makeSlice(reply.ledger_objects().objects(idx).data())); - } - else - BEAST_EXPECT(obj.data().size() == 0); - if (!(v.first && v.second)) - { - auto succ = base->stateMap().upper_bound(k); - auto pred = base->stateMap().lower_bound(k); - - if (succ != base->stateMap().end()) - BEAST_EXPECT( - succ->key() == - uint256::fromVoid(obj.successor().data())); - else - BEAST_EXPECT(obj.successor().size() == 0); - if (pred != base->stateMap().end()) - BEAST_EXPECT( - pred->key() == - uint256::fromVoid(obj.predecessor().data())); - else - BEAST_EXPECT(obj.predecessor().size() == 0); - } - - ++idx; - } - } - } - - // gRPC stuff - class GrpcLedgerDataClient : public GRPCTestClientBase - { - public: - org::xrpl::rpc::v1::GetLedgerDataRequest request; - org::xrpl::rpc::v1::GetLedgerDataResponse reply; - - explicit GrpcLedgerDataClient(std::string const& port) - : GRPCTestClientBase(port) - { - } - - void - GetLedgerData() - { - status = stub_->GetLedgerData(&context, request, &reply); - } - }; - void - testGetLedgerData() - { - testcase("GetLedgerData"); - using namespace test::jtx; - std::unique_ptr config = envconfig(addGrpcConfig); - std::string grpcPort = - *(*config)[SECTION_PORT_GRPC].get("port"); - Env env(*this, std::move(config)); - auto grpcLedgerData = [&grpcPort]( - auto sequence, std::string marker = "") { - GrpcLedgerDataClient grpcClient{grpcPort}; - - grpcClient.request.mutable_ledger()->set_sequence(sequence); - if (marker.size()) - { - grpcClient.request.set_marker(marker); - } - - grpcClient.GetLedgerData(); - return std::make_pair(grpcClient.status, grpcClient.reply); - }; - - Account const alice{"alice"}; - env.fund(XRP(100000), alice); - - int num_accounts = 10; - - for (auto i = 0; i < num_accounts; i++) - { - Account const bob{std::string("bob") + std::to_string(i)}; - env.fund(XRP(1000), bob); - } - env.close(); - - { - auto [status, reply] = grpcLedgerData(env.closed()->seq()); - BEAST_EXPECT(status.ok()); - - BEAST_EXPECT( - reply.ledger_objects().objects_size() == num_accounts + 4); - BEAST_EXPECT(reply.marker().size() == 0); - auto ledger = env.closed(); - size_t idx = 0; - for (auto& sle : ledger->sles) - { - BEAST_EXPECT( - sle->getSerializer().slice() == - makeSlice(reply.ledger_objects().objects(idx).data())); - ++idx; - } - } - - { - auto [status, reply] = - grpcLedgerData(env.closed()->seq(), "bad marker"); - BEAST_EXPECT(!status.ok()); - BEAST_EXPECT( - status.error_code() == grpc::StatusCode::INVALID_ARGUMENT); - } - - num_accounts = 3000; - - for (auto i = 0; i < num_accounts; i++) - { - Account const cat{std::string("cat") + std::to_string(i)}; - env.fund(XRP(1000), cat); - if (i % 100 == 0) - env.close(); - } - 
env.close(); - - { - auto [status, reply] = grpcLedgerData(env.closed()->seq()); - BEAST_EXPECT(status.ok()); - - int maxLimit = RPC::Tuning::pageLength(true); - BEAST_EXPECT(reply.ledger_objects().objects_size() == maxLimit); - BEAST_EXPECT(reply.marker().size() != 0); - - auto [status2, reply2] = - grpcLedgerData(env.closed()->seq(), reply.marker()); - BEAST_EXPECT(status2.ok()); - BEAST_EXPECT(reply2.marker().size() == 0); - - auto ledger = env.closed(); - size_t idx = 0; - for (auto& sle : ledger->sles) - { - auto& obj = idx < maxLimit - ? reply.ledger_objects().objects(idx) - : reply2.ledger_objects().objects(idx - maxLimit); - - BEAST_EXPECT( - sle->getSerializer().slice() == makeSlice(obj.data())); - ++idx; - } - BEAST_EXPECT( - idx == - reply.ledger_objects().objects_size() + - reply2.ledger_objects().objects_size()); - } - } - - // gRPC stuff - class GrpcLedgerDiffClient : public GRPCTestClientBase - { - public: - org::xrpl::rpc::v1::GetLedgerDiffRequest request; - org::xrpl::rpc::v1::GetLedgerDiffResponse reply; - - explicit GrpcLedgerDiffClient(std::string const& port) - : GRPCTestClientBase(port) - { - } - - void - GetLedgerDiff() - { - status = stub_->GetLedgerDiff(&context, request, &reply); - } - }; - - void - testGetLedgerDiff() - { - testcase("GetLedgerDiff"); - using namespace test::jtx; - std::unique_ptr config = envconfig(addGrpcConfig); - std::string grpcPort = - *(*config)[SECTION_PORT_GRPC].get("port"); - Env env(*this, std::move(config)); - - auto grpcLedgerDiff = [&grpcPort]( - auto baseSequence, auto desiredSequence) { - GrpcLedgerDiffClient grpcClient{grpcPort}; - - grpcClient.request.mutable_base_ledger()->set_sequence( - baseSequence); - grpcClient.request.mutable_desired_ledger()->set_sequence( - desiredSequence); - grpcClient.request.set_include_blobs(true); - - grpcClient.GetLedgerDiff(); - return std::make_pair(grpcClient.status, grpcClient.reply); - }; - - int num_accounts = 20; - for (auto i = 0; i < num_accounts; i++) - { - Account const cat{std::string("cat") + std::to_string(i)}; - env.fund(XRP(1000), cat); - if (i % 2 == 0) - env.close(); - } - env.close(); - - auto compareDiffs = [&](auto baseSequence, auto desiredSequence) { - auto [status, reply] = - grpcLedgerDiff(baseSequence, desiredSequence); - - BEAST_EXPECT(status.ok()); - auto desired = - env.app().getLedgerMaster().getLedgerBySeq(desiredSequence); - - auto base = - env.app().getLedgerMaster().getLedgerBySeq(baseSequence); - - SHAMap::Delta differences; - - int maxDifferences = std::numeric_limits::max(); - - bool res = base->stateMap().compare( - desired->stateMap(), differences, maxDifferences); - if (!BEAST_EXPECT(res)) - return false; - - size_t idx = 0; - for (auto& [k, v] : differences) - { - if (!BEAST_EXPECT( - k == - uint256::fromVoid( - reply.ledger_objects().objects(idx).key().data()))) - return false; - if (v.second) - { - if (!BEAST_EXPECT( - v.second->slice() == - makeSlice( - reply.ledger_objects().objects(idx).data()))) - return false; - } - - ++idx; - } - return true; - }; - - // Adjacent ledgers - BEAST_EXPECT( - compareDiffs(env.closed()->seq() - 1, env.closed()->seq())); - - // Adjacent ledgers further in the past - BEAST_EXPECT( - compareDiffs(env.closed()->seq() - 3, env.closed()->seq() - 2)); - - // Non-adjacent ledgers - BEAST_EXPECT( - compareDiffs(env.closed()->seq() - 5, env.closed()->seq() - 1)); - - // Adjacent ledgers but in reverse order - BEAST_EXPECT( - compareDiffs(env.closed()->seq(), env.closed()->seq() - 1)); - - // Non-adjacent ledgers in reverse order - 
BEAST_EXPECT( - compareDiffs(env.closed()->seq() - 1, env.closed()->seq() - 5)); - } - - // gRPC stuff - class GrpcLedgerEntryClient : public GRPCTestClientBase - { - public: - org::xrpl::rpc::v1::GetLedgerEntryRequest request; - org::xrpl::rpc::v1::GetLedgerEntryResponse reply; - - explicit GrpcLedgerEntryClient(std::string const& port) - : GRPCTestClientBase(port) - { - } - - void - GetLedgerEntry() - { - status = stub_->GetLedgerEntry(&context, request, &reply); - } - }; - - void - testGetLedgerEntry() - { - testcase("GetLedgerDiff"); - using namespace test::jtx; - std::unique_ptr config = envconfig(addGrpcConfig); - std::string grpcPort = - *(*config)[SECTION_PORT_GRPC].get("port"); - Env env(*this, std::move(config)); - - auto grpcLedgerEntry = [&grpcPort](auto sequence, auto key) { - GrpcLedgerEntryClient grpcClient{grpcPort}; - - grpcClient.request.mutable_ledger()->set_sequence(sequence); - grpcClient.request.set_key(key.data(), key.size()); - - grpcClient.GetLedgerEntry(); - return std::make_pair(grpcClient.status, grpcClient.reply); - }; - - Account const alice{"alice"}; - env.fund(XRP(1000), alice); - env.close(); - - for (auto& sle : env.closed()->sles) - { - auto [status, reply] = - grpcLedgerEntry(env.closed()->seq(), sle->key()); - - BEAST_EXPECT(status.ok()); - - BEAST_EXPECT( - uint256::fromVoid(reply.ledger_object().key().data()) == - sle->key()); - BEAST_EXPECT( - makeSlice(reply.ledger_object().data()) == - sle->getSerializer().slice()); - } - } - - void - testNeedCurrentOrClosed() - { - testcase("NeedCurrentOrClosed"); - - { - org::xrpl::rpc::v1::GetLedgerRequest request; - request.mutable_ledger()->set_sequence(1); - BEAST_EXPECT(!needCurrentOrClosed(request)); - request.mutable_ledger()->set_hash(""); - BEAST_EXPECT(!needCurrentOrClosed(request)); - request.mutable_ledger()->set_shortcut( - org::xrpl::rpc::v1::LedgerSpecifier::SHORTCUT_VALIDATED); - BEAST_EXPECT(!needCurrentOrClosed(request)); - request.mutable_ledger()->set_shortcut( - org::xrpl::rpc::v1::LedgerSpecifier::SHORTCUT_UNSPECIFIED); - BEAST_EXPECT(!needCurrentOrClosed(request)); - request.mutable_ledger()->set_shortcut( - org::xrpl::rpc::v1::LedgerSpecifier::SHORTCUT_CURRENT); - BEAST_EXPECT(needCurrentOrClosed(request)); - request.mutable_ledger()->set_shortcut( - org::xrpl::rpc::v1::LedgerSpecifier::SHORTCUT_CLOSED); - BEAST_EXPECT(needCurrentOrClosed(request)); - } - - { - org::xrpl::rpc::v1::GetLedgerDataRequest request; - request.mutable_ledger()->set_sequence(1); - BEAST_EXPECT(!needCurrentOrClosed(request)); - request.mutable_ledger()->set_hash(""); - BEAST_EXPECT(!needCurrentOrClosed(request)); - request.mutable_ledger()->set_shortcut( - org::xrpl::rpc::v1::LedgerSpecifier::SHORTCUT_VALIDATED); - BEAST_EXPECT(!needCurrentOrClosed(request)); - request.mutable_ledger()->set_shortcut( - org::xrpl::rpc::v1::LedgerSpecifier::SHORTCUT_UNSPECIFIED); - BEAST_EXPECT(!needCurrentOrClosed(request)); - request.mutable_ledger()->set_shortcut( - org::xrpl::rpc::v1::LedgerSpecifier::SHORTCUT_CURRENT); - BEAST_EXPECT(needCurrentOrClosed(request)); - request.mutable_ledger()->set_shortcut( - org::xrpl::rpc::v1::LedgerSpecifier::SHORTCUT_CLOSED); - BEAST_EXPECT(needCurrentOrClosed(request)); - } - - { - org::xrpl::rpc::v1::GetLedgerEntryRequest request; - request.mutable_ledger()->set_sequence(1); - BEAST_EXPECT(!needCurrentOrClosed(request)); - request.mutable_ledger()->set_hash(""); - BEAST_EXPECT(!needCurrentOrClosed(request)); - request.mutable_ledger()->set_shortcut( - 
org::xrpl::rpc::v1::LedgerSpecifier::SHORTCUT_VALIDATED); - BEAST_EXPECT(!needCurrentOrClosed(request)); - request.mutable_ledger()->set_shortcut( - org::xrpl::rpc::v1::LedgerSpecifier::SHORTCUT_UNSPECIFIED); - BEAST_EXPECT(!needCurrentOrClosed(request)); - request.mutable_ledger()->set_shortcut( - org::xrpl::rpc::v1::LedgerSpecifier::SHORTCUT_CURRENT); - BEAST_EXPECT(needCurrentOrClosed(request)); - request.mutable_ledger()->set_shortcut( - org::xrpl::rpc::v1::LedgerSpecifier::SHORTCUT_CLOSED); - BEAST_EXPECT(needCurrentOrClosed(request)); - } - - { - org::xrpl::rpc::v1::GetLedgerDiffRequest request; - - // set desired ledger, so desired ledger does not need current or - // closed - request.mutable_base_ledger()->set_shortcut( - org::xrpl::rpc::v1::LedgerSpecifier::SHORTCUT_VALIDATED); - - request.mutable_base_ledger()->set_sequence(1); - BEAST_EXPECT(!needCurrentOrClosed(request)); - request.mutable_base_ledger()->set_hash(""); - BEAST_EXPECT(!needCurrentOrClosed(request)); - request.mutable_base_ledger()->set_shortcut( - org::xrpl::rpc::v1::LedgerSpecifier::SHORTCUT_VALIDATED); - BEAST_EXPECT(!needCurrentOrClosed(request)); - request.mutable_base_ledger()->set_shortcut( - org::xrpl::rpc::v1::LedgerSpecifier::SHORTCUT_UNSPECIFIED); - BEAST_EXPECT(!needCurrentOrClosed(request)); - request.mutable_base_ledger()->set_shortcut( - org::xrpl::rpc::v1::LedgerSpecifier::SHORTCUT_CURRENT); - BEAST_EXPECT(needCurrentOrClosed(request)); - request.mutable_base_ledger()->set_shortcut( - org::xrpl::rpc::v1::LedgerSpecifier::SHORTCUT_CLOSED); - BEAST_EXPECT(needCurrentOrClosed(request)); - - // reset base ledger, so base ledger doesn't need current or closed - request.mutable_base_ledger()->set_shortcut( - org::xrpl::rpc::v1::LedgerSpecifier::SHORTCUT_VALIDATED); - - request.mutable_desired_ledger()->set_sequence(1); - BEAST_EXPECT(!needCurrentOrClosed(request)); - request.mutable_desired_ledger()->set_hash(""); - BEAST_EXPECT(!needCurrentOrClosed(request)); - request.mutable_desired_ledger()->set_shortcut( - org::xrpl::rpc::v1::LedgerSpecifier::SHORTCUT_VALIDATED); - BEAST_EXPECT(!needCurrentOrClosed(request)); - request.mutable_desired_ledger()->set_shortcut( - org::xrpl::rpc::v1::LedgerSpecifier::SHORTCUT_UNSPECIFIED); - BEAST_EXPECT(!needCurrentOrClosed(request)); - request.mutable_desired_ledger()->set_shortcut( - org::xrpl::rpc::v1::LedgerSpecifier::SHORTCUT_CURRENT); - BEAST_EXPECT(needCurrentOrClosed(request)); - request.mutable_desired_ledger()->set_shortcut( - org::xrpl::rpc::v1::LedgerSpecifier::SHORTCUT_CLOSED); - BEAST_EXPECT(needCurrentOrClosed(request)); - - // both base and desired need current or closed - request.mutable_base_ledger()->set_shortcut( - org::xrpl::rpc::v1::LedgerSpecifier::SHORTCUT_CURRENT); - BEAST_EXPECT(needCurrentOrClosed(request)); - } - } - - void - testSecureGateway() - { - testcase("SecureGateway"); - using namespace test::jtx; - { - std::unique_ptr config = envconfig( - addGrpcConfigWithSecureGateway, getEnvLocalhostAddr()); - std::string grpcPort = - *(*config)[SECTION_PORT_GRPC].get("port"); - Env env(*this, std::move(config)); - - env.close(); - - auto ledger = env.app().getLedgerMaster().getLedgerBySeq(3); - - BEAST_EXPECT(env.current()->info().seq == 4); - - auto grpcLedger = [&grpcPort]( - auto sequence, - std::string const& clientIp, - std::string const& user) { - GrpcLedgerClient grpcClient{grpcPort}; - - grpcClient.request.mutable_ledger()->set_sequence(sequence); - grpcClient.request.set_client_ip(clientIp); - grpcClient.request.set_user(user); - - 
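// Aside: the checks that follow encode the secure-gateway rule the
// server applies: a caller is "unlimited" only when it names a
// privileged user, connects from a configured secure_gateway address,
// and does not itself forward a client_ip (a non-empty client_ip marks
// the request as proxied on someone else's behalf). As a predicate,
// with illustrative names rather than the rippled signature:
auto const isUnlimited =
    [](std::string const& user,
       std::string const& peerIp,
       std::string const& forwardedClientIp,
       std::set<std::string> const& secureGatewayIPs) {
        return !user.empty() && forwardedClientIp.empty() &&
            secureGatewayIPs.count(peerIp) != 0;
    };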
grpcClient.GetLedger(); - return std::make_pair(grpcClient.status, grpcClient.reply); - }; - - { - auto [status, reply] = - grpcLedger(env.current()->info().seq, "", ""); - BEAST_EXPECT(!reply.is_unlimited()); - BEAST_EXPECT(status.ok()); - } - { - auto [status, reply] = - grpcLedger(env.current()->info().seq, "", "ETL"); - BEAST_EXPECT(reply.is_unlimited()); - BEAST_EXPECT(status.ok()); - } - { - auto [status, reply] = - grpcLedger(env.current()->info().seq, "", "Reporting"); - BEAST_EXPECT(reply.is_unlimited()); - BEAST_EXPECT(status.ok()); - } - { - auto [status, reply] = - grpcLedger(env.current()->info().seq, "127.0.0.1", "ETL"); - BEAST_EXPECT(!reply.is_unlimited()); - BEAST_EXPECT(status.ok()); - } - { - auto [status, reply] = - grpcLedger(env.current()->info().seq, "127.0.0.1", ""); - BEAST_EXPECT(!reply.is_unlimited()); - BEAST_EXPECT(status.ok()); - } - } - - { - std::string secureGatewayIp = "44.124.234.79"; - std::unique_ptr config = - envconfig(addGrpcConfigWithSecureGateway, secureGatewayIp); - std::string grpcPort = - *(*config)[SECTION_PORT_GRPC].get("port"); - Env env(*this, std::move(config)); - - env.close(); - - auto ledger = env.app().getLedgerMaster().getLedgerBySeq(3); - - BEAST_EXPECT(env.current()->info().seq == 4); - - auto grpcLedger = [&grpcPort]( - auto sequence, - std::string const& clientIp, - std::string const& user) { - GrpcLedgerClient grpcClient{grpcPort}; - - grpcClient.request.mutable_ledger()->set_sequence(sequence); - grpcClient.request.set_client_ip(clientIp); - grpcClient.request.set_user(user); - - grpcClient.GetLedger(); - return std::make_pair(grpcClient.status, grpcClient.reply); - }; - - { - auto [status, reply] = - grpcLedger(env.current()->info().seq, "", ""); - BEAST_EXPECT(!reply.is_unlimited()); - BEAST_EXPECT(status.ok()); - } - { - auto [status, reply] = - grpcLedger(env.current()->info().seq, "", "ETL"); - BEAST_EXPECT(!reply.is_unlimited()); - BEAST_EXPECT(status.ok()); - } - { - auto [status, reply] = grpcLedger( - env.current()->info().seq, secureGatewayIp, "ETL"); - BEAST_EXPECT(!reply.is_unlimited()); - BEAST_EXPECT(status.ok()); - } - { - auto [status, reply] = - grpcLedger(env.current()->info().seq, secureGatewayIp, ""); - BEAST_EXPECT(!reply.is_unlimited()); - BEAST_EXPECT(status.ok()); - } - } - - { - std::unique_ptr config = envconfig( - addGrpcConfigWithSecureGateway, getEnvLocalhostAddr()); - std::string grpcPort = - *(*config)[SECTION_PORT_GRPC].get("port"); - Env env(*this, std::move(config)); - - env.close(); - - auto ledger = env.app().getLedgerMaster().getLedgerBySeq(3); - - BEAST_EXPECT(env.current()->info().seq == 4); - auto grpcLedgerData = [&grpcPort]( - auto sequence, - std::string const& clientIp, - std::string const& user) { - GrpcLedgerDataClient grpcClient{grpcPort}; - - grpcClient.request.mutable_ledger()->set_sequence(sequence); - grpcClient.request.set_client_ip(clientIp); - grpcClient.request.set_user(user); - - grpcClient.GetLedgerData(); - return std::make_pair(grpcClient.status, grpcClient.reply); - }; - { - auto [status, reply] = - grpcLedgerData(env.current()->info().seq, "", ""); - BEAST_EXPECT(!reply.is_unlimited()); - BEAST_EXPECT(status.ok()); - } - { - auto [status, reply] = - grpcLedgerData(env.current()->info().seq, "", "ETL"); - BEAST_EXPECT(reply.is_unlimited()); - BEAST_EXPECT(status.ok()); - } - { - auto [status, reply] = - grpcLedgerData(env.current()->info().seq, "", "Reporting"); - BEAST_EXPECT(reply.is_unlimited()); - BEAST_EXPECT(status.ok()); - } - { - auto [status, reply] = 
grpcLedgerData( - env.current()->info().seq, "127.0.0.1", "ETL"); - BEAST_EXPECT(!reply.is_unlimited()); - BEAST_EXPECT(status.ok()); - } - { - auto [status, reply] = - grpcLedgerData(env.current()->info().seq, "127.0.0.1", ""); - BEAST_EXPECT(!reply.is_unlimited()); - BEAST_EXPECT(status.ok()); - } - } - { - std::string secureGatewayIp = "44.124.234.79"; - std::unique_ptr config = - envconfig(addGrpcConfigWithSecureGateway, secureGatewayIp); - std::string grpcPort = - *(*config)[SECTION_PORT_GRPC].get("port"); - Env env(*this, std::move(config)); - - env.close(); - - auto ledger = env.app().getLedgerMaster().getLedgerBySeq(3); - - BEAST_EXPECT(env.current()->info().seq == 4); - - auto grpcLedgerData = [&grpcPort]( - auto sequence, - std::string const& clientIp, - std::string const& user) { - GrpcLedgerDataClient grpcClient{grpcPort}; - - grpcClient.request.mutable_ledger()->set_sequence(sequence); - grpcClient.request.set_client_ip(clientIp); - grpcClient.request.set_user(user); - - grpcClient.GetLedgerData(); - return std::make_pair(grpcClient.status, grpcClient.reply); - }; - - { - auto [status, reply] = - grpcLedgerData(env.current()->info().seq, "", ""); - BEAST_EXPECT(!reply.is_unlimited()); - BEAST_EXPECT(status.ok()); - } - { - auto [status, reply] = - grpcLedgerData(env.current()->info().seq, "", "ETL"); - BEAST_EXPECT(!reply.is_unlimited()); - BEAST_EXPECT(status.ok()); - } - { - auto [status, reply] = grpcLedgerData( - env.current()->info().seq, secureGatewayIp, "ETL"); - BEAST_EXPECT(!reply.is_unlimited()); - BEAST_EXPECT(status.ok()); - } - { - auto [status, reply] = grpcLedgerData( - env.current()->info().seq, secureGatewayIp, ""); - BEAST_EXPECT(!reply.is_unlimited()); - BEAST_EXPECT(status.ok()); - } - } - } - -public: - void - run() override - { - testGetLedger(); - - testGetLedgerData(); - - testGetLedgerDiff(); - - testGetLedgerEntry(); - - testNeedCurrentOrClosed(); - - testSecureGateway(); - } -}; - -BEAST_DEFINE_TESTSUITE_PRIO(ReportingETL, app, ripple, 2); - -} // namespace test -} // namespace ripple diff --git a/src/xrpld/app/ledger/AcceptedLedger.cpp b/src/xrpld/app/ledger/AcceptedLedger.cpp index 37c943679da..a82323f6286 100644 --- a/src/xrpld/app/ledger/AcceptedLedger.cpp +++ b/src/xrpld/app/ledger/AcceptedLedger.cpp @@ -36,17 +36,8 @@ AcceptedLedger::AcceptedLedger( ledger, item.first, item.second)); }; - if (app.config().reporting()) - { - auto const txs = flatFetchTransactions(*ledger, app); - transactions_.reserve(txs.size()); - insertAll(txs); - } - else - { - transactions_.reserve(256); - insertAll(ledger->txs); - } + transactions_.reserve(256); + insertAll(ledger->txs); std::sort( transactions_.begin(), diff --git a/src/xrpld/app/ledger/Ledger.cpp b/src/xrpld/app/ledger/Ledger.cpp index bcd3b6d4ba7..4991b551cd1 100644 --- a/src/xrpld/app/ledger/Ledger.cpp +++ b/src/xrpld/app/ledger/Ledger.cpp @@ -29,12 +29,10 @@ #include #include #include -#include #include #include #include #include -#include #include #include #include @@ -258,11 +256,6 @@ Ledger::Ledger( if (info_.txHash.isNonZero() && !txMap_.fetchRoot(SHAMapHash{info_.txHash}, nullptr)) { - if (config.reporting()) - { - // Reporting should never have incomplete data - Throw("Missing tx map root for ledger"); - } loaded = false; JLOG(j.warn()) << "Don't have transaction root for ledger" << info_.seq; } @@ -270,11 +263,6 @@ Ledger::Ledger( if (info_.accountHash.isNonZero() && !stateMap_.fetchRoot(SHAMapHash{info_.accountHash}, nullptr)) { - if (config.reporting()) - { - // Reporting should never have 
incomplete data - Throw("Missing state map root for ledger"); - } loaded = false; JLOG(j.warn()) << "Don't have state data root for ledger" << info_.seq; } @@ -289,7 +277,7 @@ Ledger::Ledger( if (!loaded) { info_.hash = calculateLedgerHash(info_); - if (acquire && !config.reporting()) + if (acquire) family.missingNodeAcquireByHash(info_.hash, info_.seq); } } @@ -1146,92 +1134,4 @@ loadByHash(uint256 const& ledgerHash, Application& app, bool acquire) return {}; } -std::vector< - std::pair, std::shared_ptr>> -flatFetchTransactions(Application& app, std::vector& nodestoreHashes) -{ - if (!app.config().reporting()) - { - assert(false); - Throw( - "flatFetchTransactions: not running in reporting mode"); - } - - std::vector< - std::pair, std::shared_ptr>> - txns; - auto start = std::chrono::system_clock::now(); - auto nodeDb = - dynamic_cast(&(app.getNodeStore())); - if (!nodeDb) - { - assert(false); - Throw( - "Called flatFetchTransactions but database is not DatabaseNodeImp"); - } - auto objs = nodeDb->fetchBatch(nodestoreHashes); - - auto end = std::chrono::system_clock::now(); - JLOG(app.journal("Ledger").debug()) - << " Flat fetch time : " << ((end - start).count() / 1000000000.0) - << " number of transactions " << nodestoreHashes.size(); - assert(objs.size() == nodestoreHashes.size()); - for (size_t i = 0; i < objs.size(); ++i) - { - uint256& nodestoreHash = nodestoreHashes[i]; - auto& obj = objs[i]; - if (obj) - { - auto node = SHAMapTreeNode::makeFromPrefix( - makeSlice(obj->getData()), SHAMapHash{nodestoreHash}); - if (!node) - { - assert(false); - Throw( - "flatFetchTransactions : Error making SHAMap node"); - } - auto item = (static_cast(node.get()))->peekItem(); - if (!item) - { - assert(false); - Throw( - "flatFetchTransactions : Error reading SHAMap node"); - } - auto txnPlusMeta = deserializeTxPlusMeta(*item); - if (!txnPlusMeta.first || !txnPlusMeta.second) - { - assert(false); - Throw( - "flatFetchTransactions : Error deserializing SHAMap node"); - } - txns.push_back(std::move(txnPlusMeta)); - } - else - { - assert(false); - Throw( - "flatFetchTransactions : Containing SHAMap node not found"); - } - } - return txns; -} -std::vector< - std::pair, std::shared_ptr>> -flatFetchTransactions(ReadView const& ledger, Application& app) -{ - if (!app.config().reporting()) - { - assert(false); - return {}; - } - - auto const db = - dynamic_cast(&app.getRelationalDatabase()); - if (!db) - Throw("Failed to get relational database"); - - auto nodestoreHashes = db->getTxHashes(ledger.info().seq); - - return flatFetchTransactions(app, nodestoreHashes); -} } // namespace ripple diff --git a/src/xrpld/app/ledger/Ledger.h b/src/xrpld/app/ledger/Ledger.h index 1591fae1472..0eb102eb518 100644 --- a/src/xrpld/app/ledger/Ledger.h +++ b/src/xrpld/app/ledger/Ledger.h @@ -454,32 +454,6 @@ loadByHash(uint256 const& ledgerHash, Application& app, bool acquire = true); extern std::tuple, std::uint32_t, uint256> getLatestLedger(Application& app); -// *** Reporting Mode Only *** -// Fetch all of the transactions contained in ledger from the nodestore. -// The transactions are fetched directly as a batch, instead of traversing the -// transaction SHAMap. Fetching directly is significantly faster than -// traversing, as there are less database reads, and all of the reads can -// executed concurrently. This function only works in reporting mode. 
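// (Aside: schematically, the batch strategy this comment describes is
// to gather the nodestore hashes up front and issue one batched read,
// instead of a per-node SHAMap walk. Sketch only; `db`, `nodeDb` and
// the deserialization step stand in for the reporting-mode plumbing
// this patch deletes:
//
//     std::vector<uint256> hashes = db->getTxHashes(ledgerSeq);
//     auto objs = nodeDb->fetchBatch(hashes);  // one concurrent batch
//     for (auto const& obj : objs)
//         if (obj)
//             txns.push_back(deserializeTxPlusMeta(/* SHAMap item */));
// )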
-// @param ledger the ledger for which to fetch the contained transactions -// @param app reference to the Application -// @return vector of (transaction, metadata) pairs -extern std::vector< - std::pair, std::shared_ptr>> -flatFetchTransactions(ReadView const& ledger, Application& app); - -// *** Reporting Mode Only *** -// For each nodestore hash, fetch the transaction. -// The transactions are fetched directly as a batch, instead of traversing the -// transaction SHAMap. Fetching directly is significantly faster than -// traversing, as there are less database reads, and all of the reads can -// executed concurrently. This function only works in reporting mode. -// @param nodestoreHashes hashes of the transactions to fetch -// @param app reference to the Application -// @return vector of (transaction, metadata) pairs -extern std::vector< - std::pair, std::shared_ptr>> -flatFetchTransactions(Application& app, std::vector& nodestoreHashes); - /** Deserialize a SHAMapItem containing a single STTx Throw: diff --git a/src/xrpld/app/ledger/LedgerMaster.h b/src/xrpld/app/ledger/LedgerMaster.h index 5149424e285..dd7f0b6a614 100644 --- a/src/xrpld/app/ledger/LedgerMaster.h +++ b/src/xrpld/app/ledger/LedgerMaster.h @@ -46,24 +46,6 @@ namespace ripple { class Peer; class Transaction; -// This error is thrown when a codepath tries to access the open or closed -// ledger while the server is running in reporting mode. Any RPCs that request -// the open or closed ledger should be forwarded to a p2p node. Usually, the -// decision to forward is made based on the required condition of the handler, -// or which ledger is specified. However, there are some codepaths which are not -// covered by the aforementioned logic (though they probably should), so this -// error is thrown in case a codepath falls through the cracks. -class ReportingShouldProxy : public std::runtime_error -{ -public: - ReportingShouldProxy() - : std::runtime_error( - "Reporting mode has no open or closed ledger. 
Proxy this " - "request") - { - } -}; - // Tracks the current ledger and any ledgers in the process of closing // Tracks ledger history // Tracks held transactions @@ -97,10 +79,6 @@ class LedgerMaster : public AbstractFetchPackContainer std::shared_ptr getClosedLedger() { - if (app_.config().reporting()) - { - Throw(); - } return mClosedLedger.get(); } diff --git a/src/xrpld/app/ledger/detail/LedgerMaster.cpp b/src/xrpld/app/ledger/detail/LedgerMaster.cpp index dab8f838249..d1eeabeb619 100644 --- a/src/xrpld/app/ledger/detail/LedgerMaster.cpp +++ b/src/xrpld/app/ledger/detail/LedgerMaster.cpp @@ -34,10 +34,9 @@ #include #include #include -#include +#include #include #include -#include #include #include #include @@ -274,12 +273,6 @@ LedgerMaster::getValidatedLedgerAge() { using namespace std::chrono_literals; -#ifdef RIPPLED_REPORTING - if (app_.config().reporting()) - return static_cast(&app_.getRelationalDatabase()) - ->getValidatedLedgerAge(); -#endif - std::chrono::seconds valClose{mValidLedgerSign.load()}; if (valClose == 0s) { @@ -305,12 +298,6 @@ LedgerMaster::isCaughtUp(std::string& reason) { using namespace std::chrono_literals; -#ifdef RIPPLED_REPORTING - if (app_.config().reporting()) - return static_cast(&app_.getRelationalDatabase()) - ->isCaughtUp(reason); -#endif - if (getPublishedLedgerAge() > 3min) { reason = "No recently-published ledger"; @@ -600,9 +587,6 @@ LedgerMaster::clearLedger(std::uint32_t seq) bool LedgerMaster::isValidated(ReadView const& ledger) { - if (app_.config().reporting()) - return true; // Reporting mode only supports validated ledger - if (ledger.open()) return false; @@ -676,32 +660,6 @@ LedgerMaster::getFullValidatedRange( bool LedgerMaster::getValidatedRange(std::uint32_t& minVal, std::uint32_t& maxVal) { - if (app_.config().reporting()) - { - std::string res = getCompleteLedgers(); - try - { - if (res == "empty" || res == "error" || res.empty()) - return false; - else if (size_t delim = res.find('-'); delim != std::string::npos) - { - minVal = std::stol(res.substr(0, delim)); - maxVal = std::stol(res.substr(delim + 1)); - } - else - { - minVal = maxVal = std::stol(res); - } - return true; - } - catch (std::exception const& e) - { - JLOG(m_journal.error()) << "LedgerMaster::getValidatedRange: " - "exception parsing complete ledgers: " - << e.what(); - return false; - } - } if (!getFullValidatedRange(minVal, maxVal)) return false; @@ -1679,25 +1637,12 @@ LedgerMaster::peekMutex() std::shared_ptr LedgerMaster::getCurrentLedger() { - if (app_.config().reporting()) - { - Throw(); - } return app_.openLedger().current(); } std::shared_ptr LedgerMaster::getValidatedLedger() { -#ifdef RIPPLED_REPORTING - if (app_.config().reporting()) - { - auto seq = app_.getRelationalDatabase().getMaxLedgerSeq(); - if (!seq) - return {}; - return getLedgerBySeq(*seq); - } -#endif return mValidLedger.get(); } @@ -1726,11 +1671,6 @@ LedgerMaster::getPublishedLedger() std::string LedgerMaster::getCompleteLedgers() { -#ifdef RIPPLED_REPORTING - if (app_.config().reporting()) - return static_cast(&app_.getRelationalDatabase()) - ->getCompleteLedgers(); -#endif std::lock_guard sl(mCompleteLock); return to_string(mCompleteLedgers); } diff --git a/src/xrpld/app/ledger/detail/LedgerToJson.cpp b/src/xrpld/app/ledger/detail/LedgerToJson.cpp index 95b572e9736..9824b31d794 100644 --- a/src/xrpld/app/ledger/detail/LedgerToJson.cpp +++ b/src/xrpld/app/ledger/detail/LedgerToJson.cpp @@ -22,7 +22,6 @@ #include #include #include -#include #include #include #include @@ -232,14 +231,7 @@ 
fillJsonTx(Object& json, LedgerFill const& fill) } }; - if (fill.context && fill.context->app.config().reporting()) - { - appendAll(flatFetchTransactions(fill.ledger, fill.context->app)); - } - else - { - appendAll(fill.ledger.txs); - } + appendAll(fill.ledger.txs); } catch (std::exception const& ex) { diff --git a/src/xrpld/app/main/Application.cpp b/src/xrpld/app/main/Application.cpp index f3308a091dc..d234f539909 100644 --- a/src/xrpld/app/main/Application.cpp +++ b/src/xrpld/app/main/Application.cpp @@ -45,9 +45,8 @@ #include #include #include +#include #include -#include -#include #include #include #include @@ -236,7 +235,6 @@ class ApplicationImp : public Application, public BasicApp io_latency_sampler m_io_latency_sampler; std::unique_ptr grpcServer_; - std::unique_ptr reportingETL_; //-------------------------------------------------------------------------- @@ -296,8 +294,7 @@ class ApplicationImp : public Application, public BasicApp , m_jobQueue(std::make_unique( [](std::unique_ptr const& config) { - if (config->standalone() && !config->reporting() && - !config->FORCE_MULTI_THREAD) + if (config->standalone() && !config->FORCE_MULTI_THREAD) return 1; if (config->WORKERS) @@ -475,9 +472,6 @@ class ApplicationImp : public Application, public BasicApp std::chrono::milliseconds(100), get_io_service()) , grpcServer_(std::make_unique(*this)) - , reportingETL_( - config_->reporting() ? std::make_unique(*this) - : nullptr) { initAccountIdCache(config_->getValueFor(SizedItem::accountIdCacheSize)); @@ -786,16 +780,12 @@ class ApplicationImp : public Application, public BasicApp OpenLedger& openLedger() override { - if (config_->reporting()) - Throw(); return *openLedger_; } OpenLedger const& openLedger() const override { - if (config_->reporting()) - Throw(); return *openLedger_; } @@ -827,13 +817,6 @@ class ApplicationImp : public Application, public BasicApp return *mWalletDB; } - ReportingETL& - getReportingETL() override - { - assert(reportingETL_.get() != nullptr); - return *reportingETL_; - } - bool serverOkay(std::string& reason) override; @@ -1129,11 +1112,6 @@ class ApplicationImp : public Application, public BasicApp << "; size after: " << cachedSLEs_.size(); } -#ifdef RIPPLED_REPORTING - if (auto pg = dynamic_cast(&*mRelationalDatabase)) - pg->sweep(); -#endif - // Set timer to do another sweep later. 
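// (Aside: sweep() re-arms itself here rather than running off a
// periodic timer, so a slow pass can never overlap the next one. A
// sketch of the re-arm step, with illustrative names for the timer and
// interval; only the pattern is asserted, not the exact signatures:
//
//     void ApplicationImp::setSweepTimer()
//     {
//         sweepTimer_.expires_after(sweepInterval_);
//         sweepTimer_.async_wait([this](boost::system::error_code ec) {
//             if (!ec)
//                 m_jobQueue->addJob(
//                     jtSWEEP, "sweep", [this]() { doSweep(); });
//         });
//     }
// )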
setSweepTimer(); } @@ -1275,53 +1253,50 @@ ApplicationImp::setup(boost::program_options::variables_map const& cmdline) auto const startUp = config_->START_UP; JLOG(m_journal.debug()) << "startUp: " << startUp; - if (!config_->reporting()) + if (startUp == Config::FRESH) { - if (startUp == Config::FRESH) - { - JLOG(m_journal.info()) << "Starting new Ledger"; + JLOG(m_journal.info()) << "Starting new Ledger"; - startGenesisLedger(); - } - else if ( - startUp == Config::LOAD || startUp == Config::LOAD_FILE || - startUp == Config::REPLAY) - { - JLOG(m_journal.info()) << "Loading specified Ledger"; + startGenesisLedger(); + } + else if ( + startUp == Config::LOAD || startUp == Config::LOAD_FILE || + startUp == Config::REPLAY) + { + JLOG(m_journal.info()) << "Loading specified Ledger"; - if (!loadOldLedger( - config_->START_LEDGER, - startUp == Config::REPLAY, - startUp == Config::LOAD_FILE, - config_->TRAP_TX_HASH)) + if (!loadOldLedger( + config_->START_LEDGER, + startUp == Config::REPLAY, + startUp == Config::LOAD_FILE, + config_->TRAP_TX_HASH)) + { + JLOG(m_journal.error()) + << "The specified ledger could not be loaded."; + if (config_->FAST_LOAD) { - JLOG(m_journal.error()) - << "The specified ledger could not be loaded."; - if (config_->FAST_LOAD) - { - // Fall back to syncing from the network, such as - // when there's no existing data. - startGenesisLedger(); - } - else - { - return false; - } + // Fall back to syncing from the network, such as + // when there's no existing data. + startGenesisLedger(); + } + else + { + return false; } } - else if (startUp == Config::NETWORK) - { - // This should probably become the default once we have a stable - // network. - if (!config_->standalone()) - m_networkOPs->setNeedNetworkLedger(); + } + else if (startUp == Config::NETWORK) + { + // This should probably become the default once we have a stable + // network. + if (!config_->standalone()) + m_networkOPs->setNeedNetworkLedger(); - startGenesisLedger(); - } - else - { - startGenesisLedger(); - } + startGenesisLedger(); + } + else + { + startGenesisLedger(); } if (auto const& forcedRange = config().FORCED_LEDGER_RANGE_PRESENT) @@ -1330,8 +1305,7 @@ ApplicationImp::setup(boost::program_options::variables_map const& cmdline) forcedRange->first, forcedRange->second); } - if (!config().reporting()) - m_orderBookDB.setup(getLedgerMaster().getCurrentLedger()); + m_orderBookDB.setup(getLedgerMaster().getCurrentLedger()); nodeIdentity_ = getNodeIdentity(*this, cmdline); @@ -1341,60 +1315,55 @@ ApplicationImp::setup(boost::program_options::variables_map const& cmdline) return false; } - if (!config().reporting()) { - { - if (validatorKeys_.configInvalid()) - return false; - - if (!validatorManifests_->load( - getWalletDB(), - "ValidatorManifests", - validatorKeys_.manifest, - config() - .section(SECTION_VALIDATOR_KEY_REVOCATION) - .values())) - { - JLOG(m_journal.fatal()) - << "Invalid configured validator manifest."; - return false; - } + if (validatorKeys_.configInvalid()) + return false; - publisherManifests_->load(getWalletDB(), "PublisherManifests"); + if (!validatorManifests_->load( + getWalletDB(), + "ValidatorManifests", + validatorKeys_.manifest, + config().section(SECTION_VALIDATOR_KEY_REVOCATION).values())) + { + JLOG(m_journal.fatal()) << "Invalid configured validator manifest."; + return false; + } - // It is possible to have a valid ValidatorKeys object without - // setting the signingKey or masterKey. 
This occurs if the - // configuration file does not have either - // SECTION_VALIDATOR_TOKEN or SECTION_VALIDATION_SEED section. + publisherManifests_->load(getWalletDB(), "PublisherManifests"); - // masterKey for the configuration-file specified validator keys - std::optional localSigningKey; - if (validatorKeys_.keys) - localSigningKey = validatorKeys_.keys->publicKey; + // It is possible to have a valid ValidatorKeys object without + // setting the signingKey or masterKey. This occurs if the + // configuration file does not have either + // SECTION_VALIDATOR_TOKEN or SECTION_VALIDATION_SEED section. - // Setup trusted validators - if (!validators_->load( - localSigningKey, - config().section(SECTION_VALIDATORS).values(), - config().section(SECTION_VALIDATOR_LIST_KEYS).values())) - { - JLOG(m_journal.fatal()) - << "Invalid entry in validator configuration."; - return false; - } - } + // masterKey for the configuration-file specified validator keys + std::optional localSigningKey; + if (validatorKeys_.keys) + localSigningKey = validatorKeys_.keys->publicKey; - if (!validatorSites_->load( - config().section(SECTION_VALIDATOR_LIST_SITES).values())) + // Setup trusted validators + if (!validators_->load( + localSigningKey, + config().section(SECTION_VALIDATORS).values(), + config().section(SECTION_VALIDATOR_LIST_KEYS).values())) { JLOG(m_journal.fatal()) - << "Invalid entry in [" << SECTION_VALIDATOR_LIST_SITES << "]"; + << "Invalid entry in validator configuration."; return false; } + } - // Tell the AmendmentTable who the trusted validators are. - m_amendmentTable->trustChanged(validators_->getQuorumKeys().second); + if (!validatorSites_->load( + config().section(SECTION_VALIDATOR_LIST_SITES).values())) + { + JLOG(m_journal.fatal()) + << "Invalid entry in [" << SECTION_VALIDATOR_LIST_SITES << "]"; + return false; } + + // Tell the AmendmentTable who the trusted validators are. 
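// (Aside: the call below unpacks to the following, shown expanded for
// clarity; getQuorumKeys() yields a {quorum, trusted keys} pair and
// only the key set matters to amendment voting:
//
//     auto const [quorum, trustedKeys] = validators_->getQuorumKeys();
//     m_amendmentTable->trustChanged(trustedKeys);
// )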
+ m_amendmentTable->trustChanged(validators_->getQuorumKeys().second); + //---------------------------------------------------------------------- // // Server @@ -1406,23 +1375,19 @@ ApplicationImp::setup(boost::program_options::variables_map const& cmdline) // move the instantiation inside a conditional: // // if (!config_.standalone()) - if (!config_->reporting()) - { - overlay_ = make_Overlay( - *this, - setup_Overlay(*config_), - *serverHandler_, - *m_resourceManager, - *m_resolver, - get_io_service(), - *config_, - m_collectorManager->collector()); - add(*overlay_); // add to PropertyStream - } + overlay_ = make_Overlay( + *this, + setup_Overlay(*config_), + *serverHandler_, + *m_resourceManager, + *m_resolver, + get_io_service(), + *config_, + m_collectorManager->collector()); + add(*overlay_); // add to PropertyStream // start first consensus round - if (!config_->reporting() && - !m_networkOPs->beginConsensus( + if (!m_networkOPs->beginConsensus( m_ledgerMaster->getClosedLedger()->info().hash)) { JLOG(m_journal.fatal()) << "Unable to start consensus"; @@ -1536,9 +1501,6 @@ ApplicationImp::setup(boost::program_options::variables_map const& cmdline) validatorSites_->start(); - if (reportingETL_) - reportingETL_->start(); - return true; } @@ -1654,10 +1616,6 @@ ApplicationImp::run() m_inboundTransactions->stop(); m_inboundLedgers->stop(); ledgerCleaner_->stop(); - if (reportingETL_) - reportingETL_->stop(); - if (auto pg = dynamic_cast(&*mRelationalDatabase)) - pg->stop(); m_nodeStore->stop(); perfLog_->stop(); diff --git a/src/xrpld/app/main/Application.h b/src/xrpld/app/main/Application.h index d4871317e73..8f2dd606ded 100644 --- a/src/xrpld/app/main/Application.h +++ b/src/xrpld/app/main/Application.h @@ -100,8 +100,6 @@ class RelationalDatabase; class DatabaseCon; class SHAMapStore; -class ReportingETL; - using NodeCache = TaggedCache; template @@ -253,9 +251,6 @@ class Application : public beast::PropertyStream::Source virtual std::chrono::milliseconds getIOLatency() = 0; - virtual ReportingETL& - getReportingETL() = 0; - virtual bool serverOkay(std::string& reason) = 0; diff --git a/src/xrpld/app/main/GRPCServer.cpp b/src/xrpld/app/main/GRPCServer.cpp index 5d5a79db393..89c3d813caa 100644 --- a/src/xrpld/app/main/GRPCServer.cpp +++ b/src/xrpld/app/main/GRPCServer.cpp @@ -18,7 +18,6 @@ //============================================================================== #include -#include #include #include @@ -187,11 +186,6 @@ GRPCServerImpl::CallData::process( InfoSub::pointer(), apiVersion}, request_}; - if (shouldForwardToP2p(context, requiredCondition_)) - { - forwardToP2p(context); - return; - } // Make sure we can currently handle the rpc error_code_i conditionMetRes = @@ -207,18 +201,9 @@ GRPCServerImpl::CallData::process( } else { - try - { - std::pair result = - handler_(context); - setIsUnlimited(result.first, isUnlimited); - responder_.Finish(result.first, result.second, this); - } - catch (ReportingShouldProxy&) - { - forwardToP2p(context); - return; - } + std::pair result = handler_(context); + setIsUnlimited(result.first, isUnlimited); + responder_.Finish(result.first, result.second, this); } } } @@ -229,46 +214,6 @@ GRPCServerImpl::CallData::process( } } -template -void -GRPCServerImpl::CallData::forwardToP2p( - RPC::GRPCContext& context) -{ - if (auto descriptor = - Request::GetDescriptor()->FindFieldByName("client_ip")) - { - Request::GetReflection()->SetString(&request_, descriptor, ctx_.peer()); - JLOG(app_.journal("gRPCServer").debug()) - << "Set client_ip to " << 
ctx_.peer(); - } - else - { - assert(false); - Throw( - "Attempting to forward but no client_ip field in " - "protobuf message"); - } - auto stub = getP2pForwardingStub(context); - if (stub) - { - grpc::ClientContext clientContext; - Response response; - auto status = forward_(stub.get(), &clientContext, request_, &response); - responder_.Finish(response, status, this); - JLOG(app_.journal("gRPCServer").debug()) << "Forwarded request to tx"; - } - else - { - JLOG(app_.journal("gRPCServer").error()) - << "Failed to forward request to tx"; - grpc::Status status{ - grpc::StatusCode::INTERNAL, - "Attempted to act as proxy but failed " - "to create forwarding stub"}; - responder_.FinishWithError(status, this); - } -} - template bool GRPCServerImpl::CallData::isFinished() @@ -289,29 +234,10 @@ GRPCServerImpl::CallData::getRole(bool isUnlimited) { if (isUnlimited) return Role::IDENTIFIED; - else if (wasForwarded()) - return Role::PROXY; else return Role::USER; } -template -bool -GRPCServerImpl::CallData::wasForwarded() -{ - if (auto descriptor = - Request::GetDescriptor()->FindFieldByName("client_ip")) - { - std::string clientIp = - Request::GetReflection()->GetString(request_, descriptor); - if (!clientIp.empty()) - { - return true; - } - } - return false; -} - template std::optional GRPCServerImpl::CallData::getUser() @@ -338,35 +264,6 @@ GRPCServerImpl::CallData::getClientIpAddress() return {}; } -template -std::optional -GRPCServerImpl::CallData::getProxiedClientIpAddress() -{ - auto endpoint = getProxiedClientEndpoint(); - if (endpoint) - return endpoint->address(); - return {}; -} - -template -std::optional -GRPCServerImpl::CallData::getProxiedClientEndpoint() -{ - auto descriptor = Request::GetDescriptor()->FindFieldByName("client_ip"); - if (descriptor) - { - std::string clientIp = - Request::GetReflection()->GetString(request_, descriptor); - if (!clientIp.empty()) - { - JLOG(app_.journal("gRPCServer").debug()) - << "Got client_ip from request : " << clientIp; - return getEndpoint(clientIp); - } - } - return {}; -} - template std::optional GRPCServerImpl::CallData::getClientEndpoint() @@ -381,8 +278,7 @@ GRPCServerImpl::CallData::clientIsUnlimited() if (!getUser()) return false; auto clientIp = getClientIpAddress(); - auto proxiedIp = getProxiedClientIpAddress(); - if (clientIp && !proxiedIp) + if (clientIp) { for (auto& ip : secureGatewayIPs_) { @@ -414,11 +310,7 @@ Resource::Consumer GRPCServerImpl::CallData::getUsage() { auto endpoint = getClientEndpoint(); - auto proxiedEndpoint = getProxiedClientEndpoint(); - if (proxiedEndpoint) - return app_.getResourceManager().newInboundEndpoint( - beast::IP::from_asio(proxiedEndpoint.value())); - else if (endpoint) + if (endpoint) return app_.getResourceManager().newInboundEndpoint( beast::IP::from_asio(endpoint.value())); Throw("Failed to get client endpoint"); diff --git a/src/xrpld/app/main/Main.cpp b/src/xrpld/app/main/Main.cpp index 799911f63dd..154ac48763b 100644 --- a/src/xrpld/app/main/Main.cpp +++ b/src/xrpld/app/main/Main.cpp @@ -376,7 +376,6 @@ run(int argc, char** argv) "quorum", po::value(), "Override the minimum validation quorum.")( - "reportingReadOnly", "Run in read-only reporting mode")( "silent", "No output to the console after startup.")( "standalone,a", "Run with no peers.")("verbose,v", "Verbose logging.") @@ -401,9 +400,6 @@ run(int argc, char** argv) po::value(), "Trap a specific transaction during replay.")( "start", "Start from a fresh Ledger.")( - "startReporting", - po::value(), - "Start reporting from a fresh 
Ledger.")( "vacuum", "VACUUM the transaction db.")( "valid", "Consider the initial ledger a valid network ledger."); @@ -659,17 +655,6 @@ run(int argc, char** argv) config->START_UP = Config::FRESH; } - if (vm.count("startReporting")) - { - config->START_UP = Config::FRESH; - config->START_LEDGER = vm["startReporting"].as(); - } - - if (vm.count("reportingReadOnly")) - { - config->setReportingReadOnly(true); - } - if (vm.count("import")) config->doImport = true; diff --git a/src/xrpld/app/misc/NetworkOPs.cpp b/src/xrpld/app/misc/NetworkOPs.cpp index a7ee935f102..208aab05aa1 100644 --- a/src/xrpld/app/misc/NetworkOPs.cpp +++ b/src/xrpld/app/misc/NetworkOPs.cpp @@ -38,9 +38,7 @@ #include #include #include -#include #include -#include #include #include #include @@ -455,15 +453,6 @@ class NetworkOPsImp final : public NetworkOPs void pubValidation(std::shared_ptr const& val) override; - void - forwardValidation(Json::Value const& jvObj) override; - void - forwardManifest(Json::Value const& jvObj) override; - void - forwardProposedTransaction(Json::Value const& jvObj) override; - void - forwardProposedAccountTransaction(Json::Value const& jvObj) override; - //-------------------------------------------------------------------------- // // InfoSub::Source. @@ -2489,8 +2478,7 @@ NetworkOPsImp::getServerInfo(bool human, bool admin, bool counters) if (fp != 0) info[jss::fetch_pack] = Json::UInt(fp); - if (!app_.config().reporting()) - info[jss::peers] = Json::UInt(app_.overlay().size()); + info[jss::peers] = Json::UInt(app_.overlay().size()); Json::Value lastClose = Json::objectValue; lastClose[jss::proposers] = Json::UInt(mConsensus.prevProposers()); @@ -2513,85 +2501,80 @@ NetworkOPsImp::getServerInfo(bool human, bool admin, bool counters) if (admin) info[jss::load] = m_job_queue.getJson(); - if (!app_.config().reporting()) + if (auto const netid = app_.overlay().networkID()) + info[jss::network_id] = static_cast(*netid); + + auto const escalationMetrics = + app_.getTxQ().getMetrics(*app_.openLedger().current()); + + auto const loadFactorServer = app_.getFeeTrack().getLoadFactor(); + auto const loadBaseServer = app_.getFeeTrack().getLoadBase(); + /* Scale the escalated fee level to unitless "load factor". + In practice, this just strips the units, but it will continue + to work correctly if either base value ever changes. */ + auto const loadFactorFeeEscalation = + mulDiv( + escalationMetrics.openLedgerFeeLevel, + loadBaseServer, + escalationMetrics.referenceFeeLevel) + .value_or(ripple::muldiv_max); + + auto const loadFactor = std::max( + safe_cast(loadFactorServer), loadFactorFeeEscalation); + + if (!human) + { + info[jss::load_base] = loadBaseServer; + info[jss::load_factor] = trunc32(loadFactor); + info[jss::load_factor_server] = loadFactorServer; + + /* Json::Value doesn't support uint64, so clamp to max + uint32 value. This is mostly theoretical, since there + probably isn't enough extant XRP to drive the factor + that high. 
+ */ + info[jss::load_factor_fee_escalation] = + escalationMetrics.openLedgerFeeLevel.jsonClipped(); + info[jss::load_factor_fee_queue] = + escalationMetrics.minProcessingFeeLevel.jsonClipped(); + info[jss::load_factor_fee_reference] = + escalationMetrics.referenceFeeLevel.jsonClipped(); + } + else { - if (auto const netid = app_.overlay().networkID()) - info[jss::network_id] = static_cast(*netid); - - auto const escalationMetrics = - app_.getTxQ().getMetrics(*app_.openLedger().current()); - - auto const loadFactorServer = app_.getFeeTrack().getLoadFactor(); - auto const loadBaseServer = app_.getFeeTrack().getLoadBase(); - /* Scale the escalated fee level to unitless "load factor". - In practice, this just strips the units, but it will continue - to work correctly if either base value ever changes. */ - auto const loadFactorFeeEscalation = - mulDiv( - escalationMetrics.openLedgerFeeLevel, - loadBaseServer, - escalationMetrics.referenceFeeLevel) - .value_or(ripple::muldiv_max); - - auto const loadFactor = std::max( - safe_cast(loadFactorServer), - loadFactorFeeEscalation); + info[jss::load_factor] = + static_cast(loadFactor) / loadBaseServer; - if (!human) + if (loadFactorServer != loadFactor) + info[jss::load_factor_server] = + static_cast(loadFactorServer) / loadBaseServer; + + if (admin) { - info[jss::load_base] = loadBaseServer; - info[jss::load_factor] = trunc32(loadFactor); - info[jss::load_factor_server] = loadFactorServer; - - /* Json::Value doesn't support uint64, so clamp to max - uint32 value. This is mostly theoretical, since there - probably isn't enough extant XRP to drive the factor - that high. - */ + std::uint32_t fee = app_.getFeeTrack().getLocalFee(); + if (fee != loadBaseServer) + info[jss::load_factor_local] = + static_cast(fee) / loadBaseServer; + fee = app_.getFeeTrack().getRemoteFee(); + if (fee != loadBaseServer) + info[jss::load_factor_net] = + static_cast(fee) / loadBaseServer; + fee = app_.getFeeTrack().getClusterFee(); + if (fee != loadBaseServer) + info[jss::load_factor_cluster] = + static_cast(fee) / loadBaseServer; + } + if (escalationMetrics.openLedgerFeeLevel != + escalationMetrics.referenceFeeLevel && + (admin || loadFactorFeeEscalation != loadFactor)) info[jss::load_factor_fee_escalation] = - escalationMetrics.openLedgerFeeLevel.jsonClipped(); + escalationMetrics.openLedgerFeeLevel.decimalFromReference( + escalationMetrics.referenceFeeLevel); + if (escalationMetrics.minProcessingFeeLevel != + escalationMetrics.referenceFeeLevel) info[jss::load_factor_fee_queue] = - escalationMetrics.minProcessingFeeLevel.jsonClipped(); - info[jss::load_factor_fee_reference] = - escalationMetrics.referenceFeeLevel.jsonClipped(); - } - else - { - info[jss::load_factor] = - static_cast(loadFactor) / loadBaseServer; - - if (loadFactorServer != loadFactor) - info[jss::load_factor_server] = - static_cast(loadFactorServer) / loadBaseServer; - - if (admin) - { - std::uint32_t fee = app_.getFeeTrack().getLocalFee(); - if (fee != loadBaseServer) - info[jss::load_factor_local] = - static_cast(fee) / loadBaseServer; - fee = app_.getFeeTrack().getRemoteFee(); - if (fee != loadBaseServer) - info[jss::load_factor_net] = - static_cast(fee) / loadBaseServer; - fee = app_.getFeeTrack().getClusterFee(); - if (fee != loadBaseServer) - info[jss::load_factor_cluster] = - static_cast(fee) / loadBaseServer; - } - if (escalationMetrics.openLedgerFeeLevel != - escalationMetrics.referenceFeeLevel && - (admin || loadFactorFeeEscalation != loadFactor)) - info[jss::load_factor_fee_escalation] = - 
escalationMetrics.openLedgerFeeLevel.decimalFromReference( - escalationMetrics.referenceFeeLevel); - if (escalationMetrics.minProcessingFeeLevel != - escalationMetrics.referenceFeeLevel) - info[jss::load_factor_fee_queue] = - escalationMetrics.minProcessingFeeLevel - .decimalFromReference( - escalationMetrics.referenceFeeLevel); - } + escalationMetrics.minProcessingFeeLevel.decimalFromReference( + escalationMetrics.referenceFeeLevel); } bool valid = false; @@ -2599,7 +2582,7 @@ NetworkOPsImp::getServerInfo(bool human, bool admin, bool counters) if (lpClosed) valid = true; - else if (!app_.config().reporting()) + else lpClosed = m_ledgerMaster.getClosedLedger(); if (lpClosed) @@ -2630,11 +2613,6 @@ NetworkOPsImp::getServerInfo(bool human, bool admin, bool counters) l[jss::close_time_offset] = static_cast(closeOffset.count()); -#if RIPPLED_REPORTING - std::int64_t const dbAge = - std::max(m_ledgerMaster.getValidatedLedgerAge().count(), 0L); - l[jss::age] = Json::UInt(dbAge); -#else constexpr std::chrono::seconds highAgeThreshold{1000000}; if (m_ledgerMaster.haveValidated()) { @@ -2654,7 +2632,6 @@ NetworkOPsImp::getServerInfo(bool human, bool admin, bool counters) Json::UInt(age < highAgeThreshold ? age.count() : 0); } } -#endif } if (valid) @@ -2671,19 +2648,12 @@ NetworkOPsImp::getServerInfo(bool human, bool admin, bool counters) accounting_.json(info); info[jss::uptime] = UptimeClock::now().time_since_epoch().count(); - if (!app_.config().reporting()) - { - info[jss::jq_trans_overflow] = - std::to_string(app_.overlay().getJqTransOverflow()); - info[jss::peer_disconnects] = - std::to_string(app_.overlay().getPeerDisconnect()); - info[jss::peer_disconnects_resources] = - std::to_string(app_.overlay().getPeerDisconnectCharges()); - } - else - { - info["reporting"] = app_.getReportingETL().getInfo(); - } + info[jss::jq_trans_overflow] = + std::to_string(app_.overlay().getJqTransOverflow()); + info[jss::peer_disconnects] = + std::to_string(app_.overlay().getPeerDisconnect()); + info[jss::peer_disconnects_resources] = + std::to_string(app_.overlay().getPeerDisconnectCharges()); // This array must be sorted in increasing order. 
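// (Worked example for the load-factor scaling above, with illustrative
// numbers: if referenceFeeLevel = 256, loadBaseServer = 256 and the
// escalated openLedgerFeeLevel = 2560, then
//     loadFactorFeeEscalation = 2560 * 256 / 256 = 2560
// and load_factor = max(loadFactorServer, 2560). In the human-readable
// branch this is reported divided by loadBaseServer, i.e. 2560 / 256 =
// 10.0 times the base fee.)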
 static constexpr std::array protocols{
@@ -2779,77 +2749,6 @@ NetworkOPsImp::pubProposedTransaction(
     pubProposedAccountTransaction(ledger, transaction, result);
 }
 
-void
-NetworkOPsImp::forwardProposedTransaction(Json::Value const& jvObj)
-{
-    // reporting does not forward validated transactions
-    // validated transactions will be published to the proper streams when the
-    // etl process writes a validated ledger
-    if (jvObj[jss::validated].asBool())
-        return;
-    {
-        std::lock_guard sl(mSubLock);
-
-        auto it = mStreamMaps[sRTTransactions].begin();
-        while (it != mStreamMaps[sRTTransactions].end())
-        {
-            InfoSub::pointer p = it->second.lock();
-
-            if (p)
-            {
-                p->send(jvObj, true);
-                ++it;
-            }
-            else
-            {
-                it = mStreamMaps[sRTTransactions].erase(it);
-            }
-        }
-    }
-
-    forwardProposedAccountTransaction(jvObj);
-}
-
-void
-NetworkOPsImp::forwardValidation(Json::Value const& jvObj)
-{
-    std::lock_guard sl(mSubLock);
-
-    for (auto i = mStreamMaps[sValidations].begin();
-         i != mStreamMaps[sValidations].end();)
-    {
-        if (auto p = i->second.lock())
-        {
-            p->send(jvObj, true);
-            ++i;
-        }
-        else
-        {
-            i = mStreamMaps[sValidations].erase(i);
-        }
-    }
-}
-
-void
-NetworkOPsImp::forwardManifest(Json::Value const& jvObj)
-{
-    std::lock_guard sl(mSubLock);
-
-    for (auto i = mStreamMaps[sManifests].begin();
-         i != mStreamMaps[sManifests].end();)
-    {
-        if (auto p = i->second.lock())
-        {
-            p->send(jvObj, true);
-            ++i;
-        }
-        else
-        {
-            i = mStreamMaps[sManifests].erase(i);
-        }
-    }
-}
-
 static void
 getAccounts(Json::Value const& jvObj, std::vector<AccountID>& accounts)
 {
@@ -2868,74 +2767,6 @@ getAccounts(Json::Value const& jvObj, std::vector<AccountID>& accounts)
     }
 }
 
-void
-NetworkOPsImp::forwardProposedAccountTransaction(Json::Value const& jvObj)
-{
-    hash_set<InfoSub::pointer> notify;
-    int iProposed = 0;
-    // check if there are any subscribers before attempting to parse the JSON
-    {
-        std::lock_guard sl(mSubLock);
-
-        if (mSubRTAccount.empty())
-            return;
-    }
-
-    // parse the JSON outside of the lock
-    std::vector<AccountID> accounts;
-    if (jvObj.isMember(jss::transaction))
-    {
-        try
-        {
-            getAccounts(jvObj[jss::transaction], accounts);
-        }
-        catch (...)
- { - JLOG(m_journal.debug()) - << __func__ << " : " - << "error parsing json for accounts affected"; - return; - } - } - { - std::lock_guard sl(mSubLock); - - if (!mSubRTAccount.empty()) - { - for (auto const& affectedAccount : accounts) - { - auto simiIt = mSubRTAccount.find(affectedAccount); - if (simiIt != mSubRTAccount.end()) - { - auto it = simiIt->second.begin(); - - while (it != simiIt->second.end()) - { - InfoSub::pointer p = it->second.lock(); - - if (p) - { - notify.insert(p); - ++it; - ++iProposed; - } - else - it = simiIt->second.erase(it); - } - } - } - } - } - JLOG(m_journal.trace()) << "forwardProposedAccountTransaction:" - << " iProposed=" << iProposed; - - if (!notify.empty()) - { - for (InfoSub::ref isrListener : notify) - isrListener->send(jvObj, true); - } -} - void NetworkOPsImp::pubLedger(std::shared_ptr const& lpAccepted) { @@ -3052,8 +2883,6 @@ NetworkOPsImp::pubLedger(std::shared_ptr const& lpAccepted) void NetworkOPsImp::reportFeeChange() { - if (app_.config().reporting()) - return; ServerFeeSummary f{ app_.openLedger().current()->fees().base, app_.getTxQ().getMetrics(*app_.openLedger().current()), @@ -3533,30 +3362,8 @@ NetworkOPsImp::unsubAccountInternal( void NetworkOPsImp::addAccountHistoryJob(SubAccountHistoryInfoWeak subInfo) { - enum DatabaseType { Postgres, Sqlite, None }; + enum DatabaseType { Sqlite, None }; static const auto databaseType = [&]() -> DatabaseType { -#ifdef RIPPLED_REPORTING - if (app_.config().reporting()) - { - // Use a dynamic_cast to return DatabaseType::None - // on failure. - if (dynamic_cast(&app_.getRelationalDatabase())) - { - return DatabaseType::Postgres; - } - return DatabaseType::None; - } - else - { - // Use a dynamic_cast to return DatabaseType::None - // on failure. - if (dynamic_cast(&app_.getRelationalDatabase())) - { - return DatabaseType::Sqlite; - } - return DatabaseType::None; - } -#else // Use a dynamic_cast to return DatabaseType::None // on failure. 
         if (dynamic_cast<SQLiteDatabase*>(&app_.getRelationalDatabase()))
@@ -3564,7 +3371,6 @@ NetworkOPsImp::addAccountHistoryJob(SubAccountHistoryInfoWeak subInfo)
             return DatabaseType::Sqlite;
         }
         return DatabaseType::None;
-#endif
     }();
 
     if (databaseType == DatabaseType::None)
@@ -3667,40 +3473,6 @@ NetworkOPsImp::addAccountHistoryJob(SubAccountHistoryInfoWeak subInfo)
            std::optional>> {
        switch (dbType)
        {
-            case Postgres: {
-                auto db = static_cast<PostgresDatabase*>(
-                    &app_.getRelationalDatabase());
-                RelationalDatabase::AccountTxArgs args;
-                args.account = accountId;
-                LedgerRange range{minLedger, maxLedger};
-                args.ledger = range;
-                args.marker = marker;
-                auto [txResult, status] = db->getAccountTx(args);
-                if (status != rpcSUCCESS)
-                {
-                    JLOG(m_journal.debug())
-                        << "AccountHistory job for account "
-                        << toBase58(accountId)
-                        << " getAccountTx failed";
-                    return {};
-                }
-
-                if (auto txns =
-                        std::get_if<RelationalDatabase::AccountTxs>(
-                            &txResult.transactions);
-                    txns)
-                {
-                    return std::make_pair(*txns, txResult.marker);
-                }
-                else
-                {
-                    JLOG(m_journal.debug())
-                        << "AccountHistory job for account "
-                        << toBase58(accountId)
-                        << " getAccountTx wrong data";
-                    return {};
-                }
-            }
            case Sqlite: {
                auto db = static_cast<SQLiteDatabase*>(
                    &app_.getRelationalDatabase());
diff --git a/src/xrpld/app/misc/NetworkOPs.h b/src/xrpld/app/misc/NetworkOPs.h
index d5f43a42972..166b9e9e11f 100644
--- a/src/xrpld/app/misc/NetworkOPs.h
+++ b/src/xrpld/app/misc/NetworkOPs.h
@@ -261,15 +261,6 @@ class NetworkOPs : public InfoSub::Source
     virtual void
     pubValidation(std::shared_ptr<STValidation> const& val) = 0;
 
-    virtual void
-    forwardValidation(Json::Value const& jvObj) = 0;
-    virtual void
-    forwardManifest(Json::Value const& jvObj) = 0;
-    virtual void
-    forwardProposedTransaction(Json::Value const& jvObj) = 0;
-    virtual void
-    forwardProposedAccountTransaction(Json::Value const& jvObj) = 0;
-
     virtual void
     stateAccounting(Json::Value& obj) = 0;
 };
diff --git a/src/xrpld/app/misc/SHAMapStoreImp.cpp b/src/xrpld/app/misc/SHAMapStoreImp.cpp
index 9344463295b..1ce862b095f 100644
--- a/src/xrpld/app/misc/SHAMapStoreImp.cpp
+++ b/src/xrpld/app/misc/SHAMapStoreImp.cpp
@@ -24,7 +24,6 @@
 #include
 #include
 #include
-#include
 #include
 #include
 #include
@@ -120,13 +119,6 @@ SHAMapStoreImp::SHAMapStoreImp(
 
     if (deleteInterval_)
     {
-        if (app_.config().reporting())
-        {
-            Throw<std::runtime_error>(
-                "Reporting does not support online_delete. Remove "
-                "online_delete info from config");
-        }
-
         // Configuration that affects the behavior of online delete
         get_if_exists(section, "delete_batch", deleteBatch_);
         std::uint32_t temp;
@@ -188,12 +180,6 @@ SHAMapStoreImp::makeNodeStore(int readThreads)
 
     if (deleteInterval_)
     {
-        if (app_.config().reporting())
-        {
-            Throw<std::runtime_error>(
-                "Reporting does not support online_delete. Remove "
-                "online_delete info from config");
-        }
         SavedState state = state_db_.getState();
         auto writableBackend = makeBackendRotating(state.writableDb);
         auto archiveBackend = makeBackendRotating(state.archiveDb);
@@ -279,13 +265,6 @@ SHAMapStoreImp::copyNode(std::uint64_t& nodeCount, SHAMapTreeNode const& node)
 void
 SHAMapStoreImp::run()
 {
-    if (app_.config().reporting())
-    {
-        assert(false);
-        Throw<std::runtime_error>(
-            "Reporting does not support online_delete. Remove "
-            "online_delete info from config");
-    }
     beast::setCurrentThreadName("SHAMapStore");
     LedgerIndex lastRotated = state_db_.getState().lastRotated;
     netOPs_ = &app_.getOPs();
@@ -597,13 +576,6 @@ SHAMapStoreImp::freshenCaches()
 void
 SHAMapStoreImp::clearPrior(LedgerIndex lastRotated)
 {
-    if (app_.config().reporting())
-    {
-        assert(false);
-        Throw<std::runtime_error>(
-            "Reporting does not support online_delete. Remove "
-            "online_delete info from config");
-    }
     // Do not allow ledgers to be acquired from the network
     // that are about to be deleted.
     minimumOnline_ = lastRotated + 1;
diff --git a/src/xrpld/app/misc/detail/Transaction.cpp b/src/xrpld/app/misc/detail/Transaction.cpp
index e0c3f260fe5..c8f9df232e0 100644
--- a/src/xrpld/app/misc/detail/Transaction.cpp
+++ b/src/xrpld/app/misc/detail/Transaction.cpp
@@ -21,11 +21,9 @@
 #include
 #include
 #include
-#include
 #include
 #include
 #include
-#include
 #include
 #include
 #include
@@ -130,20 +128,6 @@ Transaction::load(
     return load(id, app, op{range}, ec);
 }
 
-Transaction::Locator
-Transaction::locate(uint256 const& id, Application& app)
-{
-    auto const db =
-        dynamic_cast<PostgresDatabase*>(&app.getRelationalDatabase());
-
-    if (!db)
-    {
-        Throw<std::runtime_error>("Failed to get relational database");
-    }
-
-    return db->locateTransaction(id);
-}
-
 std::variant<
     std::pair<std::shared_ptr<Transaction>, std::shared_ptr<TxMeta>>,
     TxSearched>
diff --git a/src/xrpld/app/rdb/README.md b/src/xrpld/app/rdb/README.md
index f4cb5f203a4..81aaa32f2cf 100644
--- a/src/xrpld/app/rdb/README.md
+++ b/src/xrpld/app/rdb/README.md
@@ -28,9 +28,7 @@ src/xrpld/app/rdb/
 │   ├── detail
 │   │   ├── Node.cpp
 │   │   ├── Node.h
-│   │   ├── PostgresDatabase.cpp
 │   │   └── SQLiteDatabase.cpp
-│   ├── PostgresDatabase.h
 │   └── SQLiteDatabase.h
 ├── detail
 │   ├── PeerFinder.cpp
@@ -50,7 +48,6 @@ src/xrpld/app/rdb/
 | File | Contents |
 | ----------- | ----------- |
 | `Node.[h\|cpp]` | Defines/Implements methods used by `SQLiteDatabase` for interacting with SQLite node databases|
-| `PostgresDatabase.[h\|cpp]` | Defines/Implements the class `PostgresDatabase`/`PostgresDatabaseImp` which inherits from `RelationalDatabase` and is used to operate on the main stores |
 |`SQLiteDatabase.[h\|cpp]`| Defines/Implements the class `SQLiteDatabase`/`SQLiteDatabaseImp` which inherits from `RelationalDatabase` and is used to operate on the main stores |
 | `PeerFinder.[h\|cpp]` | Defines/Implements methods for interacting with the PeerFinder SQLite database |
 |`RelationalDatabase.cpp`| Implements the static method `RelationalDatabase::init` which is used to initialize an instance of `RelationalDatabase` |
diff --git a/src/xrpld/app/rdb/RelationalDatabase.h b/src/xrpld/app/rdb/RelationalDatabase.h
index b30c94153f7..00e236f20db 100644
--- a/src/xrpld/app/rdb/RelationalDatabase.h
+++ b/src/xrpld/app/rdb/RelationalDatabase.h
@@ -111,29 +111,6 @@ class RelationalDatabase
         std::optional<AccountTxMarker> marker;
     };
 
-    /// Struct used to keep track of what to write to transactions and
-    /// account_transactions tables in Postgres
-    struct AccountTransactionsData
-    {
-        boost::container::flat_set<AccountID> accounts;
-        uint32_t ledgerSequence;
-        uint32_t transactionIndex;
-        uint256 txHash;
-        uint256 nodestoreHash;
-
-        AccountTransactionsData(
-            TxMeta const& meta,
-            uint256 const& nodestoreHash,
-            beast::Journal j)
-            : accounts(meta.getAffectedAccounts())
-            , ledgerSequence(meta.getLgrSeq())
-            , transactionIndex(meta.getIndex())
-            , txHash(meta.getTxID())
-            , nodestoreHash(nodestoreHash)
-        {
-        }
-    };
-
     /**
      * @brief init Creates and returns an appropriate RelationalDatabase
      *        instance based on configuration.
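With the Postgres backend removed, the backend selection performed by `RelationalDatabase::init` (referenced in the README table above) collapses to a single branch. A minimal sketch of that shape, assuming a SQLite factory along the lines of the `getSQLiteDatabase` used by the implementation; the wrapper name is illustrative:

    // Sketch: backend selection once SQLite is the only main-store backend.
    std::unique_ptr<RelationalDatabase>
    makeRelationalDatabase(Application& app, Config const& cfg, JobQueue& jq)
    {
        // Previously this chose between Postgres (reporting mode) and
        // SQLite; now every configuration resolves to SQLite.
        return getSQLiteDatabase(app, cfg, jq);
    }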
diff --git a/src/xrpld/app/rdb/backend/PostgresDatabase.h b/src/xrpld/app/rdb/backend/PostgresDatabase.h deleted file mode 100644 index c2841cefd8c..00000000000 --- a/src/xrpld/app/rdb/backend/PostgresDatabase.h +++ /dev/null @@ -1,113 +0,0 @@ -//------------------------------------------------------------------------------ -/* - This file is part of rippled: https://github.com/ripple/rippled - Copyright (c) 2020 Ripple Labs Inc. - - Permission to use, copy, modify, and/or distribute this software for any - purpose with or without fee is hereby granted, provided that the above - copyright notice and this permission notice appear in all copies. - - THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. -*/ -//============================================================================== - -#ifndef RIPPLE_APP_RDB_BACKEND_POSTGRESDATABASE_H_INCLUDED -#define RIPPLE_APP_RDB_BACKEND_POSTGRESDATABASE_H_INCLUDED - -#include - -namespace ripple { - -class PostgresDatabase : public RelationalDatabase -{ -public: - virtual void - stop() = 0; - - /** - * @brief sweep Sweeps the database. - */ - virtual void - sweep() = 0; - - /** - * @brief getCompleteLedgers Returns a string which contains a list of - * completed ledgers. - * @return String with completed ledger sequences - */ - virtual std::string - getCompleteLedgers() = 0; - - /** - * @brief getValidatedLedgerAge Returns the age of the last validated - * ledger. - * @return Age of the last validated ledger in seconds - */ - virtual std::chrono::seconds - getValidatedLedgerAge() = 0; - - /** - * @brief writeLedgerAndTransactions Writes new ledger and transaction data - * into the database. - * @param info Ledger info to write. - * @param accountTxData Transaction data to write - * @return True on success, false on failure. - */ - virtual bool - writeLedgerAndTransactions( - LedgerInfo const& info, - std::vector const& accountTxData) = 0; - - /** - * @brief getTxHashes Returns a vector of the hashes of transactions - * belonging to the ledger with the provided sequence. - * @param seq Ledger sequence - * @return Vector of transaction hashes - */ - virtual std::vector - getTxHashes(LedgerIndex seq) = 0; - - /** - * @brief getAccountTx Get the last account transactions specified by the - * AccountTxArgs struct. - * @param args Arguments which specify the account and which transactions to - * return. - * @return Vector of account transactions and the RPC status response. - */ - virtual std::pair - getAccountTx(AccountTxArgs const& args) = 0; - - /** - * @brief locateTransaction Returns information used to locate - * a transaction. - * @param id Hash of the transaction. - * @return Information used to locate a transaction. Contains a nodestore - * hash and a ledger sequence pair if the transaction was found. - * Otherwise, contains the range of ledgers present in the database - * at the time of search. 
- */ - virtual Transaction::Locator - locateTransaction(uint256 const& id) = 0; - - /** - * @brief isCaughtUp returns whether the database is caught up with the - * network - * @param[out] reason if the database is not caught up, reason contains a - * helpful message describing why - * @return false if the most recently written ledger has a close time - * over 3 minutes ago, or if there are no ledgers in the - * database. true otherwise - */ - virtual bool - isCaughtUp(std::string& reason) = 0; -}; - -} // namespace ripple - -#endif diff --git a/src/xrpld/app/rdb/backend/detail/PostgresDatabase.cpp b/src/xrpld/app/rdb/backend/detail/PostgresDatabase.cpp deleted file mode 100644 index ac1a9813c2b..00000000000 --- a/src/xrpld/app/rdb/backend/detail/PostgresDatabase.cpp +++ /dev/null @@ -1,1072 +0,0 @@ -//------------------------------------------------------------------------------ -/* - This file is part of rippled: https://github.com/ripple/rippled - Copyright (c) 2020 Ripple Labs Inc. - - Permission to use, copy, modify, and/or distribute this software for any - purpose with or without fee is hereby granted, provided that the above - copyright notice and this permission notice appear in all copies. - - THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. -*/ -//============================================================================== - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -namespace ripple { - -class PgPool; - -using AccountTxResult = RelationalDatabase::AccountTxResult; -using TxnsData = RelationalDatabase::AccountTxs; -using TxnsDataBinary = RelationalDatabase::MetaTxsList; - -class PostgresDatabaseImp final : public PostgresDatabase -{ -public: - PostgresDatabaseImp( - Application& app, - Config const& config, - JobQueue& jobQueue) - : app_(app) - , j_(app_.journal("PgPool")) - , pgPool_( -#ifdef RIPPLED_REPORTING - make_PgPool(config.section("ledger_tx_tables"), j_) -#endif - ) - { - assert(config.reporting()); -#ifdef RIPPLED_REPORTING - if (config.reporting() && !config.reportingReadOnly()) // use pg - { - initSchema(pgPool_); - } -#endif - } - - void - stop() override - { -#ifdef RIPPLED_REPORTING - pgPool_->stop(); -#endif - } - - void - sweep() override; - - std::optional - getMinLedgerSeq() override; - - std::optional - getMaxLedgerSeq() override; - - std::string - getCompleteLedgers() override; - - std::chrono::seconds - getValidatedLedgerAge() override; - - bool - writeLedgerAndTransactions( - LedgerInfo const& info, - std::vector const& accountTxData) override; - - std::optional - getLedgerInfoByIndex(LedgerIndex ledgerSeq) override; - - std::optional - getNewestLedgerInfo() override; - - std::optional - getLedgerInfoByHash(uint256 const& ledgerHash) override; - - uint256 - getHashByIndex(LedgerIndex ledgerIndex) override; - - std::optional - getHashesByIndex(LedgerIndex ledgerIndex) override; - - std::map - getHashesByIndex(LedgerIndex minSeq, LedgerIndex maxSeq) override; - - 
std::vector - getTxHashes(LedgerIndex seq) override; - - std::vector> - getTxHistory(LedgerIndex startIndex) override; - - std::pair - getAccountTx(AccountTxArgs const& args) override; - - Transaction::Locator - locateTransaction(uint256 const& id) override; - - bool - ledgerDbHasSpace(Config const& config) override; - - bool - transactionDbHasSpace(Config const& config) override; - - bool - isCaughtUp(std::string& reason) override; - -private: - Application& app_; - beast::Journal j_; - std::shared_ptr pgPool_; - - bool - dbHasSpace(Config const& config); -}; - -/** - * @brief loadLedgerInfos Loads the ledger info for the specified - * ledger/s from the database - * @param pgPool Link to postgres database - * @param whichLedger Specifies the ledger to load via ledger sequence, - * ledger hash, a range of ledgers, or std::monostate - * (which loads the most recent) - * @param app Application - * @return Vector of LedgerInfos - */ -static std::vector -loadLedgerInfos( - std::shared_ptr const& pgPool, - std::variant< - std::monostate, - uint256, - uint32_t, - std::pair> const& whichLedger, - Application& app) -{ - std::vector infos; -#ifdef RIPPLED_REPORTING - auto log = app.journal("Ledger"); - assert(app.config().reporting()); - std::stringstream sql; - sql << "SELECT ledger_hash, prev_hash, account_set_hash, trans_set_hash, " - "total_coins, closing_time, prev_closing_time, close_time_res, " - "close_flags, ledger_seq FROM ledgers "; - - if (auto ledgerSeq = std::get_if(&whichLedger)) - { - sql << "WHERE ledger_seq = " + std::to_string(*ledgerSeq); - } - else if (auto ledgerHash = std::get_if(&whichLedger)) - { - sql << ("WHERE ledger_hash = \'\\x" + strHex(*ledgerHash) + "\'"); - } - else if ( - auto minAndMax = - std::get_if>(&whichLedger)) - { - sql - << ("WHERE ledger_seq >= " + std::to_string(minAndMax->first) + - " AND ledger_seq <= " + std::to_string(minAndMax->second)); - } - else - { - sql << ("ORDER BY ledger_seq desc LIMIT 1"); - } - sql << ";"; - - JLOG(log.trace()) << __func__ << " : sql = " << sql.str(); - - auto res = PgQuery(pgPool)(sql.str().data()); - if (!res) - { - JLOG(log.error()) << __func__ << " : Postgres response is null - sql = " - << sql.str(); - assert(false); - return {}; - } - else if (res.status() != PGRES_TUPLES_OK) - { - JLOG(log.error()) << __func__ - << " : Postgres response should have been " - "PGRES_TUPLES_OK but instead was " - << res.status() << " - msg = " << res.msg() - << " - sql = " << sql.str(); - assert(false); - return {}; - } - - JLOG(log.trace()) << __func__ << " Postgres result msg : " << res.msg(); - - if (res.isNull() || res.ntuples() == 0) - { - JLOG(log.debug()) << __func__ - << " : Ledger not found. sql = " << sql.str(); - return {}; - } - else if (res.ntuples() > 0) - { - if (res.nfields() != 10) - { - JLOG(log.error()) << __func__ - << " : Wrong number of fields in Postgres " - "response. Expected 10, but got " - << res.nfields() << " . 
sql = " << sql.str(); - assert(false); - return {}; - } - } - - for (size_t i = 0; i < res.ntuples(); ++i) - { - char const* hash = res.c_str(i, 0); - char const* prevHash = res.c_str(i, 1); - char const* accountHash = res.c_str(i, 2); - char const* txHash = res.c_str(i, 3); - std::int64_t totalCoins = res.asBigInt(i, 4); - std::int64_t closeTime = res.asBigInt(i, 5); - std::int64_t parentCloseTime = res.asBigInt(i, 6); - std::int64_t closeTimeRes = res.asBigInt(i, 7); - std::int64_t closeFlags = res.asBigInt(i, 8); - std::int64_t ledgerSeq = res.asBigInt(i, 9); - - JLOG(log.trace()) << __func__ << " - Postgres response = " << hash - << " , " << prevHash << " , " << accountHash << " , " - << txHash << " , " << totalCoins << ", " << closeTime - << ", " << parentCloseTime << ", " << closeTimeRes - << ", " << closeFlags << ", " << ledgerSeq - << " - sql = " << sql.str(); - JLOG(log.debug()) << __func__ - << " - Successfully fetched ledger with sequence = " - << ledgerSeq << " from Postgres"; - - using time_point = NetClock::time_point; - using duration = NetClock::duration; - - LedgerInfo info; - if (!info.parentHash.parseHex(prevHash + 2)) - assert(false); - if (!info.txHash.parseHex(txHash + 2)) - assert(false); - if (!info.accountHash.parseHex(accountHash + 2)) - assert(false); - info.drops = totalCoins; - info.closeTime = time_point{duration{closeTime}}; - info.parentCloseTime = time_point{duration{parentCloseTime}}; - info.closeFlags = closeFlags; - info.closeTimeResolution = duration{closeTimeRes}; - info.seq = ledgerSeq; - if (!info.hash.parseHex(hash + 2)) - assert(false); - info.validated = true; - infos.push_back(info); - } - -#endif - return infos; -} - -/** - * @brief loadLedgerHelper Load a ledger info from Postgres - * @param pgPool Link to postgres database - * @param whichLedger Specifies sequence or hash of ledger. 
Passing - * std::monostate loads the most recent ledger - * @param app The Application - * @return Ledger info - */ -static std::optional -loadLedgerHelper( - std::shared_ptr const& pgPool, - std::variant const& whichLedger, - Application& app) -{ - std::vector infos; - std::visit( - [&infos, &app, &pgPool](auto&& arg) { - infos = loadLedgerInfos(pgPool, arg, app); - }, - whichLedger); - assert(infos.size() <= 1); - if (!infos.size()) - return {}; - return infos[0]; -} - -#ifdef RIPPLED_REPORTING -static bool -writeToLedgersDB(LedgerInfo const& info, PgQuery& pgQuery, beast::Journal& j) -{ - JLOG(j.debug()) << __func__; - auto cmd = boost::format( - R"(INSERT INTO ledgers - VALUES (%u,'\x%s', '\x%s',%u,%u,%u,%u,%u,'\x%s','\x%s'))"); - - auto ledgerInsert = boost::str( - cmd % info.seq % strHex(info.hash) % strHex(info.parentHash) % - info.drops.drops() % info.closeTime.time_since_epoch().count() % - info.parentCloseTime.time_since_epoch().count() % - info.closeTimeResolution.count() % info.closeFlags % - strHex(info.accountHash) % strHex(info.txHash)); - JLOG(j.trace()) << __func__ << " : " - << " : " - << "query string = " << ledgerInsert; - - auto res = pgQuery(ledgerInsert.data()); - - return res; -} - -enum class DataFormat { binary, expanded }; -static std::variant -flatFetchTransactions( - Application& app, - std::vector& nodestoreHashes, - std::vector& ledgerSequences, - DataFormat format) -{ - std::variant ret; - if (format == DataFormat::binary) - ret = TxnsDataBinary(); - else - ret = TxnsData(); - - std::vector< - std::pair, std::shared_ptr>> - txns = flatFetchTransactions(app, nodestoreHashes); - for (size_t i = 0; i < txns.size(); ++i) - { - auto& [txn, meta] = txns[i]; - if (format == DataFormat::binary) - { - auto& transactions = std::get(ret); - Serializer txnSer = txn->getSerializer(); - Serializer metaSer = meta->getSerializer(); - // SerialIter it(item->slice()); - Blob txnBlob = txnSer.getData(); - Blob metaBlob = metaSer.getData(); - transactions.push_back( - std::make_tuple(txnBlob, metaBlob, ledgerSequences[i])); - } - else - { - auto& transactions = std::get(ret); - std::string reason; - auto txnRet = std::make_shared(txn, reason, app); - txnRet->setLedger(ledgerSequences[i]); - txnRet->setStatus(COMMITTED); - auto txMeta = std::make_shared( - txnRet->getID(), ledgerSequences[i], *meta); - transactions.push_back(std::make_pair(txnRet, txMeta)); - } - } - return ret; -} - -static std::pair -processAccountTxStoredProcedureResult( - RelationalDatabase::AccountTxArgs const& args, - Json::Value& result, - Application& app, - beast::Journal j) -{ - AccountTxResult ret; - ret.limit = args.limit; - - try - { - if (result.isMember("transactions")) - { - std::vector nodestoreHashes; - std::vector ledgerSequences; - for (auto& t : result["transactions"]) - { - if (t.isMember("ledger_seq") && t.isMember("nodestore_hash")) - { - uint32_t ledgerSequence = t["ledger_seq"].asUInt(); - std::string nodestoreHashHex = - t["nodestore_hash"].asString(); - nodestoreHashHex.erase(0, 2); - uint256 nodestoreHash; - if (!nodestoreHash.parseHex(nodestoreHashHex)) - assert(false); - - if (nodestoreHash.isNonZero()) - { - ledgerSequences.push_back(ledgerSequence); - nodestoreHashes.push_back(nodestoreHash); - } - else - { - assert(false); - return {ret, {rpcINTERNAL, "nodestoreHash is zero"}}; - } - } - else - { - assert(false); - return {ret, {rpcINTERNAL, "missing postgres fields"}}; - } - } - - assert(nodestoreHashes.size() == ledgerSequences.size()); - ret.transactions = 
flatFetchTransactions( - app, - nodestoreHashes, - ledgerSequences, - args.binary ? DataFormat::binary : DataFormat::expanded); - - JLOG(j.trace()) << __func__ << " : processed db results"; - - if (result.isMember("marker")) - { - auto& marker = result["marker"]; - assert(marker.isMember("ledger")); - assert(marker.isMember("seq")); - ret.marker = { - marker["ledger"].asUInt(), marker["seq"].asUInt()}; - } - assert(result.isMember("ledger_index_min")); - assert(result.isMember("ledger_index_max")); - ret.ledgerRange = { - result["ledger_index_min"].asUInt(), - result["ledger_index_max"].asUInt()}; - return {ret, rpcSUCCESS}; - } - else if (result.isMember("error")) - { - JLOG(j.debug()) - << __func__ << " : error = " << result["error"].asString(); - return { - ret, - RPC::Status{rpcINVALID_PARAMS, result["error"].asString()}}; - } - else - { - return {ret, {rpcINTERNAL, "unexpected Postgres response"}}; - } - } - catch (std::exception& e) - { - JLOG(j.debug()) << __func__ << " : " - << "Caught exception : " << e.what(); - return {ret, {rpcINTERNAL, e.what()}}; - } -} -#endif - -void -PostgresDatabaseImp::sweep() -{ -#ifdef RIPPLED_REPORTING - pgPool_->idleSweeper(); -#endif -} - -std::optional -PostgresDatabaseImp::getMinLedgerSeq() -{ -#ifdef RIPPLED_REPORTING - auto seq = PgQuery(pgPool_)("SELECT min_ledger()"); - if (!seq) - { - JLOG(j_.error()) << "Error querying minimum ledger sequence."; - } - else if (!seq.isNull()) - return seq.asInt(); -#endif - return {}; -} - -std::optional -PostgresDatabaseImp::getMaxLedgerSeq() -{ -#ifdef RIPPLED_REPORTING - auto seq = PgQuery(pgPool_)("SELECT max_ledger()"); - if (seq && !seq.isNull()) - return seq.asBigInt(); -#endif - return {}; -} - -std::string -PostgresDatabaseImp::getCompleteLedgers() -{ -#ifdef RIPPLED_REPORTING - auto range = PgQuery(pgPool_)("SELECT complete_ledgers()"); - if (range) - return range.c_str(); -#endif - return "error"; -} - -std::chrono::seconds -PostgresDatabaseImp::getValidatedLedgerAge() -{ - using namespace std::chrono_literals; -#ifdef RIPPLED_REPORTING - auto age = PgQuery(pgPool_)("SELECT age()"); - if (!age || age.isNull()) - JLOG(j_.debug()) << "No ledgers in database"; - else - return std::chrono::seconds{age.asInt()}; -#endif - return weeks{2}; -} - -bool -PostgresDatabaseImp::writeLedgerAndTransactions( - LedgerInfo const& info, - std::vector const& accountTxData) -{ -#ifdef RIPPLED_REPORTING - JLOG(j_.debug()) << __func__ << " : " - << "Beginning write to Postgres"; - - try - { - // Create a PgQuery object to run multiple commands over the same - // connection in a single transaction block. - PgQuery pg(pgPool_); - auto res = pg("BEGIN"); - if (!res || res.status() != PGRES_COMMAND_OK) - { - std::stringstream msg; - msg << "bulkWriteToTable : Postgres insert error: " << res.msg(); - Throw(msg.str()); - } - - // Writing to the ledgers db fails if the ledger already exists in the - // db. 
In this situation, the ETL process has detected there is another - // writer, and falls back to only publishing - if (!writeToLedgersDB(info, pg, j_)) - { - JLOG(j_.warn()) << __func__ << " : " - << "Failed to write to ledgers database."; - return false; - } - - std::stringstream transactionsCopyBuffer; - std::stringstream accountTransactionsCopyBuffer; - for (auto const& data : accountTxData) - { - std::string txHash = strHex(data.txHash); - std::string nodestoreHash = strHex(data.nodestoreHash); - auto idx = data.transactionIndex; - auto ledgerSeq = data.ledgerSequence; - - transactionsCopyBuffer << std::to_string(ledgerSeq) << '\t' - << std::to_string(idx) << '\t' << "\\\\x" - << txHash << '\t' << "\\\\x" << nodestoreHash - << '\n'; - - for (auto const& a : data.accounts) - { - std::string acct = strHex(a); - accountTransactionsCopyBuffer - << "\\\\x" << acct << '\t' << std::to_string(ledgerSeq) - << '\t' << std::to_string(idx) << '\n'; - } - } - - pg.bulkInsert("transactions", transactionsCopyBuffer.str()); - pg.bulkInsert( - "account_transactions", accountTransactionsCopyBuffer.str()); - - res = pg("COMMIT"); - if (!res || res.status() != PGRES_COMMAND_OK) - { - std::stringstream msg; - msg << "bulkWriteToTable : Postgres insert error: " << res.msg(); - assert(false); - Throw(msg.str()); - } - - JLOG(j_.info()) << __func__ << " : " - << "Successfully wrote to Postgres"; - return true; - } - catch (std::exception& e) - { - JLOG(j_.error()) << __func__ - << "Caught exception writing to Postgres : " - << e.what(); - assert(false); - return false; - } -#else - return false; -#endif -} - -std::optional -PostgresDatabaseImp::getLedgerInfoByIndex(LedgerIndex ledgerSeq) -{ - return loadLedgerHelper(pgPool_, ledgerSeq, app_); -} - -std::optional -PostgresDatabaseImp::getNewestLedgerInfo() -{ - return loadLedgerHelper(pgPool_, {}, app_); -} - -std::optional -PostgresDatabaseImp::getLedgerInfoByHash(uint256 const& ledgerHash) -{ - return loadLedgerHelper(pgPool_, ledgerHash, app_); -} - -uint256 -PostgresDatabaseImp::getHashByIndex(LedgerIndex ledgerIndex) -{ - auto infos = loadLedgerInfos(pgPool_, ledgerIndex, app_); - assert(infos.size() <= 1); - if (infos.size()) - return infos[0].hash; - return {}; -} - -std::optional -PostgresDatabaseImp::getHashesByIndex(LedgerIndex ledgerIndex) -{ - LedgerHashPair p; - auto infos = loadLedgerInfos(pgPool_, ledgerIndex, app_); - assert(infos.size() <= 1); - if (infos.size()) - { - p.ledgerHash = infos[0].hash; - p.parentHash = infos[0].parentHash; - return p; - } - return {}; -} - -std::map -PostgresDatabaseImp::getHashesByIndex(LedgerIndex minSeq, LedgerIndex maxSeq) -{ - std::map ret; - auto infos = loadLedgerInfos(pgPool_, std::make_pair(minSeq, maxSeq), app_); - for (auto& info : infos) - { - ret[info.seq] = {info.hash, info.parentHash}; - } - return ret; -} - -std::vector -PostgresDatabaseImp::getTxHashes(LedgerIndex seq) -{ - std::vector nodestoreHashes; - -#ifdef RIPPLED_REPORTING - auto log = app_.journal("Ledger"); - - std::string query = - "SELECT nodestore_hash" - " FROM transactions " - " WHERE ledger_seq = " + - std::to_string(seq); - auto res = PgQuery(pgPool_)(query.c_str()); - - if (!res) - { - JLOG(log.error()) << __func__ - << " : Postgres response is null - query = " << query; - assert(false); - return {}; - } - else if (res.status() != PGRES_TUPLES_OK) - { - JLOG(log.error()) << __func__ - << " : Postgres response should have been " - "PGRES_TUPLES_OK but instead was " - << res.status() << " - msg = " << res.msg() - << " - query = " << 
query; - assert(false); - return {}; - } - - JLOG(log.trace()) << __func__ << " Postgres result msg : " << res.msg(); - - if (res.isNull() || res.ntuples() == 0) - { - JLOG(log.debug()) << __func__ - << " : Ledger not found. query = " << query; - return {}; - } - else if (res.ntuples() > 0) - { - if (res.nfields() != 1) - { - JLOG(log.error()) << __func__ - << " : Wrong number of fields in Postgres " - "response. Expected 1, but got " - << res.nfields() << " . query = " << query; - assert(false); - return {}; - } - } - - JLOG(log.trace()) << __func__ << " : result = " << res.c_str() - << " : query = " << query; - for (size_t i = 0; i < res.ntuples(); ++i) - { - char const* nodestoreHash = res.c_str(i, 0); - uint256 hash; - if (!hash.parseHex(nodestoreHash + 2)) - assert(false); - - nodestoreHashes.push_back(hash); - } -#endif - - return nodestoreHashes; -} - -std::vector> -PostgresDatabaseImp::getTxHistory(LedgerIndex startIndex) -{ - std::vector> ret; - -#ifdef RIPPLED_REPORTING - if (!app_.config().reporting()) - { - assert(false); - Throw( - "called getTxHistory but not in reporting mode"); - } - - std::string sql = boost::str( - boost::format("SELECT nodestore_hash, ledger_seq " - " FROM transactions" - " ORDER BY ledger_seq DESC LIMIT 20 " - "OFFSET %u;") % - startIndex); - - auto res = PgQuery(pgPool_)(sql.data()); - - if (!res) - { - JLOG(j_.error()) << __func__ - << " : Postgres response is null - sql = " << sql; - assert(false); - return {}; - } - else if (res.status() != PGRES_TUPLES_OK) - { - JLOG(j_.error()) << __func__ - << " : Postgres response should have been " - "PGRES_TUPLES_OK but instead was " - << res.status() << " - msg = " << res.msg() - << " - sql = " << sql; - assert(false); - return {}; - } - - JLOG(j_.trace()) << __func__ << " Postgres result msg : " << res.msg(); - - if (res.isNull() || res.ntuples() == 0) - { - JLOG(j_.debug()) << __func__ << " : Empty postgres response"; - assert(false); - return {}; - } - else if (res.ntuples() > 0) - { - if (res.nfields() != 2) - { - JLOG(j_.error()) << __func__ - << " : Wrong number of fields in Postgres " - "response. Expected 1, but got " - << res.nfields() << " . sql = " << sql; - assert(false); - return {}; - } - } - - JLOG(j_.trace()) << __func__ << " : Postgres result = " << res.c_str(); - - std::vector nodestoreHashes; - std::vector ledgerSequences; - for (size_t i = 0; i < res.ntuples(); ++i) - { - uint256 hash; - if (!hash.parseHex(res.c_str(i, 0) + 2)) - assert(false); - nodestoreHashes.push_back(hash); - ledgerSequences.push_back(res.asBigInt(i, 1)); - } - - auto txns = flatFetchTransactions(app_, nodestoreHashes); - for (size_t i = 0; i < txns.size(); ++i) - { - auto const& [sttx, meta] = txns[i]; - assert(sttx); - - std::string reason; - auto txn = std::make_shared(sttx, reason, app_); - txn->setLedger(ledgerSequences[i]); - txn->setStatus(COMMITTED); - ret.push_back(txn); - } - -#endif - return ret; -} - -std::pair -PostgresDatabaseImp::getAccountTx(AccountTxArgs const& args) -{ -#ifdef RIPPLED_REPORTING - pg_params dbParams; - - char const*& command = dbParams.first; - std::vector>& values = dbParams.second; - command = - "SELECT account_tx($1::bytea, $2::bool, " - "$3::bigint, $4::bigint, $5::bigint, $6::bytea, " - "$7::bigint, $8::bool, $9::bigint, $10::bigint)"; - values.resize(10); - values[0] = "\\x" + strHex(args.account); - values[1] = args.forward ? 
"true" : "false"; - - static std::uint32_t const page_length(200); - if (args.limit == 0 || args.limit > page_length) - values[2] = std::to_string(page_length); - else - values[2] = std::to_string(args.limit); - - if (args.ledger) - { - if (auto range = std::get_if(&args.ledger.value())) - { - values[3] = std::to_string(range->min); - values[4] = std::to_string(range->max); - } - else if (auto hash = std::get_if(&args.ledger.value())) - { - values[5] = ("\\x" + strHex(*hash)); - } - else if ( - auto sequence = std::get_if(&args.ledger.value())) - { - values[6] = std::to_string(*sequence); - } - else if (std::get_if(&args.ledger.value())) - { - // current, closed and validated are all treated as validated - values[7] = "true"; - } - else - { - JLOG(j_.error()) << "doAccountTxStoredProcedure - " - << "Error parsing ledger args"; - return {}; - } - } - - if (args.marker) - { - values[8] = std::to_string(args.marker->ledgerSeq); - values[9] = std::to_string(args.marker->txnSeq); - } - for (size_t i = 0; i < values.size(); ++i) - { - JLOG(j_.trace()) << "value " << std::to_string(i) << " = " - << (values[i] ? values[i].value() : "null"); - } - - auto res = PgQuery(pgPool_)(dbParams); - if (!res) - { - JLOG(j_.error()) << __func__ - << " : Postgres response is null - account = " - << strHex(args.account); - assert(false); - return {{}, {rpcINTERNAL, "Postgres error"}}; - } - else if (res.status() != PGRES_TUPLES_OK) - { - JLOG(j_.error()) << __func__ - << " : Postgres response should have been " - "PGRES_TUPLES_OK but instead was " - << res.status() << " - msg = " << res.msg() - << " - account = " << strHex(args.account); - assert(false); - return {{}, {rpcINTERNAL, "Postgres error"}}; - } - - JLOG(j_.trace()) << __func__ << " Postgres result msg : " << res.msg(); - if (res.isNull() || res.ntuples() == 0) - { - JLOG(j_.debug()) << __func__ - << " : No data returned from Postgres : account = " - << strHex(args.account); - - assert(false); - return {{}, {rpcINTERNAL, "Postgres error"}}; - } - - char const* resultStr = res.c_str(); - JLOG(j_.trace()) << __func__ << " : " - << "postgres result = " << resultStr - << " : account = " << strHex(args.account); - - Json::Value v; - Json::Reader reader; - bool success = reader.parse(resultStr, resultStr + strlen(resultStr), v); - if (success) - { - return processAccountTxStoredProcedureResult(args, v, app_, j_); - } -#endif - // This shouldn't happen. 
Postgres should return a parseable error - assert(false); - return {{}, {rpcINTERNAL, "Failed to deserialize Postgres result"}}; -} - -Transaction::Locator -PostgresDatabaseImp::locateTransaction(uint256 const& id) -{ -#ifdef RIPPLED_REPORTING - auto baseCmd = boost::format(R"(SELECT tx('%s');)"); - - std::string txHash = "\\x" + strHex(id); - std::string sql = boost::str(baseCmd % txHash); - - auto res = PgQuery(pgPool_)(sql.data()); - - if (!res) - { - JLOG(app_.journal("Transaction").error()) - << __func__ - << " : Postgres response is null - tx ID = " << strHex(id); - assert(false); - return {}; - } - else if (res.status() != PGRES_TUPLES_OK) - { - JLOG(app_.journal("Transaction").error()) - << __func__ - << " : Postgres response should have been " - "PGRES_TUPLES_OK but instead was " - << res.status() << " - msg = " << res.msg() - << " - tx ID = " << strHex(id); - assert(false); - return {}; - } - - JLOG(app_.journal("Transaction").trace()) - << __func__ << " Postgres result msg : " << res.msg(); - if (res.isNull() || res.ntuples() == 0) - { - JLOG(app_.journal("Transaction").debug()) - << __func__ - << " : No data returned from Postgres : tx ID = " << strHex(id); - // This shouldn't happen - assert(false); - return {}; - } - - char const* resultStr = res.c_str(); - JLOG(app_.journal("Transaction").debug()) - << "postgres result = " << resultStr; - - Json::Value v; - Json::Reader reader; - bool success = reader.parse(resultStr, resultStr + strlen(resultStr), v); - if (success) - { - if (v.isMember("nodestore_hash") && v.isMember("ledger_seq")) - { - uint256 nodestoreHash; - if (!nodestoreHash.parseHex( - v["nodestore_hash"].asString().substr(2))) - assert(false); - uint32_t ledgerSeq = v["ledger_seq"].asUInt(); - if (nodestoreHash.isNonZero()) - return {std::make_pair(nodestoreHash, ledgerSeq)}; - } - if (v.isMember("min_seq") && v.isMember("max_seq")) - { - return {ClosedInterval( - v["min_seq"].asUInt(), v["max_seq"].asUInt())}; - } - } -#endif - // Shouldn' happen. Postgres should return the ledger range searched if - // the transaction was not found - assert(false); - Throw( - "Transaction::Locate - Invalid Postgres response"); - return {}; -} - -bool -PostgresDatabaseImp::dbHasSpace(Config const& config) -{ - /* Postgres server could be running on a different machine. 
*/
-
-    return true;
-}
-
-bool
-PostgresDatabaseImp::ledgerDbHasSpace(Config const& config)
-{
-    return dbHasSpace(config);
-}
-
-bool
-PostgresDatabaseImp::transactionDbHasSpace(Config const& config)
-{
-    return dbHasSpace(config);
-}
-
-std::unique_ptr<RelationalDatabase>
-getPostgresDatabase(Application& app, Config const& config, JobQueue& jobQueue)
-{
-    return std::make_unique<PostgresDatabaseImp>(app, config, jobQueue);
-}
-
-bool
-PostgresDatabaseImp::isCaughtUp(std::string& reason)
-{
-#ifdef RIPPLED_REPORTING
-    using namespace std::chrono_literals;
-    auto age = PgQuery(pgPool_)("SELECT age()");
-    if (!age || age.isNull())
-    {
-        reason = "No ledgers in database";
-        return false;
-    }
-    if (std::chrono::seconds{age.asInt()} > 3min)
-    {
-        reason = "No recently-published ledger";
-        return false;
-    }
-#endif
-    return true;
-}
-
-}  // namespace ripple
diff --git a/src/xrpld/app/rdb/detail/RelationalDatabase.cpp b/src/xrpld/app/rdb/detail/RelationalDatabase.cpp
index 07dc27fd1d3..4a95134d705 100644
--- a/src/xrpld/app/rdb/detail/RelationalDatabase.cpp
+++ b/src/xrpld/app/rdb/detail/RelationalDatabase.cpp
@@ -26,9 +26,6 @@ namespace ripple {
 extern std::unique_ptr<RelationalDatabase>
 getSQLiteDatabase(Application& app, Config const& config, JobQueue& jobQueue);
 
-extern std::unique_ptr<RelationalDatabase>
-getPostgresDatabase(Application& app, Config const& config, JobQueue& jobQueue);
-
 std::unique_ptr<RelationalDatabase>
 RelationalDatabase::init(
     Application& app,
@@ -36,42 +33,30 @@ RelationalDatabase::init(
     JobQueue& jobQueue)
 {
     bool use_sqlite = false;
-    bool use_postgres = false;
 
-    if (config.reporting())
-    {
-        use_postgres = true;
-    }
-    else
+    const Section& rdb_section{config.section(SECTION_RELATIONAL_DB)};
+    if (!rdb_section.empty())
     {
-        const Section& rdb_section{config.section(SECTION_RELATIONAL_DB)};
-        if (!rdb_section.empty())
+        if (boost::iequals(get(rdb_section, "backend"), "sqlite"))
         {
-            if (boost::iequals(get(rdb_section, "backend"), "sqlite"))
-            {
-                use_sqlite = true;
-            }
-            else
-            {
-                Throw<std::runtime_error>(
-                    "Invalid rdb_section backend value: " +
-                    get(rdb_section, "backend"));
-            }
+            use_sqlite = true;
         }
         else
         {
-            use_sqlite = true;
+            Throw<std::runtime_error>(
+                "Invalid rdb_section backend value: " +
+                get(rdb_section, "backend"));
         }
     }
+    else
+    {
+        use_sqlite = true;
+    }
 
     if (use_sqlite)
     {
         return getSQLiteDatabase(app, config, jobQueue);
     }
-    else if (use_postgres)
-    {
-        return getPostgresDatabase(app, config, jobQueue);
-    }
 
     return std::unique_ptr<RelationalDatabase>();
 }
diff --git a/src/xrpld/app/reporting/ETLHelpers.h b/src/xrpld/app/reporting/ETLHelpers.h
deleted file mode 100644
index b11d2c4aa18..00000000000
--- a/src/xrpld/app/reporting/ETLHelpers.h
+++ /dev/null
@@ -1,195 +0,0 @@
-//------------------------------------------------------------------------------
-/*
-    This file is part of rippled: https://github.com/ripple/rippled
-    Copyright (c) 2020 Ripple Labs Inc.
-
-    Permission to use, copy, modify, and/or distribute this software for any
-    purpose with or without fee is hereby granted, provided that the above
-    copyright notice and this permission notice appear in all copies.
-
-    THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
-    WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
-    MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
-    ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
-    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
-    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
-    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-*/ -//============================================================================== - -#ifndef RIPPLE_APP_REPORTING_ETLHELPERS_H_INCLUDED -#define RIPPLE_APP_REPORTING_ETLHELPERS_H_INCLUDED -#include -#include -#include -#include -#include -#include -#include - -namespace ripple { - -/// This datastructure is used to keep track of the sequence of the most recent -/// ledger validated by the network. There are two methods that will wait until -/// certain conditions are met. This datastructure is able to be "stopped". When -/// the datastructure is stopped, any threads currently waiting are unblocked. -/// Any later calls to methods of this datastructure will not wait. Once the -/// datastructure is stopped, the datastructure remains stopped for the rest of -/// its lifetime. -class NetworkValidatedLedgers -{ - // max sequence validated by network - std::optional max_; - - mutable std::mutex m_; - - mutable std::condition_variable cv_; - - bool stopping_ = false; - -public: - /// Notify the datastructure that idx has been validated by the network - /// @param idx sequence validated by network - void - push(uint32_t idx) - { - std::lock_guard lck(m_); - if (!max_ || idx > *max_) - max_ = idx; - cv_.notify_all(); - } - - /// Get most recently validated sequence. If no ledgers are known to have - /// been validated, this function waits until the next ledger is validated - /// @return sequence of most recently validated ledger. empty optional if - /// the datastructure has been stopped - std::optional - getMostRecent() const - { - std::unique_lock lck(m_); - cv_.wait(lck, [this]() { return max_ || stopping_; }); - return max_; - } - - /// Get most recently validated sequence. - /// @return sequence of most recently validated ledger, or empty optional - /// if no ledgers are known to have been validated. - std::optional - tryGetMostRecent() const - { - std::unique_lock lk(m_); - return max_; - } - - /// Waits for the sequence to be validated by the network - /// @param sequence to wait for - /// @return true if sequence was validated, false otherwise - /// a return value of false means the datastructure has been stopped - bool - waitUntilValidatedByNetwork(uint32_t sequence) - { - std::unique_lock lck(m_); - cv_.wait(lck, [sequence, this]() { - return (max_ && sequence <= *max_) || stopping_; - }); - return !stopping_; - } - - /// Puts the datastructure in the stopped state - /// Future calls to this datastructure will not block - /// This operation cannot be reversed - void - stop() - { - std::lock_guard lck(m_); - stopping_ = true; - cv_.notify_all(); - } -}; - -/// Generic thread-safe queue with an optional maximum size -/// Note, we can't use a lockfree queue here, since we need the ability to wait -/// for an element to be added or removed from the queue. These waits are -/// blocking calls. -template -class ThreadSafeQueue -{ - std::queue queue_; - - mutable std::mutex m_; - std::condition_variable cv_; - std::optional maxSize_; - -public: - /// @param maxSize maximum size of the queue. 
Calls that would cause the - /// queue to exceed this size will block until free space is available - explicit ThreadSafeQueue(uint32_t maxSize) : maxSize_(maxSize) - { - } - - /// Create a queue with no maximum size - ThreadSafeQueue() = default; - - /// @param elt element to push onto queue - /// if maxSize is set, this method will block until free space is available - void - push(T const& elt) - { - std::unique_lock lck(m_); - // if queue has a max size, wait until not full - if (maxSize_) - cv_.wait(lck, [this]() { return queue_.size() <= *maxSize_; }); - queue_.push(elt); - cv_.notify_all(); - } - - /// @param elt element to push onto queue. elt is moved from - /// if maxSize is set, this method will block until free space is available - void - push(T&& elt) - { - std::unique_lock lck(m_); - // if queue has a max size, wait until not full - if (maxSize_) - cv_.wait(lck, [this]() { return queue_.size() <= *maxSize_; }); - queue_.push(std::move(elt)); - cv_.notify_all(); - } - - /// @return element popped from queue. Will block until queue is non-empty - T - pop() - { - std::unique_lock lck(m_); - cv_.wait(lck, [this]() { return !queue_.empty(); }); - T ret = std::move(queue_.front()); - queue_.pop(); - // if queue has a max size, unblock any possible pushers - if (maxSize_) - cv_.notify_all(); - return ret; - } -}; - -/// Parititions the uint256 keyspace into numMarkers partitions, each of equal -/// size. -inline std::vector -getMarkers(size_t numMarkers) -{ - assert(numMarkers <= 256); - - unsigned char incr = 256 / numMarkers; - - std::vector markers; - markers.reserve(numMarkers); - uint256 base{0}; - for (size_t i = 0; i < numMarkers; ++i) - { - markers.push_back(base); - base.data()[0] += incr; - } - return markers; -} - -} // namespace ripple -#endif diff --git a/src/xrpld/app/reporting/ETLSource.cpp b/src/xrpld/app/reporting/ETLSource.cpp deleted file mode 100644 index 8a181c62b1d..00000000000 --- a/src/xrpld/app/reporting/ETLSource.cpp +++ /dev/null @@ -1,982 +0,0 @@ -//------------------------------------------------------------------------------ -/* - This file is part of rippled: https://github.com/ripple/rippled - Copyright (c) 2020 Ripple Labs Inc. - - Permission to use, copy, modify, and/or distribute this software for any - purpose with or without fee is hereby granted, provided that the above - copyright notice and this permission notice appear in all copies. - - THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
-*/ -//============================================================================== - -#include -#include -#include -#include -#include - -namespace ripple { - -// Create ETL source without grpc endpoint -// Fetch ledger and load initial ledger will fail for this source -// Primarily used in read-only mode, to monitor when ledgers are validated -ETLSource::ETLSource(std::string ip, std::string wsPort, ReportingETL& etl) - : ip_(ip) - , wsPort_(wsPort) - , etl_(etl) - , ioc_(etl.getApplication().getIOService()) - , ws_(std::make_unique< - boost::beast::websocket::stream>( - boost::asio::make_strand(ioc_))) - , resolver_(boost::asio::make_strand(ioc_)) - , networkValidatedLedgers_(etl_.getNetworkValidatedLedgers()) - , journal_(etl_.getApplication().journal("ReportingETL::ETLSource")) - , app_(etl_.getApplication()) - , timer_(ioc_) -{ -} - -ETLSource::ETLSource( - std::string ip, - std::string wsPort, - std::string grpcPort, - ReportingETL& etl) - : ip_(ip) - , wsPort_(wsPort) - , grpcPort_(grpcPort) - , etl_(etl) - , ioc_(etl.getApplication().getIOService()) - , ws_(std::make_unique< - boost::beast::websocket::stream>( - boost::asio::make_strand(ioc_))) - , resolver_(boost::asio::make_strand(ioc_)) - , networkValidatedLedgers_(etl_.getNetworkValidatedLedgers()) - , journal_(etl_.getApplication().journal("ReportingETL::ETLSource")) - , app_(etl_.getApplication()) - , timer_(ioc_) -{ - std::string connectionString; - try - { - connectionString = - beast::IP::Endpoint( - boost::asio::ip::make_address(ip_), std::stoi(grpcPort_)) - .to_string(); - - JLOG(journal_.info()) - << "Using IP to connect to ETL source: " << connectionString; - } - catch (std::exception const&) - { - connectionString = "dns:" + ip_ + ":" + grpcPort_; - JLOG(journal_.info()) - << "Using DNS to connect to ETL source: " << connectionString; - } - try - { - stub_ = org::xrpl::rpc::v1::XRPLedgerAPIService::NewStub( - grpc::CreateChannel( - connectionString, grpc::InsecureChannelCredentials())); - JLOG(journal_.info()) << "Made stub for remote = " << toString(); - } - catch (std::exception const& e) - { - JLOG(journal_.error()) << "Exception while creating stub = " << e.what() - << " . Remote = " << toString(); - } -} - -void -ETLSource::reconnect(boost::beast::error_code ec) -{ - connected_ = false; - // These are somewhat normal errors. operation_aborted occurs on shutdown, - // when the timer is cancelled. connection_refused will occur repeatedly - // if we cannot connect to the transaction processing process - if (ec != boost::asio::error::operation_aborted && - ec != boost::asio::error::connection_refused) - { - JLOG(journal_.error()) << __func__ << " : " - << "error code = " << ec << " - " << toString(); - } - else - { - JLOG(journal_.warn()) << __func__ << " : " - << "error code = " << ec << " - " << toString(); - } - - if (etl_.isStopping()) - { - JLOG(journal_.debug()) << __func__ << " : " << toString() - << " - etl is stopping. 
aborting reconnect"; - return; - } - - // exponentially increasing timeouts, with a max of 30 seconds - size_t waitTime = std::min(pow(2, numFailures_), 30.0); - numFailures_++; - timer_.expires_after(boost::asio::chrono::seconds(waitTime)); - timer_.async_wait([this, fname = __func__](auto ec) { - bool startAgain = (ec != boost::asio::error::operation_aborted); - JLOG(journal_.trace()) << fname << " async_wait : ec = " << ec; - close(startAgain); - }); -} - -void -ETLSource::close(bool startAgain) -{ - timer_.cancel(); - ioc_.post([this, startAgain]() { - if (closing_) - return; - - if (ws_->is_open()) - { - // onStop() also calls close(). If the async_close is called twice, - // an assertion fails. Using closing_ makes sure async_close is only - // called once - closing_ = true; - ws_->async_close( - boost::beast::websocket::close_code::normal, - [this, startAgain, fname = __func__](auto ec) { - if (ec) - { - JLOG(journal_.error()) - << fname << " async_close : " - << "error code = " << ec << " - " << toString(); - } - closing_ = false; - if (startAgain) - start(); - }); - } - else if (startAgain) - { - start(); - } - }); -} - -void -ETLSource::start() -{ - JLOG(journal_.trace()) << __func__ << " : " << toString(); - - auto const host = ip_; - auto const port = wsPort_; - - resolver_.async_resolve( - host, port, [this](auto ec, auto results) { onResolve(ec, results); }); -} - -void -ETLSource::onResolve( - boost::beast::error_code ec, - boost::asio::ip::tcp::resolver::results_type results) -{ - JLOG(journal_.trace()) << __func__ << " : ec = " << ec << " - " - << toString(); - if (ec) - { - // try again - reconnect(ec); - } - else - { - boost::beast::get_lowest_layer(*ws_).expires_after( - std::chrono::seconds(30)); - boost::beast::get_lowest_layer(*ws_).async_connect( - results, [this](auto ec, auto ep) { onConnect(ec, ep); }); - } -} - -void -ETLSource::onConnect( - boost::beast::error_code ec, - boost::asio::ip::tcp::resolver::results_type::endpoint_type endpoint) -{ - JLOG(journal_.trace()) << __func__ << " : ec = " << ec << " - " - << toString(); - if (ec) - { - // start over - reconnect(ec); - } - else - { - numFailures_ = 0; - // Turn off timeout on the tcp stream, because websocket stream has it's - // own timeout system - boost::beast::get_lowest_layer(*ws_).expires_never(); - - // Set suggested timeout settings for the websocket - ws_->set_option( - boost::beast::websocket::stream_base::timeout::suggested( - boost::beast::role_type::client)); - - // Set a decorator to change the User-Agent of the handshake - ws_->set_option(boost::beast::websocket::stream_base::decorator( - [](boost::beast::websocket::request_type& req) { - req.set( - boost::beast::http::field::user_agent, - std::string(BOOST_BEAST_VERSION_STRING) + - " websocket-client-async"); - })); - - // Update the host_ string. This will provide the value of the - // Host HTTP header during the WebSocket handshake. 
- // See https://tools.ietf.org/html/rfc7230#section-5.4 - auto host = ip_ + ':' + std::to_string(endpoint.port()); - // Perform the websocket handshake - ws_->async_handshake(host, "/", [this](auto ec) { onHandshake(ec); }); - } -} - -void -ETLSource::onHandshake(boost::beast::error_code ec) -{ - JLOG(journal_.trace()) << __func__ << " : ec = " << ec << " - " - << toString(); - if (ec) - { - // start over - reconnect(ec); - } - else - { - Json::Value jv; - jv["command"] = "subscribe"; - - jv["streams"] = Json::arrayValue; - Json::Value ledgerStream("ledger"); - jv["streams"].append(ledgerStream); - Json::Value txnStream("transactions_proposed"); - jv["streams"].append(txnStream); - Json::Value validationStream("validations"); - jv["streams"].append(validationStream); - Json::Value manifestStream("manifests"); - jv["streams"].append(manifestStream); - Json::FastWriter fastWriter; - - JLOG(journal_.trace()) << "Sending subscribe stream message"; - // Send the message - ws_->async_write( - boost::asio::buffer(fastWriter.write(jv)), - [this](auto ec, size_t size) { onWrite(ec, size); }); - } -} - -void -ETLSource::onWrite(boost::beast::error_code ec, size_t bytesWritten) -{ - JLOG(journal_.trace()) << __func__ << " : ec = " << ec << " - " - << toString(); - if (ec) - { - // start over - reconnect(ec); - } - else - { - ws_->async_read( - readBuffer_, [this](auto ec, size_t size) { onRead(ec, size); }); - } -} - -void -ETLSource::onRead(boost::beast::error_code ec, size_t size) -{ - JLOG(journal_.trace()) << __func__ << " : ec = " << ec << " - " - << toString(); - // if error or error reading message, start over - if (ec) - { - reconnect(ec); - } - else - { - handleMessage(); - boost::beast::flat_buffer buffer; - swap(readBuffer_, buffer); - - JLOG(journal_.trace()) - << __func__ << " : calling async_read - " << toString(); - ws_->async_read( - readBuffer_, [this](auto ec, size_t size) { onRead(ec, size); }); - } -} - -bool -ETLSource::handleMessage() -{ - JLOG(journal_.trace()) << __func__ << " : " << toString(); - - setLastMsgTime(); - connected_ = true; - try - { - Json::Value response; - Json::Reader reader; - if (!reader.parse( - static_cast(readBuffer_.data().data()), response)) - { - JLOG(journal_.error()) - << __func__ << " : " - << "Error parsing stream message." - << " Message = " << readBuffer_.data().data(); - return false; - } - - uint32_t ledgerIndex = 0; - if (response.isMember("result")) - { - if (response["result"].isMember(jss::ledger_index)) - { - ledgerIndex = response["result"][jss::ledger_index].asUInt(); - } - if (response[jss::result].isMember(jss::validated_ledgers)) - { - setValidatedRange( - response[jss::result][jss::validated_ledgers].asString()); - } - JLOG(journal_.debug()) - << __func__ << " : " - << "Received a message on ledger " - << " subscription stream. 
Message : " - << response.toStyledString() << " - " << toString(); - } - else - { - if (etl_.getETLLoadBalancer().shouldPropagateStream(this)) - { - if (response.isMember(jss::transaction)) - { - etl_.getApplication().getOPs().forwardProposedTransaction( - response); - } - else if ( - response.isMember("type") && - response["type"] == "validationReceived") - { - etl_.getApplication().getOPs().forwardValidation(response); - } - else if ( - response.isMember("type") && - response["type"] == "manifestReceived") - { - etl_.getApplication().getOPs().forwardManifest(response); - } - } - - if (response.isMember("type") && response["type"] == "ledgerClosed") - { - JLOG(journal_.debug()) - << __func__ << " : " - << "Received a message on ledger " - << " subscription stream. Message : " - << response.toStyledString() << " - " << toString(); - if (response.isMember(jss::ledger_index)) - { - ledgerIndex = response[jss::ledger_index].asUInt(); - } - if (response.isMember(jss::validated_ledgers)) - { - setValidatedRange( - response[jss::validated_ledgers].asString()); - } - } - } - - if (ledgerIndex != 0) - { - JLOG(journal_.trace()) - << __func__ << " : " - << "Pushing ledger sequence = " << ledgerIndex << " - " - << toString(); - networkValidatedLedgers_.push(ledgerIndex); - } - return true; - } - catch (std::exception const& e) - { - JLOG(journal_.error()) << "Exception in handleMessage : " << e.what(); - return false; - } -} - -class AsyncCallData -{ - std::unique_ptr cur_; - std::unique_ptr next_; - - org::xrpl::rpc::v1::GetLedgerDataRequest request_; - std::unique_ptr context_; - - grpc::Status status_; - - unsigned char nextPrefix_; - - beast::Journal journal_; - -public: - AsyncCallData( - uint256& marker, - std::optional nextMarker, - uint32_t seq, - beast::Journal& j) - : journal_(j) - { - request_.mutable_ledger()->set_sequence(seq); - if (marker.isNonZero()) - { - request_.set_marker(marker.data(), marker.size()); - } - request_.set_user("ETL"); - nextPrefix_ = 0x00; - if (nextMarker) - nextPrefix_ = nextMarker->data()[0]; - - unsigned char prefix = marker.data()[0]; - - JLOG(journal_.debug()) - << "Setting up AsyncCallData. marker = " << strHex(marker) - << " . prefix = " << strHex(std::string(1, prefix)) - << " . nextPrefix_ = " << strHex(std::string(1, nextPrefix_)); - - assert(nextPrefix_ > prefix || nextPrefix_ == 0x00); - - cur_ = std::make_unique(); - - next_ = std::make_unique(); - - context_ = std::make_unique(); - } - - enum class CallStatus { MORE, DONE, ERRORED }; - CallStatus - process( - std::unique_ptr& stub, - grpc::CompletionQueue& cq, - ThreadSafeQueue>& queue, - bool abort = false) - { - JLOG(journal_.debug()) << "Processing calldata"; - if (abort) - { - JLOG(journal_.error()) << "AsyncCallData aborted"; - return CallStatus::ERRORED; - } - if (!status_.ok()) - { - JLOG(journal_.debug()) << "AsyncCallData status_ not ok: " - << " code = " << status_.error_code() - << " message = " << status_.error_message(); - return CallStatus::ERRORED; - } - if (!next_->is_unlimited()) - { - JLOG(journal_.warn()) - << "AsyncCallData is_unlimited is false. 
Make sure " - "secure_gateway is set correctly at the ETL source"; - assert(false); - } - - std::swap(cur_, next_); - - bool more = true; - - // if no marker returned, we are done - if (cur_->marker().size() == 0) - more = false; - - // if returned marker is greater than our end, we are done - unsigned char prefix = cur_->marker()[0]; - if (nextPrefix_ != 0x00 && prefix >= nextPrefix_) - more = false; - - // if we are not done, make the next async call - if (more) - { - request_.set_marker(std::move(cur_->marker())); - call(stub, cq); - } - - for (auto& obj : cur_->ledger_objects().objects()) - { - auto key = uint256::fromVoidChecked(obj.key()); - if (!key) - throw std::runtime_error("Received malformed object ID"); - - auto& data = obj.data(); - - SerialIter it{data.data(), data.size()}; - std::shared_ptr sle = std::make_shared(it, *key); - - queue.push(sle); - } - - return more ? CallStatus::MORE : CallStatus::DONE; - } - - void - call( - std::unique_ptr& stub, - grpc::CompletionQueue& cq) - { - context_ = std::make_unique(); - - std::unique_ptr> - rpc(stub->PrepareAsyncGetLedgerData(context_.get(), request_, &cq)); - - rpc->StartCall(); - - rpc->Finish(next_.get(), &status_, this); - } - - std::string - getMarkerPrefix() - { - if (next_->marker().size() == 0) - return ""; - else - return strHex(std::string{next_->marker().data()[0]}); - } -}; - -bool -ETLSource::loadInitialLedger( - uint32_t sequence, - ThreadSafeQueue>& writeQueue) -{ - if (!stub_) - return false; - - grpc::CompletionQueue cq; - - void* tag; - - bool ok = false; - - std::vector calls; - std::vector markers{getMarkers(etl_.getNumMarkers())}; - - for (size_t i = 0; i < markers.size(); ++i) - { - std::optional nextMarker; - if (i + 1 < markers.size()) - nextMarker = markers[i + 1]; - calls.emplace_back(markers[i], nextMarker, sequence, journal_); - } - - JLOG(journal_.debug()) << "Starting data download for ledger " << sequence - << ". Using source = " << toString(); - - for (auto& c : calls) - c.call(stub_, cq); - - size_t numFinished = 0; - bool abort = false; - while (numFinished < calls.size() && !etl_.isStopping() && - cq.Next(&tag, &ok)) - { - assert(tag); - - auto ptr = static_cast(tag); - - if (!ok) - { - JLOG(journal_.error()) << "loadInitialLedger - ok is false"; - return false; - // handle cancelled - } - else - { - JLOG(journal_.debug()) - << "Marker prefix = " << ptr->getMarkerPrefix(); - auto result = ptr->process(stub_, cq, writeQueue, abort); - if (result != AsyncCallData::CallStatus::MORE) - { - numFinished++; - JLOG(journal_.debug()) - << "Finished a marker. " - << "Current number of finished = " << numFinished; - } - if (result == AsyncCallData::CallStatus::ERRORED) - { - abort = true; - } - } - } - return !abort; -} - -std::pair -ETLSource::fetchLedger(uint32_t ledgerSequence, bool getObjects) -{ - org::xrpl::rpc::v1::GetLedgerResponse response; - if (!stub_) - return {{grpc::StatusCode::INTERNAL, "No Stub"}, response}; - - // ledger header with txns and metadata - org::xrpl::rpc::v1::GetLedgerRequest request; - grpc::ClientContext context; - request.mutable_ledger()->set_sequence(ledgerSequence); - request.set_transactions(true); - request.set_expand(true); - request.set_get_objects(getObjects); - request.set_user("ETL"); - grpc::Status status = stub_->GetLedger(&context, request, &response); - if (status.ok() && !response.is_unlimited()) - { - JLOG(journal_.warn()) << "ETLSource::fetchLedger - is_unlimited is " - "false. Make sure secure_gateway is set " - "correctly on the ETL source. 
source = " - << toString(); - assert(false); - } - return {status, std::move(response)}; -} - -ETLLoadBalancer::ETLLoadBalancer(ReportingETL& etl) - : etl_(etl) - , journal_(etl_.getApplication().journal("ReportingETL::LoadBalancer")) -{ -} - -void -ETLLoadBalancer::add( - std::string& host, - std::string& websocketPort, - std::string& grpcPort) -{ - std::unique_ptr ptr = - std::make_unique(host, websocketPort, grpcPort, etl_); - sources_.push_back(std::move(ptr)); - JLOG(journal_.info()) << __func__ << " : added etl source - " - << sources_.back()->toString(); -} - -void -ETLLoadBalancer::add(std::string& host, std::string& websocketPort) -{ - std::unique_ptr ptr = - std::make_unique(host, websocketPort, etl_); - sources_.push_back(std::move(ptr)); - JLOG(journal_.info()) << __func__ << " : added etl source - " - << sources_.back()->toString(); -} - -void -ETLLoadBalancer::loadInitialLedger( - uint32_t sequence, - ThreadSafeQueue>& writeQueue) -{ - execute( - [this, &sequence, &writeQueue](auto& source) { - bool res = source->loadInitialLedger(sequence, writeQueue); - if (!res) - { - JLOG(journal_.error()) << "Failed to download initial ledger. " - << " Sequence = " << sequence - << " source = " << source->toString(); - } - return res; - }, - sequence); -} - -std::optional -ETLLoadBalancer::fetchLedger(uint32_t ledgerSequence, bool getObjects) -{ - org::xrpl::rpc::v1::GetLedgerResponse response; - bool success = execute( - [&response, ledgerSequence, getObjects, this](auto& source) { - auto [status, data] = - source->fetchLedger(ledgerSequence, getObjects); - response = std::move(data); - if (status.ok() && response.validated()) - { - JLOG(journal_.info()) - << "Successfully fetched ledger = " << ledgerSequence - << " from source = " << source->toString(); - return true; - } - else - { - JLOG(journal_.warn()) - << "Error getting ledger = " << ledgerSequence - << " Reply : " << response.DebugString() - << " error_code : " << status.error_code() - << " error_msg : " << status.error_message() - << " source = " << source->toString(); - return false; - } - }, - ledgerSequence); - if (success) - return response; - else - return {}; -} - -std::unique_ptr -ETLLoadBalancer::getP2pForwardingStub() const -{ - if (sources_.size() == 0) - return nullptr; - srand((unsigned)time(0)); - auto sourceIdx = rand() % sources_.size(); - auto numAttempts = 0; - while (numAttempts < sources_.size()) - { - auto stub = sources_[sourceIdx]->getP2pForwardingStub(); - if (!stub) - { - sourceIdx = (sourceIdx + 1) % sources_.size(); - ++numAttempts; - continue; - } - return stub; - } - return nullptr; -} - -Json::Value -ETLLoadBalancer::forwardToP2p(RPC::JsonContext& context) const -{ - Json::Value res; - if (sources_.size() == 0) - return res; - srand((unsigned)time(0)); - auto sourceIdx = rand() % sources_.size(); - auto numAttempts = 0; - - auto mostRecent = etl_.getNetworkValidatedLedgers().tryGetMostRecent(); - while (numAttempts < sources_.size()) - { - auto increment = [&]() { - sourceIdx = (sourceIdx + 1) % sources_.size(); - ++numAttempts; - }; - auto& src = sources_[sourceIdx]; - if (mostRecent && !src->hasLedger(*mostRecent)) - { - increment(); - continue; - } - res = src->forwardToP2p(context); - if (!res.isMember("forwarded") || res["forwarded"] != true) - { - increment(); - continue; - } - return res; - } - RPC::Status err = {rpcFAILED_TO_FORWARD}; - err.inject(res); - return res; -} - -std::unique_ptr -ETLSource::getP2pForwardingStub() const -{ - if (!connected_) - return nullptr; - try - { - return 
org::xrpl::rpc::v1::XRPLedgerAPIService::NewStub( - grpc::CreateChannel( - beast::IP::Endpoint( - boost::asio::ip::make_address(ip_), std::stoi(grpcPort_)) - .to_string(), - grpc::InsecureChannelCredentials())); - } - catch (std::exception const&) - { - JLOG(journal_.error()) << "Failed to create grpc stub"; - return nullptr; - } -} - -Json::Value -ETLSource::forwardToP2p(RPC::JsonContext& context) const -{ - JLOG(journal_.debug()) << "Attempting to forward request to tx. " - << "request = " << context.params.toStyledString(); - - Json::Value response; - if (!connected_) - { - JLOG(journal_.error()) - << "Attempted to proxy but failed to connect to tx"; - return response; - } - namespace beast = boost::beast; // from - namespace http = beast::http; // from - namespace websocket = beast::websocket; // from - namespace net = boost::asio; // from - using tcp = boost::asio::ip::tcp; // from - Json::Value& request = context.params; - try - { - // The io_context is required for all I/O - net::io_context ioc; - - // These objects perform our I/O - tcp::resolver resolver{ioc}; - - JLOG(journal_.debug()) << "Creating websocket"; - auto ws = std::make_unique>(ioc); - - // Look up the domain name - auto const results = resolver.resolve(ip_, wsPort_); - - JLOG(journal_.debug()) << "Connecting websocket"; - // Make the connection on the IP address we get from a lookup - net::connect(ws->next_layer(), results.begin(), results.end()); - - // Set a decorator to change the User-Agent of the handshake - // and to tell rippled to charge the client IP for RPC - // resources. See "secure_gateway" in - // https://github.com/ripple/rippled/blob/develop/cfg/rippled-example.cfg - ws->set_option(websocket::stream_base::decorator( - [&context](websocket::request_type& req) { - req.set( - http::field::user_agent, - std::string(BOOST_BEAST_VERSION_STRING) + - " websocket-client-coro"); - req.set( - http::field::forwarded, - "for=" + context.consumer.to_string()); - })); - JLOG(journal_.debug()) << "client ip: " << context.consumer.to_string(); - - JLOG(journal_.debug()) << "Performing websocket handshake"; - // Perform the websocket handshake - ws->handshake(ip_, "/"); - - Json::FastWriter fastWriter; - - JLOG(journal_.debug()) << "Sending request"; - // Send the message - ws->write(net::buffer(fastWriter.write(request))); - - beast::flat_buffer buffer; - ws->read(buffer); - - Json::Reader reader; - if (!reader.parse( - static_cast(buffer.data().data()), response)) - { - JLOG(journal_.error()) << "Error parsing response"; - response[jss::error] = "Error parsing response from tx"; - } - JLOG(journal_.debug()) << "Successfully forward request"; - - response["forwarded"] = true; - return response; - } - catch (std::exception const& e) - { - JLOG(journal_.error()) << "Encountered exception : " << e.what(); - return response; - } -} - -template -bool -ETLLoadBalancer::execute(Func f, uint32_t ledgerSequence) -{ - srand((unsigned)time(0)); - auto sourceIdx = rand() % sources_.size(); - auto numAttempts = 0; - - while (!etl_.isStopping()) - { - auto& source = sources_[sourceIdx]; - - JLOG(journal_.debug()) - << __func__ << " : " - << "Attempting to execute func. 
ledger sequence = " - << ledgerSequence << " - source = " << source->toString(); - if (source->hasLedger(ledgerSequence)) - { - bool res = f(source); - if (res) - { - JLOG(journal_.debug()) - << __func__ << " : " - << "Successfully executed func at source = " - << source->toString() - << " - ledger sequence = " << ledgerSequence; - break; - } - else - { - JLOG(journal_.warn()) - << __func__ << " : " - << "Failed to execute func at source = " - << source->toString() - << " - ledger sequence = " << ledgerSequence; - } - } - else - { - JLOG(journal_.warn()) - << __func__ << " : " - << "Ledger not present at source = " << source->toString() - << " - ledger sequence = " << ledgerSequence; - } - sourceIdx = (sourceIdx + 1) % sources_.size(); - numAttempts++; - if (numAttempts % sources_.size() == 0) - { - // If another process loaded the ledger into the database, we can - // abort trying to fetch the ledger from a transaction processing - // process - if (etl_.getApplication().getLedgerMaster().getLedgerBySeq( - ledgerSequence)) - { - JLOG(journal_.warn()) - << __func__ << " : " - << "Error executing function. " - << " Tried all sources, but ledger was found in db." - << " Sequence = " << ledgerSequence; - return false; - } - JLOG(journal_.error()) - << __func__ << " : " - << "Error executing function " - << " - ledger sequence = " << ledgerSequence - << " - Tried all sources. Sleeping and trying again"; - std::this_thread::sleep_for(std::chrono::seconds(2)); - } - } - return !etl_.isStopping(); -} - -void -ETLLoadBalancer::start() -{ - for (auto& source : sources_) - source->start(); -} - -void -ETLLoadBalancer::stop() -{ - for (auto& source : sources_) - source->stop(); -} - -} // namespace ripple diff --git a/src/xrpld/app/reporting/ETLSource.h b/src/xrpld/app/reporting/ETLSource.h deleted file mode 100644 index 633b72afac1..00000000000 --- a/src/xrpld/app/reporting/ETLSource.h +++ /dev/null @@ -1,435 +0,0 @@ -//------------------------------------------------------------------------------ -/* - This file is part of rippled: https://github.com/ripple/rippled - Copyright (c) 2020 Ripple Labs Inc. - - Permission to use, copy, modify, and/or distribute this software for any - purpose with or without fee is hereby granted, provided that the above - copyright notice and this permission notice appear in all copies. - - THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. -*/ -//============================================================================== - -#ifndef RIPPLE_APP_REPORTING_ETLSOURCE_H_INCLUDED -#define RIPPLE_APP_REPORTING_ETLSOURCE_H_INCLUDED -#include -#include -#include -#include -#include - -#include -#include -#include -#include - -#include - -namespace ripple { - -class ReportingETL; - -/// This class manages a connection to a single ETL source. This is almost -/// always a p2p node, but really could be another reporting node. This class -/// subscribes to the ledgers and transactions_proposed streams of the -/// associated p2p node, and keeps track of which ledgers the p2p node has. 
This
-/// class also has methods for extracting said ledgers. Lastly, this class
-/// forwards transactions received on the transactions_proposed streams to any
-/// subscribers.
-class ETLSource
-{
-    std::string ip_;
-
-    std::string wsPort_;
-
-    std::string grpcPort_;
-
-    ReportingETL& etl_;
-
-    // a reference to the application's io_service
-    boost::asio::io_context& ioc_;
-
-    std::unique_ptr stub_;
-
-    std::unique_ptr>
-        ws_;
-    boost::asio::ip::tcp::resolver resolver_;
-
-    boost::beast::flat_buffer readBuffer_;
-
-    std::vector> validatedLedgers_;
-
-    std::string validatedLedgersRaw_;
-
-    NetworkValidatedLedgers& networkValidatedLedgers_;
-
-    beast::Journal journal_;
-
-    Application& app_;
-
-    mutable std::mutex mtx_;
-
-    size_t numFailures_ = 0;
-
-    std::atomic_bool closing_ = false;
-
-    std::atomic_bool connected_ = false;
-
-    // true if this ETL source is forwarding transactions received on the
-    // transactions_proposed stream. There are usually multiple ETL sources,
-    // so to avoid forwarding the same transaction multiple times, we only
-    // forward from one particular ETL source at a time.
-    std::atomic_bool forwardingStream_ = false;
-
-    // The last time a message was received on the ledgers stream
-    std::chrono::system_clock::time_point lastMsgTime_;
-    mutable std::mutex lastMsgTimeMtx_;
-
-    // used for retrying connections
-    boost::asio::steady_timer timer_;
-
-public:
-    bool
-    isConnected() const
-    {
-        return connected_;
-    }
-
-    std::chrono::system_clock::time_point
-    getLastMsgTime() const
-    {
-        std::lock_guard lck(lastMsgTimeMtx_);
-        return lastMsgTime_;
-    }
-
-    void
-    setLastMsgTime()
-    {
-        std::lock_guard lck(lastMsgTimeMtx_);
-        lastMsgTime_ = std::chrono::system_clock::now();
-    }
-
-    /// Create ETL source without gRPC endpoint
-    /// Fetching ledgers and loading the initial ledger will fail for this
-    /// source. Primarily used in read-only mode, to monitor when ledgers are
-    /// validated
-    ETLSource(std::string ip, std::string wsPort, ReportingETL& etl);
-
-    /// Create ETL source with gRPC endpoint
-    ETLSource(
-        std::string ip,
-        std::string wsPort,
-        std::string grpcPort,
-        ReportingETL& etl);
-
-    /// @param sequence ledger sequence to check for
-    /// @return true if this source has the desired ledger
-    bool
-    hasLedger(uint32_t sequence) const
-    {
-        std::lock_guard lck(mtx_);
-        for (auto& pair : validatedLedgers_)
-        {
-            if (sequence >= pair.first && sequence <= pair.second)
-            {
-                return true;
-            }
-            else if (sequence < pair.first)
-            {
-                // validatedLedgers_ is a sorted list of disjoint ranges
-                // if the sequence comes before this range, the sequence will
-                // come before all subsequent ranges
-                return false;
-            }
-        }
-        return false;
-    }
-
-    /// Process the validated range received on the ledgers stream. Set the
-    /// appropriate member variable
-    /// @param range validated range received on ledgers stream
-    void
-    setValidatedRange(std::string const& range)
-    {
-        std::vector> pairs;
-        std::vector ranges;
-        boost::split(ranges, range, boost::is_any_of(","));
-        for (auto& pair : ranges)
-        {
-            std::vector minAndMax;
-
-            boost::split(minAndMax, pair, boost::is_any_of("-"));
-
-            if (minAndMax.size() == 1)
-            {
-                uint32_t sequence = std::stoll(minAndMax[0]);
-                pairs.push_back(std::make_pair(sequence, sequence));
-            }
-            else
-            {
-                assert(minAndMax.size() == 2);
-                uint32_t min = std::stoll(minAndMax[0]);
-                uint32_t max = std::stoll(minAndMax[1]);
-                pairs.push_back(std::make_pair(min, max));
-            }
-        }
-        std::sort(pairs.begin(), pairs.end(), [](auto left, auto right) {
-            return left.first < right.first;
-        });
-
-        // we only hold the lock here, to avoid blocking during string
-        // processing
-        std::lock_guard lck(mtx_);
-        validatedLedgers_ = std::move(pairs);
-        validatedLedgersRaw_ = range;
-    }
-
-    /// @return the validated range of this source
-    /// @note this is only used by server_info
-    std::string
-    getValidatedRange() const
-    {
-        std::lock_guard lck(mtx_);
-
-        return validatedLedgersRaw_;
-    }
-
-    /// Close the underlying websocket
-    void
-    stop()
-    {
-        JLOG(journal_.debug()) << __func__ << " : "
-                               << "Closing websocket";
-
-        assert(ws_);
-        close(false);
-    }
-
-    /// Fetch the specified ledger
-    /// @param ledgerSequence sequence of the ledger to fetch
-    /// @param getObjects whether to get the account state diff between this
-    /// ledger and the prior one
-    /// @return the extracted data and the result status
-    std::pair
-    fetchLedger(uint32_t ledgerSequence, bool getObjects = true);
-
-    std::string
-    toString() const
-    {
-        return "{ validated_ledger : " + getValidatedRange() +
-            " , ip : " + ip_ + " , web socket port : " + wsPort_ +
-            ", grpc port : " + grpcPort_ + " }";
-    }
-
-    Json::Value
-    toJson() const
-    {
-        Json::Value result(Json::objectValue);
-        result["connected"] = connected_.load();
-        result["validated_ledgers_range"] = getValidatedRange();
-        result["ip"] = ip_;
-        result["websocket_port"] = wsPort_;
-        result["grpc_port"] = grpcPort_;
-        auto last = getLastMsgTime();
-        if (last.time_since_epoch().count() != 0)
-            result["last_message_arrival_time"] =
-                to_string(std::chrono::floor(last));
-        return result;
-    }
-
-    /// Download a ledger in full
-    /// @param ledgerSequence sequence of the ledger to download
-    /// @param writeQueue queue to push downloaded ledger objects
-    /// @return true if the download was successful
-    bool
-    loadInitialLedger(
-        uint32_t ledgerSequence,
-        ThreadSafeQueue>& writeQueue);
-
-    /// Begin sequence of operations to connect to the ETL source and subscribe
-    /// to ledgers and transactions_proposed
-    void
-    start();
-
-    /// Attempt to reconnect to the ETL source
-    void
-    reconnect(boost::beast::error_code ec);
-
-    /// Callback
-    void
-    onResolve(
-        boost::beast::error_code ec,
-        boost::asio::ip::tcp::resolver::results_type results);
-
-    /// Callback
-    void
-    onConnect(
-        boost::beast::error_code ec,
-        boost::asio::ip::tcp::resolver::results_type::endpoint_type endpoint);
-
-    /// Callback
-    void
-    onHandshake(boost::beast::error_code ec);
-
-    /// Callback
-    void
-    onWrite(boost::beast::error_code ec, size_t size);
-
-    /// Callback
-    void
-    onRead(boost::beast::error_code ec, size_t size);
-
-    /// Handle the most recently received message
-    /// @return true if the message was handled successfully.
false on error - bool - handleMessage(); - - /// Close the websocket - /// @param startAgain whether to reconnect - void - close(bool startAgain); - - /// Get grpc stub to forward requests to p2p node - /// @return stub to send requests to ETL source - std::unique_ptr - getP2pForwardingStub() const; - - /// Forward a JSON RPC request to a p2p node - /// @param context context of RPC request - /// @return response received from ETL source - Json::Value - forwardToP2p(RPC::JsonContext& context) const; -}; - -/// This class is used to manage connections to transaction processing processes -/// This class spawns a listener for each etl source, which listens to messages -/// on the ledgers stream (to keep track of which ledgers have been validated by -/// the network, and the range of ledgers each etl source has). This class also -/// allows requests for ledger data to be load balanced across all possible etl -/// sources. -class ETLLoadBalancer -{ -private: - ReportingETL& etl_; - - beast::Journal journal_; - - std::vector> sources_; - -public: - ETLLoadBalancer(ReportingETL& etl); - - /// Add an ETL source - /// @param host host or ip of ETL source - /// @param websocketPort port where ETL source accepts websocket connections - /// @param grpcPort port where ETL source accepts gRPC requests - void - add(std::string& host, std::string& websocketPort, std::string& grpcPort); - - /// Add an ETL source without gRPC support. This source will send messages - /// on the ledgers and transactions_proposed streams, but will not be able - /// to handle the gRPC requests that are used for ETL - /// @param host host or ip of ETL source - /// @param websocketPort port where ETL source accepts websocket connections - void - add(std::string& host, std::string& websocketPort); - - /// Load the initial ledger, writing data to the queue - /// @param sequence sequence of ledger to download - /// @param writeQueue queue to push downloaded data to - void - loadInitialLedger( - uint32_t sequence, - ThreadSafeQueue>& writeQueue); - - /// Fetch data for a specific ledger. This function will continuously try - /// to fetch data for the specified ledger until the fetch succeeds, the - /// ledger is found in the database, or the server is shutting down. - /// @param ledgerSequence sequence of ledger to fetch data for - /// @param getObjects if true, fetch diff between specified ledger and - /// previous - /// @return the extracted data, if extraction was successful. If the ledger - /// was found in the database or the server is shutting down, the optional - /// will be empty - std::optional - fetchLedger(uint32_t ledgerSequence, bool getObjects); - - /// Setup all of the ETL sources and subscribe to the necessary streams - void - start(); - - void - stop(); - - /// Determine whether messages received on the transactions_proposed stream - /// should be forwarded to subscribing clients. The server subscribes to - /// transactions_proposed, validations, and manifests on multiple - /// ETLSources, yet only forwards messages from one source at any given time - /// (to avoid sending duplicate messages to clients). 
- /// @param in ETLSource in question - /// @return true if messages should be forwarded - bool - shouldPropagateStream(ETLSource* in) const - { - for (auto& src : sources_) - { - assert(src); - // We pick the first ETLSource encountered that is connected - if (src->isConnected()) - { - if (src.get() == in) - return true; - else - return false; - } - } - - // If no sources connected, then this stream has not been forwarded. - return true; - } - - Json::Value - toJson() const - { - Json::Value ret(Json::arrayValue); - for (auto& src : sources_) - { - ret.append(src->toJson()); - } - return ret; - } - - /// Randomly select a p2p node to forward a gRPC request to - /// @return gRPC stub to forward requests to p2p node - std::unique_ptr - getP2pForwardingStub() const; - - /// Forward a JSON RPC request to a randomly selected p2p node - /// @param context context of the request - /// @return response received from p2p node - Json::Value - forwardToP2p(RPC::JsonContext& context) const; - -private: - /// f is a function that takes an ETLSource as an argument and returns a - /// bool. Attempt to execute f for one randomly chosen ETLSource that has - /// the specified ledger. If f returns false, another randomly chosen - /// ETLSource is used. The process repeats until f returns true. - /// @param f function to execute. This function takes the ETL source as an - /// argument, and returns a bool. - /// @param ledgerSequence f is executed for each ETLSource that has this - /// ledger - /// @return true if f was eventually executed successfully. false if the - /// ledger was found in the database or the server is shutting down - template - bool - execute(Func f, uint32_t ledgerSequence); -}; - -} // namespace ripple -#endif diff --git a/src/xrpld/app/reporting/P2pProxy.cpp b/src/xrpld/app/reporting/P2pProxy.cpp deleted file mode 100644 index df699f4faf0..00000000000 --- a/src/xrpld/app/reporting/P2pProxy.cpp +++ /dev/null @@ -1,84 +0,0 @@ -//------------------------------------------------------------------------------ -/* - This file is part of rippled: https://github.com/ripple/rippled - Copyright (c) 2020 Ripple Labs Inc. - - Permission to use, copy, modify, and/or distribute this software for any - purpose with or without fee is hereby granted, provided that the above - copyright notice and this permission notice appear in all copies. - - THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
-*/ -//============================================================================== - -#include -#include -#include -#include - -namespace ripple { - -Json::Value -forwardToP2p(RPC::JsonContext& context) -{ - return context.app.getReportingETL().getETLLoadBalancer().forwardToP2p( - context); -} - -std::unique_ptr -getP2pForwardingStub(RPC::Context& context) -{ - return context.app.getReportingETL() - .getETLLoadBalancer() - .getP2pForwardingStub(); -} - -// We only forward requests where ledger_index is "current" or "closed" -// otherwise, attempt to handle here -bool -shouldForwardToP2p(RPC::JsonContext& context) -{ - if (!context.app.config().reporting()) - return false; - - Json::Value& params = context.params; - std::string strCommand = params.isMember(jss::command) - ? params[jss::command].asString() - : params[jss::method].asString(); - - JLOG(context.j.trace()) << "COMMAND:" << strCommand; - JLOG(context.j.trace()) << "REQUEST:" << params; - auto handler = RPC::getHandler( - context.apiVersion, context.app.config().BETA_RPC_API, strCommand); - if (!handler) - { - JLOG(context.j.error()) - << "Error getting handler. command = " << strCommand; - return false; - } - - if (handler->condition_ == RPC::NEEDS_CURRENT_LEDGER || - handler->condition_ == RPC::NEEDS_CLOSED_LEDGER) - { - return true; - } - - if (params.isMember(jss::ledger_index)) - { - auto indexValue = params[jss::ledger_index]; - if (indexValue.isString()) - { - auto index = indexValue.asString(); - return index == "current" || index == "closed"; - } - } - return false; -} - -} // namespace ripple diff --git a/src/xrpld/app/reporting/P2pProxy.h b/src/xrpld/app/reporting/P2pProxy.h deleted file mode 100644 index d49389f42d3..00000000000 --- a/src/xrpld/app/reporting/P2pProxy.h +++ /dev/null @@ -1,113 +0,0 @@ -//------------------------------------------------------------------------------ -/* - This file is part of rippled: https://github.com/ripple/rippled - Copyright (c) 2020 Ripple Labs Inc. - - Permission to use, copy, modify, and/or distribute this software for any - purpose with or without fee is hereby granted, provided that the above - copyright notice and this permission notice appear in all copies. - - THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
-*/ -//============================================================================== - -#ifndef RIPPLE_APP_REPORTING_P2PPROXY_H_INCLUDED -#define RIPPLE_APP_REPORTING_P2PPROXY_H_INCLUDED - -#include -#include -#include -#include - -#include -#include - -namespace ripple { -/// Forward a JSON request to a p2p node and return the response -/// @param context context of the request -/// @return response from p2p node -Json::Value -forwardToP2p(RPC::JsonContext& context); - -/// Whether a request should be forwarded, based on request parameters -/// @param context context of the request -/// @return true if should be forwarded -bool -shouldForwardToP2p(RPC::JsonContext& context); - -template -bool -needCurrentOrClosed(Request& request) -{ - // These are the only gRPC requests that specify a ledger - if constexpr ( - std::is_same::value || - std::is_same:: - value || - std::is_same::value) - { - if (request.ledger().ledger_case() == - org::xrpl::rpc::v1::LedgerSpecifier::LedgerCase::kShortcut) - { - if (request.ledger().shortcut() != - org::xrpl::rpc::v1::LedgerSpecifier::SHORTCUT_VALIDATED && - request.ledger().shortcut() != - org::xrpl::rpc::v1::LedgerSpecifier::SHORTCUT_UNSPECIFIED) - return true; - } - } - // GetLedgerDiff specifies two ledgers - else if constexpr (std::is_same< - Request, - org::xrpl::rpc::v1::GetLedgerDiffRequest>::value) - { - auto help = [](auto specifier) { - if (specifier.ledger_case() == - org::xrpl::rpc::v1::LedgerSpecifier::LedgerCase::kShortcut) - { - if (specifier.shortcut() != - org::xrpl::rpc::v1::LedgerSpecifier:: - SHORTCUT_VALIDATED && - specifier.shortcut() != - org::xrpl::rpc::v1::LedgerSpecifier:: - SHORTCUT_UNSPECIFIED) - return true; - } - return false; - }; - return help(request.base_ledger()) || help(request.desired_ledger()); - } - return false; -} - -/// Whether a request should be forwarded, based on request parameters -/// @param context context of the request -/// @condition required condition for the request -/// @return true if should be forwarded -template -bool -shouldForwardToP2p(RPC::GRPCContext& context, RPC::Condition condition) -{ - if (!context.app.config().reporting()) - return false; - if (condition == RPC::NEEDS_CURRENT_LEDGER || - condition == RPC::NEEDS_CLOSED_LEDGER) - return true; - - return needCurrentOrClosed(context.params); -} - -/// Get stub used to forward gRPC requests to a p2p node -/// @param context context of the request -/// @return stub to forward requests -std::unique_ptr -getP2pForwardingStub(RPC::Context& context); - -} // namespace ripple -#endif diff --git a/src/xrpld/app/reporting/README.md b/src/xrpld/app/reporting/README.md deleted file mode 100644 index f55b2d8d60d..00000000000 --- a/src/xrpld/app/reporting/README.md +++ /dev/null @@ -1,110 +0,0 @@ -# Reporting mode - -Reporting mode is a special operating mode of rippled, designed to handle RPCs -for validated data. A server running in reporting mode does not connect to the -p2p network, but rather extracts validated data from a node that is connected -to the p2p network. To run rippled in reporting mode, you must also run a -separate rippled node in p2p mode, to use as an ETL source. Multiple reporting -nodes can share access to the same network accessible databases (Postgres and -Cassandra); at any given time, only one reporting node will be performing ETL -and writing to the databases, while the others simply read from the databases. -A server running in reporting mode will forward any requests that require access -to the p2p network to a p2p node. 
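As an illustrative sketch, a reporting node's config wires this up roughly as follows. The section layout and key names match the config parsing in ReportingETL.cpp later in this patch ([reporting] lists the names of source sections; each source section supplies `source_ip`, `source_ws_port`, and optionally `source_grpc_port`); the section name, host, and ports here are placeholders:

```
[reporting]
etl_source
# read_only = 1 enables strict read-only mode

[etl_source]
source_ip = 127.0.0.1
source_ws_port = 6005
source_grpc_port = 50051
```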
-
-# Reporting ETL
-A single reporting node has one or more ETL sources, specified in the config
-file. A reporting node will subscribe to the "ledgers" stream of each of the ETL
-sources. This stream sends a message whenever a new ledger is validated. Upon
-receiving a message on the stream, reporting will then fetch the data associated
-with the newly validated ledger from one of the ETL sources. The fetch is
-performed via a gRPC request ("GetLedger"). This request returns the ledger
-header, transactions+metadata blobs, and every ledger object
-added/modified/deleted as part of this ledger. ETL then writes all of this data
-to the databases, and moves on to the next ledger. ETL does not apply
-transactions, but rather extracts the already computed results of those
-transactions (all of the added/modified/deleted SHAMap leaf nodes of the state
-tree). The new SHAMap inner nodes are computed by the ETL writer; this computation mainly
-involves manipulating child pointers and recomputing hashes, logic which is
-buried inside of SHAMap.
-
-If the database is entirely empty, ETL must download an entire ledger in full
-(as opposed to just the diff, as described above). This download is done via the
-"GetLedgerData" gRPC request. "GetLedgerData" allows clients to page through an
-entire ledger over several RPC calls. ETL will page through an entire ledger,
-and write each object to the database.
-
-If the database is not empty, the reporting node will first come up in a "soft"
-read-only mode. In read-only mode, the server does not perform ETL and simply
-publishes new ledgers as they are written to the database.
-If the database is not updated within a certain time period
-(currently hard-coded at 20 seconds), the reporting node will begin the ETL
-process and start writing to the database. Postgres will report an error when
-trying to write a record with a key that already exists. ETL uses this error to
-determine that another process is writing to the database, and subsequently
-falls back to a soft read-only mode. Reporting nodes can also operate in strict
-read-only mode, in which case they will never write to the database.
-
-# Database Nuances
-The database schema for reporting mode does not allow any history gaps.
-Attempting to write a ledger to a non-empty database where the previous ledger
-does not exist will return an error.
-
-The databases must be set up prior to running reporting mode. This requires
-creating the Postgres database, and setting up the Cassandra keyspace. Reporting
-mode will create the objects table in Cassandra if the table does not yet exist.
-
-Creating the Postgres database:
-```
-$ psql -h [host] -U [user]
-postgres=# create database [database];
-```
-Creating the keyspace:
-```
-$ cqlsh [host] [port]
-> CREATE KEYSPACE rippled WITH REPLICATION =
-  {'class' : 'SimpleStrategy', 'replication_factor' : 3 };
-```
-A replication factor of 3 is recommended. However, when running locally, only a
-replication factor of 1 is supported.
-
-Online delete is not supported by reporting mode and must be done manually. The
-easiest way to do this would be to set up a second Cassandra keyspace and
-Postgres database, bring up a single reporting mode instance that uses those
-databases, and start ETL at a ledger of your choosing (via --startReporting on
-the command line). Once this node is caught up, the other databases can be
-deleted.
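A minimal sketch of bringing up that second, temporary instance. The config path and start sequence are placeholders; `--startReporting` is the flag mentioned above, assumed here to take the starting ledger sequence, and the config file is assumed to point at the fresh databases:

```
$ rippled --conf /path/to/reporting-new.cfg --startReporting 60000000
```

Once it has caught up, truncate the old databases as shown next.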
- -To delete: -``` -$ psql -h [host] -U [user] -d [database] -reporting=$ truncate table ledgers cascade; -``` -``` -$ cqlsh [host] [port] -> truncate table objects; -``` -# Proxy -RPCs that require access to the p2p network and/or the open ledger are forwarded -from the reporting node to one of the ETL sources. The request is not processed -prior to forwarding, and the response is delivered as-is to the client. -Reporting will forward any requests that always require p2p/open ledger access -(fee and submit, for instance). In addition, any request that explicitly -requests data from the open or closed ledger (via setting -"ledger_index":"current" or "ledger_index":"closed"), will be forwarded to a -p2p node. - -For the stream "transactions_proposed" (AKA "rt_transactions"), reporting -subscribes to the "transactions_proposed" streams of each ETL source, and then -forwards those messages to any clients subscribed to the same stream on the -reporting node. A reporting node will subscribe to the stream on each ETL -source, but will only forward the messages from one of the streams at any given -time (to avoid sending the same message more than once to the same client). - -# API changes -A reporting node defaults to only returning validated data. If a ledger is not -specified, the most recently validated ledger is used. This is in contrast to -the normal rippled behavior, where the open ledger is used by default. - -Reporting will reject all subscribe requests for streams "server", "manifests", -"validations", "peer_status" and "consensus". - diff --git a/src/xrpld/app/reporting/ReportingETL.cpp b/src/xrpld/app/reporting/ReportingETL.cpp deleted file mode 100644 index 2f6411b0808..00000000000 --- a/src/xrpld/app/reporting/ReportingETL.cpp +++ /dev/null @@ -1,960 +0,0 @@ -//------------------------------------------------------------------------------ -/* - This file is part of rippled: https://github.com/ripple/rippled - Copyright (c) 2020 Ripple Labs Inc. - - Permission to use, copy, modify, and/or distribute this software for any - purpose with or without fee is hereby granted, provided that the above - copyright notice and this permission notice appear in all copies. - - THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
-*/ -//============================================================================== - -#include -#include - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -namespace ripple { - -namespace detail { -/// Convenience function for printing out basic ledger info -std::string -toString(LedgerInfo const& info) -{ - std::stringstream ss; - ss << "LedgerInfo { Sequence : " << info.seq - << " Hash : " << strHex(info.hash) << " TxHash : " << strHex(info.txHash) - << " AccountHash : " << strHex(info.accountHash) - << " ParentHash : " << strHex(info.parentHash) << " }"; - return ss.str(); -} -} // namespace detail - -void -ReportingETL::consumeLedgerData( - std::shared_ptr& ledger, - ThreadSafeQueue>& writeQueue) -{ - std::shared_ptr sle; - size_t num = 0; - while (!stopping_ && (sle = writeQueue.pop())) - { - assert(sle); - if (!ledger->exists(sle->key())) - ledger->rawInsert(sle); - - if (flushInterval_ != 0 && (num % flushInterval_) == 0) - { - JLOG(journal_.debug()) << "Flushing! key = " << strHex(sle->key()); - ledger->stateMap().flushDirty(hotACCOUNT_NODE); - } - ++num; - } -} - -std::vector -ReportingETL::insertTransactions( - std::shared_ptr& ledger, - org::xrpl::rpc::v1::GetLedgerResponse& data) -{ - std::vector accountTxData; - for (auto& txn : data.transactions_list().transactions()) - { - auto& raw = txn.transaction_blob(); - - SerialIter it{raw.data(), raw.size()}; - STTx sttx{it}; - - auto txSerializer = std::make_shared(sttx.getSerializer()); - - TxMeta txMeta{ - sttx.getTransactionID(), ledger->info().seq, txn.metadata_blob()}; - - auto metaSerializer = - std::make_shared(txMeta.getAsObject().getSerializer()); - - JLOG(journal_.trace()) - << __func__ << " : " - << "Inserting transaction = " << sttx.getTransactionID(); - uint256 nodestoreHash = ledger->rawTxInsertWithHash( - sttx.getTransactionID(), txSerializer, metaSerializer); - accountTxData.emplace_back(txMeta, std::move(nodestoreHash), journal_); - } - return accountTxData; -} - -std::shared_ptr -ReportingETL::loadInitialLedger(uint32_t startingSequence) -{ - // check that database is actually empty - auto ledger = std::const_pointer_cast( - app_.getLedgerMaster().getValidatedLedger()); - if (ledger) - { - JLOG(journal_.fatal()) << __func__ << " : " - << "Database is not empty"; - assert(false); - return {}; - } - - // fetch the ledger from the network. This function will not return until - // either the fetch is successful, or the server is being shutdown. This - // only fetches the ledger header and the transactions+metadata - std::optional ledgerData{ - fetchLedgerData(startingSequence)}; - if (!ledgerData) - return {}; - - LedgerInfo lgrInfo = - deserializeHeader(makeSlice(ledgerData->ledger_header()), true); - - JLOG(journal_.debug()) << __func__ << " : " - << "Deserialized ledger header. " - << detail::toString(lgrInfo); - - ledger = - std::make_shared(lgrInfo, app_.config(), app_.getNodeFamily()); - ledger->stateMap().clearSynching(); - ledger->txMap().clearSynching(); - -#ifdef RIPPLED_REPORTING - std::vector accountTxData = - insertTransactions(ledger, *ledgerData); -#endif - - auto start = std::chrono::system_clock::now(); - - ThreadSafeQueue> writeQueue; - std::thread asyncWriter{[this, &ledger, &writeQueue]() { - consumeLedgerData(ledger, writeQueue); - }}; - - // download the full account state map. This function downloads full ledger - // data and pushes the downloaded data into the writeQueue. 
asyncWriter - // consumes from the queue and inserts the data into the Ledger object. - // Once the below call returns, all data has been pushed into the queue - loadBalancer_.loadInitialLedger(startingSequence, writeQueue); - - // null is used to represent the end of the queue - std::shared_ptr null; - writeQueue.push(null); - // wait for the writer to finish - asyncWriter.join(); - - if (!stopping_) - { - flushLedger(ledger); - if (app_.config().reporting()) - { -#ifdef RIPPLED_REPORTING - dynamic_cast(&app_.getRelationalDatabase()) - ->writeLedgerAndTransactions(ledger->info(), accountTxData); -#endif - } - } - auto end = std::chrono::system_clock::now(); - JLOG(journal_.debug()) << "Time to download and store ledger = " - << ((end - start).count()) / 1000000000.0; - return ledger; -} - -void -ReportingETL::flushLedger(std::shared_ptr& ledger) -{ - JLOG(journal_.debug()) << __func__ << " : " - << "Flushing ledger. " - << detail::toString(ledger->info()); - // These are recomputed in setImmutable - auto& accountHash = ledger->info().accountHash; - auto& txHash = ledger->info().txHash; - auto& ledgerHash = ledger->info().hash; - - assert( - ledger->info().seq < XRP_LEDGER_EARLIEST_FEES || - ledger->read(keylet::fees())); - ledger->setImmutable(false); - auto start = std::chrono::system_clock::now(); - - auto numFlushed = ledger->stateMap().flushDirty(hotACCOUNT_NODE); - - auto numTxFlushed = ledger->txMap().flushDirty(hotTRANSACTION_NODE); - - { - Serializer s(128); - s.add32(HashPrefix::ledgerMaster); - addRaw(ledger->info(), s); - app_.getNodeStore().store( - hotLEDGER, - std::move(s.modData()), - ledger->info().hash, - ledger->info().seq); - } - - app_.getNodeStore().sync(); - - auto end = std::chrono::system_clock::now(); - - JLOG(journal_.debug()) << __func__ << " : " - << "Flushed " << numFlushed - << " nodes to nodestore from stateMap"; - JLOG(journal_.debug()) << __func__ << " : " - << "Flushed " << numTxFlushed - << " nodes to nodestore from txMap"; - - JLOG(journal_.debug()) << __func__ << " : " - << "Flush took " - << (end - start).count() / 1000000000.0 - << " seconds"; - - if (numFlushed == 0) - { - JLOG(journal_.fatal()) << __func__ << " : " - << "Flushed 0 nodes from state map"; - assert(false); - } - if (numTxFlushed == 0) - { - JLOG(journal_.warn()) << __func__ << " : " - << "Flushed 0 nodes from tx map"; - } - - // Make sure calculated hashes are correct - if (ledger->stateMap().getHash().as_uint256() != accountHash) - { - JLOG(journal_.fatal()) - << __func__ << " : " - << "State map hash does not match. " - << "Expected hash = " << strHex(accountHash) << "Actual hash = " - << strHex(ledger->stateMap().getHash().as_uint256()); - Throw("state map hash mismatch"); - } - - if (ledger->txMap().getHash().as_uint256() != txHash) - { - JLOG(journal_.fatal()) - << __func__ << " : " - << "Tx map hash does not match. " - << "Expected hash = " << strHex(txHash) << "Actual hash = " - << strHex(ledger->txMap().getHash().as_uint256()); - Throw("tx map hash mismatch"); - } - - if (ledger->info().hash != ledgerHash) - { - JLOG(journal_.fatal()) - << __func__ << " : " - << "Ledger hash does not match. " - << "Expected hash = " << strHex(ledgerHash) - << "Actual hash = " << strHex(ledger->info().hash); - Throw("ledger hash mismatch"); - } - - JLOG(journal_.info()) << __func__ << " : " - << "Successfully flushed ledger! 
" - << detail::toString(ledger->info()); -} - -void -ReportingETL::publishLedger(std::shared_ptr& ledger) -{ - app_.getOPs().pubLedger(ledger); - - setLastPublish(); -} - -bool -ReportingETL::publishLedger(uint32_t ledgerSequence, uint32_t maxAttempts) -{ - JLOG(journal_.info()) << __func__ << " : " - << "Attempting to publish ledger = " - << ledgerSequence; - size_t numAttempts = 0; - while (!stopping_) - { - auto ledger = app_.getLedgerMaster().getLedgerBySeq(ledgerSequence); - - if (!ledger) - { - JLOG(journal_.warn()) - << __func__ << " : " - << "Trying to publish. Could not find ledger with sequence = " - << ledgerSequence; - // We try maxAttempts times to publish the ledger, waiting one - // second in between each attempt. - // If the ledger is not present in the database after maxAttempts, - // we attempt to take over as the writer. If the takeover fails, - // doContinuousETL will return, and this node will go back to - // publishing. - // If the node is in strict read only mode, we simply - // skip publishing this ledger and return false indicating the - // publish failed - if (numAttempts >= maxAttempts) - { - JLOG(journal_.error()) << __func__ << " : " - << "Failed to publish ledger after " - << numAttempts << " attempts."; - if (!readOnly_) - { - JLOG(journal_.info()) << __func__ << " : " - << "Attempting to become ETL writer"; - return false; - } - else - { - JLOG(journal_.debug()) - << __func__ << " : " - << "In strict read-only mode. " - << "Skipping publishing this ledger. " - << "Beginning fast forward."; - return false; - } - } - else - { - std::this_thread::sleep_for(std::chrono::seconds(1)); - ++numAttempts; - } - continue; - } - - publishStrand_.post([this, ledger, fname = __func__]() { - app_.getOPs().pubLedger(ledger); - setLastPublish(); - JLOG(journal_.info()) - << fname << " : " - << "Published ledger. " << detail::toString(ledger->info()); - }); - return true; - } - return false; -} - -std::optional -ReportingETL::fetchLedgerData(uint32_t idx) -{ - JLOG(journal_.debug()) << __func__ << " : " - << "Attempting to fetch ledger with sequence = " - << idx; - - std::optional response = - loadBalancer_.fetchLedger(idx, false); - JLOG(journal_.trace()) << __func__ << " : " - << "GetLedger reply = " << response->DebugString(); - return response; -} - -std::optional -ReportingETL::fetchLedgerDataAndDiff(uint32_t idx) -{ - JLOG(journal_.debug()) << __func__ << " : " - << "Attempting to fetch ledger with sequence = " - << idx; - - std::optional response = - loadBalancer_.fetchLedger(idx, true); - JLOG(journal_.trace()) << __func__ << " : " - << "GetLedger reply = " << response->DebugString(); - return response; -} - -std::pair, std::vector> -ReportingETL::buildNextLedger( - std::shared_ptr& next, - org::xrpl::rpc::v1::GetLedgerResponse& rawData) -{ - JLOG(journal_.info()) << __func__ << " : " - << "Beginning ledger update"; - - LedgerInfo lgrInfo = - deserializeHeader(makeSlice(rawData.ledger_header()), true); - - JLOG(journal_.debug()) << __func__ << " : " - << "Deserialized ledger header. " - << detail::toString(lgrInfo); - - next->setLedgerInfo(lgrInfo); - - next->stateMap().clearSynching(); - next->txMap().clearSynching(); - - std::vector accountTxData{ - insertTransactions(next, rawData)}; - - JLOG(journal_.debug()) - << __func__ << " : " - << "Inserted all transactions. 
Number of transactions = " - << rawData.transactions_list().transactions_size(); - - for (auto& obj : rawData.ledger_objects().objects()) - { - auto key = uint256::fromVoidChecked(obj.key()); - if (!key) - throw std::runtime_error("Recevied malformed object ID"); - - auto& data = obj.data(); - - // indicates object was deleted - if (data.size() == 0) - { - JLOG(journal_.trace()) << __func__ << " : " - << "Erasing object = " << *key; - if (next->exists(*key)) - next->rawErase(*key); - } - else - { - SerialIter it{data.data(), data.size()}; - std::shared_ptr sle = std::make_shared(it, *key); - - if (next->exists(*key)) - { - JLOG(journal_.trace()) << __func__ << " : " - << "Replacing object = " << *key; - next->rawReplace(sle); - } - else - { - JLOG(journal_.trace()) << __func__ << " : " - << "Inserting object = " << *key; - next->rawInsert(sle); - } - } - } - JLOG(journal_.debug()) - << __func__ << " : " - << "Inserted/modified/deleted all objects. Number of objects = " - << rawData.ledger_objects().objects_size(); - - if (!rawData.skiplist_included()) - { - next->updateSkipList(); - JLOG(journal_.warn()) - << __func__ << " : " - << "tx process is not sending skiplist. This indicates that the tx " - "process is parsing metadata instead of doing a SHAMap diff. " - "Make sure tx process is running the same code as reporting to " - "use SHAMap diff instead of parsing metadata"; - } - - JLOG(journal_.debug()) << __func__ << " : " - << "Finished ledger update. " - << detail::toString(next->info()); - return {std::move(next), std::move(accountTxData)}; -} - -// Database must be populated when this starts -std::optional -ReportingETL::runETLPipeline(uint32_t startSequence) -{ - /* - * Behold, mortals! This function spawns three separate threads, which talk - * to each other via 2 different thread safe queues and 1 atomic variable. - * All threads and queues are function local. This function returns when all - * of the threads exit. There are two termination conditions: the first is - * if the load thread encounters a write conflict. In this case, the load - * thread sets writeConflict, an atomic bool, to true, which signals the - * other threads to stop. The second termination condition is when the - * entire server is shutting down, which is detected in one of three ways: - * 1. isStopping() returns true if the server is shutting down - * 2. networkValidatedLedgers_.waitUntilValidatedByNetwork returns - * false, signaling the wait was aborted. - * 3. fetchLedgerDataAndDiff returns an empty optional, signaling the fetch - * was aborted. - * In all cases, the extract thread detects this condition, - * and pushes an empty optional onto the transform queue. The transform - * thread, upon popping an empty optional, pushes an empty optional onto the - * load queue, and then returns. The load thread, upon popping an empty - * optional, returns. 
- */ - - JLOG(journal_.debug()) << __func__ << " : " - << "Starting etl pipeline"; - writing_ = true; - - std::shared_ptr parent = std::const_pointer_cast( - app_.getLedgerMaster().getLedgerBySeq(startSequence - 1)); - if (!parent) - { - assert(false); - Throw("runETLPipeline: parent ledger is null"); - } - - std::atomic_bool writeConflict = false; - std::optional lastPublishedSequence; - constexpr uint32_t maxQueueSize = 1000; - - ThreadSafeQueue> - transformQueue{maxQueueSize}; - - std::thread extracter{[this, - &startSequence, - &writeConflict, - &transformQueue]() { - beast::setCurrentThreadName("rippled: ReportingETL extract"); - uint32_t currentSequence = startSequence; - - // there are two stopping conditions here. - // First, if there is a write conflict in the load thread, the ETL - // mechanism should stop. - // The other stopping condition is if the entire server is shutting - // down. This can be detected in a variety of ways. See the comment - // at the top of the function - while (networkValidatedLedgers_.waitUntilValidatedByNetwork( - currentSequence) && - !writeConflict && !isStopping()) - { - auto start = std::chrono::system_clock::now(); - std::optional fetchResponse{ - fetchLedgerDataAndDiff(currentSequence)}; - // if the fetch is unsuccessful, stop. fetchLedger only returns - // false if the server is shutting down, or if the ledger was - // found in the database (which means another process already - // wrote the ledger that this process was trying to extract; - // this is a form of a write conflict). Otherwise, - // fetchLedgerDataAndDiff will keep trying to fetch the - // specified ledger until successful - if (!fetchResponse) - { - break; - } - auto end = std::chrono::system_clock::now(); - - auto time = ((end - start).count()) / 1000000000.0; - auto tps = - fetchResponse->transactions_list().transactions_size() / time; - - JLOG(journal_.debug()) << "Extract phase time = " << time - << " . 
Extract phase tps = " << tps; - - transformQueue.push(std::move(fetchResponse)); - ++currentSequence; - } - // empty optional tells the transformer to shut down - transformQueue.push({}); - }}; - - ThreadSafeQueue, - std::vector>>> - loadQueue{maxQueueSize}; - std::thread transformer{[this, - &parent, - &writeConflict, - &loadQueue, - &transformQueue]() { - beast::setCurrentThreadName("rippled: ReportingETL transform"); - - assert(parent); - parent = std::make_shared(*parent, NetClock::time_point{}); - while (!writeConflict) - { - std::optional fetchResponse{ - transformQueue.pop()}; - // if fetchResponse is an empty optional, the extracter thread has - // stopped and the transformer should stop as well - if (!fetchResponse) - { - break; - } - if (isStopping()) - continue; - - auto start = std::chrono::system_clock::now(); - auto [next, accountTxData] = - buildNextLedger(parent, *fetchResponse); - auto end = std::chrono::system_clock::now(); - - auto duration = ((end - start).count()) / 1000000000.0; - JLOG(journal_.debug()) << "transform time = " << duration; - // The below line needs to execute before pushing to the queue, in - // order to prevent this thread and the loader thread from accessing - // the same SHAMap concurrently - parent = std::make_shared(*next, NetClock::time_point{}); - loadQueue.push( - std::make_pair(std::move(next), std::move(accountTxData))); - } - // empty optional tells the loader to shutdown - loadQueue.push({}); - }}; - - std::thread loader{[this, - &lastPublishedSequence, - &loadQueue, - &writeConflict]() { - beast::setCurrentThreadName("rippled: ReportingETL load"); - size_t totalTransactions = 0; - double totalTime = 0; - while (!writeConflict) - { - std::optional, - std::vector>> - result{loadQueue.pop()}; - // if result is an empty optional, the transformer thread has - // stopped and the loader should stop as well - if (!result) - break; - if (isStopping()) - continue; - - auto& ledger = result->first; - auto& accountTxData = result->second; - - auto start = std::chrono::system_clock::now(); - // write to the key-value store - flushLedger(ledger); - - auto mid = std::chrono::system_clock::now(); - // write to RDBMS - // if there is a write conflict, some other process has already - // written this ledger and has taken over as the ETL writer -#ifdef RIPPLED_REPORTING - if (!dynamic_cast(&app_.getRelationalDatabase()) - ->writeLedgerAndTransactions( - ledger->info(), accountTxData)) - writeConflict = true; -#endif - auto end = std::chrono::system_clock::now(); - - if (!writeConflict) - { - publishLedger(ledger); - lastPublishedSequence = ledger->info().seq; - } - // print some performance numbers - auto kvTime = ((mid - start).count()) / 1000000000.0; - auto relationalTime = ((end - mid).count()) / 1000000000.0; - - size_t numTxns = accountTxData.size(); - totalTime += kvTime; - totalTransactions += numTxns; - JLOG(journal_.info()) - << "Load phase of etl : " - << "Successfully published ledger! Ledger info: " - << detail::toString(ledger->info()) - << ". txn count = " << numTxns - << ". key-value write time = " << kvTime - << ". relational write time = " << relationalTime - << ". key-value tps = " << numTxns / kvTime - << ". relational tps = " << numTxns / relationalTime - << ". 
total key-value tps = " << totalTransactions / totalTime; - } - }}; - - // wait for all of the threads to stop - loader.join(); - extracter.join(); - transformer.join(); - writing_ = false; - - JLOG(journal_.debug()) << __func__ << " : " - << "Stopping etl pipeline"; - - return lastPublishedSequence; -} - -// main loop. The software begins monitoring the ledgers that are validated -// by the nework. The member networkValidatedLedgers_ keeps track of the -// sequences of ledgers validated by the network. Whenever a ledger is validated -// by the network, the software looks for that ledger in the database. Once the -// ledger is found in the database, the software publishes that ledger to the -// ledgers stream. If a network validated ledger is not found in the database -// after a certain amount of time, then the software attempts to take over -// responsibility of the ETL process, where it writes new ledgers to the -// database. The software will relinquish control of the ETL process if it -// detects that another process has taken over ETL. -void -ReportingETL::monitor() -{ - auto ledger = std::const_pointer_cast( - app_.getLedgerMaster().getValidatedLedger()); - if (!ledger) - { - JLOG(journal_.info()) << __func__ << " : " - << "Database is empty. Will download a ledger " - "from the network."; - if (startSequence_) - { - JLOG(journal_.info()) - << __func__ << " : " - << "ledger sequence specified in config. " - << "Will begin ETL process starting with ledger " - << *startSequence_; - ledger = loadInitialLedger(*startSequence_); - } - else - { - JLOG(journal_.info()) - << __func__ << " : " - << "Waiting for next ledger to be validated by network..."; - std::optional mostRecentValidated = - networkValidatedLedgers_.getMostRecent(); - if (mostRecentValidated) - { - JLOG(journal_.info()) << __func__ << " : " - << "Ledger " << *mostRecentValidated - << " has been validated. " - << "Downloading..."; - ledger = loadInitialLedger(*mostRecentValidated); - } - else - { - JLOG(journal_.info()) << __func__ << " : " - << "The wait for the next validated " - << "ledger has been aborted. " - << "Exiting monitor loop"; - return; - } - } - } - else - { - if (startSequence_) - { - Throw( - "start sequence specified but db is already populated"); - } - JLOG(journal_.info()) - << __func__ << " : " - << "Database already populated. Picking up from the tip of history"; - } - if (!ledger) - { - JLOG(journal_.error()) - << __func__ << " : " - << "Failed to load initial ledger. Exiting monitor loop"; - return; - } - else - { - publishLedger(ledger); - } - uint32_t nextSequence = ledger->info().seq + 1; - - JLOG(journal_.debug()) << __func__ << " : " - << "Database is populated. " - << "Starting monitor loop. sequence = " - << nextSequence; - while (!stopping_ && - networkValidatedLedgers_.waitUntilValidatedByNetwork(nextSequence)) - { - JLOG(journal_.info()) << __func__ << " : " - << "Ledger with sequence = " << nextSequence - << " has been validated by the network. " - << "Attempting to find in database and publish"; - // Attempt to take over responsibility of ETL writer after 10 failed - // attempts to publish the ledger. publishLedger() fails if the - // ledger that has been validated by the network is not found in the - // database after the specified number of attempts. 
publishLedger() - // waits one second between each attempt to read the ledger from the - // database - // - // In strict read-only mode, when the software fails to find a - // ledger in the database that has been validated by the network, - // the software will only try to publish subsequent ledgers once, - // until one of those ledgers is found in the database. Once the - // software successfully publishes a ledger, the software will fall - // back to the normal behavior of trying several times to publish - // the ledger that has been validated by the network. In this - // manner, a reporting processing running in read-only mode does not - // need to restart if the database is wiped. - constexpr size_t timeoutSeconds = 10; - bool success = publishLedger(nextSequence, timeoutSeconds); - if (!success) - { - JLOG(journal_.warn()) - << __func__ << " : " - << "Failed to publish ledger with sequence = " << nextSequence - << " . Beginning ETL"; - // doContinousETLPipelined returns the most recent sequence - // published empty optional if no sequence was published - std::optional lastPublished = - runETLPipeline(nextSequence); - JLOG(journal_.info()) << __func__ << " : " - << "Aborting ETL. Falling back to publishing"; - // if no ledger was published, don't increment nextSequence - if (lastPublished) - nextSequence = *lastPublished + 1; - } - else - { - ++nextSequence; - } - } -} - -void -ReportingETL::monitorReadOnly() -{ - JLOG(journal_.debug()) << "Starting reporting in strict read only mode"; - std::optional mostRecent = - networkValidatedLedgers_.getMostRecent(); - if (!mostRecent) - return; - uint32_t sequence = *mostRecent; - bool success = true; - while (!stopping_ && - networkValidatedLedgers_.waitUntilValidatedByNetwork(sequence)) - { - success = publishLedger(sequence, success ? 30 : 1); - ++sequence; - } -} - -void -ReportingETL::doWork() -{ - worker_ = std::thread([this]() { - beast::setCurrentThreadName("rippled: ReportingETL worker"); - if (readOnly_) - monitorReadOnly(); - else - monitor(); - }); -} - -ReportingETL::ReportingETL(Application& app) - : app_(app) - , journal_(app.journal("ReportingETL")) - , publishStrand_(app_.getIOService()) - , loadBalancer_(*this) -{ - // if present, get endpoint from config - if (app_.config().exists("reporting")) - { -#ifndef RIPPLED_REPORTING - Throw( - "Config file specifies reporting, but software was not built with " - "-Dreporting=1. To use reporting, configure CMake with " - "-Dreporting=1"); -#endif - if (!app_.config().useTxTables()) - Throw( - "Reporting requires tx tables. Set use_tx_tables=1 in config " - "file, under [ledger_tx_tables] section"); - Section section = app_.config().section("reporting"); - - JLOG(journal_.debug()) << "Parsing config info"; - - auto& vals = section.values(); - for (auto& v : vals) - { - JLOG(journal_.debug()) << "val is " << v; - Section source = app_.config().section(v); - - auto optIp = source.get("source_ip"); - if (!optIp) - continue; - - auto optWsPort = source.get("source_ws_port"); - if (!optWsPort) - continue; - - auto optGrpcPort = source.get("source_grpc_port"); - if (!optGrpcPort) - { - // add source without grpc port - // used in read-only mode to detect when new ledgers have - // been validated. 
Used for publishing - if (app_.config().reportingReadOnly()) - loadBalancer_.add(*optIp, *optWsPort); - continue; - } - - loadBalancer_.add(*optIp, *optWsPort, *optGrpcPort); - } - - // this is true iff --reportingReadOnly was passed via command line - readOnly_ = app_.config().reportingReadOnly(); - - // if --reportingReadOnly was not passed via command line, check config - // file. Command line takes precedence - if (!readOnly_) - { - auto const optRO = section.get("read_only"); - if (optRO) - { - readOnly_ = (*optRO == "true" || *optRO == "1"); - app_.config().setReportingReadOnly(readOnly_); - } - } - - // lambda throws a useful message if string to integer conversion fails - auto asciiToIntThrows = - [](auto& dest, std::string const& src, char const* onError) { - char const* const srcEnd = src.data() + src.size(); - auto [ptr, err] = std::from_chars(src.data(), srcEnd, dest); - - if (err == std::errc()) - // skip whitespace at end of string - while (ptr != srcEnd && - std::isspace(static_cast(*ptr))) - ++ptr; - - // throw if - // o conversion error or - // o entire string is not consumed - if (err != std::errc() || ptr != srcEnd) - Throw(onError + src); - }; - - // handle command line arguments - if (app_.config().START_UP == Config::StartUpType::FRESH && !readOnly_) - { - asciiToIntThrows( - *startSequence_, - app_.config().START_LEDGER, - "Expected integral START_LEDGER command line argument. Got: "); - } - // if not passed via command line, check config for start sequence - if (!startSequence_) - { - auto const optStartSeq = section.get("start_sequence"); - if (optStartSeq) - { - // set a value so we can dereference - startSequence_ = 0; - asciiToIntThrows( - *startSequence_, - *optStartSeq, - "Expected integral start_sequence config entry. Got: "); - } - } - - auto const optFlushInterval = section.get("flush_interval"); - if (optFlushInterval) - asciiToIntThrows( - flushInterval_, - *optFlushInterval, - "Expected integral flush_interval config entry. Got: "); - - auto const optNumMarkers = section.get("num_markers"); - if (optNumMarkers) - asciiToIntThrows( - numMarkers_, - *optNumMarkers, - "Expected integral num_markers config entry. Got: "); - } -} - -} // namespace ripple diff --git a/src/xrpld/app/reporting/ReportingETL.h b/src/xrpld/app/reporting/ReportingETL.h deleted file mode 100644 index fc15f90b43b..00000000000 --- a/src/xrpld/app/reporting/ReportingETL.h +++ /dev/null @@ -1,367 +0,0 @@ -//------------------------------------------------------------------------------ -/* - This file is part of rippled: https://github.com/ripple/rippled - Copyright (c) 2020 Ripple Labs Inc. - - Permission to use, copy, modify, and/or distribute this software for any - purpose with or without fee is hereby granted, provided that the above - copyright notice and this permission notice appear in all copies. - - THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
-*/ -//============================================================================== - -#ifndef RIPPLE_APP_REPORTING_REPORTINGETL_H_INCLUDED -#define RIPPLE_APP_REPORTING_REPORTINGETL_H_INCLUDED - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include - -#include -#include - -#include -#include -#include - -#include -namespace ripple { - -using AccountTransactionsData = RelationalDatabase::AccountTransactionsData; - -/** - * This class is responsible for continuously extracting data from a - * p2p node, and writing that data to the databases. Usually, multiple different - * processes share access to the same network accessible databases, in which - * case only one such process is performing ETL and writing to the database. The - * other processes simply monitor the database for new ledgers, and publish - * those ledgers to the various subscription streams. If a monitoring process - * determines that the ETL writer has failed (no new ledgers written for some - * time), the process will attempt to become the ETL writer. If there are - * multiple monitoring processes that try to become the ETL writer at the same - * time, one will win out, and the others will fall back to - * monitoring/publishing. In this sense, this class dynamically transitions from - * monitoring to writing and from writing to monitoring, based on the activity - * of other processes running on different machines. - */ -class ReportingETL -{ -private: - Application& app_; - - beast::Journal journal_; - - std::thread worker_; - - /// Strand to ensure that ledgers are published in order. - /// If ETL is started far behind the network, ledgers will be written and - /// published very rapidly. Monitoring processes will publish ledgers as - /// they are written. However, to publish a ledger, the monitoring process - /// needs to read all of the transactions for that ledger from the database. - /// Reading the transactions from the database requires network calls, which - /// can be slow. It is imperative however that the monitoring processes keep - /// up with the writer, else the monitoring processes will not be able to - /// detect if the writer failed. Therefore, publishing each ledger (which - /// includes reading all of the transactions from the database) is done from - /// the application wide asio io_service, and a strand is used to ensure - /// ledgers are published in order - boost::asio::io_context::strand publishStrand_; - - /// Mechanism for communicating with ETL sources. ETLLoadBalancer wraps an - /// arbitrary number of ETL sources and load balances ETL requests across - /// those sources. - ETLLoadBalancer loadBalancer_; - - /// Mechanism for detecting when the network has validated a new ledger. - /// This class provides a way to wait for a specific ledger to be validated - NetworkValidatedLedgers networkValidatedLedgers_; - - /// Whether the software is stopping - std::atomic_bool stopping_ = false; - - /// Used to determine when to write to the database during the initial - /// ledger download. By default, the software downloads an entire ledger and - /// then writes to the database. If flushInterval_ is non-zero, the software - /// will write to the database as new ledger data (SHAMap leaf nodes) - /// arrives. 
-    /// It is not necessarily more efficient to write the data as it
-    /// arrives, as different SHAMap leaf nodes share the same SHAMap inner
-    /// nodes; flushing prematurely can result in the same SHAMap inner node
-    /// being written to the database more than once. It is recommended to use
-    /// the default value of 0 for this variable; however, different values can
-    /// be experimented with if better performance is desired.
-    size_t flushInterval_ = 0;
-
-    /// This variable controls the number of GetLedgerData calls that will be
-    /// executed in parallel during the initial ledger download. GetLedgerData
-    /// allows clients to page through a ledger over many RPC calls.
-    /// GetLedgerData returns a marker that is used as an offset in a subsequent
-    /// call. If numMarkers_ is greater than 1, there will be multiple chains of
-    /// GetLedgerData calls iterating over different parts of the same ledger in
-    /// parallel. This can dramatically speed up the time to download the
-    /// initial ledger. However, a higher value for this member variable puts
-    /// more load on the ETL source.
-    size_t numMarkers_ = 2;
-
-    /// Whether the process is in strict read-only mode. In strict read-only
-    /// mode, the process will never attempt to become the ETL writer, and will
-    /// only publish ledgers as they are written to the database.
-    bool readOnly_ = false;
-
-    /// Whether the process is writing to the database. Used by server_info
-    std::atomic_bool writing_ = false;
-
-    /// Ledger sequence to start ETL from. If this is empty, ETL will start from
-    /// the next ledger validated by the network. If this is set, and the
-    /// database is already populated, an error is thrown.
-    std::optional startSequence_;
-
-    /// The time that the most recently published ledger was published. Used by
-    /// server_info
-    std::chrono::time_point lastPublish_;
-
-    std::mutex publishTimeMtx_;
-
-    std::chrono::time_point
-    getLastPublish()
-    {
-        std::unique_lock lck(publishTimeMtx_);
-        return lastPublish_;
-    }
-
-    void
-    setLastPublish()
-    {
-        std::unique_lock lck(publishTimeMtx_);
-        lastPublish_ = std::chrono::system_clock::now();
-    }
-
-    /// Download a ledger with specified sequence in full, via GetLedgerData,
-    /// and write the data to the databases. This takes several minutes or
-    /// longer.
-    /// @param sequence the sequence of the ledger to download
-    /// @return The ledger downloaded, with a full transaction and account state
-    /// map
-    std::shared_ptr
-    loadInitialLedger(uint32_t sequence);
-
-    /// Run ETL. Extracts ledgers and writes them to the database, until a write
-    /// conflict occurs (or the server shuts down).
-    /// @note database must already be populated when this function is called
-    /// @param startSequence the first ledger to extract
-    /// @return the last ledger written to the database, if any
-    std::optional
-    runETLPipeline(uint32_t startSequence);
-
-    /// Monitor the network for newly validated ledgers. Also monitor the
-    /// database to see if any process is writing those ledgers. This function
-    /// is called when the application starts, and will only return when the
-    /// application is shutting down. If the software detects the database is
-    /// empty, this function will call loadInitialLedger(). If the software
-    /// detects ledgers are not being written, this function calls
-    /// runETLPipeline(). Otherwise, this function publishes ledgers as they are
-    /// written to the database.
-    void
-    monitor();
-
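The marker-based parallelism described for numMarkers_ above can be sketched
as follows. This is a hedged illustration only: fetchPage() and the one-byte
key-prefix partitioning are invented for the example; the real calls are
GetLedgerData RPCs over the full 256-bit key space.

    #include <cstddef>
    #include <cstdint>
    #include <optional>
    #include <thread>
    #include <vector>

    // Invented stand-in for one paged call: consumes some key prefixes and
    // returns the marker for the next page, or nullopt once this worker's
    // slice of the ledger is exhausted.
    std::optional<std::uint8_t>
    fetchPage(std::uint8_t marker, std::uint8_t end)
    {
        return marker + 16 <= end ? std::optional<std::uint8_t>(marker + 16)
                                  : std::nullopt;
    }

    int main()
    {
        std::size_t const numMarkers = 4;
        std::size_t const step = 256 / numMarkers;
        std::vector<std::thread> workers;
        // Each worker runs its own chain of paged calls over a disjoint
        // slice of the key-prefix space, mirroring the "multiple chains of
        // GetLedgerData calls" described above.
        for (std::size_t i = 0; i < numMarkers; ++i)
        {
            auto const begin = static_cast<std::uint8_t>(i * step);
            auto const end = static_cast<std::uint8_t>((i + 1) * step - 1);
            workers.emplace_back([begin, end] {
                std::optional<std::uint8_t> marker = begin;
                while (marker)
                    marker = fetchPage(*marker, end);
            });
        }
        for (auto& w : workers)
            w.join();
    }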
-    /// Monitor the database for newly written ledgers.
-    /// Similar to the monitor(), except this function will never call
-    /// runETLPipeline() or loadInitialLedger(). This function only publishes
-    /// ledgers as they are written to the database.
-    void
-    monitorReadOnly();
-
-    /// Extract data for a particular ledger from an ETL source. This function
-    /// continuously tries to extract the specified ledger (using all available
-    /// ETL sources) until the extraction succeeds, or the server shuts down.
-    /// @param sequence sequence of the ledger to extract
-    /// @return ledger header and transaction+metadata blobs. Empty optional
-    /// if the server is shutting down
-    std::optional
-    fetchLedgerData(uint32_t sequence);
-
-    /// Extract data for a particular ledger from an ETL source. This function
-    /// continuously tries to extract the specified ledger (using all available
-    /// ETL sources) until the extraction succeeds, or the server shuts down.
-    /// @param sequence sequence of the ledger to extract
-    /// @return ledger header, transaction+metadata blobs, and all ledger
-    /// objects created, modified or deleted between this ledger and the parent.
-    /// Empty optional if the server is shutting down
-    std::optional
-    fetchLedgerDataAndDiff(uint32_t sequence);
-
-    /// Insert all of the extracted transactions into the ledger
-    /// @param ledger ledger to insert transactions into
-    /// @param data data extracted from an ETL source
-    /// @return struct that contains the necessary info to write to the
-    /// transactions and account_transactions tables in Postgres (mostly
-    /// transaction hashes, corresponding nodestore hashes and affected
-    /// accounts)
-    std::vector
-    insertTransactions(
-        std::shared_ptr& ledger,
-        org::xrpl::rpc::v1::GetLedgerResponse& data);
-
-    /// Build the next ledger using the previous ledger and the extracted data.
-    /// This function calls insertTransactions()
-    /// @note rawData should be data that corresponds to the ledger immediately
-    /// following parent
-    /// @param parent the previous ledger
-    /// @param rawData data extracted from an ETL source
-    /// @return the newly built ledger and data to write to Postgres
-    std::pair, std::vector>
-    buildNextLedger(
-        std::shared_ptr& parent,
-        org::xrpl::rpc::v1::GetLedgerResponse& rawData);
-
-    /// Write all new data to the key-value store
-    /// @param ledger ledger with new data to write
-    void
-    flushLedger(std::shared_ptr& ledger);
-
-    /// Attempt to read the specified ledger from the database, and then publish
-    /// that ledger to the ledgers stream.
-    /// @param ledgerSequence the sequence of the ledger to publish
-    /// @param maxAttempts the number of times to attempt to read the ledger
-    /// from the database. 1 attempt per second
-    /// @return whether the ledger was found in the database and published
-    bool
-    publishLedger(uint32_t ledgerSequence, uint32_t maxAttempts = 10);
-
-    /// Publish the passed in ledger
-    /// @param ledger the ledger to publish
-    void
-    publishLedger(std::shared_ptr& ledger);
-
-    /// Consume data from a queue and insert that data into the ledger
-    /// This function will continue to pull from the queue until the queue
-    /// returns nullptr. This is used during the initial ledger download
-    /// @param ledger the ledger to insert data into
-    /// @param writeQueue the queue with extracted data
-    void
-    consumeLedgerData(
-        std::shared_ptr& ledger,
-        ThreadSafeQueue>& writeQueue);
-
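The nullptr-terminated queue protocol used by consumeLedgerData() is worth
seeing end to end. Here is a self-contained sketch with a simplified stand-in
for ThreadSafeQueue; all names are invented for illustration:

    #include <condition_variable>
    #include <iostream>
    #include <memory>
    #include <mutex>
    #include <queue>
    #include <thread>

    // Simplified stand-in for the ThreadSafeQueue used above: pop() blocks
    // until an element is available.
    template <class T>
    class BlockingQueue
    {
        std::queue<T> q_;
        std::mutex m_;
        std::condition_variable cv_;

    public:
        void push(T v)
        {
            {
                std::lock_guard<std::mutex> l(m_);
                q_.push(std::move(v));
            }
            cv_.notify_one();
        }
        T pop()
        {
            std::unique_lock<std::mutex> l(m_);
            cv_.wait(l, [this] { return !q_.empty(); });
            T v = std::move(q_.front());
            q_.pop();
            return v;
        }
    };

    int main()
    {
        BlockingQueue<std::shared_ptr<int>> writeQueue;
        std::thread producer([&] {
            for (int i = 0; i < 3; ++i)
                writeQueue.push(std::make_shared<int>(i));
            writeQueue.push(nullptr);  // nullptr marks the end of the data
        });
        // Consumer loop: the same shape as consumeLedgerData().
        while (auto item = writeQueue.pop())
            std::cout << "insert node " << *item << '\n';
        producer.join();
    }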
-public:
-    explicit ReportingETL(Application& app);
-
-    ~ReportingETL()
-    {
-    }
-
-    NetworkValidatedLedgers&
-    getNetworkValidatedLedgers()
-    {
-        return networkValidatedLedgers_;
-    }
-
-    bool
-    isStopping() const
-    {
-        return stopping_;
-    }
-
-    /// Get the number of markers to use during the initial ledger download.
-    /// This is equivalent to the degree of parallelism during the initial
-    /// ledger download
-    /// @return the number of markers
-    uint32_t
-    getNumMarkers()
-    {
-        return numMarkers_;
-    }
-
-    Application&
-    getApplication()
-    {
-        return app_;
-    }
-
-    beast::Journal&
-    getJournal()
-    {
-        return journal_;
-    }
-
-    Json::Value
-    getInfo()
-    {
-        Json::Value result(Json::objectValue);
-
-        result["etl_sources"] = loadBalancer_.toJson();
-        result["is_writer"] = writing_.load();
-        auto last = getLastPublish();
-        if (last.time_since_epoch().count() != 0)
-            result["last_publish_time"] =
-                to_string(std::chrono::floor(
-                    getLastPublish()));
-        return result;
-    }
-
-    /// start all of the necessary components and begin ETL
-    void
-    start()
-    {
-        JLOG(journal_.info()) << "Starting reporting etl";
-        assert(app_.config().reporting());
-        assert(app_.config().standalone());
-        assert(app_.config().reportingReadOnly() == readOnly_);
-
-        stopping_ = false;
-
-        loadBalancer_.start();
-        doWork();
-    }
-
-    void
-    stop()
-    {
-        JLOG(journal_.info()) << "onStop called";
-        JLOG(journal_.debug()) << "Stopping Reporting ETL";
-        stopping_ = true;
-        networkValidatedLedgers_.stop();
-        loadBalancer_.stop();
-
-        JLOG(journal_.debug()) << "Stopped loadBalancer";
-        if (worker_.joinable())
-            worker_.join();
-
-        JLOG(journal_.debug()) << "Joined worker thread";
-    }
-
-    ETLLoadBalancer&
-    getETLLoadBalancer()
-    {
-        return loadBalancer_;
-    }
-
-private:
-    void
-    doWork();
-};
-
-} // namespace ripple
-#endif
diff --git a/src/xrpld/core/Config.h b/src/xrpld/core/Config.h
index e63e6d2f356..2d8340e0747 100644
--- a/src/xrpld/core/Config.h
+++ b/src/xrpld/core/Config.h
@@ -127,10 +127,6 @@ class Config : public BasicConfig
      */
     bool RUN_STANDALONE = false;
 
-    bool RUN_REPORTING = false;
-
-    bool REPORTING_READ_ONLY = false;
-
     bool USE_TX_TABLES = true;
 
     /** Determines if the server will sign a tx, given an account's secret seed.
@@ -347,11 +343,6 @@ class Config : public BasicConfig
     {
         return RUN_STANDALONE;
     }
-    bool
-    reporting() const
-    {
-        return RUN_REPORTING;
-    }
 
     bool
     useTxTables() const
@@ -359,18 +350,6 @@ class Config : public BasicConfig
         return USE_TX_TABLES;
     }
 
-    bool
-    reportingReadOnly() const
-    {
-        return REPORTING_READ_ONLY;
-    }
-
-    void
-    setReportingReadOnly(bool b)
-    {
-        REPORTING_READ_ONLY = b;
-    }
-
     bool
     canSign() const
     {
diff --git a/src/xrpld/core/DatabaseCon.h b/src/xrpld/core/DatabaseCon.h
index d2f6b0a4f05..0ded37b1739 100644
--- a/src/xrpld/core/DatabaseCon.h
+++ b/src/xrpld/core/DatabaseCon.h
@@ -87,7 +87,6 @@ class DatabaseCon
         Config::StartUpType startUp = Config::NORMAL;
         bool standAlone = false;
-        bool reporting = false;
         boost::filesystem::path dataDir;
         // Indicates whether or not to return the `globalPragma`
         // from commonPragma()
@@ -118,8 +117,7 @@ class DatabaseCon
         std::array const& initSQL)
         // Use temporary files or regular DB files?
: DatabaseCon( - setup.standAlone && !setup.reporting && - setup.startUp != Config::LOAD && + setup.standAlone && setup.startUp != Config::LOAD && setup.startUp != Config::LOAD_FILE && setup.startUp != Config::REPLAY ? "" diff --git a/src/xrpld/core/Pg.cpp b/src/xrpld/core/Pg.cpp deleted file mode 100644 index 829e17658c1..00000000000 --- a/src/xrpld/core/Pg.cpp +++ /dev/null @@ -1,1415 +0,0 @@ -//------------------------------------------------------------------------------ -/* - This file is part of rippled: https://github.com/ripple/rippled - Copyright (c) 2020 Ripple Labs Inc. - - Permission to use, copy, modify, and/or distribute this software for any - purpose with or without fee is hereby granted, provided that the above - copyright notice and this permission notice appear in all copies. - - THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. -*/ -//============================================================================== - -#ifdef RIPPLED_REPORTING -// Need raw socket manipulation to determine if postgres socket IPv4 or 6. -#if defined(_WIN32) -#include -#include -#else -#include -#include -#include -#include -#endif - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -namespace ripple { - -static void -noticeReceiver(void* arg, PGresult const* res) -{ - beast::Journal& j = *static_cast(arg); - JLOG(j.info()) << "server message: " << PQresultErrorMessage(res); -} - -//----------------------------------------------------------------------------- - -std::string -PgResult::msg() const -{ - if (error_.has_value()) - { - std::stringstream ss; - ss << error_->first << ": " << error_->second; - return ss.str(); - } - if (result_) - return "ok"; - - // Must be stopping. - return "stopping"; -} - -//----------------------------------------------------------------------------- - -/* - Connecting described in: - https://www.postgresql.org/docs/10/libpq-connect.html - */ -void -Pg::connect() -{ - if (conn_) - { - // Nothing to do if we already have a good connection. - if (PQstatus(conn_.get()) == CONNECTION_OK) - return; - /* Try resetting connection. */ - PQreset(conn_.get()); - } - else // Make new connection. - { - conn_.reset(PQconnectdbParams( - reinterpret_cast(&config_.keywordsIdx[0]), - reinterpret_cast(&config_.valuesIdx[0]), - 0)); - if (!conn_) - Throw("No db connection struct"); - } - - /** Results from a synchronous connection attempt can only be either - * CONNECTION_OK or CONNECTION_BAD. */ - if (PQstatus(conn_.get()) == CONNECTION_BAD) - { - std::stringstream ss; - ss << "DB connection status " << PQstatus(conn_.get()) << ": " - << PQerrorMessage(conn_.get()); - Throw(ss.str()); - } - - // Log server session console messages. - PQsetNoticeReceiver( - conn_.get(), noticeReceiver, const_cast(&j_)); -} - -PgResult -Pg::query(char const* command, std::size_t nParams, char const* const* values) -{ - // The result object must be freed using the libpq API PQclear() call. 
-    pg_result_type ret{nullptr, [](PGresult* result) { PQclear(result); }};
-    // Connect then submit query.
-    while (true)
-    {
-        {
-            std::lock_guard lock(mutex_);
-            if (stop_)
-                return PgResult();
-        }
-        try
-        {
-            connect();
-            if (nParams)
-            {
-                // PQexecParams can process only a single command.
-                ret.reset(PQexecParams(
-                    conn_.get(),
-                    command,
-                    nParams,
-                    nullptr,
-                    values,
-                    nullptr,
-                    nullptr,
-                    0));
-            }
-            else
-            {
-                // PQexec can process multiple commands separated by
-                // semi-colons. Returns the response from the last
-                // command processed.
-                ret.reset(PQexec(conn_.get(), command));
-            }
-            if (!ret)
-                Throw("no result structure returned");
-            break;
-        }
-        catch (std::exception const& e)
-        {
-            // Sever connection and retry until successful.
-            disconnect();
-            JLOG(j_.error()) << "database error, retrying: " << e.what();
-            std::this_thread::sleep_for(std::chrono::seconds(1));
-        }
-    }
-
-    // Ensure proper query execution.
-    switch (PQresultStatus(ret.get()))
-    {
-        case PGRES_TUPLES_OK:
-        case PGRES_COMMAND_OK:
-        case PGRES_COPY_IN:
-        case PGRES_COPY_OUT:
-        case PGRES_COPY_BOTH:
-            break;
-        default: {
-            std::stringstream ss;
-            ss << "bad query result: " << PQresStatus(PQresultStatus(ret.get()))
-               << " error message: " << PQerrorMessage(conn_.get())
-               << ", number of tuples: " << PQntuples(ret.get())
-               << ", number of fields: " << PQnfields(ret.get());
-            JLOG(j_.error()) << ss.str();
-            PgResult retRes(ret.get(), conn_.get());
-            disconnect();
-            return retRes;
-        }
-    }
-
-    return PgResult(std::move(ret));
-}
-
-static pg_formatted_params
-formatParams(pg_params const& dbParams, beast::Journal const& j)
-{
-    std::vector> const& values = dbParams.second;
-    /* Convert vector to C-style array of C-strings for postgres API.
-       std::nullopt is a proxy for NULL since an empty std::string is
-       0 length but not NULL. */
-    std::vector valuesIdx;
-    valuesIdx.reserve(values.size());
-    std::stringstream ss;
-    bool first = true;
-    for (auto const& value : values)
-    {
-        if (first)
-            first = false;
-        else
-            ss << ',';
-        if (value)
-        {
-            valuesIdx.push_back(value->c_str());
-            ss << value->c_str();
-        }
-        else
-        {
-            valuesIdx.push_back(nullptr);
-            ss << "(null)";
-        }
-    }
-
-    JLOG(j.trace()) << "query: " << dbParams.first << ". params: " << ss.str();
-    return valuesIdx;
-}
-
-PgResult
-Pg::query(pg_params const& dbParams)
-{
-    char const* const& command = dbParams.first;
-    auto const formattedParams = formatParams(dbParams, j_);
-    return query(
-        command,
-        formattedParams.size(),
-        formattedParams.size()
-            ? reinterpret_cast(&formattedParams[0])
-            : nullptr);
-}
-
-void
-Pg::bulkInsert(char const* table, std::string const& records)
-{
-    // https://www.postgresql.org/docs/12/libpq-copy.html#LIBPQ-COPY-SEND
-    assert(conn_.get());
-    static auto copyCmd = boost::format(R"(COPY %s FROM stdin)");
-    auto res = query(boost::str(copyCmd % table).c_str());
-    if (!res || res.status() != PGRES_COPY_IN)
-    {
-        std::stringstream ss;
-        ss << "bulkInsert to " << table
-           << ". Postgres insert error: " << res.msg();
-        if (res)
-            ss << ". Query status not PGRES_COPY_IN: " << res.status();
-        Throw(ss.str());
-    }
-
-    if (PQputCopyData(conn_.get(), records.c_str(), records.size()) == -1)
-    {
-        std::stringstream ss;
-        ss << "bulkInsert to " << table
-           << ". PQputCopyData error: " << PQerrorMessage(conn_.get());
-        disconnect();
-        Throw(ss.str());
-    }
-
-    if (PQputCopyEnd(conn_.get(), nullptr) == -1)
-    {
-        std::stringstream ss;
-        ss << "bulkInsert to " << table
-           << ". 
PQputCopyEnd error: " << PQerrorMessage(conn_.get()); - disconnect(); - Throw(ss.str()); - } - - // The result object must be freed using the libpq API PQclear() call. - pg_result_type copyEndResult{ - nullptr, [](PGresult* result) { PQclear(result); }}; - copyEndResult.reset(PQgetResult(conn_.get())); - ExecStatusType status = PQresultStatus(copyEndResult.get()); - if (status != PGRES_COMMAND_OK) - { - std::stringstream ss; - ss << "bulkInsert to " << table - << ". PQputCopyEnd status not PGRES_COMMAND_OK: " << status; - disconnect(); - Throw(ss.str()); - } -} - -bool -Pg::clear() -{ - if (!conn_) - return false; - - // The result object must be freed using the libpq API PQclear() call. - pg_result_type res{nullptr, [](PGresult* result) { PQclear(result); }}; - - // Consume results until no more, or until the connection is severed. - do - { - res.reset(PQgetResult(conn_.get())); - if (!res) - break; - - // Pending bulk copy operations may leave the connection in such a - // state that it must be disconnected. - switch (PQresultStatus(res.get())) - { - case PGRES_COPY_IN: - if (PQputCopyEnd(conn_.get(), nullptr) != -1) - break; - [[fallthrough]]; // avoids compiler warning - case PGRES_COPY_OUT: - case PGRES_COPY_BOTH: - conn_.reset(); - default:; - } - } while (res && conn_); - - return conn_ != nullptr; -} - -//----------------------------------------------------------------------------- - -PgPool::PgPool(Section const& pgConfig, beast::Journal j) : j_(j) -{ - // Make sure that boost::asio initializes the SSL library. - { - static boost::asio::ssl::detail::openssl_init initSsl; - } - // Don't have postgres client initialize SSL. - PQinitOpenSSL(0, 0); - - /* - Connect to postgres to create low level connection parameters - with optional caching of network address info for subsequent connections. - See https://www.postgresql.org/docs/10/libpq-connect.html - - For bounds checking of postgres connection data received from - the network: the largest size for any connection field in - PG source code is 64 bytes as of 5/2019. There are 29 fields. - */ - constexpr std::size_t maxFieldSize = 1024; - constexpr std::size_t maxFields = 1000; - - // The connection object must be freed using the libpq API PQfinish() call. - pg_connection_type conn( - PQconnectdb(get(pgConfig, "conninfo").c_str()), - [](PGconn* conn) { PQfinish(conn); }); - if (!conn) - Throw("Can't create DB connection."); - if (PQstatus(conn.get()) != CONNECTION_OK) - { - std::stringstream ss; - ss << "Initial DB connection failed: " << PQerrorMessage(conn.get()); - Throw(ss.str()); - } - - int const sockfd = PQsocket(conn.get()); - if (sockfd == -1) - Throw("No DB socket is open."); - struct sockaddr_storage addr; - socklen_t len = sizeof(addr); - if (getpeername(sockfd, reinterpret_cast(&addr), &len) == - -1) - { - Throw( - errno, std::generic_category(), "Can't get server address info."); - } - - // Set "port" and "hostaddr" if we're caching it. 
- bool const remember_ip = get(pgConfig, "remember_ip", true); - - if (remember_ip) - { - config_.keywords.push_back("port"); - config_.keywords.push_back("hostaddr"); - std::string port; - std::string hostaddr; - - if (addr.ss_family == AF_INET) - { - hostaddr.assign(INET_ADDRSTRLEN, '\0'); - struct sockaddr_in const& ainfo = - reinterpret_cast(addr); - port = std::to_string(ntohs(ainfo.sin_port)); - if (!inet_ntop( - AF_INET, &ainfo.sin_addr, &hostaddr[0], hostaddr.size())) - { - Throw( - errno, - std::generic_category(), - "Can't get IPv4 address string."); - } - } - else if (addr.ss_family == AF_INET6) - { - hostaddr.assign(INET6_ADDRSTRLEN, '\0'); - struct sockaddr_in6 const& ainfo = - reinterpret_cast(addr); - port = std::to_string(ntohs(ainfo.sin6_port)); - if (!inet_ntop( - AF_INET6, &ainfo.sin6_addr, &hostaddr[0], hostaddr.size())) - { - Throw( - errno, - std::generic_category(), - "Can't get IPv6 address string."); - } - } - - config_.values.push_back(port.c_str()); - config_.values.push_back(hostaddr.c_str()); - } - std::unique_ptr connOptions( - PQconninfo(conn.get()), - [](PQconninfoOption* opts) { PQconninfoFree(opts); }); - if (!connOptions) - Throw("Can't get DB connection options."); - - std::size_t nfields = 0; - for (PQconninfoOption* option = connOptions.get(); - option->keyword != nullptr; - ++option) - { - if (++nfields > maxFields) - { - std::stringstream ss; - ss << "DB returned connection options with > " << maxFields - << " fields."; - Throw(ss.str()); - } - - if (!option->val || - (remember_ip && - (!strcmp(option->keyword, "hostaddr") || - !strcmp(option->keyword, "port")))) - { - continue; - } - - if (strlen(option->keyword) > maxFieldSize || - strlen(option->val) > maxFieldSize) - { - std::stringstream ss; - ss << "DB returned a connection option name or value with\n"; - ss << "excessive size (>" << maxFieldSize << " bytes).\n"; - ss << "option (possibly truncated): " - << std::string_view( - option->keyword, - std::min(strlen(option->keyword), maxFieldSize)) - << '\n'; - ss << " value (possibly truncated): " - << std::string_view( - option->val, std::min(strlen(option->val), maxFieldSize)); - Throw(ss.str()); - } - config_.keywords.push_back(option->keyword); - config_.values.push_back(option->val); - } - - config_.keywordsIdx.reserve(config_.keywords.size() + 1); - config_.valuesIdx.reserve(config_.values.size() + 1); - for (std::size_t n = 0; n < config_.keywords.size(); ++n) - { - config_.keywordsIdx.push_back(config_.keywords[n].c_str()); - config_.valuesIdx.push_back(config_.values[n].c_str()); - } - config_.keywordsIdx.push_back(nullptr); - config_.valuesIdx.push_back(nullptr); - - get_if_exists(pgConfig, "max_connections", config_.max_connections); - std::size_t timeout; - if (get_if_exists(pgConfig, "timeout", timeout)) - config_.timeout = std::chrono::seconds(timeout); -} - -void -PgPool::setup() -{ - { - std::stringstream ss; - ss << "max_connections: " << config_.max_connections << ", " - << "timeout: " << config_.timeout.count() << ", " - << "connection params: "; - bool first = true; - for (std::size_t i = 0; i < config_.keywords.size(); ++i) - { - if (first) - first = false; - else - ss << ", "; - ss << config_.keywords[i] << ": " - << (config_.keywords[i] == "password" ? 
"*" : config_.values[i]); - } - JLOG(j_.debug()) << ss.str(); - } -} - -void -PgPool::stop() -{ - std::lock_guard lock(mutex_); - stop_ = true; - cond_.notify_all(); - idle_.clear(); - JLOG(j_.info()) << "stopped"; -} - -void -PgPool::idleSweeper() -{ - std::size_t before, after; - { - std::lock_guard lock(mutex_); - before = idle_.size(); - if (config_.timeout != std::chrono::seconds(0)) - { - auto const found = - idle_.upper_bound(clock_type::now() - config_.timeout); - for (auto it = idle_.begin(); it != found;) - { - it = idle_.erase(it); - --connections_; - } - } - after = idle_.size(); - } - - JLOG(j_.info()) << "Idle sweeper. connections: " << connections_ - << ". checked out: " << connections_ - after - << ". idle before, after sweep: " << before << ", " - << after; -} - -std::unique_ptr -PgPool::checkout() -{ - std::unique_ptr ret; - std::unique_lock lock(mutex_); - do - { - if (stop_) - return {}; - - // If there is a connection in the pool, return the most recent. - if (idle_.size()) - { - auto entry = idle_.rbegin(); - ret = std::move(entry->second); - idle_.erase(std::next(entry).base()); - } - // Otherwise, return a new connection unless over threshold. - else if (connections_ < config_.max_connections) - { - ++connections_; - ret = std::make_unique(config_, j_, stop_, mutex_); - } - // Otherwise, wait until a connection becomes available or we stop. - else - { - JLOG(j_.error()) << "No database connections available."; - cond_.wait(lock); - } - } while (!ret && !stop_); - lock.unlock(); - - return ret; -} - -void -PgPool::checkin(std::unique_ptr& pg) -{ - if (pg) - { - std::lock_guard lock(mutex_); - if (!stop_ && pg->clear()) - { - idle_.emplace(clock_type::now(), std::move(pg)); - } - else - { - --connections_; - pg.reset(); - } - } - - cond_.notify_all(); -} - -//----------------------------------------------------------------------------- - -std::shared_ptr -make_PgPool(Section const& pgConfig, beast::Journal j) -{ - auto ret = std::make_shared(pgConfig, j); - ret->setup(); - return ret; -} - -//----------------------------------------------------------------------------- - -/** Postgres Schema Management - * - * The postgres schema has several properties to facilitate - * consistent deployments, including upgrades. It is not recommended to - * upgrade the schema concurrently. - * - * Initial deployment should be against a completely fresh database. The - * postgres user must have the CREATE TABLE privilege. - * - * With postgres configured, the first step is to apply the version_query - * schema and consume the results. This script returns the currently - * installed schema version, if configured, or 0 if not. It is idempotent. - * - * If the version installed on the database is equal to the - * LATEST_SCHEMA_VERSION, then no action should take place. - * - * If the version on the database is 0, then the entire latest schema - * should be deployed with the applySchema() function. - * Each version that is developed is fully - * represented in the full_schemata array with each version equal to the - * text in the array's index position. For example, index position 1 - * contains the full schema version 1. Position 2 contains schema version 2. - * Index 0 should never be referenced and its value only a placeholder. - * If a fresh installation is aborted, then subsequent fresh installations - * should install the same version previously attempted, even if there - * exists a newer version. The initSchema() function performs this task. 
- * Therefore, previous schema versions should remain in the array
- * without modification as new versions are developed and placed after them.
- * Once the schema is successfully deployed, applySchema() persists the
- * schema version to the database.
- *
- * If the current version of the database is greater than 0, then it means
- * that a previous schema version is already present. In this case, the database
- * schema needs to be updated incrementally for each subsequent version.
- * Again, applySchema() is used to upgrade the schema. Schema upgrades are
- * in the upgrade_schemata array. Each entry by index position represents
- * the database schema version from which the upgrade begins. Each upgrade
- * sets the database to the next version. Schema upgrades can only safely
- * happen from one version to the next. To upgrade several versions of schema,
- * upgrade incrementally for each version that separates the current from the
- * latest. For example, to upgrade from version 5 to version 6 of the schema,
- * use upgrade_schemata[5]. To upgrade from version 1 to version 4, use
- * upgrade_schemata[1], upgrade_schemata[2], and upgrade_schemata[3] in
- * sequence.
- *
- * To upgrade the schema past version 1, the following variables must be
- * updated:
- * 1) LATEST_SCHEMA_VERSION must be set to the new version.
- * 2) A new entry must be placed at the end of the full_schemata array. This
- *    entry should have the entire schema so that fresh installations can
- *    be performed with it. The index position must be equal to the
- *    LATEST_SCHEMA_VERSION.
- * 3) A new entry must be placed at the end of the upgrade_schemata array.
- *    This entry should only contain commands to upgrade the schema from
- *    the immediately previous version to the new version.
- *
- * It is up to the developer to ensure that all schema commands are idempotent.
- * This protects against 2 things:
- * 1) Resuming schema installation after a problem.
- * 2) Concurrent schema updates from multiple processes.
- *
- * There are several things that must be considered for upgrading existing
- * schemata to avoid stability and performance problems. Some examples and
- * suggestions follow.
- * - Schema changes such as creating new columns and indices can consume
- *   a lot of time. Therefore, before such changes, a separate script should
- *   be executed by the user to perform the schema upgrade prior to restarting
- *   rippled.
- * - Stored functions cannot be dropped while being accessed. Also,
- *   dropping stored functions can be ambiguous if multiple functions with
- *   the same name but different signatures exist. Further, stored function
- *   behavior from one schema version to the other would likely be handled
- *   differently by rippled. In this case, it is likely that the functions
- *   themselves should be versioned such as by appending a number to the
- *   end of the name (abcf becomes abcf_2, abcf_3, etc.)
- *
- * Essentially, each schema upgrade will have its own factors to impact
- * service availability and function.
- */
-
-#define LATEST_SCHEMA_VERSION 1
-
-char const* version_query = R"(
-CREATE TABLE IF NOT EXISTS version (version int NOT NULL,
-    fresh_pending int NOT NULL);
-
--- Version 0 means that no schema has been fully deployed.
-DO $$
-BEGIN
-    IF NOT EXISTS (SELECT 1 FROM version) THEN
-        INSERT INTO version VALUES (0, 0);
-END IF;
-END $$;
-
--- Function to set the schema version. _in_pending should only be set to
--- non-zero prior to an attempt to initialize the schema from scratch.
--- After successful initialization, this should be set to 0.
--- _in_version should be set to the version of schema that has been applied
--- once successful application has occurred.
-CREATE OR REPLACE FUNCTION set_schema_version (
-    _in_version int,
-    _in_pending int
-) RETURNS void AS $$
-DECLARE
-    _current_version int;
-BEGIN
-    IF _in_version IS NULL OR _in_pending IS NULL THEN RETURN; END IF;
-    IF EXISTS (SELECT 1 FROM version) THEN DELETE FROM version; END IF;
-    INSERT INTO version VALUES (_in_version, _in_pending);
-    RETURN;
-END;
-$$ LANGUAGE plpgsql;
-
--- PQexec() returns the output of the last statement in its response.
-SELECT * FROM version;
-)";
-
-std::array full_schemata = {
-    // version 0:
-    "There is no such thing as schema version 0."
-
-    // version 1:
-    ,
-    R"(
--- Table to store ledger headers.
-CREATE TABLE IF NOT EXISTS ledgers (
-    ledger_seq bigint PRIMARY KEY,
-    ledger_hash bytea NOT NULL,
-    prev_hash bytea NOT NULL,
-    total_coins bigint NOT NULL,
-    closing_time bigint NOT NULL,
-    prev_closing_time bigint NOT NULL,
-    close_time_res bigint NOT NULL,
-    close_flags bigint NOT NULL,
-    account_set_hash bytea NOT NULL,
-    trans_set_hash bytea NOT NULL
-);
-
--- Index for lookups by ledger hash.
-CREATE INDEX IF NOT EXISTS ledgers_ledger_hash_idx ON ledgers
-    USING hash (ledger_hash);
-
--- Transactions table. Deletes from the ledger table
--- cascade here based on ledger_seq.
-CREATE TABLE IF NOT EXISTS transactions (
-    ledger_seq bigint NOT NULL,
-    transaction_index bigint NOT NULL,
-    trans_id bytea NOT NULL,
-    nodestore_hash bytea NOT NULL,
-    constraint transactions_pkey PRIMARY KEY (ledger_seq, transaction_index),
-    constraint transactions_fkey FOREIGN KEY (ledger_seq)
-        REFERENCES ledgers (ledger_seq) ON DELETE CASCADE
-);
-
--- Index for lookups by transaction hash.
-CREATE INDEX IF NOT EXISTS transactions_trans_id_idx ON transactions
-    USING hash (trans_id);
-
--- Table that maps accounts to transactions affecting them. Deletes from the
--- ledger table by way of transactions table cascade here based on ledger_seq.
-CREATE TABLE IF NOT EXISTS account_transactions (
-    account bytea NOT NULL,
-    ledger_seq bigint NOT NULL,
-    transaction_index bigint NOT NULL,
-    constraint account_transactions_pkey PRIMARY KEY (account, ledger_seq,
-        transaction_index),
-    constraint account_transactions_fkey FOREIGN KEY (ledger_seq,
-        transaction_index) REFERENCES transactions (
-        ledger_seq, transaction_index) ON DELETE CASCADE
-);
-
--- Index to allow for fast cascading deletions and referential integrity.
-CREATE INDEX IF NOT EXISTS fki_account_transactions_idx ON
-    account_transactions USING btree (ledger_seq, transaction_index);
-
--- Avoid inadvertent administrative tampering with committed data.
-CREATE OR REPLACE RULE ledgers_update_protect AS ON UPDATE TO
-    ledgers DO INSTEAD NOTHING;
-CREATE OR REPLACE RULE transactions_update_protect AS ON UPDATE TO
-    transactions DO INSTEAD NOTHING;
-CREATE OR REPLACE RULE account_transactions_update_protect AS ON UPDATE TO
-    account_transactions DO INSTEAD NOTHING;
-
--- Stored procedure to assist with the tx() RPC call. Takes transaction hash
--- as input. If found, returns the ledger sequence in which it was applied.
--- If not, returns the range of ledgers searched.
-CREATE OR REPLACE FUNCTION tx (
-    _in_trans_id bytea
-) RETURNS jsonb AS $$
-DECLARE
-    _min_seq bigint := min_ledger();
-    _max_seq bigint := max_ledger();
-    _ledger_seq bigint;
-    _nodestore_hash bytea;
-BEGIN
-
-    IF _min_seq IS NULL THEN
-        RETURN jsonb_build_object('error', 'empty database');
-    END IF;
-    IF length(_in_trans_id) != 32 THEN
-        RETURN jsonb_build_object('error', '_in_trans_id size: '
-            || to_char(length(_in_trans_id), '999'));
-    END IF;
-
-    EXECUTE 'SELECT nodestore_hash, ledger_seq
-        FROM transactions
-        WHERE trans_id = $1
-        AND ledger_seq BETWEEN $2 AND $3
-        ' INTO _nodestore_hash, _ledger_seq USING _in_trans_id, _min_seq, _max_seq;
-    IF _nodestore_hash IS NULL THEN
-        RETURN jsonb_build_object('min_seq', _min_seq, 'max_seq', _max_seq);
-    END IF;
-    RETURN jsonb_build_object('nodestore_hash', _nodestore_hash, 'ledger_seq',
-        _ledger_seq);
-END;
-$$ LANGUAGE plpgsql;
-
--- Return the earliest ledger sequence intended for range operations
--- that protect the bottom of the range from deletion. Return NULL if empty.
-CREATE OR REPLACE FUNCTION min_ledger () RETURNS bigint AS $$
-DECLARE
-    _min_seq bigint := (SELECT ledger_seq from min_seq);
-BEGIN
-    IF _min_seq IS NULL THEN
-        RETURN (SELECT ledger_seq FROM ledgers ORDER BY ledger_seq ASC LIMIT 1);
-    ELSE
-        RETURN _min_seq;
-    END IF;
-END;
-$$ LANGUAGE plpgsql;
-
--- Return the latest ledger sequence in the database, or NULL if empty.
-CREATE OR REPLACE FUNCTION max_ledger () RETURNS bigint AS $$
-BEGIN
-    RETURN (SELECT ledger_seq FROM ledgers ORDER BY ledger_seq DESC LIMIT 1);
-END;
-$$ LANGUAGE plpgsql;
-
--- account_tx() RPC helper. From the rippled reporting process, only the
--- parameters without defaults are required. For the parameters with
--- defaults, validation should be done by rippled, such as:
--- _in_account_id should be a valid xrp base58 address.
--- _in_forward either true or false according to the published api
--- _in_limit should be validated and not simply passed through from
--- client.
---
--- For _in_ledger_index_min and _in_ledger_index_max, if passed in the
--- request, verify that their type is int and pass through as is.
--- For _ledger_hash, verify and convert from hex length 32 bytes and
--- prepend with \x (\\x C++).
---
--- For _in_ledger_index, if the input type is integer, then pass through
--- as is. If the type is string and contents = validated, then do not
--- set _in_ledger_index. Instead set _in_validated to TRUE.
---
--- There is no need for rippled to do any type of lookup on max/min
--- ledger range, lookup of hash, or the like. This function does those
--- things, including error responses for bad input. Only the above must
--- be done to set the correct search range.
---
--- If a marker is present in the request, verify the members 'ledger'
--- and 'seq' are integers and they correspond to _in_marker_seq
--- _in_marker_index.
--- To reiterate: --- JSON input field 'ledger' corresponds to _in_marker_seq --- JSON input field 'seq' corresponds to _in_marker_index -CREATE OR REPLACE FUNCTION account_tx ( - _in_account_id bytea, - _in_forward bool, - _in_limit bigint, - _in_ledger_index_min bigint = NULL, - _in_ledger_index_max bigint = NULL, - _in_ledger_hash bytea = NULL, - _in_ledger_index bigint = NULL, - _in_validated bool = NULL, - _in_marker_seq bigint = NULL, - _in_marker_index bigint = NULL -) RETURNS jsonb AS $$ -DECLARE - _min bigint; - _max bigint; - _sort_order text := (SELECT CASE WHEN _in_forward IS TRUE THEN - 'ASC' ELSE 'DESC' END); - _marker bool; - _between_min bigint; - _between_max bigint; - _sql text; - _cursor refcursor; - _result jsonb; - _record record; - _tally bigint := 0; - _ret_marker jsonb; - _transactions jsonb[] := '{}'; -BEGIN - IF _in_ledger_index_min IS NOT NULL OR - _in_ledger_index_max IS NOT NULL THEN - _min := (SELECT CASE WHEN _in_ledger_index_min IS NULL - THEN min_ledger() ELSE greatest( - _in_ledger_index_min, min_ledger()) END); - _max := (SELECT CASE WHEN _in_ledger_index_max IS NULL OR - _in_ledger_index_max = -1 THEN max_ledger() ELSE - least(_in_ledger_index_max, max_ledger()) END); - - IF _max < _min THEN - RETURN jsonb_build_object('error', 'max is less than min ledger'); - END IF; - - ELSIF _in_ledger_hash IS NOT NULL OR _in_ledger_index IS NOT NULL - OR _in_validated IS TRUE THEN - IF _in_ledger_hash IS NOT NULL THEN - IF length(_in_ledger_hash) != 32 THEN - RETURN jsonb_build_object('error', '_in_ledger_hash size: ' - || to_char(length(_in_ledger_hash), '999')); - END IF; - EXECUTE 'SELECT ledger_seq - FROM ledgers - WHERE ledger_hash = $1' - INTO _min USING _in_ledger_hash::bytea; - ELSE - IF _in_ledger_index IS NOT NULL AND _in_validated IS TRUE THEN - RETURN jsonb_build_object('error', - '_in_ledger_index cannot be set and _in_validated true'); - END IF; - IF _in_validated IS TRUE THEN - _in_ledger_index := max_ledger(); - END IF; - _min := (SELECT ledger_seq - FROM ledgers - WHERE ledger_seq = _in_ledger_index); - END IF; - IF _min IS NULL THEN - RETURN jsonb_build_object('error', 'ledger not found'); - END IF; - _max := _min; - ELSE - _min := min_ledger(); - _max := max_ledger(); - END IF; - - IF _in_marker_seq IS NOT NULL OR _in_marker_index IS NOT NULL THEN - _marker := TRUE; - IF _in_marker_seq IS NULL OR _in_marker_index IS NULL THEN - -- The rippled implementation returns no transaction results - -- if either of these values are missing. 
- _between_min := 0; - _between_max := 0; - ELSE - IF _in_forward IS TRUE THEN - _between_min := _in_marker_seq; - _between_max := _max; - ELSE - _between_min := _min; - _between_max := _in_marker_seq; - END IF; - END IF; - ELSE - _marker := FALSE; - _between_min := _min; - _between_max := _max; - END IF; - IF _between_max < _between_min THEN - RETURN jsonb_build_object('error', 'ledger search range is ' - || to_char(_between_min, '999') || '-' - || to_char(_between_max, '999')); - END IF; - - _sql := format(' - SELECT transactions.ledger_seq, transactions.transaction_index, - transactions.trans_id, transactions.nodestore_hash - FROM transactions - INNER JOIN account_transactions - ON transactions.ledger_seq = - account_transactions.ledger_seq - AND transactions.transaction_index = - account_transactions.transaction_index - WHERE account_transactions.account = $1 - AND account_transactions.ledger_seq BETWEEN $2 AND $3 - ORDER BY transactions.ledger_seq %s, transactions.transaction_index %s - ', _sort_order, _sort_order); - - OPEN _cursor FOR EXECUTE _sql USING _in_account_id, _between_min, - _between_max; - LOOP - FETCH _cursor INTO _record; - IF _record IS NULL THEN EXIT; END IF; - IF _marker IS TRUE THEN - IF _in_marker_seq = _record.ledger_seq THEN - IF _in_forward IS TRUE THEN - IF _in_marker_index > _record.transaction_index THEN - CONTINUE; - END IF; - ELSE - IF _in_marker_index < _record.transaction_index THEN - CONTINUE; - END IF; - END IF; - END IF; - _marker := FALSE; - END IF; - - _tally := _tally + 1; - IF _tally > _in_limit THEN - _ret_marker := jsonb_build_object( - 'ledger', _record.ledger_seq, - 'seq', _record.transaction_index); - EXIT; - END IF; - - -- Is the transaction index in the tx object? - _transactions := _transactions || jsonb_build_object( - 'ledger_seq', _record.ledger_seq, - 'transaction_index', _record.transaction_index, - 'trans_id', _record.trans_id, - 'nodestore_hash', _record.nodestore_hash); - - END LOOP; - CLOSE _cursor; - - _result := jsonb_build_object('ledger_index_min', _min, - 'ledger_index_max', _max, - 'transactions', _transactions); - IF _ret_marker IS NOT NULL THEN - _result := _result || jsonb_build_object('marker', _ret_marker); - END IF; - RETURN _result; -END; -$$ LANGUAGE plpgsql; - --- Trigger prior to insert on ledgers table. Validates length of hash fields. --- Verifies ancestry based on ledger_hash & prev_hash as follows: --- 1) If ledgers is empty, allows insert. --- 2) For each new row, check for previous and later ledgers by a single --- sequence. For each that exist, confirm ancestry based on hashes. --- 3) Disallow inserts with no prior or next ledger by sequence if any --- ledgers currently exist. This disallows gaps to be introduced by --- way of inserting. 
-CREATE OR REPLACE FUNCTION insert_ancestry() RETURNS TRIGGER AS $$ -DECLARE - _parent bytea; - _child bytea; -BEGIN - IF length(NEW.ledger_hash) != 32 OR length(NEW.prev_hash) != 32 THEN - RAISE 'ledger_hash and prev_hash must each be 32 bytes: %', NEW; - END IF; - - IF (SELECT ledger_hash - FROM ledgers - ORDER BY ledger_seq DESC - LIMIT 1) = NEW.prev_hash THEN RETURN NEW; END IF; - - IF NOT EXISTS (SELECT 1 FROM LEDGERS) THEN RETURN NEW; END IF; - - _parent := (SELECT ledger_hash - FROM ledgers - WHERE ledger_seq = NEW.ledger_seq - 1); - _child := (SELECT prev_hash - FROM ledgers - WHERE ledger_seq = NEW.ledger_seq + 1); - IF _parent IS NULL AND _child IS NULL THEN - RAISE 'Ledger Ancestry error: orphan.'; - END IF; - IF _parent != NEW.prev_hash THEN - RAISE 'Ledger Ancestry error: bad parent.'; - END IF; - IF _child != NEW.ledger_hash THEN - RAISE 'Ledger Ancestry error: bad child.'; - END IF; - - RETURN NEW; -END; -$$ LANGUAGE plpgsql; - --- Trigger function prior to delete on ledgers table. Disallow gaps from --- forming. Do not allow deletions if both the previous and next ledgers --- are present. In other words, only allow either the least or greatest --- to be deleted. -CREATE OR REPLACE FUNCTION delete_ancestry () RETURNS TRIGGER AS $$ -BEGIN - IF EXISTS (SELECT 1 - FROM ledgers - WHERE ledger_seq = OLD.ledger_seq + 1) - AND EXISTS (SELECT 1 - FROM ledgers - WHERE ledger_seq = OLD.ledger_seq - 1) THEN - RAISE 'Ledger Ancestry error: Can only delete the least or greatest ' - 'ledger.'; - END IF; - RETURN OLD; -END; -$$ LANGUAGE plpgsql; - --- Track the minimum sequence that should be used for ranged queries --- with protection against deletion during the query. This should --- be updated before calling online_delete() to not block deleting that --- range. -CREATE TABLE IF NOT EXISTS min_seq ( - ledger_seq bigint NOT NULL -); - --- Set the minimum sequence for use in ranged queries with protection --- against deletion greater than or equal to the input parameter. This --- should be called prior to online_delete() with the same parameter --- value so that online_delete() is not blocked by range queries --- that are protected against concurrent deletion of the ledger at --- the bottom of the range. This function needs to be called from a --- separate transaction from that which executes online_delete(). -CREATE OR REPLACE FUNCTION prepare_delete ( - _in_last_rotated bigint -) RETURNS void AS $$ -BEGIN - IF EXISTS (SELECT 1 FROM min_seq) THEN - DELETE FROM min_seq; - END IF; - INSERT INTO min_seq VALUES (_in_last_rotated + 1); -END; -$$ LANGUAGE plpgsql; - --- Function to delete old data. All data belonging to ledgers prior to and --- equal to the _in_seq parameter will be deleted. This should be --- called with the input parameter equivalent to the value of lastRotated --- in rippled's online_delete routine. -CREATE OR REPLACE FUNCTION online_delete ( - _in_seq bigint -) RETURNS void AS $$ -BEGIN - DELETE FROM LEDGERS WHERE ledger_seq <= _in_seq; -END; -$$ LANGUAGE plpgsql; - --- Function to delete data from the top of the ledger range. Delete --- everything greater than the input parameter. --- It doesn't do a normal range delete because of the trigger protecting --- deletions causing gaps. Instead, it walks back from the greatest ledger. 
-CREATE OR REPLACE FUNCTION delete_above (
-    _in_seq bigint
-) RETURNS void AS $$
-DECLARE
-    _max_seq bigint := max_ledger();
-    _i bigint := _max_seq;
-BEGIN
-    IF _max_seq IS NULL THEN RETURN; END IF;
-    LOOP
-        IF _i <= _in_seq THEN RETURN; END IF;
-        EXECUTE 'DELETE FROM ledgers WHERE ledger_seq = $1' USING _i;
-        _i := _i - 1;
-    END LOOP;
-END;
-$$ LANGUAGE plpgsql;
-
--- Verify correct ancestry of ledgers in database:
--- Table to persist last-confirmed latest ledger with proper ancestry.
-CREATE TABLE IF NOT EXISTS ancestry_verified (
-    ledger_seq bigint NOT NULL
-);
-
--- Function to verify ancestry of ledgers based on ledger_hash and prev_hash.
--- Upon failure, returns ledger sequence failing ancestry check.
--- Otherwise, returns NULL.
--- _in_full: If TRUE, verify entire table. Else verify starting from
---           value in ancestry_verified table. If no value, then start
---           from lowest ledger.
--- _in_persist: If TRUE, persist the latest ledger with correct ancestry.
---              If an exception was raised because of failure, persist
---              the latest ledger prior to that which failed.
--- _in_min: If set and _in_full is not true, the starting ledger from which
---          to verify.
--- _in_max: If set and _in_full is not true, the latest ledger to verify.
-CREATE OR REPLACE FUNCTION check_ancestry (
-    _in_full bool = FALSE,
-    _in_persist bool = TRUE,
-    _in_min bigint = NULL,
-    _in_max bigint = NULL
-) RETURNS bigint AS $$
-DECLARE
-    _min bigint;
-    _max bigint;
-    _last_verified bigint;
-    _parent ledgers;
-    _current ledgers;
-    _cursor refcursor;
-BEGIN
-    IF _in_full IS TRUE AND
-            ((_in_min IS NOT NULL) OR (_in_max IS NOT NULL)) THEN
-        RAISE 'Cannot specify manual range and do full check.';
-    END IF;
-
-    IF _in_min IS NOT NULL THEN
-        _min := _in_min;
-    ELSIF _in_full IS NOT TRUE THEN
-        _last_verified := (SELECT ledger_seq FROM ancestry_verified);
-        IF _last_verified IS NULL THEN
-            _min := min_ledger();
-        ELSE
-            _min := _last_verified + 1;
-        END IF;
-    ELSE
-        _min := min_ledger();
-    END IF;
-    EXECUTE 'SELECT * FROM ledgers WHERE ledger_seq = $1'
-        INTO _parent USING _min - 1;
-    IF _last_verified IS NOT NULL AND _parent IS NULL THEN
-        RAISE 'Verified ledger % doesn''t exist.', _last_verified;
-    END IF;
-
-    IF _in_max IS NOT NULL THEN
-        _max := _in_max;
-    ELSE
-        _max := max_ledger();
-    END IF;
-
-    OPEN _cursor FOR EXECUTE 'SELECT *
-        FROM ledgers
-        WHERE ledger_seq BETWEEN $1 AND $2
-        ORDER BY ledger_seq ASC'
-        USING _min, _max;
-    LOOP
-        FETCH _cursor INTO _current;
-        IF _current IS NULL THEN EXIT; END IF;
-        IF _parent IS NOT NULL THEN
-            IF _current.prev_hash != _parent.ledger_hash THEN
-                CLOSE _cursor;
-                RETURN _current.ledger_seq;
-                RAISE 'Ledger ancestry failure current, parent:% %',
-                    _current, _parent;
-            END IF;
-        END IF;
-        _parent := _current;
-    END LOOP;
-    CLOSE _cursor;
-
-    IF _in_persist IS TRUE AND _parent IS NOT NULL THEN
-        DELETE FROM ancestry_verified;
-        INSERT INTO ancestry_verified VALUES (_parent.ledger_seq);
-    END IF;
-
-    RETURN NULL;
-END;
-$$ LANGUAGE plpgsql;
-
--- Return number of whole seconds since the latest ledger was inserted, based
--- on ledger close time (not wall clock) of the insert.
--- Note that ledgers.closing_time is number of seconds since the XRP
--- epoch, which is 01/01/2000 00:00:00. This in turn is 946684800 seconds
--- after the UNIX epoch. This conforms to the "age" field in the
--- server_info RPC call.
-CREATE OR REPLACE FUNCTION age () RETURNS bigint AS $$
-BEGIN
-    RETURN (EXTRACT(EPOCH FROM (now())) -
-        (946684800 + (SELECT closing_time
-                        FROM ledgers
-                       ORDER BY ledger_seq DESC
-                       LIMIT 1)))::bigint;
-END;
-$$ LANGUAGE plpgsql;
-
--- Return range of ledgers, or empty if none. This conforms to the
--- "complete_ledgers" field of the server_info RPC call. Note
--- that ledger gaps are prevented for reporting mode so the range
--- is simply the set between the least and greatest ledgers.
-CREATE OR REPLACE FUNCTION complete_ledgers () RETURNS text AS $$
-DECLARE
-    _min bigint := min_ledger();
-    _max bigint := max_ledger();
-BEGIN
-    IF _min IS NULL THEN RETURN 'empty'; END IF;
-    IF _min = _max THEN RETURN _min; END IF;
-    RETURN _min || '-' || _max;
-END;
-$$ LANGUAGE plpgsql;
-
-)"
-
-    // version 2:
-    // , R"(Full idempotent text of schema version 2)"
-
-    // version 3:
-    // , R"(Full idempotent text of schema version 3)"
-
-    // version 4:
-    // , R"(Full idempotent text of schema version 4)"
-
-    // ...
-
-    // version n:
-    // , R"(Full idempotent text of schema version n)"
-};
-
-std::array upgrade_schemata = {
-    // upgrade from version 0:
-    "There is no upgrade path from version 0. Instead, install "
-    "from full_schemata."
-    // upgrade from version 1 to 2:
-    //, R"(Text to idempotently upgrade from version 1 to 2)"
-    // upgrade from version 2 to 3:
-    //, R"(Text to idempotently upgrade from version 2 to 3)"
-    // upgrade from version 3 to 4:
-    //, R"(Text to idempotently upgrade from version 3 to 4)"
-    // ...
-    // upgrade from version n-1 to n:
-    //, R"(Text to idempotently upgrade from version n-1 to n)"
-};
-
-/** Apply schema to postgres.
- *
- * The schema text should contain idempotent SQL & plpgSQL statements.
- * Once completed, the version of the schema will be persisted.
- *
- * Throws upon error.
- *
- * @param pool Postgres connection pool manager.
- * @param schema SQL commands separated by semi-colon.
- * @param currentVersion The current version of the schema on the database.
- * @param schemaVersion The version that will be in place once the schema
- *        has been applied.
- */
-void
-applySchema(
-    std::shared_ptr const& pool,
-    char const* schema,
-    std::uint32_t currentVersion,
-    std::uint32_t schemaVersion)
-{
-    if (currentVersion != 0 && schemaVersion != currentVersion + 1)
-    {
-        assert(false);
-        std::stringstream ss;
-        ss << "Schema upgrade versions past initial deployment must increase "
-              "monotonically. Versions: current, target: "
-           << currentVersion << ", " << schemaVersion;
-        Throw(ss.str());
-    }
-
-    auto res = PgQuery(pool)({schema, {}});
-    if (!res)
-    {
-        std::stringstream ss;
-        ss << "Error applying schema from version " << currentVersion << " to "
-           << schemaVersion << ": " << res.msg();
-        Throw(ss.str());
-    }
-
-    auto cmd = boost::format(R"(SELECT set_schema_version(%u, 0))");
-    res = PgQuery(pool)({boost::str(cmd % schemaVersion).c_str(), {}});
-    if (!res)
-    {
-        std::stringstream ss;
-        ss << "Error setting schema version from " << currentVersion << " to "
-           << schemaVersion << ": " << res.msg();
-        Throw(ss.str());
-    }
-}
-
-void
-initSchema(std::shared_ptr const& pool)
-{
-    // Figure out what schema version, if any, is already installed.
- auto res = PgQuery(pool)({version_query, {}}); - if (!res) - { - std::stringstream ss; - ss << "Error getting database schema version: " << res.msg(); - Throw(ss.str()); - } - std::uint32_t currentSchemaVersion = res.asInt(); - std::uint32_t const pendingSchemaVersion = res.asInt(0, 1); - - // Nothing to do if we are on the latest schema; - if (currentSchemaVersion == LATEST_SCHEMA_VERSION) - return; - - if (currentSchemaVersion == 0) - { - // If a fresh install has not been completed, then re-attempt - // the install of the same schema version. - std::uint32_t const freshVersion = - pendingSchemaVersion ? pendingSchemaVersion : LATEST_SCHEMA_VERSION; - // Persist that we are attempting a fresh install to the latest version. - // This protects against corruption in an aborted install that is - // followed by a fresh installation attempt with a new schema. - auto cmd = boost::format(R"(SELECT set_schema_version(0, %u))"); - res = PgQuery(pool)({boost::str(cmd % freshVersion).c_str(), {}}); - if (!res) - { - std::stringstream ss; - ss << "Error setting schema version from " << currentSchemaVersion - << " to " << freshVersion << ": " << res.msg(); - Throw(ss.str()); - } - - // Install the full latest schema. - applySchema( - pool, - full_schemata[freshVersion], - currentSchemaVersion, - freshVersion); - currentSchemaVersion = freshVersion; - } - - // Incrementally upgrade one version at a time until latest. - for (; currentSchemaVersion < LATEST_SCHEMA_VERSION; ++currentSchemaVersion) - { - applySchema( - pool, - upgrade_schemata[currentSchemaVersion], - currentSchemaVersion, - currentSchemaVersion + 1); - } -} - -} // namespace ripple -#endif diff --git a/src/xrpld/core/Pg.h b/src/xrpld/core/Pg.h deleted file mode 100644 index c1333ed66fb..00000000000 --- a/src/xrpld/core/Pg.h +++ /dev/null @@ -1,520 +0,0 @@ -//------------------------------------------------------------------------------ -/* - This file is part of rippled: https://github.com/ripple/rippled - Copyright (c) 2020 Ripple Labs Inc. - - Permission to use, copy, modify, and/or distribute this software for any - purpose with or without fee is hereby granted, provided that the above - copyright notice and this permission notice appear in all copies. - - THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. -*/ -//============================================================================== - -#ifdef RIPPLED_REPORTING -#ifndef RIPPLE_CORE_PG_H_INCLUDED -#define RIPPLE_CORE_PG_H_INCLUDED - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -namespace ripple { - -// These postgres structs must be freed only by the postgres API. -using pg_result_type = std::unique_ptr; -using pg_connection_type = std::unique_ptr; - -/** first: command - * second: parameter values - * - * The 2nd member takes an optional string to - * distinguish between NULL parameters and empty strings. An empty - * item corresponds to a NULL parameter. 
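As a concrete illustration of the pg_params shape documented above, consider
this minimal sketch. pg_params_sketch is a local stand-in declared so the
snippet compiles on its own; it mirrors, but is not, the real pg_params alias.

    #include <optional>
    #include <string>
    #include <utility>
    #include <vector>

    // The SQL text plus one optional string per placeholder. std::nullopt
    // becomes a SQL NULL; an empty string stays a zero-length, non-NULL
    // value.
    using pg_params_sketch = std::pair<
        char const*,
        std::vector<std::optional<std::string>>>;

    int main()
    {
        pg_params_sketch params{
            "INSERT INTO t (a, b, c) VALUES ($1, $2, $3)",
            {std::string("\\x0123"),  // bytea values are hex, prefixed \x
             std::nullopt,            // NULL parameter
             std::string("")}};       // empty, non-NULL string
        return params.second.size() == 3 ? 0 : 1;
    }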
- * - * Postgres reads each parameter as a c-string, regardless of actual type. - * Binary types (bytea) need to be converted to hex and prepended with - * \x ("\\x"). - */ -using pg_params = - std::pair>>; - -/** Parameter values for pg API. */ -using pg_formatted_params = std::vector; - -/** Parameters for managing postgres connections. */ -struct PgConfig -{ - /** Maximum connections allowed to db. */ - std::size_t max_connections{std::numeric_limits::max()}; - /** Close idle connections past this duration. */ - std::chrono::seconds timeout{600}; - - /** Index of DB connection parameter names. */ - std::vector keywordsIdx; - /** DB connection parameter names. */ - std::vector keywords; - /** Index of DB connection parameter values. */ - std::vector valuesIdx; - /** DB connection parameter values. */ - std::vector values; -}; - -//----------------------------------------------------------------------------- - -/** Class that operates on postgres query results. - * - * The functions that return results do not check first whether the - * expected results are actually there. Therefore, the caller first needs - * to check whether or not a valid response was returned using the operator - * bool() overload. If number of tuples or fields are unknown, then check - * those. Each result field should be checked for null before attempting - * to return results. Finally, the caller must know the type of the field - * before calling the corresponding function to return a field. Postgres - * internally stores each result field as null-terminated strings. - */ -class PgResult -{ - // The result object must be freed using the libpq API PQclear() call. - pg_result_type result_{nullptr, [](PGresult* result) { PQclear(result); }}; - std::optional> error_; - -public: - /** Constructor for when the process is stopping. - * - */ - PgResult() - { - } - - /** Constructor for successful query results. - * - * @param result Query result. - */ - explicit PgResult(pg_result_type&& result) : result_(std::move(result)) - { - } - - /** Constructor for failed query results. - * - * @param result Query result that contains error information. - * @param conn Postgres connection that contains error information. - */ - PgResult(PGresult* result, PGconn* conn) - : error_({PQresultStatus(result), PQerrorMessage(conn)}) - { - } - - /** Return field as a null-terminated string pointer. - * - * Note that this function does not guarantee that the result struct - * exists, or that the row and fields exist, or that the field is - * not null. - * - * @param ntuple Row number. - * @param nfield Field number. - * @return Field contents. - */ - char const* - c_str(int ntuple = 0, int nfield = 0) const - { - return PQgetvalue(result_.get(), ntuple, nfield); - } - - /** Return field as equivalent to Postgres' INT type (32 bit signed). - * - * Note that this function does not guarantee that the result struct - * exists, or that the row and fields exist, or that the field is - * not null, or that the type is that requested. - - * @param ntuple Row number. - * @param nfield Field number. - * @return Field contents. - */ - std::int32_t - asInt(int ntuple = 0, int nfield = 0) const - { - return boost::lexical_cast( - PQgetvalue(result_.get(), ntuple, nfield)); - } - - /** Return field as equivalent to Postgres' BIGINT type (64 bit signed). - * - * Note that this function does not guarantee that the result struct - * exists, or that the row and fields exist, or that the field is - * not null, or that the type is that requested. 
- - * @param ntuple Row number. - * @param nfield Field number. - * @return Field contents. - */ - std::int64_t - asBigInt(int ntuple = 0, int nfield = 0) const - { - return boost::lexical_cast( - PQgetvalue(result_.get(), ntuple, nfield)); - } - - /** Returns whether the field is NULL or not. - * - * Note that this function does not guarantee that the result struct - * exists, or that the row and fields exist. - * - * @param ntuple Row number. - * @param nfield Field number. - * @return Whether field is NULL. - */ - bool - isNull(int ntuple = 0, int nfield = 0) const - { - return PQgetisnull(result_.get(), ntuple, nfield); - } - - /** Check whether a valid response occurred. - * - * @return Whether or not the query returned a valid response. - */ - operator bool() const - { - return result_ != nullptr; - } - - /** Message describing the query results suitable for diagnostics. - * - * If error, then the postgres error type and message are returned. - * Otherwise, "ok" - * - * @return Query result message. - */ - std::string - msg() const; - - /** Get number of rows in result. - * - * Note that this function does not guarantee that the result struct - * exists. - * - * @return Number of result rows. - */ - int - ntuples() const - { - return PQntuples(result_.get()); - } - - /** Get number of fields in result. - * - * Note that this function does not guarantee that the result struct - * exists. - * - * @return Number of result fields. - */ - int - nfields() const - { - return PQnfields(result_.get()); - } - - /** Return result status of the command. - * - * Note that this function does not guarantee that the result struct - * exists. - * - * @return - */ - ExecStatusType - status() const - { - return PQresultStatus(result_.get()); - } -}; - -/* Class that contains and operates upon a postgres connection. */ -class Pg -{ - friend class PgPool; - friend class PgQuery; - - PgConfig const& config_; - beast::Journal const j_; - bool& stop_; - std::mutex& mutex_; - - // The connection object must be freed using the libpq API PQfinish() call. - pg_connection_type conn_{nullptr, [](PGconn* conn) { PQfinish(conn); }}; - - /** Clear results from the connection. - * - * Results from previous commands must be cleared before new commands - * can be processed. This function should be called on connections - * that weren't processed completely before being reused, such as - * when being checked-in. - * - * @return whether or not connection still exists. - */ - bool - clear(); - - /** Connect to postgres. - * - * Idempotently connects to postgres by first checking whether an - * existing connection is already present. If connection is not present - * or in an errored state, reconnects to the database. - */ - void - connect(); - - /** Disconnect from postgres. */ - void - disconnect() - { - conn_.reset(); - } - - /** Execute postgres query. - * - * If parameters are included, then the command should contain only a - * single SQL statement. If no parameters, then multiple SQL statements - * delimited by semi-colons can be processed. The response is from - * the last command executed. - * - * @param command postgres API command string. - * @param nParams postgres API number of parameters. - * @param values postgres API array of parameter. - * @return Query result object. - */ - PgResult - query(char const* command, std::size_t nParams, char const* const* values); - - /** Execute postgres query with no parameters. - * - * @param command Query string. 
- * @return Query result object; - */ - PgResult - query(char const* command) - { - return query(command, 0, nullptr); - } - - /** Execute postgres query with parameters. - * - * @param dbParams Database command and parameter values. - * @return Query result object. - */ - PgResult - query(pg_params const& dbParams); - - /** Insert multiple records into a table using Postgres' bulk COPY. - * - * Throws upon error. - * - * @param table Name of table for import. - * @param records Records in the COPY IN format. - */ - void - bulkInsert(char const* table, std::string const& records); - -public: - /** Constructor for Pg class. - * - * @param config Config parameters. - * @param j Logger object. - * @param stop Reference to connection pool's stop flag. - * @param mutex Reference to connection pool's mutex. - */ - Pg(PgConfig const& config, - beast::Journal const j, - bool& stop, - std::mutex& mutex) - : config_(config), j_(j), stop_(stop), mutex_(mutex) - { - } -}; - -//----------------------------------------------------------------------------- - -/** Database connection pool. - * - * Allow re-use of postgres connections. Postgres connections are created - * as needed until configurable limit is reached. After use, each connection - * is placed in a container ordered by time of use. Each request for - * a connection grabs the most recently used connection from the container. - * If none are available, a new connection is used (up to configured limit). - * Idle connections are destroyed periodically after configurable - * timeout duration. - * - * This should be stored as a shared pointer so PgQuery objects can safely - * outlive it. - */ -class PgPool -{ - friend class PgQuery; - - using clock_type = std::chrono::steady_clock; - - PgConfig config_; - beast::Journal const j_; - std::mutex mutex_; - std::condition_variable cond_; - std::size_t connections_{}; - bool stop_{false}; - - /** Idle database connections ordered by timestamp to allow timing out. */ - std::multimap, std::unique_ptr> - idle_; - - /** Get a postgres connection object. - * - * Return the most recent idle connection in the pool, if available. - * Otherwise, return a new connection unless we're at the threshold. - * If so, then wait until a connection becomes available. - * - * @return Postgres object. - */ - std::unique_ptr - checkout(); - - /** Return a postgres object to the pool for reuse. - * - * If connection is healthy, place in pool for reuse. After calling this, - * the container no longer have a connection unless checkout() is called. - * - * @param pg Pg object. - */ - void - checkin(std::unique_ptr& pg); - -public: - /** Connection pool constructor. - * - * @param pgConfig Postgres config. - * @param j Logger object. - */ - PgPool(Section const& pgConfig, beast::Journal j); - - /** Initiate idle connection timer. - * - * The PgPool object needs to be fully constructed to support asynchronous - * operations. - */ - void - setup(); - - /** Prepare for process shutdown. */ - void - stop(); - - /** Disconnect idle postgres connections. */ - void - idleSweeper(); -}; - -//----------------------------------------------------------------------------- - -/** Class to query postgres. - * - * This class should be used by functions outside of this - * compilation unit for querying postgres. It automatically acquires and - * relinquishes a database connection to handle each query. 
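A hedged usage sketch of that acquire/release cycle, using the names declared in this header (the table queried is illustrative):

```cpp
#include <memory>

void
runOneQuery(std::shared_ptr<PgPool> const& pool)
{
    PgQuery pg(pool);  // constructor checks a connection out of the pool
    PgResult res = pg("SELECT count(*) FROM ledgers");
    if (!res)
        return;  // no valid response: query error or pool shutting down
    // ... inspect res via ntuples()/asBigInt()/etc. ...
}  // destructor checks the connection back in
```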
- */ -class PgQuery -{ -private: - std::shared_ptr pool_; - std::unique_ptr pg_; - -public: - PgQuery() = delete; - - PgQuery(std::shared_ptr const& pool) - : pool_(pool), pg_(pool->checkout()) - { - } - - ~PgQuery() - { - pool_->checkin(pg_); - } - - /** Execute postgres query with parameters. - * - * @param dbParams Database command with parameters. - * @return Result of query, including errors. - */ - PgResult - operator()(pg_params const& dbParams) - { - if (!pg_) // It means we're stopping. Return empty result. - return PgResult(); - return pg_->query(dbParams); - } - - /** Execute postgres query with only command statement. - * - * @param command Command statement. - * @return Result of query, including errors. - */ - PgResult - operator()(char const* command) - { - return operator()(pg_params{command, {}}); - } - - /** Insert multiple records into a table using Postgres' bulk COPY. - * - * Throws upon error. - * - * @param table Name of table for import. - * @param records Records in the COPY IN format. - */ - void - bulkInsert(char const* table, std::string const& records) - { - pg_->bulkInsert(table, records); - } -}; - -//----------------------------------------------------------------------------- - -/** Create Postgres connection pool manager. - * - * @param pgConfig Configuration for Postgres. - * @param j Logger object. - * @return Postgres connection pool manager - */ -std::shared_ptr -make_PgPool(Section const& pgConfig, beast::Journal j); - -/** Initialize the Postgres schema. - * - * This function ensures that the database is running the latest version - * of the schema. - * - * @param pool Postgres connection pool manager. - */ -void -initSchema(std::shared_ptr const& pool); - -} // namespace ripple - -#endif // RIPPLE_CORE_PG_H_INCLUDED -#endif // RIPPLED_REPORTING diff --git a/src/xrpld/core/detail/Config.cpp b/src/xrpld/core/detail/Config.cpp index 07d269883e2..885951111c0 100644 --- a/src/xrpld/core/detail/Config.cpp +++ b/src/xrpld/core/detail/Config.cpp @@ -382,11 +382,6 @@ Config::setup( // Update default values load(); - if (exists("reporting")) - { - RUN_REPORTING = true; - RUN_STANDALONE = true; - } { // load() may have set a new value for the dataDir std::string const dbPath(legacy("database_path")); diff --git a/src/xrpld/core/detail/DatabaseCon.cpp b/src/xrpld/core/detail/DatabaseCon.cpp index c23c7491b04..10b34efd41e 100644 --- a/src/xrpld/core/detail/DatabaseCon.cpp +++ b/src/xrpld/core/detail/DatabaseCon.cpp @@ -109,7 +109,6 @@ setup_DatabaseCon(Config const& c, std::optional j) setup.startUp = c.START_UP; setup.standAlone = c.standalone(); - setup.reporting = c.reporting(); setup.dataDir = c.legacy("database_path"); if (!setup.standAlone && setup.dataDir.empty()) { diff --git a/src/xrpld/nodestore/Backend.h b/src/xrpld/nodestore/Backend.h index 85d38ec0a7b..29f37553327 100644 --- a/src/xrpld/nodestore/Backend.h +++ b/src/xrpld/nodestore/Backend.h @@ -39,29 +39,6 @@ namespace NodeStore { class Backend { public: - template - struct Counters - { - Counters() = default; - Counters(Counters const&) = default; - - template - Counters(Counters const& other) - : writeDurationUs(other.writeDurationUs) - , writeRetries(other.writeRetries) - , writesDelayed(other.writesDelayed) - , readRetries(other.readRetries) - , readErrors(other.readErrors) - { - } - - T writeDurationUs = {}; - T writeRetries = {}; - T writesDelayed = {}; - T readRetries = {}; - T readErrors = {}; - }; - /** Destroy the backend. All open files are closed and flushed. 
If there are batched writes @@ -174,17 +151,6 @@ class Backend /** Returns the number of file descriptors the backend expects to need. */ virtual int fdRequired() const = 0; - - /** Returns read and write stats. - - @note The Counters struct is specific to and only used - by CassandraBackend. - */ - virtual std::optional> - counters() const - { - return std::nullopt; - } }; } // namespace NodeStore diff --git a/src/xrpld/nodestore/Database.h b/src/xrpld/nodestore/Database.h index daf0483e890..bd25046fee2 100644 --- a/src/xrpld/nodestore/Database.h +++ b/src/xrpld/nodestore/Database.h @@ -302,17 +302,6 @@ class Database virtual void for_each(std::function)> f) = 0; - /** Retrieve backend read and write stats. - - @note The Counters struct is specific to and only used - by CassandraBackend. - */ - virtual std::optional> - getCounters() const - { - return std::nullopt; - } - void threadEntry(); }; diff --git a/src/xrpld/nodestore/backend/CassandraFactory.cpp b/src/xrpld/nodestore/backend/CassandraFactory.cpp deleted file mode 100644 index c53e7709587..00000000000 --- a/src/xrpld/nodestore/backend/CassandraFactory.cpp +++ /dev/null @@ -1,983 +0,0 @@ -//------------------------------------------------------------------------------ -/* - This file is part of rippled: https://github.com/ripple/rippled - Copyright (c) 2020 Ripple Labs Inc. - - Permission to use, copy, modify, and/or distribute this software for any - purpose with or without fee is hereby granted, provided that the above - copyright notice and this permission notice appear in all copies. - - THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. -*/ -//============================================================================== - -#ifdef RIPPLED_REPORTING - -#include -#include - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -namespace ripple { -namespace NodeStore { - -void -writeCallback(CassFuture* fut, void* cbData); -void -readCallback(CassFuture* fut, void* cbData); - -class CassandraBackend : public Backend -{ -private: - // convenience function for one-off queries. 
For normal reads and writes, - // use the prepared statements insert_ and select_ - CassStatement* - makeStatement(char const* query, std::size_t params) - { - CassStatement* ret = cass_statement_new(query, params); - CassError rc = - cass_statement_set_consistency(ret, CASS_CONSISTENCY_QUORUM); - if (rc != CASS_OK) - { - std::stringstream ss; - ss << "nodestore: Error setting query consistency: " << query - << ", result: " << rc << ", " << cass_error_desc(rc); - Throw(ss.str()); - } - return ret; - } - - beast::Journal const j_; - // size of a key - size_t const keyBytes_; - - Section const config_; - - std::atomic open_{false}; - - // mutex used for open() and close() - std::mutex mutex_; - - std::unique_ptr session_{ - nullptr, - [](CassSession* session) { - // Try to disconnect gracefully. - CassFuture* fut = cass_session_close(session); - cass_future_wait(fut); - cass_future_free(fut); - cass_session_free(session); - }}; - - // Database statements cached server side. Using these is more efficient - // than making a new statement - const CassPrepared* insert_ = nullptr; - const CassPrepared* select_ = nullptr; - - // io_context used for exponential backoff for write retries - boost::asio::io_context ioContext_; - std::optional work_; - std::thread ioThread_; - - // maximum number of concurrent in flight requests. New requests will wait - // for earlier requests to finish if this limit is exceeded - uint32_t maxRequestsOutstanding = 10000000; - std::atomic_uint32_t numRequestsOutstanding_ = 0; - - // mutex and condition_variable to limit the number of concurrent in flight - // requests - std::mutex throttleMutex_; - std::condition_variable throttleCv_; - - // writes are asynchronous. This mutex and condition_variable is used to - // wait for all writes to finish - std::mutex syncMutex_; - std::condition_variable syncCv_; - - Counters> counters_; - -public: - CassandraBackend( - size_t keyBytes, - Section const& keyValues, - beast::Journal journal) - : j_(journal), keyBytes_(keyBytes), config_(keyValues) - { - } - - ~CassandraBackend() override - { - close(); - } - - std::string - getName() override - { - return "cassandra"; - } - - bool - isOpen() override - { - return open_; - } - - // Setup all of the necessary components for talking to the database. 
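Reduced to its skeleton, the setup below is a retry-until-ready loop; a sketch, with `trySetup` as a hypothetical stand-in for one connect/create attempt:

```cpp
#include <chrono>
#include <thread>

bool trySetup();  // hypothetical: one session/table/prepare attempt

void
waitUntilReady()
{
    // Retry once per second until the keyspace, table, and prepared
    // statements are all usable, mirroring the loops in open() below.
    while (!trySetup())
        std::this_thread::sleep_for(std::chrono::seconds(1));
}
```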
- // Create the table if it doesn't exist already - // @param createIfMissing ignored - void - open(bool createIfMissing) override - { - if (open_) - { - assert(false); - JLOG(j_.error()) << "database is already open"; - return; - } - - std::lock_guard lock(mutex_); - CassCluster* cluster = cass_cluster_new(); - if (!cluster) - Throw( - "nodestore:: Failed to create CassCluster"); - - std::string secureConnectBundle = get(config_, "secure_connect_bundle"); - - if (!secureConnectBundle.empty()) - { - /* Setup driver to connect to the cloud using the secure connection - * bundle */ - if (cass_cluster_set_cloud_secure_connection_bundle( - cluster, secureConnectBundle.c_str()) != CASS_OK) - { - JLOG(j_.error()) << "Unable to configure cloud using the " - "secure connection bundle: " - << secureConnectBundle; - Throw( - "nodestore: Failed to connect using secure connection " - "bundle"); - return; - } - } - else - { - std::string contact_points = get(config_, "contact_points"); - if (contact_points.empty()) - { - Throw( - "nodestore: Missing contact_points in Cassandra config"); - } - CassError rc = cass_cluster_set_contact_points( - cluster, contact_points.c_str()); - if (rc != CASS_OK) - { - std::stringstream ss; - ss << "nodestore: Error setting Cassandra contact_points: " - << contact_points << ", result: " << rc << ", " - << cass_error_desc(rc); - - Throw(ss.str()); - } - - int port = get(config_, "port"); - if (port) - { - rc = cass_cluster_set_port(cluster, port); - if (rc != CASS_OK) - { - std::stringstream ss; - ss << "nodestore: Error setting Cassandra port: " << port - << ", result: " << rc << ", " << cass_error_desc(rc); - - Throw(ss.str()); - } - } - } - cass_cluster_set_token_aware_routing(cluster, cass_true); - CassError rc = cass_cluster_set_protocol_version( - cluster, CASS_PROTOCOL_VERSION_V4); - if (rc != CASS_OK) - { - std::stringstream ss; - ss << "nodestore: Error setting cassandra protocol version: " - << ", result: " << rc << ", " << cass_error_desc(rc); - - Throw(ss.str()); - } - - std::string username = get(config_, "username"); - if (username.size()) - { - std::cout << "user = " << username - << " password = " << get(config_, "password") - << std::endl; - cass_cluster_set_credentials( - cluster, username.c_str(), get(config_, "password").c_str()); - } - - unsigned int const ioThreads = get(config_, "io_threads", 4); - maxRequestsOutstanding = - get(config_, "max_requests_outstanding", 10000000); - JLOG(j_.info()) << "Configuring Cassandra driver to use " << ioThreads - << " IO threads. 
Capping maximum pending requests at " - << maxRequestsOutstanding; - rc = cass_cluster_set_num_threads_io(cluster, ioThreads); - if (rc != CASS_OK) - { - std::stringstream ss; - ss << "nodestore: Error setting Cassandra io threads to " - << ioThreads << ", result: " << rc << ", " - << cass_error_desc(rc); - Throw(ss.str()); - } - - rc = cass_cluster_set_queue_size_io( - cluster, - maxRequestsOutstanding); // This number needs to scale w/ the - // number of request per sec - if (rc != CASS_OK) - { - std::stringstream ss; - ss << "nodestore: Error setting Cassandra max core connections per " - "host" - << ", result: " << rc << ", " << cass_error_desc(rc); - std::cout << ss.str() << std::endl; - return; - ; - } - cass_cluster_set_request_timeout(cluster, 2000); - - std::string certfile = get(config_, "certfile"); - if (certfile.size()) - { - std::ifstream fileStream( - boost::filesystem::path(certfile).string(), std::ios::in); - if (!fileStream) - { - std::stringstream ss; - ss << "opening config file " << certfile; - Throw( - errno, std::generic_category(), ss.str()); - } - std::string cert( - std::istreambuf_iterator{fileStream}, - std::istreambuf_iterator{}); - if (fileStream.bad()) - { - std::stringstream ss; - ss << "reading config file " << certfile; - Throw( - errno, std::generic_category(), ss.str()); - } - - CassSsl* context = cass_ssl_new(); - cass_ssl_set_verify_flags(context, CASS_SSL_VERIFY_NONE); - rc = cass_ssl_add_trusted_cert(context, cert.c_str()); - if (rc != CASS_OK) - { - std::stringstream ss; - ss << "nodestore: Error setting Cassandra ssl context: " << rc - << ", " << cass_error_desc(rc); - Throw(ss.str()); - } - - cass_cluster_set_ssl(cluster, context); - cass_ssl_free(context); - } - - std::string keyspace = get(config_, "keyspace"); - if (keyspace.empty()) - { - Throw( - "nodestore: Missing keyspace in Cassandra config"); - } - - std::string tableName = get(config_, "table_name"); - if (tableName.empty()) - { - Throw( - "nodestore: Missing table name in Cassandra config"); - } - - cass_cluster_set_connect_timeout(cluster, 10000); - - CassStatement* statement; - CassFuture* fut; - bool setupSessionAndTable = false; - while (!setupSessionAndTable) - { - std::this_thread::sleep_for(std::chrono::seconds(1)); - session_.reset(cass_session_new()); - assert(session_); - - fut = cass_session_connect_keyspace( - session_.get(), cluster, keyspace.c_str()); - rc = cass_future_error_code(fut); - cass_future_free(fut); - if (rc != CASS_OK) - { - std::stringstream ss; - ss << "nodestore: Error connecting Cassandra session keyspace: " - << rc << ", " << cass_error_desc(rc); - JLOG(j_.error()) << ss.str(); - continue; - } - - std::stringstream query; - query << "CREATE TABLE IF NOT EXISTS " << tableName - << " ( hash blob PRIMARY KEY, object blob)"; - - statement = makeStatement(query.str().c_str(), 0); - fut = cass_session_execute(session_.get(), statement); - rc = cass_future_error_code(fut); - cass_future_free(fut); - cass_statement_free(statement); - if (rc != CASS_OK && rc != CASS_ERROR_SERVER_INVALID_QUERY) - { - std::stringstream ss; - ss << "nodestore: Error creating Cassandra table: " << rc - << ", " << cass_error_desc(rc); - JLOG(j_.error()) << ss.str(); - continue; - } - - query.str(""); - query << "SELECT * FROM " << tableName << " LIMIT 1"; - statement = makeStatement(query.str().c_str(), 0); - fut = cass_session_execute(session_.get(), statement); - rc = cass_future_error_code(fut); - cass_future_free(fut); - cass_statement_free(statement); - if (rc != CASS_OK) - { - 
if (rc == CASS_ERROR_SERVER_INVALID_QUERY) - { - JLOG(j_.warn()) << "table not here yet, sleeping 1s to " - "see if table creation propagates"; - continue; - } - else - { - std::stringstream ss; - ss << "nodestore: Error checking for table: " << rc << ", " - << cass_error_desc(rc); - JLOG(j_.error()) << ss.str(); - continue; - } - } - - setupSessionAndTable = true; - } - - cass_cluster_free(cluster); - - bool setupPreparedStatements = false; - while (!setupPreparedStatements) - { - std::this_thread::sleep_for(std::chrono::seconds(1)); - std::stringstream query; - query << "INSERT INTO " << tableName - << " (hash, object) VALUES (?, ?)"; - CassFuture* prepare_future = - cass_session_prepare(session_.get(), query.str().c_str()); - - /* Wait for the statement to prepare and get the result */ - rc = cass_future_error_code(prepare_future); - - if (rc != CASS_OK) - { - /* Handle error */ - cass_future_free(prepare_future); - - std::stringstream ss; - ss << "nodestore: Error preparing insert : " << rc << ", " - << cass_error_desc(rc); - JLOG(j_.error()) << ss.str(); - continue; - } - - /* Get the prepared object from the future */ - insert_ = cass_future_get_prepared(prepare_future); - - /* The future can be freed immediately after getting the prepared - * object - */ - cass_future_free(prepare_future); - - query.str(""); - query << "SELECT object FROM " << tableName << " WHERE hash = ?"; - prepare_future = - cass_session_prepare(session_.get(), query.str().c_str()); - - /* Wait for the statement to prepare and get the result */ - rc = cass_future_error_code(prepare_future); - - if (rc != CASS_OK) - { - /* Handle error */ - cass_future_free(prepare_future); - - std::stringstream ss; - ss << "nodestore: Error preparing select : " << rc << ", " - << cass_error_desc(rc); - JLOG(j_.error()) << ss.str(); - continue; - } - - /* Get the prepared object from the future */ - select_ = cass_future_get_prepared(prepare_future); - - /* The future can be freed immediately after getting the prepared - * object - */ - cass_future_free(prepare_future); - setupPreparedStatements = true; - } - - work_.emplace(ioContext_); - ioThread_ = std::thread{[this]() { ioContext_.run(); }}; - open_ = true; - } - - // Close the connection to the database - void - close() override - { - { - std::lock_guard lock(mutex_); - if (insert_) - { - cass_prepared_free(insert_); - insert_ = nullptr; - } - if (select_) - { - cass_prepared_free(select_); - select_ = nullptr; - } - work_.reset(); - ioThread_.join(); - } - open_ = false; - } - - // Synchronously fetch the object with key key and store the result in pno - // @param key the key of the object - // @param pno object in which to store the result - // @return result status of query - Status - fetch(void const* key, std::shared_ptr* pno) override - { - JLOG(j_.trace()) << "Fetching from cassandra"; - CassStatement* statement = cass_prepared_bind(select_); - cass_statement_set_consistency(statement, CASS_CONSISTENCY_QUORUM); - CassError rc = cass_statement_bind_bytes( - statement, 0, static_cast(key), keyBytes_); - if (rc != CASS_OK) - { - cass_statement_free(statement); - JLOG(j_.error()) << "Binding Cassandra fetch query: " << rc << ", " - << cass_error_desc(rc); - pno->reset(); - return backendError; - } - CassFuture* fut; - do - { - fut = cass_session_execute(session_.get(), statement); - rc = cass_future_error_code(fut); - if (rc != CASS_OK) - { - std::stringstream ss; - ss << "Cassandra fetch error"; - ss << ", retrying"; - ++counters_.readRetries; - ss << ": " << 
cass_error_desc(rc); - JLOG(j_.warn()) << ss.str(); - } - } while (rc != CASS_OK); - - CassResult const* res = cass_future_get_result(fut); - cass_statement_free(statement); - cass_future_free(fut); - - CassRow const* row = cass_result_first_row(res); - if (!row) - { - cass_result_free(res); - pno->reset(); - return notFound; - } - cass_byte_t const* buf; - std::size_t bufSize; - rc = cass_value_get_bytes(cass_row_get_column(row, 0), &buf, &bufSize); - if (rc != CASS_OK) - { - cass_result_free(res); - pno->reset(); - JLOG(j_.error()) << "Cassandra fetch result error: " << rc << ", " - << cass_error_desc(rc); - ++counters_.readErrors; - return backendError; - } - - nudb::detail::buffer bf; - std::pair uncompressed = - nodeobject_decompress(buf, bufSize, bf); - DecodedBlob decoded(key, uncompressed.first, uncompressed.second); - cass_result_free(res); - - if (!decoded.wasOk()) - { - pno->reset(); - JLOG(j_.error()) << "Cassandra error decoding result: " << rc - << ", " << cass_error_desc(rc); - ++counters_.readErrors; - return dataCorrupt; - } - *pno = decoded.createObject(); - return ok; - } - - struct ReadCallbackData - { - CassandraBackend& backend; - const void* const key; - std::shared_ptr& result; - std::condition_variable& cv; - - std::atomic_uint32_t& numFinished; - size_t batchSize; - - ReadCallbackData( - CassandraBackend& backend, - const void* const key, - std::shared_ptr& result, - std::condition_variable& cv, - std::atomic_uint32_t& numFinished, - size_t batchSize) - : backend(backend) - , key(key) - , result(result) - , cv(cv) - , numFinished(numFinished) - , batchSize(batchSize) - { - } - - ReadCallbackData(ReadCallbackData const& other) = default; - }; - - std::pair>, Status> - fetchBatch(std::vector const& hashes) override - { - std::size_t const numHashes = hashes.size(); - JLOG(j_.trace()) << "Fetching " << numHashes - << " records from Cassandra"; - std::atomic_uint32_t numFinished = 0; - std::condition_variable cv; - std::mutex mtx; - std::vector> results{numHashes}; - std::vector> cbs; - cbs.reserve(numHashes); - for (std::size_t i = 0; i < hashes.size(); ++i) - { - cbs.push_back(std::make_shared( - *this, - static_cast(hashes[i]), - results[i], - cv, - numFinished, - numHashes)); - read(*cbs[i]); - } - assert(results.size() == cbs.size()); - - std::unique_lock lck(mtx); - cv.wait(lck, [&numFinished, &numHashes]() { - return numFinished == numHashes; - }); - - JLOG(j_.trace()) << "Fetched " << numHashes - << " records from Cassandra"; - return {results, ok}; - } - - void - read(ReadCallbackData& data) - { - CassStatement* statement = cass_prepared_bind(select_); - cass_statement_set_consistency(statement, CASS_CONSISTENCY_QUORUM); - CassError rc = cass_statement_bind_bytes( - statement, 0, static_cast(data.key), keyBytes_); - if (rc != CASS_OK) - { - size_t batchSize = data.batchSize; - if (++(data.numFinished) == batchSize) - data.cv.notify_all(); - cass_statement_free(statement); - JLOG(j_.error()) << "Binding Cassandra fetch query: " << rc << ", " - << cass_error_desc(rc); - return; - } - - CassFuture* fut = cass_session_execute(session_.get(), statement); - - cass_statement_free(statement); - - cass_future_set_callback(fut, readCallback, static_cast(&data)); - cass_future_free(fut); - } - - struct WriteCallbackData - { - CassandraBackend* backend; - // The shared pointer to the node object must exist until it's - // confirmed persisted. Otherwise, it can become deleted - // prematurely if other copies are removed from caches. 
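In miniature, the ownership rule that comment states looks like this (a sketch; `Node` and `enqueueWrite` are hypothetical stand-ins for the stored type and the driver call):

```cpp
#include <memory>

struct Node;  // stand-in for the stored object type

struct PendingWrite
{
    // Owning copy: keeps the object alive even if every cache drops it
    // before the asynchronous write is confirmed.
    std::shared_ptr<Node> object;
};

void enqueueWrite(PendingWrite* data);  // hypothetical async submit

void
startWrite(std::shared_ptr<Node> const& node)
{
    auto* data = new PendingWrite{node};  // freed by the completion path
    enqueueWrite(data);
}
```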
- std::shared_ptr no; - std::optional e; - std::pair compressed; - std::chrono::steady_clock::time_point begin; - // The data is stored in this buffer. The void* in the above member - // is a pointer into the below buffer - nudb::detail::buffer bf; - std::atomic& totalWriteRetries; - - uint32_t currentRetries = 0; - - WriteCallbackData( - CassandraBackend* f, - std::shared_ptr const& nobj, - std::atomic& retries) - : backend(f), no(nobj), totalWriteRetries(retries) - { - e.emplace(no); - - compressed = - NodeStore::nodeobject_compress(e->getData(), e->getSize(), bf); - } - }; - - void - write(WriteCallbackData& data, bool isRetry) - { - { - // We limit the total number of concurrent inflight writes. This is - // a client side throttling to prevent overloading the database. - // This is mostly useful when the very first ledger is being written - // in full, which is several millions records. On sufficiently large - // Cassandra clusters, this throttling is not needed; the default - // value of maxRequestsOutstanding is 10 million, which is more - // records than are present in any single ledger - std::unique_lock lck(throttleMutex_); - if (!isRetry && numRequestsOutstanding_ > maxRequestsOutstanding) - { - JLOG(j_.trace()) << __func__ << " : " - << "Max outstanding requests reached. " - << "Waiting for other requests to finish"; - ++counters_.writesDelayed; - throttleCv_.wait(lck, [this]() { - return numRequestsOutstanding_ < maxRequestsOutstanding; - }); - } - } - - CassStatement* statement = cass_prepared_bind(insert_); - cass_statement_set_consistency(statement, CASS_CONSISTENCY_QUORUM); - CassError rc = cass_statement_bind_bytes( - statement, - 0, - static_cast(data.e->getKey()), - keyBytes_); - if (rc != CASS_OK) - { - cass_statement_free(statement); - std::stringstream ss; - ss << "Binding cassandra insert hash: " << rc << ", " - << cass_error_desc(rc); - JLOG(j_.error()) << __func__ << " : " << ss.str(); - Throw(ss.str()); - } - rc = cass_statement_bind_bytes( - statement, - 1, - static_cast(data.compressed.first), - data.compressed.second); - if (rc != CASS_OK) - { - cass_statement_free(statement); - std::stringstream ss; - ss << "Binding cassandra insert object: " << rc << ", " - << cass_error_desc(rc); - JLOG(j_.error()) << __func__ << " : " << ss.str(); - Throw(ss.str()); - } - data.begin = std::chrono::steady_clock::now(); - CassFuture* fut = cass_session_execute(session_.get(), statement); - cass_statement_free(statement); - - cass_future_set_callback(fut, writeCallback, static_cast(&data)); - cass_future_free(fut); - } - - void - store(std::shared_ptr const& no) override - { - JLOG(j_.trace()) << "Writing to cassandra"; - WriteCallbackData* data = - new WriteCallbackData(this, no, counters_.writeRetries); - - ++numRequestsOutstanding_; - write(*data, false); - } - - void - storeBatch(Batch const& batch) override - { - for (auto const& no : batch) - { - store(no); - } - } - - void - sync() override - { - std::unique_lock lck(syncMutex_); - - syncCv_.wait(lck, [this]() { return numRequestsOutstanding_ == 0; }); - } - - // Iterate through entire table and execute f(). Used for import only, - // with database not being written to, so safe to paginate through - // objects table with LIMIT x OFFSET y. 
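A sketch of that paginated scan, following the comment's LIMIT/OFFSET phrasing and its assumption that the table is not being written to (`runQuery` is a hypothetical helper returning one page of rows):

```cpp
#include <cstddef>
#include <functional>
#include <string>
#include <vector>

void
forEachPaginated(
    std::string const& table,
    std::function<std::vector<std::string>(std::string const&)> runQuery,
    std::function<void(std::string const&)> f)
{
    std::size_t const pageSize = 1000;
    for (std::size_t offset = 0;; offset += pageSize)
    {
        std::vector<std::string> rows = runQuery(
            "SELECT object FROM " + table + " LIMIT " +
            std::to_string(pageSize) + " OFFSET " +
            std::to_string(offset));
        for (auto const& row : rows)
            f(row);
        if (rows.size() < pageSize)
            break;  // short page: the scan is complete
    }
}
```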
- void - for_each(std::function)> f) override - { - assert(false); - Throw("not implemented"); - } - - int - getWriteLoad() override - { - return 0; - } - - void - setDeletePath() override - { - } - - int - fdRequired() const override - { - return 0; - } - - std::optional> - counters() const override - { - return counters_; - } - - friend void - writeCallback(CassFuture* fut, void* cbData); - - friend void - readCallback(CassFuture* fut, void* cbData); -}; - -// Process the result of an asynchronous read. Retry on error -// @param fut cassandra future associated with the read -// @param cbData struct that holds the request parameters -void -readCallback(CassFuture* fut, void* cbData) -{ - CassandraBackend::ReadCallbackData& requestParams = - *static_cast(cbData); - - CassError rc = cass_future_error_code(fut); - - if (rc != CASS_OK) - { - ++(requestParams.backend.counters_.readRetries); - JLOG(requestParams.backend.j_.warn()) - << "Cassandra fetch error : " << rc << " : " << cass_error_desc(rc) - << " - retrying"; - // Retry right away. The only time the cluster should ever be overloaded - // is when the very first ledger is being written in full (millions of - // writes at once), during which no reads should be occurring. If reads - // are timing out, the code/architecture should be modified to handle - // greater read load, as opposed to just exponential backoff - requestParams.backend.read(requestParams); - } - else - { - auto finish = [&requestParams]() { - size_t batchSize = requestParams.batchSize; - if (++(requestParams.numFinished) == batchSize) - requestParams.cv.notify_all(); - }; - CassResult const* res = cass_future_get_result(fut); - - CassRow const* row = cass_result_first_row(res); - if (!row) - { - cass_result_free(res); - JLOG(requestParams.backend.j_.error()) - << "Cassandra fetch get row error : " << rc << ", " - << cass_error_desc(rc); - finish(); - return; - } - cass_byte_t const* buf; - std::size_t bufSize; - rc = cass_value_get_bytes(cass_row_get_column(row, 0), &buf, &bufSize); - if (rc != CASS_OK) - { - cass_result_free(res); - JLOG(requestParams.backend.j_.error()) - << "Cassandra fetch get bytes error : " << rc << ", " - << cass_error_desc(rc); - ++requestParams.backend.counters_.readErrors; - finish(); - return; - } - nudb::detail::buffer bf; - std::pair uncompressed = - nodeobject_decompress(buf, bufSize, bf); - DecodedBlob decoded( - requestParams.key, uncompressed.first, uncompressed.second); - cass_result_free(res); - - if (!decoded.wasOk()) - { - JLOG(requestParams.backend.j_.fatal()) - << "Cassandra fetch error - data corruption : " << rc << ", " - << cass_error_desc(rc); - ++requestParams.backend.counters_.readErrors; - finish(); - return; - } - requestParams.result = decoded.createObject(); - finish(); - } -} - -// Process the result of an asynchronous write. Retry on error -// @param fut cassandra future associated with the write -// @param cbData struct that holds the request parameters -void -writeCallback(CassFuture* fut, void* cbData) -{ - CassandraBackend::WriteCallbackData& requestParams = - *static_cast(cbData); - CassandraBackend& backend = *requestParams.backend; - auto rc = cass_future_error_code(fut); - if (rc != CASS_OK) - { - JLOG(backend.j_.error()) - << "ERROR!!! 
Cassandra insert error: " << rc << ", " - << cass_error_desc(rc) << ", retrying "; - ++requestParams.totalWriteRetries; - // exponential backoff with a max wait of 2^10 ms (about 1 second) - auto wait = std::chrono::milliseconds( - lround(std::pow(2, std::min(10u, requestParams.currentRetries)))); - ++requestParams.currentRetries; - std::shared_ptr timer = - std::make_shared( - backend.ioContext_, std::chrono::steady_clock::now() + wait); - timer->async_wait([timer, &requestParams, &backend]( - const boost::system::error_code& error) { - backend.write(requestParams, true); - }); - } - else - { - backend.counters_.writeDurationUs += - std::chrono::duration_cast( - std::chrono::steady_clock::now() - requestParams.begin) - .count(); - --(backend.numRequestsOutstanding_); - - backend.throttleCv_.notify_all(); - if (backend.numRequestsOutstanding_ == 0) - backend.syncCv_.notify_all(); - delete &requestParams; - } -} - -//------------------------------------------------------------------------------ - -class CassandraFactory : public Factory -{ -public: - CassandraFactory() - { - Manager::instance().insert(*this); - } - - ~CassandraFactory() override - { - Manager::instance().erase(*this); - } - - std::string - getName() const override - { - return "cassandra"; - } - - std::unique_ptr - createInstance( - size_t keyBytes, - Section const& keyValues, - std::size_t burstSize, - Scheduler& scheduler, - beast::Journal journal) override - { - return std::make_unique(keyBytes, keyValues, journal); - } -}; - -static CassandraFactory cassandraFactory; - -} // namespace NodeStore -} // namespace ripple -#endif diff --git a/src/xrpld/nodestore/detail/Database.cpp b/src/xrpld/nodestore/detail/Database.cpp index 60cfb35051c..da15088e895 100644 --- a/src/xrpld/nodestore/detail/Database.cpp +++ b/src/xrpld/nodestore/detail/Database.cpp @@ -273,15 +273,6 @@ Database::getCountsJson(Json::Value& obj) obj[jss::node_written_bytes] = std::to_string(storeSz_); obj[jss::node_read_bytes] = std::to_string(fetchSz_); obj[jss::node_reads_duration_us] = std::to_string(fetchDurationUs_); - - if (auto c = getCounters()) - { - obj[jss::node_read_errors] = std::to_string(c->readErrors); - obj[jss::node_read_retries] = std::to_string(c->readRetries); - obj[jss::node_write_retries] = std::to_string(c->writeRetries); - obj[jss::node_writes_delayed] = std::to_string(c->writesDelayed); - obj[jss::node_writes_duration_us] = std::to_string(c->writeDurationUs); - } } } // namespace NodeStore diff --git a/src/xrpld/nodestore/detail/DatabaseNodeImp.h b/src/xrpld/nodestore/detail/DatabaseNodeImp.h index c2bf237b943..b8a9a3fa2b4 100644 --- a/src/xrpld/nodestore/detail/DatabaseNodeImp.h +++ b/src/xrpld/nodestore/detail/DatabaseNodeImp.h @@ -150,12 +150,6 @@ class DatabaseNodeImp : public Database { backend_->for_each(f); } - - std::optional> - getCounters() const override - { - return backend_->counters(); - } }; } // namespace NodeStore diff --git a/src/xrpld/nodestore/detail/ManagerImp.cpp b/src/xrpld/nodestore/detail/ManagerImp.cpp index 019dd1f8122..56dc66ee644 100644 --- a/src/xrpld/nodestore/detail/ManagerImp.cpp +++ b/src/xrpld/nodestore/detail/ManagerImp.cpp @@ -55,12 +55,6 @@ ManagerImp::make_Backend( auto factory{find(type)}; if (!factory) { -#ifndef RIPPLED_REPORTING - if (boost::iequals(type, "cassandra")) - Throw( - "To use Cassandra as a nodestore, build rippled with " - "-Dreporting=ON"); -#endif missing_backend(); } diff --git a/src/xrpld/rpc/detail/DeliveredAmount.cpp b/src/xrpld/rpc/detail/DeliveredAmount.cpp index 
7874997e24f..93af8599146 100644 --- a/src/xrpld/rpc/detail/DeliveredAmount.cpp +++ b/src/xrpld/rpc/detail/DeliveredAmount.cpp @@ -119,20 +119,10 @@ canHaveDeliveredAmount( { // These lambdas are used to compute the values lazily auto const getFix1623Enabled = [&context]() -> bool { - if (context.app.config().reporting()) - { - auto const view = context.ledgerMaster.getValidatedLedger(); - if (!view) - return false; - return view->rules().enabled(fix1623); - } - else - { - auto const view = context.app.openLedger().current(); - if (!view) - return false; - return view->rules().enabled(fix1623); - } + auto const view = context.app.openLedger().current(); + if (!view) + return false; + return view->rules().enabled(fix1623); }; return canHaveDeliveredAmountHelp( diff --git a/src/xrpld/rpc/detail/Handler.cpp b/src/xrpld/rpc/detail/Handler.cpp index d4a3fda380f..90dee4475a1 100644 --- a/src/xrpld/rpc/detail/Handler.cpp +++ b/src/xrpld/rpc/detail/Handler.cpp @@ -106,11 +106,7 @@ Handler const handlerArray[]{ {"feature", byRef(&doFeature), Role::USER, NO_CONDITION}, {"fee", byRef(&doFee), Role::USER, NEEDS_CURRENT_LEDGER}, {"fetch_info", byRef(&doFetchInfo), Role::ADMIN, NO_CONDITION}, -#ifdef RIPPLED_REPORTING - {"gateway_balances", byRef(&doGatewayBalances), Role::ADMIN, NO_CONDITION}, -#else {"gateway_balances", byRef(&doGatewayBalances), Role::USER, NO_CONDITION}, -#endif {"get_counts", byRef(&doGetCounts), Role::ADMIN, NO_CONDITION}, {"get_aggregate_price", byRef(&doGetAggregatePrice), diff --git a/src/xrpld/rpc/detail/Handler.h b/src/xrpld/rpc/detail/Handler.h index 81fbc2be321..cb1a2579ecb 100644 --- a/src/xrpld/rpc/detail/Handler.h +++ b/src/xrpld/rpc/detail/Handler.h @@ -81,22 +81,6 @@ template error_code_i conditionMet(Condition condition_required, T& context) { - if (context.app.config().reporting()) - { - if (condition_required == NEEDS_CURRENT_LEDGER) - { - return rpcNO_CURRENT; - } - else if (condition_required == NEEDS_CLOSED_LEDGER) - { - return rpcNO_CLOSED; - } - else - { - return rpcSUCCESS; - } - } - if (context.app.getOPs().isAmendmentBlocked() && (condition_required != NO_CONDITION)) { diff --git a/src/xrpld/rpc/detail/RPCHandler.cpp b/src/xrpld/rpc/detail/RPCHandler.cpp index 8504fe72a83..19b33709c83 100644 --- a/src/xrpld/rpc/detail/RPCHandler.cpp +++ b/src/xrpld/rpc/detail/RPCHandler.cpp @@ -22,7 +22,6 @@ #include #include #include -#include #include #include #include @@ -206,11 +205,6 @@ callMethod( perfLog.rpcFinish(name, curId); return ret; } - catch (ReportingShouldProxy&) - { - result = forwardToP2p(context); - return rpcSUCCESS; - } catch (std::exception& e) { perfLog.rpcError(name, curId); @@ -226,36 +220,9 @@ callMethod( } // namespace -void -injectReportingWarning(RPC::JsonContext& context, Json::Value& result) -{ - if (context.app.config().reporting()) - { - Json::Value warnings{Json::arrayValue}; - Json::Value& w = warnings.append(Json::objectValue); - w[jss::id] = warnRPC_REPORTING; - w[jss::message] = - "This is a reporting server. " - " The default behavior of a reporting server is to only" - " return validated data. If you are looking for not yet" - " validated data, include \"ledger_index : current\"" - " in your request, which will cause this server to forward" - " the request to a p2p node. 
If the forward is successful" - " the response will include \"forwarded\" : \"true\""; - result[jss::warnings] = std::move(warnings); - } -} - Status doCommand(RPC::JsonContext& context, Json::Value& result) { - if (shouldForwardToP2p(context)) - { - result = forwardToP2p(context); - injectReportingWarning(context, result); - // this return value is ignored - return rpcSUCCESS; - } Handler const* handler = nullptr; if (auto error = fillHandler(context, handler)) { @@ -285,7 +252,6 @@ doCommand(RPC::JsonContext& context, Json::Value& result) else { auto ret = callMethod(context, method, handler->name_, result); - injectReportingWarning(context, result); return ret; } } diff --git a/src/xrpld/rpc/detail/RPCHelpers.cpp b/src/xrpld/rpc/detail/RPCHelpers.cpp index 71513ddcd5c..fa66fecfbba 100644 --- a/src/xrpld/rpc/detail/RPCHelpers.cpp +++ b/src/xrpld/rpc/detail/RPCHelpers.cpp @@ -329,9 +329,9 @@ getAccountObjects( namespace { bool -isValidatedOld(LedgerMaster& ledgerMaster, bool standaloneOrReporting) +isValidatedOld(LedgerMaster& ledgerMaster, bool standalone) { - if (standaloneOrReporting) + if (standalone) return false; return ledgerMaster.getValidatedLedgerAge() > Tuning::maxValidatedLedgerAge; @@ -371,12 +371,10 @@ ledgerFromRequest(T& ledger, JsonContext& context) auto const index = indexValue.asString(); - if (index == "current" || - (index.empty() && !context.app.config().reporting())) + if (index == "current" || index.empty()) return getLedger(ledger, LedgerShortcut::CURRENT, context); - if (index == "validated" || - (index.empty() && context.app.config().reporting())) + if (index == "validated") return getLedger(ledger, LedgerShortcut::VALIDATED, context); if (index == "closed") @@ -442,13 +440,8 @@ ledgerFromSpecifier( [[fallthrough]]; case LedgerCase::LEDGER_NOT_SET: { auto const shortcut = specifier.shortcut(); - // note, unspecified defaults to validated in reporting mode if (shortcut == - org::xrpl::rpc::v1::LedgerSpecifier::SHORTCUT_VALIDATED || - (shortcut == - org::xrpl::rpc::v1::LedgerSpecifier:: - SHORTCUT_UNSPECIFIED && - context.app.config().reporting())) + org::xrpl::rpc::v1::LedgerSpecifier::SHORTCUT_VALIDATED) { return getLedger(ledger, LedgerShortcut::VALIDATED, context); } @@ -492,8 +485,6 @@ getLedger(T& ledger, uint32_t ledgerIndex, Context& context) ledger = context.ledgerMaster.getLedgerBySeq(ledgerIndex); if (ledger == nullptr) { - if (context.app.config().reporting()) - return {rpcLGR_NOT_FOUND, "ledgerNotFound"}; auto cur = context.ledgerMaster.getCurrentLedger(); if (cur->info().seq == ledgerIndex) { @@ -520,10 +511,7 @@ template Status getLedger(T& ledger, LedgerShortcut shortcut, Context& context) { - if (isValidatedOld( - context.ledgerMaster, - context.app.config().standalone() || - context.app.config().reporting())) + if (isValidatedOld(context.ledgerMaster, context.app.config().standalone())) { if (context.apiVersion == 1) return {rpcNO_NETWORK, "InsufficientNetworkMode"}; @@ -546,18 +534,11 @@ getLedger(T& ledger, LedgerShortcut shortcut, Context& context) { if (shortcut == LedgerShortcut::CURRENT) { - if (context.app.config().reporting()) - return { - rpcLGR_NOT_FOUND, - "Reporting does not track current ledger"}; ledger = context.ledgerMaster.getCurrentLedger(); assert(ledger->open()); } else if (shortcut == LedgerShortcut::CLOSED) { - if (context.app.config().reporting()) - return { - rpcLGR_NOT_FOUND, "Reporting does not track closed ledger"}; ledger = context.ledgerMaster.getClosedLedger(); assert(!ledger->open()); } @@ -1030,9 +1011,6 @@ 
getAPIVersionNumber(Json::Value const& jv, bool betaEnabled) std::variant, Json::Value> getLedgerByContext(RPC::JsonContext& context) { - if (context.app.config().reporting()) - return rpcError(rpcREPORTING_UNSUPPORTED); - auto const hasHash = context.params.isMember(jss::ledger_hash); auto const hasIndex = context.params.isMember(jss::ledger_index); std::uint32_t ledgerIndex = 0; diff --git a/src/xrpld/rpc/detail/TransactionSign.cpp b/src/xrpld/rpc/detail/TransactionSign.cpp index 1fee84c683b..65ee50c0891 100644 --- a/src/xrpld/rpc/detail/TransactionSign.cpp +++ b/src/xrpld/rpc/detail/TransactionSign.cpp @@ -827,11 +827,7 @@ transactionSign( if (!preprocResult.second) return preprocResult.first; - std::shared_ptr ledger; - if (app.config().reporting()) - ledger = app.getLedgerMaster().getValidatedLedger(); - else - ledger = app.openLedger().current(); + std::shared_ptr ledger = app.openLedger().current(); // Make sure the STTx makes a legitimate Transaction. std::pair txn = transactionConstructImpl(preprocResult.second, ledger->rules(), app); diff --git a/src/xrpld/rpc/handlers/AccountTx.cpp b/src/xrpld/rpc/handlers/AccountTx.cpp index 3b9165eecf1..a85abd86682 100644 --- a/src/xrpld/rpc/handlers/AccountTx.cpp +++ b/src/xrpld/rpc/handlers/AccountTx.cpp @@ -22,9 +22,7 @@ #include #include #include -#include #include -#include #include #include #include @@ -218,16 +216,6 @@ std::pair doAccountTxHelp(RPC::Context& context, AccountTxArgs const& args) { context.loadType = Resource::feeMediumBurdenRPC; - if (context.app.config().reporting()) - { - auto const db = dynamic_cast( - &context.app.getRelationalDatabase()); - - if (!db) - Throw("Failed to get relational database"); - - return db->getAccountTx(args); - } AccountTxResult result; @@ -391,8 +379,6 @@ populateJsonResponse( response[jss::marker][jss::ledger] = result.marker->ledgerSeq; response[jss::marker][jss::seq] = result.marker->txnSeq; } - if (context.app.config().reporting()) - response["used_postgres"] = true; } JLOG(context.j.debug()) << __func__ << " : finished"; diff --git a/src/xrpld/rpc/handlers/CanDelete.cpp b/src/xrpld/rpc/handlers/CanDelete.cpp index db9fdf7c5d0..df2301d03e0 100644 --- a/src/xrpld/rpc/handlers/CanDelete.cpp +++ b/src/xrpld/rpc/handlers/CanDelete.cpp @@ -34,9 +34,6 @@ namespace ripple { Json::Value doCanDelete(RPC::JsonContext& context) { - if (context.app.config().reporting()) - return RPC::make_error(rpcREPORTING_UNSUPPORTED); - if (!context.app.getSHAMapStore().advisoryDelete()) return RPC::make_error(rpcNOT_ENABLED); diff --git a/src/xrpld/rpc/handlers/Connect.cpp b/src/xrpld/rpc/handlers/Connect.cpp index dadf0a0515e..c564319dc8b 100644 --- a/src/xrpld/rpc/handlers/Connect.cpp +++ b/src/xrpld/rpc/handlers/Connect.cpp @@ -37,9 +37,6 @@ namespace ripple { Json::Value doConnect(RPC::JsonContext& context) { - if (context.app.config().reporting()) - return rpcError(rpcREPORTING_UNSUPPORTED); - if (context.app.config().standalone()) return "cannot connect in standalone mode"; diff --git a/src/xrpld/rpc/handlers/ConsensusInfo.cpp b/src/xrpld/rpc/handlers/ConsensusInfo.cpp index 42fbb60ba76..ce727bb4006 100644 --- a/src/xrpld/rpc/handlers/ConsensusInfo.cpp +++ b/src/xrpld/rpc/handlers/ConsensusInfo.cpp @@ -30,9 +30,6 @@ namespace ripple { Json::Value doConsensusInfo(RPC::JsonContext& context) { - if (context.app.config().reporting()) - return rpcError(rpcREPORTING_UNSUPPORTED); - Json::Value ret(Json::objectValue); ret[jss::info] = context.netOps.getConsensusInfo(); diff --git 
a/src/xrpld/rpc/handlers/Feature1.cpp b/src/xrpld/rpc/handlers/Feature1.cpp index c06756ca00a..75e583a352c 100644 --- a/src/xrpld/rpc/handlers/Feature1.cpp +++ b/src/xrpld/rpc/handlers/Feature1.cpp @@ -35,9 +35,6 @@ namespace ripple { Json::Value doFeature(RPC::JsonContext& context) { - if (context.app.config().reporting()) - return rpcError(rpcREPORTING_UNSUPPORTED); - if (context.params.isMember(jss::feature)) { // ensure that the `feature` param is a string diff --git a/src/xrpld/rpc/handlers/FetchInfo.cpp b/src/xrpld/rpc/handlers/FetchInfo.cpp index 113ae78a35c..a4287266e52 100644 --- a/src/xrpld/rpc/handlers/FetchInfo.cpp +++ b/src/xrpld/rpc/handlers/FetchInfo.cpp @@ -30,9 +30,6 @@ namespace ripple { Json::Value doFetchInfo(RPC::JsonContext& context) { - if (context.app.config().reporting()) - return rpcError(rpcREPORTING_UNSUPPORTED); - Json::Value ret(Json::objectValue); if (context.params.isMember(jss::clear) && diff --git a/src/xrpld/rpc/handlers/GetCounts.cpp b/src/xrpld/rpc/handlers/GetCounts.cpp index 035d698a5d4..690106ebbd2 100644 --- a/src/xrpld/rpc/handlers/GetCounts.cpp +++ b/src/xrpld/rpc/handlers/GetCounts.cpp @@ -71,7 +71,7 @@ getCountsJson(Application& app, int minObjectCount) ret[k] = v; } - if (!app.config().reporting() && app.config().useTxTables()) + if (app.config().useTxTables()) { auto const db = dynamic_cast(&app.getRelationalDatabase()); diff --git a/src/xrpld/rpc/handlers/LedgerAccept.cpp b/src/xrpld/rpc/handlers/LedgerAccept.cpp index 742a84fbb4e..dbd7eb9f1ca 100644 --- a/src/xrpld/rpc/handlers/LedgerAccept.cpp +++ b/src/xrpld/rpc/handlers/LedgerAccept.cpp @@ -36,7 +36,7 @@ doLedgerAccept(RPC::JsonContext& context) { Json::Value jvResult; - if (!context.app.config().standalone() || context.app.config().reporting()) + if (!context.app.config().standalone()) { jvResult[jss::error] = "notStandAlone"; } diff --git a/src/xrpld/rpc/handlers/LedgerHandler.cpp b/src/xrpld/rpc/handlers/LedgerHandler.cpp index 6d695abc85f..2bf4fb09f94 100644 --- a/src/xrpld/rpc/handlers/LedgerHandler.cpp +++ b/src/xrpld/rpc/handlers/LedgerHandler.cpp @@ -40,8 +40,7 @@ LedgerHandler::check() { auto const& params = context_.params; bool needsLedger = params.isMember(jss::ledger) || - params.isMember(jss::ledger_hash) || - params.isMember(jss::ledger_index) || context_.app.config().reporting(); + params.isMember(jss::ledger_hash) || params.isMember(jss::ledger_index); if (!needsLedger) return Status::OK; diff --git a/src/xrpld/rpc/handlers/Manifest.cpp b/src/xrpld/rpc/handlers/Manifest.cpp index 700d6ab39df..1debd48422a 100644 --- a/src/xrpld/rpc/handlers/Manifest.cpp +++ b/src/xrpld/rpc/handlers/Manifest.cpp @@ -29,9 +29,6 @@ namespace ripple { Json::Value doManifest(RPC::JsonContext& context) { - if (context.app.config().reporting()) - return rpcError(rpcREPORTING_UNSUPPORTED); - auto& params = context.params; if (!params.isMember(jss::public_key)) diff --git a/src/xrpld/rpc/handlers/Peers.cpp b/src/xrpld/rpc/handlers/Peers.cpp index 718070ec927..f3be0df558e 100644 --- a/src/xrpld/rpc/handlers/Peers.cpp +++ b/src/xrpld/rpc/handlers/Peers.cpp @@ -31,9 +31,6 @@ namespace ripple { Json::Value doPeers(RPC::JsonContext& context) { - if (context.app.config().reporting()) - return rpcError(rpcREPORTING_UNSUPPORTED); - Json::Value jvResult(Json::objectValue); jvResult[jss::peers] = context.app.overlay().json(); diff --git a/src/xrpld/rpc/handlers/Reservations.cpp b/src/xrpld/rpc/handlers/Reservations.cpp index 57a8fb3664a..1ff2d506afa 100644 --- a/src/xrpld/rpc/handlers/Reservations.cpp +++ 
b/src/xrpld/rpc/handlers/Reservations.cpp @@ -34,9 +34,6 @@ namespace ripple { Json::Value doPeerReservationsAdd(RPC::JsonContext& context) { - if (context.app.config().reporting()) - return rpcError(rpcREPORTING_UNSUPPORTED); - auto const& params = context.params; if (!params.isMember(jss::public_key)) @@ -90,9 +87,6 @@ doPeerReservationsAdd(RPC::JsonContext& context) Json::Value doPeerReservationsDel(RPC::JsonContext& context) { - if (context.app.config().reporting()) - return rpcError(rpcREPORTING_UNSUPPORTED); - auto const& params = context.params; // We repeat much of the parameter parsing from `doPeerReservationsAdd`. @@ -120,9 +114,6 @@ doPeerReservationsDel(RPC::JsonContext& context) Json::Value doPeerReservationsList(RPC::JsonContext& context) { - if (context.app.config().reporting()) - return rpcError(rpcREPORTING_UNSUPPORTED); - auto const& reservations = context.app.peerReservations().list(); // Enumerate the reservations in context.app.peerReservations() // as a Json::Value. diff --git a/src/xrpld/rpc/handlers/ServerInfo.cpp b/src/xrpld/rpc/handlers/ServerInfo.cpp index 769974985da..72beb37ed64 100644 --- a/src/xrpld/rpc/handlers/ServerInfo.cpp +++ b/src/xrpld/rpc/handlers/ServerInfo.cpp @@ -19,7 +19,6 @@ #include #include -#include #include #include #include @@ -330,14 +329,6 @@ doServerInfo(RPC::JsonContext& context) context.params.isMember(jss::counters) && context.params[jss::counters].asBool()); - if (context.app.config().reporting()) - { - Json::Value const proxied = forwardToP2p(context); - auto const lf = proxied[jss::result][jss::info][jss::load_factor]; - auto const vq = proxied[jss::result][jss::info][jss::validation_quorum]; - ret[jss::info][jss::validation_quorum] = vq.isNull() ? 1 : vq; - ret[jss::info][jss::load_factor] = lf.isNull() ? 
1 : lf; - } return ret; } diff --git a/src/xrpld/rpc/handlers/Subscribe.cpp b/src/xrpld/rpc/handlers/Subscribe.cpp index 9f9181e1ab2..66fe89dea04 100644 --- a/src/xrpld/rpc/handlers/Subscribe.cpp +++ b/src/xrpld/rpc/handlers/Subscribe.cpp @@ -128,8 +128,6 @@ doSubscribe(RPC::JsonContext& context) std::string streamName = it.asString(); if (streamName == "server") { - if (context.app.config().reporting()) - return rpcError(rpcREPORTING_UNSUPPORTED); context.netOps.subServer( ispSub, jvResult, context.role == Role::ADMIN); } @@ -161,16 +159,12 @@ doSubscribe(RPC::JsonContext& context) } else if (streamName == "peer_status") { - if (context.app.config().reporting()) - return rpcError(rpcREPORTING_UNSUPPORTED); if (context.role != Role::ADMIN) return rpcError(rpcNO_PERMISSION); context.netOps.subPeerStatus(ispSub); } else if (streamName == "consensus") { - if (context.app.config().reporting()) - return rpcError(rpcREPORTING_UNSUPPORTED); context.netOps.subConsensus(ispSub); } else diff --git a/src/xrpld/rpc/handlers/Tx.cpp b/src/xrpld/rpc/handlers/Tx.cpp index e32d926e566..ba103d186fc 100644 --- a/src/xrpld/rpc/handlers/Tx.cpp +++ b/src/xrpld/rpc/handlers/Tx.cpp @@ -70,128 +70,9 @@ struct TxArgs std::optional> ledgerRange; }; -std::pair -doTxPostgres(RPC::Context& context, TxArgs const& args) -{ - if (!context.app.config().reporting()) - { - assert(false); - Throw( - "Called doTxPostgres yet not in reporting mode"); - } - - TxResult res; - res.searchedAll = TxSearched::unknown; - - if (!args.hash) - return { - res, - {rpcNOT_IMPL, - "Use of CTIDs on reporting mode is not currently supported."}}; - - JLOG(context.j.debug()) << "Fetching from postgres"; - Transaction::Locator locator = - Transaction::locate(*(args.hash), context.app); - - std::pair, std::shared_ptr> - pair; - // database returned the nodestore hash. Fetch the txn directly from the - // nodestore. 
Don't traverse the transaction SHAMap - if (locator.isFound()) - { - auto start = std::chrono::system_clock::now(); - if (auto obj = context.app.getNodeFamily().db().fetchNodeObject( - locator.getNodestoreHash(), locator.getLedgerSequence())) - { - auto node = SHAMapTreeNode::makeFromPrefix( - makeSlice(obj->getData()), - SHAMapHash{locator.getNodestoreHash()}); - if (!node) - { - assert(false); - return {res, {rpcINTERNAL, "Error making SHAMap node"}}; - } - auto item = (static_cast(node.get()))->peekItem(); - if (!item) - { - assert(false); - return {res, {rpcINTERNAL, "Error reading SHAMap node"}}; - } - - auto [sttx, meta] = deserializeTxPlusMeta(*item); - JLOG(context.j.debug()) << "Successfully fetched from db"; - - if (!sttx || !meta) - { - assert(false); - return {res, {rpcINTERNAL, "Error deserializing SHAMap node"}}; - } - std::string reason; - res.txn = std::make_shared(sttx, reason, context.app); - res.txn->setLedger(locator.getLedgerSequence()); - res.txn->setStatus(COMMITTED); - if (args.binary) - { - SerialIter it(item->slice()); - it.skip(it.getVLDataLength()); // skip transaction - Blob blob = it.getVL(); - res.meta = std::move(blob); - } - else - { - res.meta = std::make_shared( - *(args.hash), res.txn->getLedger(), *meta); - } - res.validated = true; - - auto const ledgerInfo = - context.app.getRelationalDatabase().getLedgerInfoByIndex( - locator.getLedgerSequence()); - res.closeTime = ledgerInfo->closeTime; - res.ledgerHash = ledgerInfo->hash; - - return {res, rpcSUCCESS}; - } - else - { - JLOG(context.j.error()) << "Failed to fetch from db"; - assert(false); - return {res, {rpcINTERNAL, "Containing SHAMap node not found"}}; - } - auto end = std::chrono::system_clock::now(); - JLOG(context.j.debug()) << "tx flat fetch time : " - << ((end - start).count() / 1000000000.0); - } - // database did not find the transaction, and returned the ledger range - // that was searched - else - { - if (args.ledgerRange) - { - auto range = locator.getLedgerRangeSearched(); - auto min = args.ledgerRange->first; - auto max = args.ledgerRange->second; - if (min >= range.lower() && max <= range.upper()) - { - res.searchedAll = TxSearched::all; - } - else - { - res.searchedAll = TxSearched::some; - } - } - return {res, rpcTXN_NOT_FOUND}; - } - // database didn't return anything. 
This shouldn't happen - assert(false); - return {res, {rpcINTERNAL, "unexpected Postgres response"}}; -} - std::pair doTxHelp(RPC::Context& context, TxArgs args) { - if (context.app.config().reporting()) - return doTxPostgres(context, args); TxResult result; ClosedInterval range; @@ -344,7 +225,7 @@ populateJsonResponse( } // Note, result.ledgerHash is only set in a closed or validated - // ledger - as seen in `doTxHelp` and `doTxPostgres` + // ledger - as seen in `doTxHelp` if (result.ledgerHash) response[jss::ledger_hash] = to_string(*result.ledgerHash); diff --git a/src/xrpld/rpc/handlers/TxHistory.cpp b/src/xrpld/rpc/handlers/TxHistory.cpp index de86b182534..1122eab51c3 100644 --- a/src/xrpld/rpc/handlers/TxHistory.cpp +++ b/src/xrpld/rpc/handlers/TxHistory.cpp @@ -23,7 +23,6 @@ #include #include #include -#include #include #include #include @@ -60,8 +59,6 @@ doTxHistory(RPC::JsonContext& context) Json::Value obj; Json::Value& txs = obj[jss::txs]; obj[jss::index] = startIndex; - if (context.app.config().reporting()) - obj["used_postgres"] = true; for (auto const& t : trans) { diff --git a/src/xrpld/rpc/handlers/UnlList.cpp b/src/xrpld/rpc/handlers/UnlList.cpp index 78bd3f14eab..b3394534372 100644 --- a/src/xrpld/rpc/handlers/UnlList.cpp +++ b/src/xrpld/rpc/handlers/UnlList.cpp @@ -29,8 +29,6 @@ namespace ripple { Json::Value doUnlList(RPC::JsonContext& context) { - if (context.app.config().reporting()) - return rpcError(rpcREPORTING_UNSUPPORTED); Json::Value obj(Json::objectValue); context.app.validators().for_each_listed( diff --git a/src/xrpld/rpc/handlers/ValidatorListSites.cpp b/src/xrpld/rpc/handlers/ValidatorListSites.cpp index 902c373766f..39bc4e36471 100644 --- a/src/xrpld/rpc/handlers/ValidatorListSites.cpp +++ b/src/xrpld/rpc/handlers/ValidatorListSites.cpp @@ -28,9 +28,6 @@ namespace ripple { Json::Value doValidatorListSites(RPC::JsonContext& context) { - if (context.app.config().reporting()) - return rpcError(rpcREPORTING_UNSUPPORTED); - return context.app.validatorSites().getJson(); } diff --git a/src/xrpld/rpc/handlers/Validators.cpp b/src/xrpld/rpc/handlers/Validators.cpp index 4048e8962de..599e76f847a 100644 --- a/src/xrpld/rpc/handlers/Validators.cpp +++ b/src/xrpld/rpc/handlers/Validators.cpp @@ -28,9 +28,6 @@ namespace ripple { Json::Value doValidators(RPC::JsonContext& context) { - if (context.app.config().reporting()) - return rpcError(rpcREPORTING_UNSUPPORTED); - return context.app.validators().getJson(); } diff --git a/src/xrpld/shamap/Family.h b/src/xrpld/shamap/Family.h index 6559ce5059b..bbb22c273d0 100644 --- a/src/xrpld/shamap/Family.h +++ b/src/xrpld/shamap/Family.h @@ -65,8 +65,6 @@ class Family sweep() = 0; /** Acquire ledger that has a missing node by ledger sequence - * - * Throw if in reporting mode. * * @param refNum Sequence of ledger to acquire. * @param nodeHash Hash of missing node to report in throw. 
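For reference, the item that the removed doTxPostgres deserialized packs the serialized transaction and its metadata back to back as two variable-length (VL) blobs; the SerialIter calls above (skip(getVLDataLength()) followed by getVL()) walk exactly that layout. Below is a minimal standalone sketch of that decoding, assuming the standard XRPL VL length-prefix rules; rippled's SerialIter is the real implementation, and readVLPrefix / splitTxPlusMeta are illustrative names only:

    #include <cstddef>
    #include <cstdint>
    #include <stdexcept>
    #include <utility>
    #include <vector>

    using Blob = std::vector<std::uint8_t>;

    // Decode one VL length prefix starting at data[pos].
    // Returns {payload length, bytes consumed by the prefix}.
    std::pair<std::size_t, std::size_t>
    readVLPrefix(Blob const& data, std::size_t pos)
    {
        std::uint8_t const b1 = data.at(pos);
        if (b1 <= 192)
            return {b1, 1};
        if (b1 <= 240)
            return {193 + std::size_t(b1 - 193) * 256 + data.at(pos + 1), 2};
        if (b1 <= 254)
            return {12481 + std::size_t(b1 - 241) * 65536 +
                        std::size_t(data.at(pos + 1)) * 256 + data.at(pos + 2),
                    3};
        throw std::runtime_error("invalid VL prefix");
    }

    // Split a transaction SHAMap item into its two parts:
    // the serialized transaction first, then the metadata.
    std::pair<Blob, Blob>
    splitTxPlusMeta(Blob const& item)
    {
        auto const [txLen, p1] = readVLPrefix(item, 0);
        if (p1 + txLen > item.size())
            throw std::runtime_error("short item");
        Blob tx(item.begin() + p1, item.begin() + p1 + txLen);
        auto const [metaLen, p2] = readVLPrefix(item, p1 + txLen);
        auto const metaPos = p1 + txLen + p2;
        if (metaPos + metaLen > item.size())
            throw std::runtime_error("short item");
        Blob meta(item.begin() + metaPos, item.begin() + metaPos + metaLen);
        return {tx, meta};
    }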
diff --git a/src/xrpld/shamap/detail/NodeFamily.cpp b/src/xrpld/shamap/detail/NodeFamily.cpp index 01440a48799..bf95003aef8 100644 --- a/src/xrpld/shamap/detail/NodeFamily.cpp +++ b/src/xrpld/shamap/detail/NodeFamily.cpp @@ -69,14 +69,6 @@ void NodeFamily::missingNodeAcquireBySeq(std::uint32_t seq, uint256 const& nodeHash) { JLOG(j_.error()) << "Missing node in " << seq; - if (app_.config().reporting()) - { - std::stringstream ss; - ss << "Node not read, likely a Cassandra error in ledger seq " << seq - << " object hash " << nodeHash; - Throw(ss.str()); - } - std::unique_lock lock(maxSeqMutex_); if (maxSeq_ == 0) { From cad8970a57501fd081788bed5aa15cf9a4472183 Mon Sep 17 00:00:00 2001 From: John Freeman Date: Wed, 28 Aug 2024 14:23:38 -0500 Subject: [PATCH 17/26] refactor: Remove dead headers (#5081) --- bin/ci/README.md | 26 -- bin/ci/build.sh | 31 -- bin/ci/test.sh | 41 -- bin/ci/ubuntu/build-and-test.sh | 274 ------------ bin/ci/ubuntu/build-in-docker.sh | 36 -- bin/ci/ubuntu/travis-cache-start.sh | 44 -- include/xrpl/beast/test/fail_counter.h | 160 ------- include/xrpl/beast/test/fail_stream.h | 182 -------- include/xrpl/beast/test/pipe_stream.h | 482 --------------------- include/xrpl/beast/test/sig_wait.h | 29 -- include/xrpl/beast/test/string_iostream.h | 166 ------- include/xrpl/beast/test/string_istream.h | 155 ------- include/xrpl/beast/test/string_ostream.h | 137 ------ include/xrpl/beast/test/test_allocator.h | 159 ------- include/xrpl/beast/unit_test/dstream.h | 121 ------ include/xrpl/beast/utility/hash_pair.h | 72 --- src/xrpld/peerfinder/detail/Logic.h | 1 - src/xrpld/peerfinder/detail/Reporting.h | 49 --- src/xrpld/peerfinder/sim/FunctionQueue.h | 100 ----- src/xrpld/peerfinder/sim/GraphAlgorithms.h | 76 ---- src/xrpld/peerfinder/sim/Message.h | 47 -- src/xrpld/peerfinder/sim/NodeSnapshot.h | 37 -- src/xrpld/peerfinder/sim/Params.h | 45 -- src/xrpld/peerfinder/sim/Predicates.h | 87 ---- 24 files changed, 2557 deletions(-) delete mode 100644 bin/ci/README.md delete mode 100755 bin/ci/build.sh delete mode 100755 bin/ci/test.sh delete mode 100755 bin/ci/ubuntu/build-and-test.sh delete mode 100755 bin/ci/ubuntu/build-in-docker.sh delete mode 100755 bin/ci/ubuntu/travis-cache-start.sh delete mode 100644 include/xrpl/beast/test/fail_counter.h delete mode 100644 include/xrpl/beast/test/fail_stream.h delete mode 100644 include/xrpl/beast/test/pipe_stream.h delete mode 100644 include/xrpl/beast/test/sig_wait.h delete mode 100644 include/xrpl/beast/test/string_iostream.h delete mode 100644 include/xrpl/beast/test/string_istream.h delete mode 100644 include/xrpl/beast/test/string_ostream.h delete mode 100644 include/xrpl/beast/test/test_allocator.h delete mode 100644 include/xrpl/beast/unit_test/dstream.h delete mode 100644 include/xrpl/beast/utility/hash_pair.h delete mode 100644 src/xrpld/peerfinder/detail/Reporting.h delete mode 100644 src/xrpld/peerfinder/sim/FunctionQueue.h delete mode 100644 src/xrpld/peerfinder/sim/GraphAlgorithms.h delete mode 100644 src/xrpld/peerfinder/sim/Message.h delete mode 100644 src/xrpld/peerfinder/sim/NodeSnapshot.h delete mode 100644 src/xrpld/peerfinder/sim/Params.h delete mode 100644 src/xrpld/peerfinder/sim/Predicates.h diff --git a/bin/ci/README.md b/bin/ci/README.md deleted file mode 100644 index 32ae24b3a20..00000000000 --- a/bin/ci/README.md +++ /dev/null @@ -1,26 +0,0 @@ -# Continuous Integration (CI) Scripts - -In this directory are two scripts, `build.sh` and `test.sh` used for building -and testing rippled. 
- -(For now, they assume Bash and Linux. Once I get Windows containers for -testing, I'll try them there, but if Bash is not available, then they will -soon be joined by PowerShell scripts `build.ps` and `test.ps`.) - -We don't want these scripts to require arcane invocations that can only be -pieced together from within a CI configuration. We want something that humans -can easily invoke, read, and understand, for when we eventually have to test -and debug them interactively. That means: - -(1) They should work with no arguments. -(2) They should document their arguments. -(3) They should expand short arguments into long arguments. - -While we want to provide options for common use cases, we don't need to offer -the kitchen sink. We can rightfully expect users with esoteric, complicated -needs to write their own scripts. - -To make argument-handling easy for us, the implementers, we can just take all -arguments from environment variables. They have the nice advantage that every -command-line uses named arguments. For the benefit of us and our users, we -document those variables at the top of each script. diff --git a/bin/ci/build.sh b/bin/ci/build.sh deleted file mode 100755 index fa7a0c96829..00000000000 --- a/bin/ci/build.sh +++ /dev/null @@ -1,31 +0,0 @@ -#!/usr/bin/env bash - -set -o xtrace -set -o errexit - -# The build system. Either 'Unix Makefiles' or 'Ninja'. -GENERATOR=${GENERATOR:-Unix Makefiles} -# The compiler. Either 'gcc' or 'clang'. -COMPILER=${COMPILER:-gcc} -# The build type. Either 'Debug' or 'Release'. -BUILD_TYPE=${BUILD_TYPE:-Debug} -# Additional arguments to CMake. -# We use the `-` substitution here instead of `:-` so that callers can erase -# the default by setting `$CMAKE_ARGS` to the empty string. -CMAKE_ARGS=${CMAKE_ARGS-'-Dwerr=ON'} - -# https://gitlab.kitware.com/cmake/cmake/issues/18865 -CMAKE_ARGS="-DBoost_NO_BOOST_CMAKE=ON ${CMAKE_ARGS}" - -if [[ ${COMPILER} == 'gcc' ]]; then - export CC='gcc' - export CXX='g++' -elif [[ ${COMPILER} == 'clang' ]]; then - export CC='clang' - export CXX='clang++' -fi - -mkdir build -cd build -cmake -G "${GENERATOR}" -DCMAKE_BUILD_TYPE=${BUILD_TYPE} ${CMAKE_ARGS} .. -cmake --build . -- -j $(nproc) diff --git a/bin/ci/test.sh b/bin/ci/test.sh deleted file mode 100755 index 11615d732b7..00000000000 --- a/bin/ci/test.sh +++ /dev/null @@ -1,41 +0,0 @@ -#!/usr/bin/env bash - -set -o xtrace -set -o errexit - -# Set to 'true' to run the known "manual" tests in rippled. -MANUAL_TESTS=${MANUAL_TESTS:-false} -# The maximum number of concurrent tests. -CONCURRENT_TESTS=${CONCURRENT_TESTS:-$(nproc)} -# The path to rippled. -RIPPLED=${RIPPLED:-build/rippled} -# Additional arguments to rippled. 
-RIPPLED_ARGS=${RIPPLED_ARGS:-} - -function join_by { local IFS="$1"; shift; echo "$*"; } - -declare -a manual_tests=( - 'beast.chrono.abstract_clock' - 'beast.unit_test.print' - 'ripple.NodeStore.Timing' - 'ripple.app.Flow_manual' - 'ripple.app.NoRippleCheckLimits' - 'ripple.app.PayStrandAllPairs' - 'ripple.consensus.ByzantineFailureSim' - 'ripple.consensus.DistributedValidators' - 'ripple.consensus.ScaleFreeSim' - 'ripple.tx.CrossingLimits' - 'ripple.tx.FindOversizeCross' - 'ripple.tx.Offer_manual' - 'ripple.tx.OversizeMeta' - 'ripple.tx.PlumpBook' -) - -if [[ ${MANUAL_TESTS} == 'true' ]]; then - RIPPLED_ARGS+=" --unittest=$(join_by , "${manual_tests[@]}")" -else - RIPPLED_ARGS+=" --unittest --quiet --unittest-log" -fi -RIPPLED_ARGS+=" --unittest-jobs ${CONCURRENT_TESTS}" - -${RIPPLED} ${RIPPLED_ARGS} diff --git a/bin/ci/ubuntu/build-and-test.sh b/bin/ci/ubuntu/build-and-test.sh deleted file mode 100755 index 2c1734863fb..00000000000 --- a/bin/ci/ubuntu/build-and-test.sh +++ /dev/null @@ -1,274 +0,0 @@ -#!/usr/bin/env bash -set -ex - -function version_ge() { test "$(echo "$@" | tr " " "\n" | sort -rV | head -n 1)" == "$1"; } - -__dirname=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd ) -echo "using CC: ${CC}" -"${CC}" --version -export CC - -COMPNAME=$(basename $CC) -echo "using CXX: ${CXX:-notset}" -if [[ $CXX ]]; then - "${CXX}" --version - export CXX -fi -: ${BUILD_TYPE:=Debug} -echo "BUILD TYPE: ${BUILD_TYPE}" - -: ${TARGET:=install} -echo "BUILD TARGET: ${TARGET}" - -JOBS=${NUM_PROCESSORS:-2} -if [[ ${TRAVIS:-false} != "true" ]]; then - JOBS=$((JOBS+1)) -fi - -if [[ ! -z "${CMAKE_EXE:-}" ]] ; then - export PATH="$(dirname ${CMAKE_EXE}):$PATH" -fi - -if [ -x /usr/bin/time ] ; then - : ${TIME:="Duration: %E"} - export TIME - time=/usr/bin/time -else - time= -fi - -echo "Building rippled" -: ${CMAKE_EXTRA_ARGS:=""} -if [[ ${NINJA_BUILD:-} == true ]]; then - CMAKE_EXTRA_ARGS+=" -G Ninja" -fi - -coverage=false -if [[ "${TARGET}" == "coverage" ]] ; then - echo "coverage option detected." - coverage=true -fi - -cmake --version -CMAKE_VER=$(cmake --version | cut -d " " -f 3 | head -1) - -# -# allow explicit setting of the name of the build -# dir, otherwise default to the compiler.build_type -# -: "${BUILD_DIR:=${COMPNAME}.${BUILD_TYPE}}" -BUILDARGS="--target ${TARGET}" -BUILDTOOLARGS="" -if version_ge $CMAKE_VER "3.12.0" ; then - BUILDARGS+=" --parallel" -fi - -if [[ ${NINJA_BUILD:-} == false ]]; then - if version_ge $CMAKE_VER "3.12.0" ; then - BUILDARGS+=" ${JOBS}" - else - BUILDTOOLARGS+=" -j ${JOBS}" - fi -fi - -if [[ ${VERBOSE_BUILD:-} == true ]]; then - CMAKE_EXTRA_ARGS+=" -DCMAKE_VERBOSE_MAKEFILE=ON" - if version_ge $CMAKE_VER "3.14.0" ; then - BUILDARGS+=" --verbose" - else - if [[ ${NINJA_BUILD:-} == false ]]; then - BUILDTOOLARGS+=" verbose=1" - else - BUILDTOOLARGS+=" -v" - fi - fi -fi - -if [[ ${USE_CCACHE:-} == true ]]; then - echo "using ccache with basedir [${CCACHE_BASEDIR:-}]" - CMAKE_EXTRA_ARGS+=" -DCMAKE_C_COMPILER_LAUNCHER=ccache -DCMAKE_CXX_COMPILER_LAUNCHER=ccache" -fi -if [ -d "build/${BUILD_DIR}" ]; then - rm -rf "build/${BUILD_DIR}" -fi - -mkdir -p "build/${BUILD_DIR}" -pushd "build/${BUILD_DIR}" - -# cleanup possible artifacts -rm -fv CMakeFiles/CMakeOutput.log CMakeFiles/CMakeError.log -# Clean up NIH directories which should be git repos, but aren't -for nih_path in ${NIH_CACHE_ROOT}/*/*/*/src ${NIH_CACHE_ROOT}/*/*/src -do - for dir in lz4 snappy rocksdb - do - if [ -e ${nih_path}/${dir} -a \! 
-e ${nih_path}/${dir}/.git ] - then - ls -la ${nih_path}/${dir}* - rm -rfv ${nih_path}/${dir}* - fi - done -done - -# generate -${time} cmake ../.. -DCMAKE_BUILD_TYPE=${BUILD_TYPE} ${CMAKE_EXTRA_ARGS} -# Display the cmake output, to help with debugging if something fails -for file in CMakeOutput.log CMakeError.log -do - if [ -f CMakeFiles/${file} ] - then - ls -l CMakeFiles/${file} - cat CMakeFiles/${file} - fi -done -# build -export DESTDIR=$(pwd)/_INSTALLED_ - -${time} eval cmake --build . ${BUILDARGS} -- ${BUILDTOOLARGS} - -if [[ ${TARGET} == "docs" ]]; then - ## mimic the standard test output for docs build - ## to make controlling processes like jenkins happy - if [ -f docs/html/index.html ]; then - echo "1 case, 1 test total, 0 failures" - else - echo "1 case, 1 test total, 1 failures" - fi - exit -fi -popd - -if [[ "${TARGET}" == "validator-keys" ]] ; then - export APP_PATH="$PWD/build/${BUILD_DIR}/validator-keys/validator-keys" -else - export APP_PATH="$PWD/build/${BUILD_DIR}/rippled" -fi -echo "using APP_PATH: ${APP_PATH}" - -# See what we've actually built -ldd ${APP_PATH} - -: ${APP_ARGS:=} - -if [[ "${TARGET}" == "validator-keys" ]] ; then - APP_ARGS="--unittest" -else - function join_by { local IFS="$1"; shift; echo "$*"; } - - # This is a list of manual tests - # in rippled that we want to run - # ORDER matters here...sorted in approximately - # descending execution time (longest running tests at top) - declare -a manual_tests=( - 'ripple.ripple_data.reduce_relay_simulate' - 'ripple.tx.Offer_manual' - 'ripple.tx.CrossingLimits' - 'ripple.tx.PlumpBook' - 'ripple.app.Flow_manual' - 'ripple.tx.OversizeMeta' - 'ripple.consensus.DistributedValidators' - 'ripple.app.NoRippleCheckLimits' - 'ripple.ripple_data.compression' - 'ripple.NodeStore.Timing' - 'ripple.consensus.ByzantineFailureSim' - 'beast.chrono.abstract_clock' - 'beast.unit_test.print' - ) - if [[ ${TRAVIS:-false} != "true" ]]; then - # these two tests cause travis CI to run out of memory. - # TODO: investigate possible workarounds. - manual_tests=( - 'ripple.consensus.ScaleFreeSim' - 'ripple.tx.FindOversizeCross' - "${manual_tests[@]}" - ) - fi - - if [[ ${MANUAL_TESTS:-} == true ]]; then - APP_ARGS+=" --unittest=$(join_by , "${manual_tests[@]}")" - else - APP_ARGS+=" --unittest --quiet --unittest-log" - fi - if [[ ${coverage} == false && ${PARALLEL_TESTS:-} == true ]]; then - APP_ARGS+=" --unittest-jobs ${JOBS}" - fi - - if [[ ${IPV6_TESTS:-} == true ]]; then - APP_ARGS+=" --unittest-ipv6" - fi -fi - -if [[ ${coverage} == true && $CC =~ ^gcc ]]; then - # Push the results (lcov.info) to codecov - codecov -X gcov # don't even try and look for .gcov files ;) - find . -name "*.gcda" | xargs rm -f -fi - -if [[ ${SKIP_TESTS:-} == true ]]; then - echo "skipping tests." - exit -fi - -ulimit -a -corepat=$(cat /proc/sys/kernel/core_pattern) -if [[ ${corepat} =~ ^[:space:]*\| ]] ; then - echo "WARNING: core pattern is piping - can't search for core files" - look_core=false -else - look_core=true - coredir=$(dirname ${corepat}) -fi -if [[ ${look_core} == true ]]; then - before=$(ls -A1 ${coredir}) -fi - -set +e -echo "Running tests for ${APP_PATH}" -if [[ ${MANUAL_TESTS:-} == true && ${PARALLEL_TESTS:-} != true ]]; then - for t in "${manual_tests[@]}" ; do - ${APP_PATH} --unittest=${t} - TEST_STAT=$? - if [[ $TEST_STAT -ne 0 ]] ; then - break - fi - done -else - ${APP_PATH} ${APP_ARGS} - TEST_STAT=$? 
-fi -set -e - -if [[ ${look_core} == true ]]; then - after=$(ls -A1 ${coredir}) - oIFS="${IFS}" - IFS=$'\n\r' - found_core=false - for l in $(diff -w --suppress-common-lines <(echo "$before") <(echo "$after")) ; do - if [[ "$l" =~ ^[[:space:]]*\>[[:space:]]*(.+)$ ]] ; then - corefile="${BASH_REMATCH[1]}" - echo "FOUND core dump file at '${coredir}/${corefile}'" - gdb_output=$(/bin/mktemp /tmp/gdb_output_XXXXXXXXXX.txt) - found_core=true - gdb \ - -ex "set height 0" \ - -ex "set logging file ${gdb_output}" \ - -ex "set logging on" \ - -ex "print 'ripple::BuildInfo::versionString'" \ - -ex "thread apply all backtrace full" \ - -ex "info inferiors" \ - -ex quit \ - "$APP_PATH" \ - "${coredir}/${corefile}" &> /dev/null - - echo -e "CORE INFO: \n\n $(cat ${gdb_output}) \n\n)" - fi - done - IFS="${oIFS}" -fi - -if [[ ${found_core} == true ]]; then - exit -1 -else - exit $TEST_STAT -fi - diff --git a/bin/ci/ubuntu/build-in-docker.sh b/bin/ci/ubuntu/build-in-docker.sh deleted file mode 100755 index feeabb1189a..00000000000 --- a/bin/ci/ubuntu/build-in-docker.sh +++ /dev/null @@ -1,36 +0,0 @@ -#!/usr/bin/env bash -# run our build script in a docker container -# using travis-ci hosts -set -eux - -function join_by { local IFS="$1"; shift; echo "$*"; } - -set +x -echo "VERBOSE_BUILD=true" > /tmp/co.env -matchers=( - 'TRAVIS.*' 'CI' 'CC' 'CXX' - 'BUILD_TYPE' 'TARGET' 'MAX_TIME' - 'CODECOV.+' 'CMAKE.*' '.+_TESTS' - '.+_OPTIONS' 'NINJA.*' 'NUM_.+' - 'NIH_.+' 'BOOST.*' '.*CCACHE.*') - -matchstring=$(join_by '|' "${matchers[@]}") -echo "MATCHSTRING IS:: $matchstring" -env | grep -E "^(${matchstring})=" >> /tmp/co.env -set -x -# need to eliminate TRAVIS_CMD...don't want to pass it to the container -cat /tmp/co.env | grep -v TRAVIS_CMD > /tmp/co.env.2 -mv /tmp/co.env.2 /tmp/co.env -cat /tmp/co.env -mkdir -p -m 0777 ${TRAVIS_BUILD_DIR}/cores -echo "${TRAVIS_BUILD_DIR}/cores/%e.%p" | sudo tee /proc/sys/kernel/core_pattern -docker run \ - -t --env-file /tmp/co.env \ - -v ${TRAVIS_HOME}:${TRAVIS_HOME} \ - -w ${TRAVIS_BUILD_DIR} \ - --cap-add SYS_PTRACE \ - --ulimit "core=-1" \ - $DOCKER_IMAGE \ - /bin/bash -c 'if [[ $CC =~ ([[:alpha:]]+)-([[:digit:].]+) ]] ; then sudo update-alternatives --set ${BASH_REMATCH[1]} /usr/bin/$CC; fi; bin/ci/ubuntu/build-and-test.sh' - - diff --git a/bin/ci/ubuntu/travis-cache-start.sh b/bin/ci/ubuntu/travis-cache-start.sh deleted file mode 100755 index 6811acb9043..00000000000 --- a/bin/ci/ubuntu/travis-cache-start.sh +++ /dev/null @@ -1,44 +0,0 @@ -#!/usr/bin/env bash -# some cached files create churn, so save them here for -# later restoration before packing the cache -set -eux -clean_cache="travis_clean_cache" -if [[ ! 
( "${TRAVIS_JOB_NAME}" =~ "windows" || \ - "${TRAVIS_JOB_NAME}" =~ "prereq-keep" ) ]] && \ - ( [[ "${TRAVIS_COMMIT_MESSAGE}" =~ "${clean_cache}" ]] || \ - ( [[ -v TRAVIS_PULL_REQUEST_SHA && \ - "${TRAVIS_PULL_REQUEST_SHA}" != "" ]] && \ - git log -1 "${TRAVIS_PULL_REQUEST_SHA}" | grep -cq "${clean_cache}" - - ) - ) -then - find ${TRAVIS_HOME}/_cache -maxdepth 2 -type d - rm -rf ${TRAVIS_HOME}/_cache - mkdir -p ${TRAVIS_HOME}/_cache -fi - -pushd ${TRAVIS_HOME} -if [ -f cache_ignore.tar ] ; then - rm -f cache_ignore.tar -fi - -if [ -d _cache/nih_c ] ; then - find _cache/nih_c -name "build.ninja" | tar rf cache_ignore.tar --files-from - - find _cache/nih_c -name ".ninja_deps" | tar rf cache_ignore.tar --files-from - - find _cache/nih_c -name ".ninja_log" | tar rf cache_ignore.tar --files-from - - find _cache/nih_c -name "*.log" | tar rf cache_ignore.tar --files-from - - find _cache/nih_c -name "*.tlog" | tar rf cache_ignore.tar --files-from - - # show .a files in the cache, for sanity checking - find _cache/nih_c -name "*.a" -ls -fi - -if [ -d _cache/ccache ] ; then - find _cache/ccache -name "stats" | tar rf cache_ignore.tar --files-from - -fi - -if [ -f cache_ignore.tar ] ; then - tar -tf cache_ignore.tar -fi -popd - - diff --git a/include/xrpl/beast/test/fail_counter.h b/include/xrpl/beast/test/fail_counter.h deleted file mode 100644 index d0cae77ddad..00000000000 --- a/include/xrpl/beast/test/fail_counter.h +++ /dev/null @@ -1,160 +0,0 @@ -// -// Copyright (c) 2013-2017 Vinnie Falco (vinnie dot falco at gmail dot com) -// -// Distributed under the Boost Software License, Version 1.0. (See accompanying -// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) -// - -#ifndef BEAST_TEST_FAIL_COUNTER_HPP -#define BEAST_TEST_FAIL_COUNTER_HPP - -#include -#include - -namespace beast { -namespace test { - -enum class error { fail_error = 1 }; - -namespace detail { - -class fail_error_category : public boost::system::error_category -{ -public: - const char* - name() const noexcept override - { - return "test"; - } - - std::string - message(int ev) const override - { - switch (static_cast(ev)) - { - default: - case error::fail_error: - return "test error"; - } - } - - boost::system::error_condition - default_error_condition(int ev) const noexcept override - { - return boost::system::error_condition{ev, *this}; - } - - bool - equivalent(int ev, boost::system::error_condition const& condition) - const noexcept override - { - return condition.value() == ev && &condition.category() == this; - } - - bool - equivalent(error_code const& error, int ev) const noexcept override - { - return error.value() == ev && &error.category() == this; - } -}; - -inline boost::system::error_category const& -get_error_category() -{ - static fail_error_category const cat{}; - return cat; -} - -} // namespace detail - -inline error_code -make_error_code(error ev) -{ - return error_code{ - static_cast::type>(ev), - detail::get_error_category()}; -} - -/** An error code with an error set on default construction - - Default constructed versions of this object will have - an error code set right away. This helps tests find code - which forgets to clear the error code on success. -*/ -struct fail_error_code : error_code -{ - fail_error_code() : error_code(make_error_code(error::fail_error)) - { - } - - template - fail_error_code(Arg0&& arg0, ArgN&&... argn) - : error_code(arg0, std::forward(argn)...) - { - } -}; - -/** A countdown to simulated failure. 
- - On the Nth operation, the class will fail with the specified - error code, or the default error code of @ref error::fail_error. -*/ -class fail_counter -{ - std::size_t n_; - error_code ec_; - -public: - fail_counter(fail_counter&&) = default; - - /** Construct a counter. - - @param The 0-based index of the operation to fail on or after. - */ - explicit fail_counter( - std::size_t n, - error_code ev = make_error_code(error::fail_error)) - : n_(n), ec_(ev) - { - } - - /// Throw an exception on the Nth failure - void - fail() - { - if (n_ > 0) - --n_; - if (!n_) - BOOST_THROW_EXCEPTION(system_error{ec_}); - } - - /// Set an error code on the Nth failure - bool - fail(error_code& ec) - { - if (n_ > 0) - --n_; - if (!n_) - { - ec = ec_; - return true; - } - ec.assign(0, ec.category()); - return false; - } -}; - -} // namespace test -} // namespace beast - -namespace boost { -namespace system { -template <> -struct is_error_code_enum -{ - static bool const value = true; -}; -} // namespace system -} // namespace boost - -#endif diff --git a/include/xrpl/beast/test/fail_stream.h b/include/xrpl/beast/test/fail_stream.h deleted file mode 100644 index 161e73ef091..00000000000 --- a/include/xrpl/beast/test/fail_stream.h +++ /dev/null @@ -1,182 +0,0 @@ -// -// Copyright (c) 2013-2017 Vinnie Falco (vinnie dot falco at gmail dot com) -// -// Distributed under the Boost Software License, Version 1.0. (See accompanying -// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) -// - -#ifndef BEAST_TEST_FAIL_STREAM_HPP -#define BEAST_TEST_FAIL_STREAM_HPP - -#include -#include -#include -#include -#include -#include -#include - -namespace beast { -namespace test { - -/** A stream wrapper that fails. - - On the Nth operation, the stream will fail with the specified - error code, or the default error code of invalid_argument. -*/ -template -class fail_stream -{ - boost::optional fc_; - fail_counter* pfc_; - NextLayer next_layer_; - -public: - using next_layer_type = typename std::remove_reference::type; - - using lowest_layer_type = typename get_lowest_layer::type; - - fail_stream(fail_stream&&) = delete; - fail_stream(fail_stream const&) = delete; - fail_stream& - operator=(fail_stream&&) = delete; - fail_stream& - operator=(fail_stream const&) = delete; - - template - explicit fail_stream(std::size_t n, Args&&... args) - : fc_(n), pfc_(&*fc_), next_layer_(std::forward(args)...) - { - } - - template - explicit fail_stream(fail_counter& fc, Args&&... args) - : pfc_(&fc), next_layer_(std::forward(args)...) 
- { - } - - next_layer_type& - next_layer() - { - return next_layer_; - } - - lowest_layer_type& - lowest_layer() - { - return next_layer_.lowest_layer(); - } - - lowest_layer_type const& - lowest_layer() const - { - return next_layer_.lowest_layer(); - } - - boost::asio::io_service& - get_io_service() - { - return next_layer_.get_io_service(); - } - - template - std::size_t - read_some(MutableBufferSequence const& buffers) - { - pfc_->fail(); - return next_layer_.read_some(buffers); - } - - template - std::size_t - read_some(MutableBufferSequence const& buffers, error_code& ec) - { - if (pfc_->fail(ec)) - return 0; - return next_layer_.read_some(buffers, ec); - } - - template - async_return_type - async_read_some(MutableBufferSequence const& buffers, ReadHandler&& handler) - { - error_code ec; - if (pfc_->fail(ec)) - { - async_completion init{ - handler}; - next_layer_.get_io_service().post( - bind_handler(init.completion_handler, ec, 0)); - return init.result.get(); - } - return next_layer_.async_read_some( - buffers, std::forward(handler)); - } - - template - std::size_t - write_some(ConstBufferSequence const& buffers) - { - pfc_->fail(); - return next_layer_.write_some(buffers); - } - - template - std::size_t - write_some(ConstBufferSequence const& buffers, error_code& ec) - { - if (pfc_->fail(ec)) - return 0; - return next_layer_.write_some(buffers, ec); - } - - template - async_return_type - async_write_some(ConstBufferSequence const& buffers, WriteHandler&& handler) - { - error_code ec; - if (pfc_->fail(ec)) - { - async_completion init{ - handler}; - next_layer_.get_io_service().post( - bind_handler(init.completion_handler, ec, 0)); - return init.result.get(); - } - return next_layer_.async_write_some( - buffers, std::forward(handler)); - } - - friend void - teardown( - websocket::teardown_tag, - fail_stream& stream, - boost::system::error_code& ec) - { - if (stream.pfc_->fail(ec)) - return; - beast::websocket_helpers::call_teardown(stream.next_layer(), ec); - } - - template - friend void - async_teardown( - websocket::teardown_tag, - fail_stream& stream, - TeardownHandler&& handler) - { - error_code ec; - if (stream.pfc_->fail(ec)) - { - stream.get_io_service().post(bind_handler(std::move(handler), ec)); - return; - } - beast::websocket_helpers::call_async_teardown( - stream.next_layer(), std::forward(handler)); - } -}; - -} // namespace test -} // namespace beast - -#endif diff --git a/include/xrpl/beast/test/pipe_stream.h b/include/xrpl/beast/test/pipe_stream.h deleted file mode 100644 index 762419a539a..00000000000 --- a/include/xrpl/beast/test/pipe_stream.h +++ /dev/null @@ -1,482 +0,0 @@ -// -// Copyright (c) 2013-2017 Vinnie Falco (vinnie dot falco at gmail dot com) -// -// Distributed under the Boost Software License, Version 1.0. (See accompanying -// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) -// - -#ifndef BEAST_TEST_PIPE_STREAM_HPP -#define BEAST_TEST_PIPE_STREAM_HPP - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -namespace beast { -namespace test { - -/** A bidirectional in-memory communication channel - - An instance of this class provides a client and server - endpoint that are automatically connected to each other - similarly to a connected socket. 
- - Test pipes are used to facilitate writing unit tests - where the behavior of the transport is tightly controlled - to help illuminate all code paths (for code coverage) -*/ -class pipe -{ -public: - using buffer_type = flat_buffer; - -private: - struct read_op - { - virtual ~read_op() = default; - virtual void - operator()() = 0; - }; - - struct state - { - std::mutex m; - buffer_type b; - std::condition_variable cv; - std::unique_ptr op; - bool eof = false; - }; - - state s_[2]; - -public: - /** Represents an endpoint. - - Each pipe has a client stream and a server stream. - */ - class stream - { - friend class pipe; - - template - class read_op_impl; - - state& in_; - state& out_; - boost::asio::io_service& ios_; - fail_counter* fc_ = nullptr; - std::size_t read_max_ = (std::numeric_limits::max)(); - std::size_t write_max_ = (std::numeric_limits::max)(); - - stream(state& in, state& out, boost::asio::io_service& ios) - : in_(in), out_(out), ios_(ios), buffer(in_.b) - { - } - - public: - using buffer_type = pipe::buffer_type; - - /// Direct access to the underlying buffer - buffer_type& buffer; - - /// Counts the number of read calls - std::size_t nread = 0; - - /// Counts the number of write calls - std::size_t nwrite = 0; - - ~stream() = default; - stream(stream&&) = default; - - /// Set the fail counter on the object - void - fail(fail_counter& fc) - { - fc_ = &fc; - } - - /// Return the `io_service` associated with the object - boost::asio::io_service& - get_io_service() - { - return ios_; - } - - /// Set the maximum number of bytes returned by read_some - void - read_size(std::size_t n) - { - read_max_ = n; - } - - /// Set the maximum number of bytes returned by write_some - void - write_size(std::size_t n) - { - write_max_ = n; - } - - /// Returns a string representing the pending input data - string_view - str() const - { - using boost::asio::buffer_cast; - using boost::asio::buffer_size; - return { - buffer_cast(*in_.b.data().begin()), - buffer_size(*in_.b.data().begin())}; - } - - /// Clear the buffer holding the input data - void - clear() - { - in_.b.consume((std::numeric_limits::max)()); - } - - /** Close the stream. - - The other end of the pipe will see - `boost::asio::error::eof` on read. - */ - template - void - close(); - - template - std::size_t - read_some(MutableBufferSequence const& buffers); - - template - std::size_t - read_some(MutableBufferSequence const& buffers, error_code& ec); - - template - async_return_type - async_read_some( - MutableBufferSequence const& buffers, - ReadHandler&& handler); - - template - std::size_t - write_some(ConstBufferSequence const& buffers); - - template - std::size_t - write_some(ConstBufferSequence const& buffers, error_code&); - - template - async_return_type - async_write_some( - ConstBufferSequence const& buffers, - WriteHandler&& handler); - - friend void - teardown( - websocket::teardown_tag, - stream&, - boost::system::error_code& ec) - { - ec.assign(0, ec.category()); - } - - template - friend void - async_teardown( - websocket::teardown_tag, - stream& s, - TeardownHandler&& handler) - { - s.get_io_service().post( - bind_handler(std::move(handler), error_code{})); - } - }; - - /** Constructor. - - The client and server endpoints will use the same `io_service`. - */ - explicit pipe(boost::asio::io_service& ios) - : client(s_[0], s_[1], ios), server(s_[1], s_[0], ios) - { - } - - /** Constructor. - - The client and server endpoints will different `io_service` objects. 
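A usage sketch, assuming the pipe interface in this header (one io_service and one thread, so the read below finds its data already buffered and returns immediately):

    #include <xrpl/beast/test/pipe_stream.h>
    #include <boost/asio/buffer.hpp>
    #include <boost/asio/io_service.hpp>
    #include <cassert>
    #include <cstring>

    void example()
    {
        boost::asio::io_service ios;
        beast::test::pipe p{ios};

        // Bytes written at the client end become readable at the server end.
        p.client.write_some(boost::asio::buffer("hello", 5));

        char buf[5];
        auto const n = p.server.read_some(boost::asio::buffer(buf));
        assert(n == 5 && std::memcmp(buf, "hello", 5) == 0);
    }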
- */ - explicit pipe(boost::asio::io_service& ios1, boost::asio::io_service& ios2) - : client(s_[0], s_[1], ios1), server(s_[1], s_[0], ios2) - { - } - - /// Represents the client endpoint - stream client; - - /// Represents the server endpoint - stream server; -}; - -//------------------------------------------------------------------------------ - -template -class pipe::stream::read_op_impl : public pipe::read_op -{ - stream& s_; - Buffers b_; - Handler h_; - -public: - read_op_impl(stream& s, Buffers const& b, Handler&& h) - : s_(s), b_(b), h_(std::move(h)) - { - } - - read_op_impl(stream& s, Buffers const& b, Handler const& h) - : s_(s), b_(b), h_(h) - { - } - - void - operator()() override; -}; - -//------------------------------------------------------------------------------ - -template -void -pipe::stream::read_op_impl::operator()() -{ - using boost::asio::buffer_copy; - using boost::asio::buffer_size; - s_.ios_.post([&]() { - BOOST_ASSERT(s_.in_.op); - std::unique_lock lock{s_.in_.m}; - if (s_.in_.b.size() > 0) - { - auto const bytes_transferred = - buffer_copy(b_, s_.in_.b.data(), s_.read_max_); - s_.in_.b.consume(bytes_transferred); - auto& s = s_; - Handler h{std::move(h_)}; - lock.unlock(); - s.in_.op.reset(nullptr); - ++s.nread; - s.ios_.post( - bind_handler(std::move(h), error_code{}, bytes_transferred)); - } - else - { - BOOST_ASSERT(s_.in_.eof); - auto& s = s_; - Handler h{std::move(h_)}; - lock.unlock(); - s.in_.op.reset(nullptr); - ++s.nread; - s.ios_.post(bind_handler(std::move(h), boost::asio::error::eof, 0)); - } - }); -} - -//------------------------------------------------------------------------------ - -template -void -pipe::stream::close() -{ - std::lock_guard lock{out_.m}; - out_.eof = true; - if (out_.op) - out_.op.get()->operator()(); - else - out_.cv.notify_all(); -} - -template -std::size_t -pipe::stream::read_some(MutableBufferSequence const& buffers) -{ - static_assert( - is_mutable_buffer_sequence::value, - "MutableBufferSequence requirements not met"); - error_code ec; - auto const n = read_some(buffers, ec); - if (ec) - BOOST_THROW_EXCEPTION(system_error{ec}); - return n; -} - -template -std::size_t -pipe::stream::read_some(MutableBufferSequence const& buffers, error_code& ec) -{ - static_assert( - is_mutable_buffer_sequence::value, - "MutableBufferSequence requirements not met"); - using boost::asio::buffer_copy; - using boost::asio::buffer_size; - BOOST_ASSERT(!in_.op); - BOOST_ASSERT(buffer_size(buffers) > 0); - if (fc_ && fc_->fail(ec)) - return 0; - std::unique_lock lock{in_.m}; - in_.cv.wait(lock, [&]() { return in_.b.size() > 0 || in_.eof; }); - std::size_t bytes_transferred; - if (in_.b.size() > 0) - { - ec.assign(0, ec.category()); - bytes_transferred = buffer_copy(buffers, in_.b.data(), read_max_); - in_.b.consume(bytes_transferred); - } - else - { - BOOST_ASSERT(in_.eof); - bytes_transferred = 0; - ec = boost::asio::error::eof; - } - ++nread; - return bytes_transferred; -} - -template -async_return_type -pipe::stream::async_read_some( - MutableBufferSequence const& buffers, - ReadHandler&& handler) -{ - static_assert( - is_mutable_buffer_sequence::value, - "MutableBufferSequence requirements not met"); - using boost::asio::buffer_copy; - using boost::asio::buffer_size; - BOOST_ASSERT(!in_.op); - BOOST_ASSERT(buffer_size(buffers) > 0); - async_completion init{handler}; - if (fc_) - { - error_code ec; - if (fc_->fail(ec)) - return ios_.post(bind_handler(init.completion_handler, ec, 0)); - } - { - std::unique_lock lock{in_.m}; - if (in_.eof) - { 
- lock.unlock(); - ++nread; - ios_.post(bind_handler( - init.completion_handler, boost::asio::error::eof, 0)); - } - else if (buffer_size(buffers) == 0 || buffer_size(in_.b.data()) > 0) - { - auto const bytes_transferred = - buffer_copy(buffers, in_.b.data(), read_max_); - in_.b.consume(bytes_transferred); - lock.unlock(); - ++nread; - ios_.post(bind_handler( - init.completion_handler, error_code{}, bytes_transferred)); - } - else - { - in_.op.reset( - new read_op_impl< - handler_type, - MutableBufferSequence>{ - *this, buffers, init.completion_handler}); - } - } - return init.result.get(); -} - -template -std::size_t -pipe::stream::write_some(ConstBufferSequence const& buffers) -{ - static_assert( - is_const_buffer_sequence::value, - "ConstBufferSequence requirements not met"); - BOOST_ASSERT(!out_.eof); - error_code ec; - auto const bytes_transferred = write_some(buffers, ec); - if (ec) - BOOST_THROW_EXCEPTION(system_error{ec}); - return bytes_transferred; -} - -template -std::size_t -pipe::stream::write_some(ConstBufferSequence const& buffers, error_code& ec) -{ - static_assert( - is_const_buffer_sequence::value, - "ConstBufferSequence requirements not met"); - using boost::asio::buffer_copy; - using boost::asio::buffer_size; - BOOST_ASSERT(!out_.eof); - if (fc_ && fc_->fail(ec)) - return 0; - auto const n = (std::min)(buffer_size(buffers), write_max_); - std::unique_lock lock{out_.m}; - auto const bytes_transferred = buffer_copy(out_.b.prepare(n), buffers); - out_.b.commit(bytes_transferred); - lock.unlock(); - if (out_.op) - out_.op.get()->operator()(); - else - out_.cv.notify_all(); - ++nwrite; - ec.assign(0, ec.category()); - return bytes_transferred; -} - -template -async_return_type -pipe::stream::async_write_some( - ConstBufferSequence const& buffers, - WriteHandler&& handler) -{ - static_assert( - is_const_buffer_sequence::value, - "ConstBufferSequence requirements not met"); - using boost::asio::buffer_copy; - using boost::asio::buffer_size; - BOOST_ASSERT(!out_.eof); - async_completion init{handler}; - if (fc_) - { - error_code ec; - if (fc_->fail(ec)) - return ios_.post(bind_handler(init.completion_handler, ec, 0)); - } - auto const n = (std::min)(buffer_size(buffers), write_max_); - std::unique_lock lock{out_.m}; - auto const bytes_transferred = buffer_copy(out_.b.prepare(n), buffers); - out_.b.commit(bytes_transferred); - lock.unlock(); - if (out_.op) - out_.op.get()->operator()(); - else - out_.cv.notify_all(); - ++nwrite; - ios_.post( - bind_handler(init.completion_handler, error_code{}, bytes_transferred)); - return init.result.get(); -} - -} // namespace test -} // namespace beast - -#endif diff --git a/include/xrpl/beast/test/sig_wait.h b/include/xrpl/beast/test/sig_wait.h deleted file mode 100644 index 92720561fa6..00000000000 --- a/include/xrpl/beast/test/sig_wait.h +++ /dev/null @@ -1,29 +0,0 @@ -// -// Copyright (c) 2013-2017 Vinnie Falco (vinnie dot falco at gmail dot com) -// -// Distributed under the Boost Software License, Version 1.0. (See accompanying -// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) -// - -#ifndef BEAST_TEST_SIG_WAIT_HPP -#define BEAST_TEST_SIG_WAIT_HPP - -#include - -namespace beast { -namespace test { - -/// Block until SIGINT or SIGTERM is received. 
-inline void -sig_wait() -{ - boost::asio::io_service ios; - boost::asio::signal_set signals(ios, SIGINT, SIGTERM); - signals.async_wait([&](boost::system::error_code const&, int) {}); - ios.run(); -} - -} // namespace test -} // namespace beast - -#endif diff --git a/include/xrpl/beast/test/string_iostream.h b/include/xrpl/beast/test/string_iostream.h deleted file mode 100644 index bed6299a2bc..00000000000 --- a/include/xrpl/beast/test/string_iostream.h +++ /dev/null @@ -1,166 +0,0 @@ -// -// Copyright (c) 2013-2017 Vinnie Falco (vinnie dot falco at gmail dot com) -// -// Distributed under the Boost Software License, Version 1.0. (See accompanying -// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) -// - -#ifndef BEAST_TEST_STRING_IOSTREAM_HPP -#define BEAST_TEST_STRING_IOSTREAM_HPP - -#include -#include -#include -#include -#include -#include -#include -#include -#include - -namespace beast { -namespace test { - -/** A SyncStream and AsyncStream that reads from a string and writes to another - string. - - This class behaves like a socket, except that written data is - appended to a string exposed as a public data member, and when - data is read it comes from a string provided at construction. -*/ -class string_iostream -{ - std::string s_; - boost::asio::const_buffer cb_; - boost::asio::io_service& ios_; - std::size_t read_max_; - -public: - std::string str; - - string_iostream( - boost::asio::io_service& ios, - std::string s, - std::size_t read_max = (std::numeric_limits::max)()) - : s_(std::move(s)) - , cb_(boost::asio::buffer(s_)) - , ios_(ios) - , read_max_(read_max) - { - } - - boost::asio::io_service& - get_io_service() - { - return ios_; - } - - template - std::size_t - read_some(MutableBufferSequence const& buffers) - { - error_code ec; - auto const n = read_some(buffers, ec); - if (ec) - BOOST_THROW_EXCEPTION(system_error{ec}); - return n; - } - - template - std::size_t - read_some(MutableBufferSequence const& buffers, error_code& ec) - { - auto const n = - boost::asio::buffer_copy(buffers, buffer_prefix(read_max_, cb_)); - if (n > 0) - { - ec.assign(0, ec.category()); - cb_ = cb_ + n; - } - else - { - ec = boost::asio::error::eof; - } - return n; - } - - template - async_return_type - async_read_some(MutableBufferSequence const& buffers, ReadHandler&& handler) - { - auto const n = - boost::asio::buffer_copy(buffers, boost::asio::buffer(s_)); - error_code ec; - if (n > 0) - s_.erase(0, n); - else - ec = boost::asio::error::eof; - async_completion init{ - handler}; - ios_.post(bind_handler(init.completion_handler, ec, n)); - return init.result.get(); - } - - template - std::size_t - write_some(ConstBufferSequence const& buffers) - { - error_code ec; - auto const n = write_some(buffers, ec); - if (ec) - BOOST_THROW_EXCEPTION(system_error{ec}); - return n; - } - - template - std::size_t - write_some(ConstBufferSequence const& buffers, error_code& ec) - { - ec.assign(0, ec.category()); - using boost::asio::buffer_cast; - using boost::asio::buffer_size; - auto const n = buffer_size(buffers); - str.reserve(str.size() + n); - for (boost::asio::const_buffer buffer : buffers) - str.append(buffer_cast(buffer), buffer_size(buffer)); - return n; - } - - template - async_return_type - async_write_some(ConstBufferSequence const& buffers, WriteHandler&& handler) - { - error_code ec; - auto const bytes_transferred = write_some(buffers, ec); - async_completion init{ - handler}; - get_io_service().post( - bind_handler(init.completion_handler, ec, bytes_transferred)); - 
return init.result.get(); - } - - friend void - teardown( - websocket::teardown_tag, - string_iostream&, - boost::system::error_code& ec) - { - ec.assign(0, ec.category()); - } - - template - friend void - async_teardown( - websocket::teardown_tag, - string_iostream& stream, - TeardownHandler&& handler) - { - stream.get_io_service().post( - bind_handler(std::move(handler), error_code{})); - } -}; - -} // namespace test -} // namespace beast - -#endif diff --git a/include/xrpl/beast/test/string_istream.h b/include/xrpl/beast/test/string_istream.h deleted file mode 100644 index 83cb3cfef5d..00000000000 --- a/include/xrpl/beast/test/string_istream.h +++ /dev/null @@ -1,155 +0,0 @@ -// -// Copyright (c) 2013-2017 Vinnie Falco (vinnie dot falco at gmail dot com) -// -// Distributed under the Boost Software License, Version 1.0. (See accompanying -// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) -// - -#ifndef BEAST_TEST_STRING_ISTREAM_HPP -#define BEAST_TEST_STRING_ISTREAM_HPP - -#include -#include -#include -#include -#include -#include -#include -#include - -namespace beast { -namespace test { - -/** A SyncStream and AsyncStream that reads from a string. - - This class behaves like a socket, except that written data is simply - discarded, and when data is read it comes from a string provided - at construction. -*/ -class string_istream -{ - std::string s_; - boost::asio::const_buffer cb_; - boost::asio::io_service& ios_; - std::size_t read_max_; - -public: - string_istream( - boost::asio::io_service& ios, - std::string s, - std::size_t read_max = (std::numeric_limits::max)()) - : s_(std::move(s)) - , cb_(boost::asio::buffer(s_)) - , ios_(ios) - , read_max_(read_max) - { - } - - boost::asio::io_service& - get_io_service() - { - return ios_; - } - - template - std::size_t - read_some(MutableBufferSequence const& buffers) - { - error_code ec; - auto const n = read_some(buffers, ec); - if (ec) - BOOST_THROW_EXCEPTION(system_error{ec}); - return n; - } - - template - std::size_t - read_some(MutableBufferSequence const& buffers, error_code& ec) - { - auto const n = boost::asio::buffer_copy(buffers, cb_, read_max_); - if (n > 0) - { - ec.assign(0, ec.category()); - cb_ = cb_ + n; - } - else - { - ec = boost::asio::error::eof; - } - return n; - } - - template - async_return_type - async_read_some(MutableBufferSequence const& buffers, ReadHandler&& handler) - { - auto const n = - boost::asio::buffer_copy(buffers, boost::asio::buffer(s_)); - error_code ec; - if (n > 0) - s_.erase(0, n); - else - ec = boost::asio::error::eof; - async_completion init{ - handler}; - ios_.post(bind_handler(init.completion_handler, ec, n)); - return init.result.get(); - } - - template - std::size_t - write_some(ConstBufferSequence const& buffers) - { - error_code ec; - auto const n = write_some(buffers, ec); - if (ec) - BOOST_THROW_EXCEPTION(system_error{ec}); - return n; - } - - template - std::size_t - write_some(ConstBufferSequence const& buffers, error_code& ec) - { - ec.assign(0, ec.category()); - return boost::asio::buffer_size(buffers); - } - - template - async_return_type - async_write_some(ConstBuffeSequence const& buffers, WriteHandler&& handler) - { - async_completion init{ - handler}; - ios_.post(bind_handler( - init.completion_handler, - error_code{}, - boost::asio::buffer_size(buffers))); - return init.result.get(); - } - - friend void - teardown( - websocket::teardown_tag, - string_istream&, - boost::system::error_code& ec) - { - ec.assign(0, ec.category()); - } - - template - friend 
void - async_teardown( - websocket::teardown_tag, - string_istream& stream, - TeardownHandler&& handler) - { - stream.get_io_service().post( - bind_handler(std::move(handler), error_code{})); - } -}; - -} // namespace test -} // namespace beast - -#endif diff --git a/include/xrpl/beast/test/string_ostream.h b/include/xrpl/beast/test/string_ostream.h deleted file mode 100644 index 9edf69be88f..00000000000 --- a/include/xrpl/beast/test/string_ostream.h +++ /dev/null @@ -1,137 +0,0 @@ -// -// Copyright (c) 2013-2017 Vinnie Falco (vinnie dot falco at gmail dot com) -// -// Distributed under the Boost Software License, Version 1.0. (See accompanying -// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) -// - -#ifndef BEAST_TEST_STRING_OSTREAM_HPP -#define BEAST_TEST_STRING_OSTREAM_HPP - -#include -#include -#include -#include -#include -#include -#include -#include -#include - -namespace beast { -namespace test { - -class string_ostream -{ - boost::asio::io_service& ios_; - std::size_t write_max_; - -public: - std::string str; - - explicit string_ostream( - boost::asio::io_service& ios, - std::size_t write_max = (std::numeric_limits::max)()) - : ios_(ios), write_max_(write_max) - { - } - - boost::asio::io_service& - get_io_service() - { - return ios_; - } - - template - std::size_t - read_some(MutableBufferSequence const& buffers) - { - error_code ec; - auto const n = read_some(buffers, ec); - if (ec) - BOOST_THROW_EXCEPTION(system_error{ec}); - return n; - } - - template - std::size_t - read_some(MutableBufferSequence const&, error_code& ec) - { - ec = boost::asio::error::eof; - return 0; - } - - template - async_return_type - async_read_some(MutableBufferSequence const&, ReadHandler&& handler) - { - async_completion init{ - handler}; - ios_.post( - bind_handler(init.completion_handler, boost::asio::error::eof, 0)); - return init.result.get(); - } - - template - std::size_t - write_some(ConstBufferSequence const& buffers) - { - error_code ec; - auto const n = write_some(buffers, ec); - if (ec) - BOOST_THROW_EXCEPTION(system_error{ec}); - return n; - } - - template - std::size_t - write_some(ConstBufferSequence const& buffers, error_code& ec) - { - ec.assign(0, ec.category()); - using boost::asio::buffer_cast; - using boost::asio::buffer_size; - auto const n = (std::min)(buffer_size(buffers), write_max_); - str.reserve(str.size() + n); - for (boost::asio::const_buffer buffer : buffer_prefix(n, buffers)) - str.append(buffer_cast(buffer), buffer_size(buffer)); - return n; - } - - template - async_return_type - async_write_some(ConstBufferSequence const& buffers, WriteHandler&& handler) - { - error_code ec; - auto const bytes_transferred = write_some(buffers, ec); - async_completion init{ - handler}; - get_io_service().post( - bind_handler(init.completion_handler, ec, bytes_transferred)); - return init.result.get(); - } - - friend void - teardown( - websocket::teardown_tag, - string_ostream&, - boost::system::error_code& ec) - { - ec.assign(0, ec.category()); - } - - template - friend void - async_teardown( - websocket::teardown_tag, - string_ostream& stream, - TeardownHandler&& handler) - { - stream.get_io_service().post( - bind_handler(std::move(handler), error_code{})); - } -}; - -} // namespace test -} // namespace beast - -#endif diff --git a/include/xrpl/beast/test/test_allocator.h b/include/xrpl/beast/test/test_allocator.h deleted file mode 100644 index 598e22c2766..00000000000 --- a/include/xrpl/beast/test/test_allocator.h +++ /dev/null @@ -1,159 +0,0 @@ -// -// 
Copyright (c) 2013-2017 Vinnie Falco (vinnie dot falco at gmail dot com) -// -// Distributed under the Boost Software License, Version 1.0. (See accompanying -// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) -// - -#ifndef BEAST_TEST_TEST_ALLOCATOR_HPP -#define BEAST_TEST_TEST_ALLOCATOR_HPP - -#include -#include -#include - -namespace beast { -namespace test { - -struct test_allocator_info -{ - std::size_t id; - std::size_t ncopy = 0; - std::size_t nmove = 0; - std::size_t nmassign = 0; - std::size_t ncpassign = 0; - std::size_t nselect = 0; - - test_allocator_info() - : id([] { - static std::atomic sid(0); - return ++sid; - }()) - { - } -}; - -template -class test_allocator; - -template -struct test_allocator_base -{ -}; - -template -struct test_allocator_base -{ - static test_allocator - select_on_container_copy_construction( - test_allocator const& a) - { - return test_allocator{}; - } -}; - -template -class test_allocator - : public test_allocator_base -{ - std::shared_ptr info_; - - template - friend class test_allocator; - -public: - using value_type = T; - - using propagate_on_container_copy_assignment = - std::integral_constant; - - using propagate_on_container_move_assignment = - std::integral_constant; - - using propagate_on_container_swap = std::integral_constant; - - template - struct rebind - { - using other = test_allocator; - }; - - test_allocator() : info_(std::make_shared()) - { - } - - test_allocator(test_allocator const& u) noexcept : info_(u.info_) - { - ++info_->ncopy; - } - - template - test_allocator( - test_allocator const& u) noexcept - : info_(u.info_) - { - ++info_->ncopy; - } - - test_allocator(test_allocator&& t) : info_(t.info_) - { - ++info_->nmove; - } - - test_allocator& - operator=(test_allocator const& u) noexcept - { - info_ = u.info_; - ++info_->ncpassign; - return *this; - } - - test_allocator& - operator=(test_allocator&& u) noexcept - { - info_ = u.info_; - ++info_->nmassign; - return *this; - } - - value_type* - allocate(std::size_t n) - { - return static_cast(::operator new(n * sizeof(value_type))); - } - - void - deallocate(value_type* p, std::size_t) noexcept - { - ::operator delete(p); - } - - bool - operator==(test_allocator const& other) const - { - return id() == other.id() || Equal; - } - - bool - operator!=(test_allocator const& other) const - { - return !this->operator==(other); - } - - std::size_t - id() const - { - return info_->id; - } - - test_allocator_info const* - operator->() const - { - return info_.get(); - } -}; - -} // namespace test -} // namespace beast - -#endif diff --git a/include/xrpl/beast/unit_test/dstream.h b/include/xrpl/beast/unit_test/dstream.h deleted file mode 100644 index ff2036a12cb..00000000000 --- a/include/xrpl/beast/unit_test/dstream.h +++ /dev/null @@ -1,121 +0,0 @@ -// -// Copyright (c) 2013-2017 Vinnie Falco (vinnie dot falco at gmail dot com) -// -// Distributed under the Boost Software License, Version 1.0. 
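The test_allocator above exists so unit tests can assert how containers copy, move, and propagate their allocators; the shared test_allocator_info block is what survives copies and rebinds. Here is a minimal standalone analogue of that idea, deliberately not beast's exact template signature (whose extra policy flags control propagation behavior):

    #include <cstddef>
    #include <iostream>
    #include <memory>
    #include <vector>

    template <class T>
    struct counting_allocator
    {
        using value_type = T;

        // Shared across copies and rebinds, like test_allocator_info.
        std::shared_ptr<std::size_t> allocations =
            std::make_shared<std::size_t>(0);

        counting_allocator() = default;

        template <class U>
        counting_allocator(counting_allocator<U> const& u) noexcept
            : allocations(u.allocations)
        {
        }

        T*
        allocate(std::size_t n)
        {
            ++*allocations;
            return static_cast<T*>(::operator new(n * sizeof(T)));
        }

        void
        deallocate(T* p, std::size_t) noexcept
        {
            ::operator delete(p);
        }

        bool
        operator==(counting_allocator const& other) const
        {
            return allocations == other.allocations;
        }

        bool
        operator!=(counting_allocator const& other) const
        {
            return !(*this == other);
        }
    };

    int
    main()
    {
        counting_allocator<int> a;
        std::vector<int, counting_allocator<int>> v(a);
        v.push_back(1);
        v.resize(100);
        std::cout << *a.allocations << " allocations\n";
    }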
(See accompanying -// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) -// - -#ifndef BEAST_UNIT_TEST_DSTREAM_HPP -#define BEAST_UNIT_TEST_DSTREAM_HPP - -#include -#include -#include -#include -#include -#include - -#ifdef BOOST_WINDOWS -#include -//#include -#endif - -namespace beast { -namespace unit_test { - -#ifdef BOOST_WINDOWS - -namespace detail { - -template -class dstream_buf : public std::basic_stringbuf -{ - using ostream = std::basic_ostream; - - bool dbg_; - ostream& os_; - - template - void - write(T const*) = delete; - - void - write(char const* s) - { - if (dbg_) - /*boost::detail::winapi*/ ::OutputDebugStringA(s); - os_ << s; - } - - void - write(wchar_t const* s) - { - if (dbg_) - /*boost::detail::winapi*/ ::OutputDebugStringW(s); - os_ << s; - } - -public: - explicit dstream_buf(ostream& os) - : os_(os), dbg_(/*boost::detail::winapi*/ ::IsDebuggerPresent() != 0) - { - } - - ~dstream_buf() - { - sync(); - } - - int - sync() override - { - write(this->str().c_str()); - this->str(""); - return 0; - } -}; - -} // namespace detail - -/** std::ostream with Visual Studio IDE redirection. - - Instances of this stream wrap a specified `std::ostream` - (such as `std::cout` or `std::cerr`). If the IDE debugger - is attached when the stream is created, output will be - additionally copied to the Visual Studio Output window. -*/ -template < - class CharT, - class Traits = std::char_traits, - class Allocator = std::allocator> -class basic_dstream : public std::basic_ostream -{ - detail::dstream_buf buf_; - -public: - /** Construct a stream. - - @param os The output stream to wrap. - */ - explicit basic_dstream(std::ostream& os) - : std::basic_ostream(&buf_), buf_(os) - { - if (os.flags() & std::ios::unitbuf) - std::unitbuf(*this); - } -}; - -using dstream = basic_dstream; -using dwstream = basic_dstream; - -#else - -using dstream = std::ostream&; -using dwstream = std::wostream&; - -#endif - -} // namespace unit_test -} // namespace beast - -#endif diff --git a/include/xrpl/beast/utility/hash_pair.h b/include/xrpl/beast/utility/hash_pair.h deleted file mode 100644 index 08042b34778..00000000000 --- a/include/xrpl/beast/utility/hash_pair.h +++ /dev/null @@ -1,72 +0,0 @@ -//------------------------------------------------------------------------------ -/* - This file is part of Beast: https://github.com/vinniefalco/Beast - Copyright 2013, Vinnie Falco - - Permission to use, copy, modify, and/or distribute this software for any - purpose with or without fee is hereby granted, provided that the above - copyright notice and this permission notice appear in all copies. - - THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. -*/ -//============================================================================== - -#ifndef BEAST_UTILITY_HASH_PAIR_H_INCLUDED -#define BEAST_UTILITY_HASH_PAIR_H_INCLUDED - -#include -#include - -#include -#include - -namespace std { - -/** Specialization of std::hash for any std::pair type. 
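A usage sketch of what this specialization enabled: a std::pair key works in unordered containers with no extra hasher argument (include path as in this tree):

    #include <xrpl/beast/utility/hash_pair.h>
    #include <string>
    #include <unordered_map>
    #include <utility>

    int
    main()
    {
        // With std::hash<std::pair<...>> specialized, pairs are
        // directly usable as unordered_map keys.
        std::unordered_map<std::pair<int, std::string>, double> rates;
        rates[{1, "EUR"}] = 1.08;
        rates[{2, "GBP"}] = 1.27;
        return rates.size() == 2 ? 0 : 1;
    }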
*/ -template -struct hash> - : private boost::base_from_member, 0>, - private boost::base_from_member, 1> -{ -private: - using first_hash = boost::base_from_member, 0>; - using second_hash = boost::base_from_member, 1>; - -public: - hash() - { - } - - hash( - std::hash const& first_hash_, - std::hash const& second_hash_) - : first_hash(first_hash_), second_hash(second_hash_) - { - } - - std::size_t - operator()(std::pair const& value) - { - std::size_t result(first_hash::member(value.first)); - boost::hash_combine(result, second_hash::member(value.second)); - return result; - } - - std::size_t - operator()(std::pair const& value) const - { - std::size_t result(first_hash::member(value.first)); - boost::hash_combine(result, second_hash::member(value.second)); - return result; - } -}; - -} // namespace std - -#endif diff --git a/src/xrpld/peerfinder/detail/Logic.h b/src/xrpld/peerfinder/detail/Logic.h index 49b71a6a545..0403530ecf2 100644 --- a/src/xrpld/peerfinder/detail/Logic.h +++ b/src/xrpld/peerfinder/detail/Logic.h @@ -26,7 +26,6 @@ #include #include #include -#include #include #include #include diff --git a/src/xrpld/peerfinder/detail/Reporting.h b/src/xrpld/peerfinder/detail/Reporting.h deleted file mode 100644 index 25c36ea3f27..00000000000 --- a/src/xrpld/peerfinder/detail/Reporting.h +++ /dev/null @@ -1,49 +0,0 @@ -//------------------------------------------------------------------------------ -/* - This file is part of rippled: https://github.com/ripple/rippled - Copyright (c) 2012, 2013 Ripple Labs Inc. - - Permission to use, copy, modify, and/or distribute this software for any - purpose with or without fee is hereby granted, provided that the above - copyright notice and this permission notice appear in all copies. - - THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. -*/ -//============================================================================== - -#ifndef RIPPLE_PEERFINDER_REPORTING_H_INCLUDED -#define RIPPLE_PEERFINDER_REPORTING_H_INCLUDED - -namespace ripple { -namespace PeerFinder { - -/** Severity levels for test reporting. - This allows more fine grained control over reporting for diagnostics. -*/ -struct Reporting -{ - explicit Reporting() = default; - - // Report simulation parameters - static bool const params = true; - - // Report simulation crawl time-evolution - static bool const crawl = true; - - // Report nodes aggregate statistics - static bool const nodes = true; - - // Report nodes detailed information - static bool const dump_nodes = false; -}; - -} // namespace PeerFinder -} // namespace ripple - -#endif diff --git a/src/xrpld/peerfinder/sim/FunctionQueue.h b/src/xrpld/peerfinder/sim/FunctionQueue.h deleted file mode 100644 index d2fbb6dc26b..00000000000 --- a/src/xrpld/peerfinder/sim/FunctionQueue.h +++ /dev/null @@ -1,100 +0,0 @@ -//------------------------------------------------------------------------------ -/* - This file is part of rippled: https://github.com/ripple/rippled - Copyright (c) 2012, 2013 Ripple Labs Inc. 
- - Permission to use, copy, modify, and/or distribute this software for any - purpose with or without fee is hereby granted, provided that the above - copyright notice and this permission notice appear in all copies. - - THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. -*/ -//============================================================================== - -#ifndef RIPPLE_PEERFINDER_SIM_FUNCTIONQUEUE_H_INCLUDED -#define RIPPLE_PEERFINDER_SIM_FUNCTIONQUEUE_H_INCLUDED - -namespace ripple { -namespace PeerFinder { -namespace Sim { - -/** Maintains a queue of functors that can be called later. */ -class FunctionQueue -{ -public: - explicit FunctionQueue() = default; - -private: - class BasicWork - { - public: - virtual ~BasicWork() - { - } - virtual void - operator()() = 0; - }; - - template - class Work : public BasicWork - { - public: - explicit Work(Function f) : m_f(f) - { - } - void - operator()() - { - (m_f)(); - } - - private: - Function m_f; - }; - - std::list> m_work; - -public: - /** Returns `true` if there is no remaining work */ - bool - empty() - { - return m_work.empty(); - } - - /** Queue a function. - Function must be callable with this signature: - void (void) - */ - template - void - post(Function f) - { - m_work.emplace_back(std::make_unique>(f)); - } - - /** Run all pending functions. - The functions will be invoked in the order they were queued. - */ - void - run() - { - while (!m_work.empty()) - { - (*m_work.front())(); - m_work.pop_front(); - } - } -}; - -} // namespace Sim -} // namespace PeerFinder -} // namespace ripple - -#endif diff --git a/src/xrpld/peerfinder/sim/GraphAlgorithms.h b/src/xrpld/peerfinder/sim/GraphAlgorithms.h deleted file mode 100644 index b11ba42a7a7..00000000000 --- a/src/xrpld/peerfinder/sim/GraphAlgorithms.h +++ /dev/null @@ -1,76 +0,0 @@ -//------------------------------------------------------------------------------ -/* - This file is part of rippled: https://github.com/ripple/rippled - Copyright (c) 2012, 2013 Ripple Labs Inc. - - Permission to use, copy, modify, and/or distribute this software for any - purpose with or without fee is hereby granted, provided that the above - copyright notice and this permission notice appear in all copies. - - THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. -*/ -//============================================================================== - -#ifndef RIPPLE_PEERFINDER_SIM_GRAPHALGORITHMS_H_INCLUDED -#define RIPPLE_PEERFINDER_SIM_GRAPHALGORITHMS_H_INCLUDED - -namespace ripple { -namespace PeerFinder { -namespace Sim { - -template -struct VertexTraits; - -/** Call a function for each vertex in a connected graph. 
- Function will be called with this signature: - void (Vertex&, std::size_t diameter); -*/ - -template -void -breadth_first_traverse(Vertex& start, Function f) -{ - using Traits = VertexTraits; - using Edges = typename Traits::Edges; - using Edge = typename Traits::Edge; - - using Probe = std::pair; - using Work = std::deque; - using Visited = std::set; - Work work; - Visited visited; - work.emplace_back(&start, 0); - int diameter(0); - while (!work.empty()) - { - Probe const p(work.front()); - work.pop_front(); - if (visited.find(p.first) != visited.end()) - continue; - diameter = std::max(p.second, diameter); - visited.insert(p.first); - for (typename Edges::iterator iter(Traits::edges(*p.first).begin()); - iter != Traits::edges(*p.first).end(); - ++iter) - { - Vertex* v(Traits::vertex(*iter)); - if (visited.find(v) != visited.end()) - continue; - if (!iter->closed()) - work.emplace_back(v, p.second + 1); - } - f(*p.first, diameter); - } -} - -} // namespace Sim -} // namespace PeerFinder -} // namespace ripple - -#endif diff --git a/src/xrpld/peerfinder/sim/Message.h b/src/xrpld/peerfinder/sim/Message.h deleted file mode 100644 index 69be553c01a..00000000000 --- a/src/xrpld/peerfinder/sim/Message.h +++ /dev/null @@ -1,47 +0,0 @@ -//------------------------------------------------------------------------------ -/* - This file is part of rippled: https://github.com/ripple/rippled - Copyright (c) 2012, 2013 Ripple Labs Inc. - - Permission to use, copy, modify, and/or distribute this software for any - purpose with or without fee is hereby granted, provided that the above - copyright notice and this permission notice appear in all copies. - - THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. -*/ -//============================================================================== - -#ifndef RIPPLE_PEERFINDER_SIM_MESSAGE_H_INCLUDED -#define RIPPLE_PEERFINDER_SIM_MESSAGE_H_INCLUDED - -namespace ripple { -namespace PeerFinder { -namespace Sim { - -class Message -{ -public: - explicit Message(Endpoints const& endpoints) : m_payload(endpoints) - { - } - Endpoints const& - payload() const - { - return m_payload; - } - -private: - Endpoints m_payload; -}; - -} // namespace Sim -} // namespace PeerFinder -} // namespace ripple - -#endif diff --git a/src/xrpld/peerfinder/sim/NodeSnapshot.h b/src/xrpld/peerfinder/sim/NodeSnapshot.h deleted file mode 100644 index fbb08ece9a2..00000000000 --- a/src/xrpld/peerfinder/sim/NodeSnapshot.h +++ /dev/null @@ -1,37 +0,0 @@ -//------------------------------------------------------------------------------ -/* - This file is part of rippled: https://github.com/ripple/rippled - Copyright (c) 2012, 2013 Ripple Labs Inc. - - Permission to use, copy, modify, and/or distribute this software for any - purpose with or without fee is hereby granted, provided that the above - copyright notice and this permission notice appear in all copies. - - THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. -*/ -//============================================================================== - -#ifndef RIPPLE_PEERFINDER_SIM_NODESNAPSHOT_H_INCLUDED -#define RIPPLE_PEERFINDER_SIM_NODESNAPSHOT_H_INCLUDED - -namespace ripple { -namespace PeerFinder { -namespace Sim { - -/** A snapshot of a Node in the network simulator. */ -struct NodeSnapshot -{ - explicit NodeSnapshot() = default; -}; - -} // namespace Sim -} // namespace PeerFinder -} // namespace ripple - -#endif diff --git a/src/xrpld/peerfinder/sim/Params.h b/src/xrpld/peerfinder/sim/Params.h deleted file mode 100644 index c3c288bb985..00000000000 --- a/src/xrpld/peerfinder/sim/Params.h +++ /dev/null @@ -1,45 +0,0 @@ -//------------------------------------------------------------------------------ -/* - This file is part of rippled: https://github.com/ripple/rippled - Copyright (c) 2012, 2013 Ripple Labs Inc. - - Permission to use, copy, modify, and/or distribute this software for any - purpose with or without fee is hereby granted, provided that the above - copyright notice and this permission notice appear in all copies. - - THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. -*/ -//============================================================================== - -#ifndef RIPPLE_PEERFINDER_SIM_PARAMS_H_INCLUDED -#define RIPPLE_PEERFINDER_SIM_PARAMS_H_INCLUDED - -namespace ripple { -namespace PeerFinder { -namespace Sim { - -/** Defines the parameters for a network simulation. */ -struct Params -{ - Params() : steps(50), nodes(10), maxPeers(20), outPeers(9.5), firewalled(0) - { - } - - int steps; - int nodes; - int maxPeers; - double outPeers; - double firewalled; // [0, 1) -}; - -} // namespace Sim -} // namespace PeerFinder -} // namespace ripple - -#endif diff --git a/src/xrpld/peerfinder/sim/Predicates.h b/src/xrpld/peerfinder/sim/Predicates.h deleted file mode 100644 index 7bf125b383a..00000000000 --- a/src/xrpld/peerfinder/sim/Predicates.h +++ /dev/null @@ -1,87 +0,0 @@ -//------------------------------------------------------------------------------ -/* - This file is part of rippled: https://github.com/ripple/rippled - Copyright (c) 2012, 2013 Ripple Labs Inc. - - Permission to use, copy, modify, and/or distribute this software for any - purpose with or without fee is hereby granted, provided that the above - copyright notice and this permission notice appear in all copies. - - THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. -*/ -//============================================================================== - -#ifndef RIPPLE_PEERFINDER_SIM_PREDICATES_H_INCLUDED -#define RIPPLE_PEERFINDER_SIM_PREDICATES_H_INCLUDED - -namespace ripple { -namespace PeerFinder { -namespace Sim { - -/** UnaryPredicate, returns `true` if the 'to' node on a Link matches. */ -/** @{ */ -template -class is_remote_node_pred -{ -public: - is_remote_node_pred(Node const& n) : node(n) - { - } - template - bool - operator()(Link const& l) const - { - return &node == &l.remote_node(); - } - -private: - Node const& node; -}; - -template -is_remote_node_pred -is_remote_node(Node const& node) -{ - return is_remote_node_pred(node); -} - -template -is_remote_node_pred -is_remote_node(Node const* node) -{ - return is_remote_node_pred(*node); -} -/** @} */ - -//------------------------------------------------------------------------------ - -/** UnaryPredicate, `true` if the remote address matches. */ -class is_remote_endpoint -{ -public: - explicit is_remote_endpoint(beast::IP::Endpoint const& address) - : m_endpoint(address) - { - } - template - bool - operator()(Link const& link) const - { - return link.remote_endpoint() == m_endpoint; - } - -private: - beast::IP::Endpoint const m_endpoint; -}; - -} // namespace Sim -} // namespace PeerFinder -} // namespace ripple - -#endif
From 2f432e812cb773048530ebfaf2e0e6def51e3cc2 Mon Sep 17 00:00:00 2001 From: John Freeman Date: Wed, 28 Aug 2024 17:31:33 -0500 Subject: [PATCH 18/26] docs: Update options documentation (#5083) Co-authored-by: Elliot Lee --- BUILD.md | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/BUILD.md b/BUILD.md index a39df98a5a6..31755c36919 100644 --- a/BUILD.md +++ b/BUILD.md @@ -377,9 +377,10 @@ stored inside the build directory, as either of: | --- | ---| ---| | `assert` | OFF | Enable assertions. | `coverage` | OFF | Prepare the coverage report. | -| `tests` | ON | Build tests. | -| `unity` | ON | Configure a unity build. | | `san` | N/A | Enable a sanitizer with Clang. Choices are `thread` and `address`. | +| `tests` | OFF | Build tests. | +| `unity` | ON | Configure a unity build. | +| `xrpld` | OFF | Build the xrpld (`rippled`) application, and not just the libxrpl library. | [Unity builds][5] may be faster for the first build (at the cost of much more memory) since they concatenate sources into fewer
From 774148389467781aca7c01bac90af2fba870570c Mon Sep 17 00:00:00 2001 From: Mark Travis Date: Sun, 25 Aug 2024 19:10:07 -0700 Subject: [PATCH 19/26] Allow only 1 job queue slot for acquiring inbound ledger. * Log when duplicate concurrent inbound ledger acquisitions are filtered. * RAII for containers that track concurrent inbound ledger acquisitions. * Comment on when to asynchronously acquire inbound ledgers; this may always be OK, but should have further review.
* Other small logging changes Co-authored-by: Ed Hennis --- src/ripple/app/consensus/RCLConsensus.cpp | 8 +++-- src/ripple/app/consensus/RCLValidations.cpp | 6 ++-- src/ripple/app/ledger/InboundLedgers.h | 14 ++++++-- src/ripple/app/ledger/impl/InboundLedger.cpp | 2 +- src/ripple/app/ledger/impl/InboundLedgers.cpp | 35 +++++++++++++++++++ src/ripple/app/misc/NetworkOPs.cpp | 3 +- src/test/app/LedgerReplay_test.cpp | 8 +++++ 7 files changed, 68 insertions(+), 8 deletions(-) diff --git a/src/ripple/app/consensus/RCLConsensus.cpp b/src/ripple/app/consensus/RCLConsensus.cpp index c0ebe06dd7e..0335e9979d2 100644 --- a/src/ripple/app/consensus/RCLConsensus.cpp +++ b/src/ripple/app/consensus/RCLConsensus.cpp @@ -135,8 +135,12 @@ RCLConsensus::Adaptor::acquireLedger(LedgerHash const& hash) acquiringLedger_ = hash; app_.getJobQueue().addJob( - jtADVANCE, "getConsensusLedger", [id = hash, &app = app_]() { - app.getInboundLedgers().acquire( + jtADVANCE, + "getConsensusLedger1", + [id = hash, &app = app_, this]() { + JLOG(j_.debug()) + << "JOB advanceLedger getConsensusLedger1 started"; + app.getInboundLedgers().acquireAsync( id, 0, InboundLedger::Reason::CONSENSUS); }); } diff --git a/src/ripple/app/consensus/RCLValidations.cpp b/src/ripple/app/consensus/RCLValidations.cpp index 6b626569e69..9ffd8829ccc 100644 --- a/src/ripple/app/consensus/RCLValidations.cpp +++ b/src/ripple/app/consensus/RCLValidations.cpp @@ -142,8 +142,10 @@ RCLValidationsAdaptor::acquire(LedgerHash const& hash) Application* pApp = &app_; app_.getJobQueue().addJob( - jtADVANCE, "getConsensusLedger", [pApp, hash]() { - pApp->getInboundLedgers().acquire( + jtADVANCE, "getConsensusLedger2", [pApp, hash, this]() { + JLOG(j_.debug()) + << "JOB advanceLedger getConsensusLedger2 started"; + pApp->getInboundLedgers().acquireAsync( hash, 0, InboundLedger::Reason::CONSENSUS); }); return std::nullopt; diff --git a/src/ripple/app/ledger/InboundLedgers.h b/src/ripple/app/ledger/InboundLedgers.h index b12760153e2..aa173925893 100644 --- a/src/ripple/app/ledger/InboundLedgers.h +++ b/src/ripple/app/ledger/InboundLedgers.h @@ -23,6 +23,7 @@ #include #include #include +#include namespace ripple { @@ -37,11 +38,20 @@ class InboundLedgers virtual ~InboundLedgers() = default; - // VFALCO TODO Should this be called findOrAdd ? - // + // Callers should use this if they possibly need an authoritative + // response immediately. virtual std::shared_ptr acquire(uint256 const& hash, std::uint32_t seq, InboundLedger::Reason) = 0; + // Callers should use this if they are known to be executing on the Job + // Queue. TODO review whether all callers of acquire() can use this + // instead. Inbound ledger acquisition is asynchronous anyway. 
+ virtual void + acquireAsync( + uint256 const& hash, + std::uint32_t seq, + InboundLedger::Reason reason) = 0; + virtual std::shared_ptr find(LedgerHash const& hash) = 0; diff --git a/src/ripple/app/ledger/impl/InboundLedger.cpp b/src/ripple/app/ledger/impl/InboundLedger.cpp index 53475988cbf..c8dc005097b 100644 --- a/src/ripple/app/ledger/impl/InboundLedger.cpp +++ b/src/ripple/app/ledger/impl/InboundLedger.cpp @@ -536,7 +536,7 @@ InboundLedger::trigger(std::shared_ptr const& peer, TriggerReason reason) return; } - if (auto stream = journal_.trace()) + if (auto stream = journal_.debug()) { stream << "Trigger acquiring ledger " << hash_; if (peer) diff --git a/src/ripple/app/ledger/impl/InboundLedgers.cpp b/src/ripple/app/ledger/impl/InboundLedgers.cpp index b9b8b9fcfd2..f88137e8501 100644 --- a/src/ripple/app/ledger/impl/InboundLedgers.cpp +++ b/src/ripple/app/ledger/impl/InboundLedgers.cpp @@ -29,6 +29,7 @@ #include #include #include +#include #include #include #include @@ -149,6 +150,37 @@ class InboundLedgersImp : public InboundLedgers return ledger; } + void + acquireAsync( + uint256 const& hash, + std::uint32_t seq, + InboundLedger::Reason reason) override + { + std::unique_lock lock(acquiresMutex_); + try + { + if (pendingAcquires_.contains(hash)) + return; + pendingAcquires_.insert(hash); + lock.unlock(); + acquire(hash, seq, reason); + } + catch (std::exception const& e) + { + JLOG(j_.warn()) + << "Exception thrown for acquiring new inbound ledger " << hash + << ": " << e.what(); + } + catch (...) + { + JLOG(j_.warn()) + << "Unknown exception thrown for acquiring new inbound ledger " + << hash; + } + lock.lock(); + pendingAcquires_.erase(hash); + } + std::shared_ptr find(uint256 const& hash) override { @@ -441,6 +473,9 @@ class InboundLedgersImp : public InboundLedgers beast::insight::Counter mCounter; std::unique_ptr mPeerSetBuilder; + + std::set pendingAcquires_; + std::mutex acquiresMutex_; }; //------------------------------------------------------------------------------ diff --git a/src/ripple/app/misc/NetworkOPs.cpp b/src/ripple/app/misc/NetworkOPs.cpp index cd85bc9e4e1..5f72ce9cfa0 100644 --- a/src/ripple/app/misc/NetworkOPs.cpp +++ b/src/ripple/app/misc/NetworkOPs.cpp @@ -1730,7 +1730,8 @@ NetworkOPsImp::checkLastClosedLedger( } JLOG(m_journal.warn()) << "We are not running on the consensus ledger"; - JLOG(m_journal.info()) << "Our LCL: " << getJson({*ourClosed, {}}); + JLOG(m_journal.info()) << "Our LCL: " << ourClosed->info().hash + << getJson({*ourClosed, {}}); JLOG(m_journal.info()) << "Net LCL " << closedLedger; if ((mMode == OperatingMode::TRACKING) || (mMode == OperatingMode::FULL)) diff --git a/src/test/app/LedgerReplay_test.cpp b/src/test/app/LedgerReplay_test.cpp index aee24cd7d57..b312fb69fe0 100644 --- a/src/test/app/LedgerReplay_test.cpp +++ b/src/test/app/LedgerReplay_test.cpp @@ -106,6 +106,14 @@ class MagicInboundLedgers : public InboundLedgers return {}; } + virtual void + acquireAsync( + uint256 const& hash, + std::uint32_t seq, + InboundLedger::Reason reason) override + { + } + virtual std::shared_ptr find(LedgerHash const& hash) override { From fbbea9e6e25795a8a6bd1bf64b780771933a9579 Mon Sep 17 00:00:00 2001 From: Mark Travis Date: Sun, 25 Aug 2024 19:20:14 -0700 Subject: [PATCH 20/26] Allow only 1 job queue slot for each validation ledger check * refactor filtering of validations to specifically avoid concurrent checkAccept() calls for the same validation ledger hash. * Log when duplicate concurrent validation requests are filtered. 
* RAII for containers that track concurrent validation requests. --- src/ripple/app/consensus/RCLValidations.cpp | 20 ++++++++++-- src/ripple/app/consensus/RCLValidations.h | 8 ++++- src/ripple/app/misc/NetworkOPs.cpp | 35 ++++++++++++++++++++- 3 files changed, 59 insertions(+), 4 deletions(-) diff --git a/src/ripple/app/consensus/RCLValidations.cpp b/src/ripple/app/consensus/RCLValidations.cpp index 9ffd8829ccc..b5069bd25c9 100644 --- a/src/ripple/app/consensus/RCLValidations.cpp +++ b/src/ripple/app/consensus/RCLValidations.cpp @@ -161,7 +161,9 @@ void handleNewValidation( Application& app, std::shared_ptr const& val, - std::string const& source) + std::string const& source, + BypassAccept const bypassAccept, + std::optional j) { auto const& signingKey = val->getSignerPublic(); auto const& hash = val->getLedgerHash(); @@ -186,7 +188,21 @@ handleNewValidation( if (outcome == ValStatus::current) { if (val->isTrusted()) - app.getLedgerMaster().checkAccept(hash, seq); + { + if (bypassAccept == BypassAccept::yes) + { + assert(j.has_value()); + if (j.has_value()) + { + JLOG(j->trace()) << "Bypassing checkAccept for validation " + << val->getLedgerHash(); + } + } + else + { + app.getLedgerMaster().checkAccept(hash, seq); + } + } return; } diff --git a/src/ripple/app/consensus/RCLValidations.h b/src/ripple/app/consensus/RCLValidations.h index 93628fe1695..e141731e14d 100644 --- a/src/ripple/app/consensus/RCLValidations.h +++ b/src/ripple/app/consensus/RCLValidations.h @@ -25,12 +25,16 @@ #include #include #include +#include +#include #include namespace ripple { class Application; +enum class BypassAccept : bool { no = false, yes }; + /** Wrapper over STValidation for generic Validation code Wraps an STValidation for compatibility with the generic validation code. @@ -248,7 +252,9 @@ void handleNewValidation( Application& app, std::shared_ptr const& val, - std::string const& source); + std::string const& source, + BypassAccept const bypassAccept = BypassAccept::no, + std::optional j = std::nullopt); } // namespace ripple diff --git a/src/ripple/app/misc/NetworkOPs.cpp b/src/ripple/app/misc/NetworkOPs.cpp index 5f72ce9cfa0..e2f5ef1a0b3 100644 --- a/src/ripple/app/misc/NetworkOPs.cpp +++ b/src/ripple/app/misc/NetworkOPs.cpp @@ -72,8 +72,10 @@ #include #include +#include #include #include +#include #include #include #include @@ -781,6 +783,9 @@ class NetworkOPsImp final : public NetworkOPs StateAccounting accounting_{}; + std::set pendingValidations_; + std::mutex validationsMutex_; + private: struct Stats { @@ -2308,7 +2313,35 @@ NetworkOPsImp::recvValidation( JLOG(m_journal.trace()) << "recvValidation " << val->getLedgerHash() << " from " << source; - handleNewValidation(app_, val, source); + std::unique_lock lock(validationsMutex_); + BypassAccept bypassAccept = BypassAccept::no; + try + { + if (pendingValidations_.contains(val->getLedgerHash())) + bypassAccept = BypassAccept::yes; + else + pendingValidations_.insert(val->getLedgerHash()); + lock.unlock(); + handleNewValidation(app_, val, source, bypassAccept, m_journal); + } + catch (std::exception const& e) + { + JLOG(m_journal.warn()) + << "Exception thrown for handling new validation " + << val->getLedgerHash() << ": " << e.what(); + } + catch (...) 
+ { + JLOG(m_journal.warn()) + << "Unknown exception thrown for handling new validation " + << val->getLedgerHash(); + } + if (bypassAccept == BypassAccept::no) + { + lock.lock(); + pendingValidations_.erase(val->getLedgerHash()); + lock.unlock(); + } pubValidation(val); From 85214bdf811e1ff11a02977211fe59b07619b856 Mon Sep 17 00:00:00 2001 From: Ed Hennis Date: Fri, 23 Aug 2024 17:19:52 -0400 Subject: [PATCH 21/26] Set version to 2.2.2 --- src/ripple/protocol/impl/BuildInfo.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/ripple/protocol/impl/BuildInfo.cpp b/src/ripple/protocol/impl/BuildInfo.cpp index e6359cd3a52..0086824b7e2 100644 --- a/src/ripple/protocol/impl/BuildInfo.cpp +++ b/src/ripple/protocol/impl/BuildInfo.cpp @@ -33,7 +33,7 @@ namespace BuildInfo { // and follow the format described at http://semver.org/ //------------------------------------------------------------------------------ // clang-format off -char const* const versionString = "2.2.1" +char const* const versionString = "2.2.2" // clang-format on #if defined(DEBUG) || defined(SANITIZER) From cc0177be8712ca367134aa6599b7328bbc5e6bc5 Mon Sep 17 00:00:00 2001 From: Ed Hennis Date: Tue, 3 Sep 2024 18:37:47 -0400 Subject: [PATCH 22/26] Update Release Notes for 2.2.1 and 2.2.2 --- RELEASENOTES.md | 109 ++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 109 insertions(+) diff --git a/RELEASENOTES.md b/RELEASENOTES.md index cfc0e618cfe..72eb4687232 100644 --- a/RELEASENOTES.md +++ b/RELEASENOTES.md @@ -6,6 +6,115 @@ This document contains the release notes for `rippled`, the reference server imp Have new ideas? Need help with setting up your node? [Please open an issue here](https://github.com/xrplf/rippled/issues/new/choose). +# Version 2.2.2 + +Version 2.2.2 of `rippled`, the reference server implementation of the XRP Ledger protocol, is now available. This release fixes an ongoing issue with Mainnet where validators can stall during consensus processing due to lock contention, preventing ledgers from being validated for up to two minutes. There are no new amendments in this release. + +[Sign Up for Future Release Announcements](https://groups.google.com/g/ripple-server) + + + +## Action Required + +If you run an XRP Ledger validator, upgrade to version 2.2.2 as soon as possible to ensure stable and uninterrupted network behavior. + +Additionally, five amendments introduced in version 2.2.0 are open for voting according to the XRP Ledger's [amendment process](https://xrpl.org/amendments.html), which enables protocol changes following two weeks of >80% support from trusted validators. If you operate an XRP Ledger server older than version 2.2.0, upgrade by September 17, 2024 to ensure service continuity. The exact time that protocol changes take effect depends on the voting decisions of the decentralized network. Version 2.2.2 is recommended because of known bugs affecting stability of versions 2.2.0 and 2.2.1. + +If you operate a Clio server, Clio needs to be updated to 2.1.2 before updating to rippled 2.2.0. Clio will be blocked if it is not updated. 
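As background for the two changelog entries below: both fixes share a single deduplication pattern, in which a mutex-guarded set of in-flight ledger hashes ensures that at most one job queue slot works on a given hash at a time. The following is a minimal sketch with hypothetical names (`Hash`, `pending`, `runOnce`), not the actual rippled code:

```cpp
#include <cstdint>
#include <mutex>
#include <set>

using Hash = std::uint64_t;  // stand-in for rippled's 256-bit hash type

std::set<Hash> pending;  // hashes with work already in flight
std::mutex pendingMutex;

// Run `work` for `hash` unless an identical request is already in flight.
template <class Work>
void
runOnce(Hash const& hash, Work&& work)
{
    {
        std::lock_guard lock(pendingMutex);
        if (!pending.insert(hash).second)
            return;  // duplicate request: filtered (the real code logs this)
    }
    try
    {
        work();  // e.g. acquire the inbound ledger, or check the validation
    }
    catch (...)
    {
        // swallow so the hash is always erased below; the real code logs here
    }
    std::lock_guard lock(pendingMutex);
    pending.erase(hash);
}
```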
+ +## Changelog + +### Amendments and New Features + +- None + +### Bug Fixes and Performance Improvements + +- Allow only 1 job queue slot for acquiring inbound ledger [#5115](https://github.com/XRPLF/rippled/pull/5115) ([7741483](https://github.com/XRPLF/rippled/commit/774148389467781aca7c01bac90af2fba870570c)) + +- Allow only 1 job queue slot for each validation ledger check [#5115](https://github.com/XRPLF/rippled/pull/5115) ([fbbea9e](https://github.com/XRPLF/rippled/commit/fbbea9e6e25795a8a6bd1bf64b780771933a9579)) + +### Other improvements + + - Track latencies of certain code blocks, and log if they take too long [#5115](https://github.com/XRPLF/rippled/pull/5115) ([00ed7c9](https://github.com/XRPLF/rippled/commit/00ed7c942436f02644a13169002b5123f4e2a116)) + +### Docs and Build System + +- None + +### GitHub + +The public source code repository for `rippled` is hosted on GitHub at . + +We welcome all contributions and invite everyone to join the community of XRP Ledger developers to help build the Internet of Value. + + +## Credits + +The following people contributed directly to this release: + +Mark Travis +Valentin Balaschenko <13349202+vlntb@users.noreply.github.com> + +Bug Bounties and Responsible Disclosures: + +We welcome reviews of the `rippled` code and urge researchers to responsibly disclose any issues they may find. + +To report a bug, please send a detailed report to: + +# Version 2.2.1 + +Version 2.2.1 of `rippled`, the reference server implementation of the XRP Ledger protocol, is now available. This release fixes a critical bug introduced in 2.2.0 handling some types of RPC requests. + +[Sign Up for Future Release Announcements](https://groups.google.com/g/ripple-server) + + + +## Action Required + +If you run an XRP Ledger validator, upgrade to version 2.2.1 as soon as possible to ensure stable and uninterrupted network behavior. + +Additionally, five amendments introduced in version 2.2.0 are open for voting according to the XRP Ledger's [amendment process](https://xrpl.org/amendments.html), which enables protocol changes following two weeks of >80% support from trusted validators. If you operate an XRP Ledger server older than version 2.2.0, upgrade by August 14, 2024 to ensure service continuity. The exact time that protocol changes take effect depends on the voting decisions of the decentralized network. Version 2.2.1 is recommended because of known bugs affecting stability of versions 2.2.0. + +If you operate a Clio server, Clio needs to be updated to 2.2.2 before updating to rippled 2.2.1. Clio will be blocked if it is not updated. + +## Changelog + +### Amendments and New Features + +- None + +### Bug Fixes and Performance Improvements + +- Improve error handling in some RPC commands. [#5078](https://github.com/XRPLF/rippled/pull/5078) + +- Use error codes throughout fast Base58 implementation. [#5078](https://github.com/XRPLF/rippled/pull/5078) + +### Docs and Build System + +- None + +### GitHub + +The public source code repository for `rippled` is hosted on GitHub at . + +We welcome all contributions and invite everyone to join the community of XRP Ledger developers to help build the Internet of Value. + + +## Credits + +The following people contributed directly to this release: + +John Freeman +Mayukha Vadari + +Bug Bounties and Responsible Disclosures: + +We welcome reviews of the `rippled` code and urge researchers to responsibly disclose any issues they may find. 
+ +To report a bug, please send a detailed report to: + + # Version 2.2.0 Version 2.2.0 of `rippled`, the reference server implementation of the XRP Ledger protocol, is now available. This release adds performance optimizations, several bug fixes, and introduces the `featurePriceOracle`, `fixEmptyDID`, `fixXChainRewardRounding`, `fixPreviousTxnID`, and `fixAMMv1_1` amendments. From 23991c99c31ed93ee875a077c7b8312a3ee7f53d Mon Sep 17 00:00:00 2001 From: Ed Hennis Date: Wed, 11 Sep 2024 11:29:06 +0100 Subject: [PATCH 23/26] test: Retry RPC commands to try to fix MacOS CI jobs (#5120) * Retry some failed RPC connections / commands in unit tests * Remove orphaned `getAccounts` function Co-authored-by: John Freeman --- src/test/jtx/impl/Env.cpp | 28 ++++++++++++++++++---------- src/xrpld/app/misc/NetworkOPs.cpp | 18 ------------------ 2 files changed, 18 insertions(+), 28 deletions(-) diff --git a/src/test/jtx/impl/Env.cpp b/src/test/jtx/impl/Env.cpp index 6f0f9e3fc73..ef5a2124e24 100644 --- a/src/test/jtx/impl/Env.cpp +++ b/src/test/jtx/impl/Env.cpp @@ -317,16 +317,24 @@ Env::submit(JTx const& jt) auto const jr = [&]() { if (jt.stx) { - txid_ = jt.stx->getTransactionID(); - Serializer s; - jt.stx->add(s); - auto const jr = rpc("submit", strHex(s.slice())); - - parsedResult = parseResult(jr); - test.expect(parsedResult.ter, "ter uninitialized!"); - ter_ = parsedResult.ter.value_or(telENV_RPC_FAILED); - - return jr; + // We shouldn't need to retry, but it fixes the test on macOS for + // the moment. + int retries = 3; + do + { + txid_ = jt.stx->getTransactionID(); + Serializer s; + jt.stx->add(s); + auto const jr = rpc("submit", strHex(s.slice())); + + parsedResult = parseResult(jr); + test.expect(parsedResult.ter, "ter uninitialized!"); + ter_ = parsedResult.ter.value_or(telENV_RPC_FAILED); + if (ter_ != telENV_RPC_FAILED || + parsedResult.rpcCode != rpcINTERNAL || + jt.ter == telENV_RPC_FAILED || --retries <= 0) + return jr; + } while (true); } else { diff --git a/src/xrpld/app/misc/NetworkOPs.cpp b/src/xrpld/app/misc/NetworkOPs.cpp index 01906d306cd..02eb0435b57 100644 --- a/src/xrpld/app/misc/NetworkOPs.cpp +++ b/src/xrpld/app/misc/NetworkOPs.cpp @@ -2783,24 +2783,6 @@ NetworkOPsImp::pubProposedTransaction( pubProposedAccountTransaction(ledger, transaction, result); } -static void -getAccounts(Json::Value const& jvObj, std::vector& accounts) -{ - for (auto& jv : jvObj) - { - if (jv.isObject()) - { - getAccounts(jv, accounts); - } - else if (jv.isString()) - { - auto account = RPC::accountFromStringStrict(jv.asString()); - if (account) - accounts.push_back(*account); - } - } -} - void NetworkOPsImp::pubLedger(std::shared_ptr const& lpAccepted) { From 9abc4868d6df5e81e4799ccabb4824cdfee90945 Mon Sep 17 00:00:00 2001 From: "J. Scott Branson" <18340247+jscottbranson@users.noreply.github.com> Date: Sat, 14 Sep 2024 14:38:25 -0400 Subject: [PATCH 24/26] Update SQLite3 max_page_count to match current defaults (#5114) When rippled initiates a connection to SQLite3, rippled sends a "PRAGMA" statement defining the maximum number of pages allowed in the database. Update the max_page_count so it is consistent with the default for newer versions of SQLite3. Increasing max_page_count is critical for keeping full history servers online. 
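For illustration only, a small standalone program (not part of this patch) that reads the values involved through the public SQLite C API; the `transaction.db` path here is an assumption:

```cpp
#include <sqlite3.h>

#include <cstdio>

// Print each column of a PRAGMA result row.
static int
printRow(void*, int argc, char** argv, char** colName)
{
    for (int i = 0; i < argc; ++i)
        std::printf("%s = %s\n", colName[i], argv[i] ? argv[i] : "NULL");
    return 0;
}

int
main()
{
    sqlite3* db = nullptr;
    if (sqlite3_open("transaction.db", &db) != SQLITE_OK)
        return 1;
    // The database file size is capped at page_size * max_page_count bytes.
    sqlite3_exec(db, "PRAGMA page_size;", printRow, nullptr, nullptr);
    sqlite3_exec(db, "PRAGMA page_count;", printRow, nullptr, nullptr);
    sqlite3_exec(db, "PRAGMA max_page_count;", printRow, nullptr, nullptr);
    sqlite3_close(db);
    return 0;
}
```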
Fix #5102 --- src/ripple/app/main/DBInit.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/ripple/app/main/DBInit.h b/src/ripple/app/main/DBInit.h index 3d2f42717b2..43b29312e63 100644 --- a/src/ripple/app/main/DBInit.h +++ b/src/ripple/app/main/DBInit.h @@ -77,7 +77,7 @@ inline constexpr auto TxDBName{"transaction.db"}; inline constexpr std::array TxDBPragma { "PRAGMA page_size=4096;", "PRAGMA journal_size_limit=1582080;", - "PRAGMA max_page_count=2147483646;", + "PRAGMA max_page_count=4294967294;", #if (ULONG_MAX > UINT_MAX) && !defined(NO_SQLITE_MMAP) "PRAGMA mmap_size=17179869184;" From 68e1be3cf544bc8f50283b0bfecba60f8370dbf2 Mon Sep 17 00:00:00 2001 From: Elliot Lee Date: Sat, 14 Sep 2024 13:08:18 -0700 Subject: [PATCH 25/26] Set version to 2.2.3 --- RELEASENOTES.md | 167 +++++++++++++++++++++++++ src/ripple/protocol/impl/BuildInfo.cpp | 2 +- 2 files changed, 168 insertions(+), 1 deletion(-) diff --git a/RELEASENOTES.md b/RELEASENOTES.md index cfc0e618cfe..37f884b578f 100644 --- a/RELEASENOTES.md +++ b/RELEASENOTES.md @@ -6,6 +6,173 @@ This document contains the release notes for `rippled`, the reference server imp Have new ideas? Need help with setting up your node? [Please open an issue here](https://github.com/xrplf/rippled/issues/new/choose). +# Version 2.2.3 + +Version 2.2.3 of `rippled`, the reference server implementation of the XRP Ledger protocol, is now available. This release fixes a problem that can cause full-history servers to run out of space in their SQLite databases, depending on configuration. There are no new amendments in this release. + +[Sign Up for Future Release Announcements](https://groups.google.com/g/ripple-server) + + + +## Background + +The `rippled` server uses a SQLite database for tracking transactions, in addition to the main data store (usually NuDB) for ledger data. In servers keeping a large amount of history, this database can run out of space based on the configured number and size of database pages, even if the machine has disk space available. Based on the size of full history on Mainnet, servers with the default SQLite page size of 4096 may now run out of space if they store full history. In this case, your server may shut down with an error such as the following: + +```text +Free SQLite space for transaction db is less than 512MB. To fix this, rippled + must be executed with the vacuum parameter before restarting. + Note that this activity can take multiple days, depending on database size. +``` + +The exact timing of when a server runs out of space can vary based on a few factors. Server operators who encountered a similar problem in 2018 and followed steps to [increase the SQLite transaction database page size issue](../../../docs/infrastructure/troubleshooting/fix-sqlite-tx-db-page-size-issue) may not encounter this problem at all. The `--vacuum` commandline option to `rippled` from that time may work to free up space in the database, but requires extended downtime. + +Version 2.2.3 of `rippled` reconfigures the maximum number of SQLite pages so that the issue does not occur. + +Clio servers providing full history are not affected by this issue. + + +## Action Required + +If you run an [XRP Ledger full history server](https://xrpl.org/docs/infrastructure/configuration/data-retention/configure-full-history), upgrading to version 2.2.3 may prevent the server from crashing when `transaction.db` exceeds approximately 8.7 terabytes. 
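That figure follows from SQLite's size cap of `page_size * max_page_count`: with the 4096-byte pages rippled uses, the previous limit of 2147483646 pages caps the file at 8,796,093,014,016 bytes (roughly the 8.7 TB cited above), and the new limit of 4294967294 pages raises the cap to about 17.6 TB. A quick, freestanding check of the arithmetic (not taken from the release notes themselves):

```cpp
#include <cstdint>

constexpr std::uint64_t pageSize = 4096;  // bytes per SQLite page

// Previous cap: 2147483646 pages * 4096 bytes, just under 8 TiB (~8.8 TB).
static_assert(std::uint64_t{2147483646} * pageSize == 8'796'093'014'016ULL);

// New cap: 4294967294 pages * 4096 bytes, just under 16 TiB (~17.6 TB).
static_assert(std::uint64_t{4294967294} * pageSize == 17'592'186'036'224ULL);
```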
+ +Additionally, five amendments introduced in version 2.2.0 are open for voting according to the XRP Ledger's [amendment process](https://xrpl.org/amendments.html), which enables protocol changes following two weeks of >80% support from trusted validators. If you operate an XRP Ledger server older than version 2.2.0, upgrade by Sep 23, 2024 to ensure service continuity. The exact time that protocol changes take effect depends on the voting decisions of the decentralized network. + +## Changelog + +### Bug Fixes + +- Update SQLite3 max_page_count to match current defaults ([#5114](https://github.com/XRPLF/rippled/pull/5114)) + +### GitHub + +The public source code repository for `rippled` is hosted on GitHub at . + +We welcome all contributions and invite everyone to join the community of XRP Ledger developers to help build the Internet of Value. + + +## Credits + +The following people contributed directly to this release: + +J. Scott Branson + + +Bug Bounties and Responsible Disclosures: + +We welcome reviews of the `rippled` code and urge researchers to responsibly disclose any issues they may find. + +To report a bug, please send a detailed report to: + + +# Version 2.2.2 + +Version 2.2.2 of `rippled`, the reference server implementation of the XRP Ledger protocol, is now available. This release fixes an ongoing issue with Mainnet where validators can stall during consensus processing due to lock contention, preventing ledgers from being validated for up to two minutes. There are no new amendments in this release. + +[Sign Up for Future Release Announcements](https://groups.google.com/g/ripple-server) + + + +## Action Required + +If you run an XRP Ledger validator, upgrade to version 2.2.2 as soon as possible to ensure stable and uninterrupted network behavior. + +Additionally, five amendments introduced in version 2.2.0 are open for voting according to the XRP Ledger's [amendment process](https://xrpl.org/amendments.html), which enables protocol changes following two weeks of >80% support from trusted validators. If you operate an XRP Ledger server older than version 2.2.0, upgrade by September 17, 2024 to ensure service continuity. The exact time that protocol changes take effect depends on the voting decisions of the decentralized network. Version 2.2.2 is recommended because of known bugs affecting stability of versions 2.2.0 and 2.2.1. + +If you operate a Clio server, Clio needs to be updated to 2.1.2 before updating to rippled 2.2.0. Clio will be blocked if it is not updated. + +## Changelog + +### Amendments and New Features + +- None + +### Bug Fixes and Performance Improvements + +- Allow only 1 job queue slot for acquiring inbound ledger [#5115](https://github.com/XRPLF/rippled/pull/5115) ([7741483](https://github.com/XRPLF/rippled/commit/774148389467781aca7c01bac90af2fba870570c)) + +- Allow only 1 job queue slot for each validation ledger check [#5115](https://github.com/XRPLF/rippled/pull/5115) ([fbbea9e](https://github.com/XRPLF/rippled/commit/fbbea9e6e25795a8a6bd1bf64b780771933a9579)) + +### Other improvements + + - Track latencies of certain code blocks, and log if they take too long [#5115](https://github.com/XRPLF/rippled/pull/5115) ([00ed7c9](https://github.com/XRPLF/rippled/commit/00ed7c942436f02644a13169002b5123f4e2a116)) + +### Docs and Build System + +- None + +### GitHub + +The public source code repository for `rippled` is hosted on GitHub at . 
+ +We welcome all contributions and invite everyone to join the community of XRP Ledger developers to help build the Internet of Value. + + +## Credits + +The following people contributed directly to this release: + +Mark Travis +Valentin Balaschenko <13349202+vlntb@users.noreply.github.com> + +Bug Bounties and Responsible Disclosures: + +We welcome reviews of the `rippled` code and urge researchers to responsibly disclose any issues they may find. + +To report a bug, please send a detailed report to: + +# Version 2.2.1 + +Version 2.2.1 of `rippled`, the reference server implementation of the XRP Ledger protocol, is now available. This release fixes a critical bug introduced in 2.2.0 handling some types of RPC requests. + +[Sign Up for Future Release Announcements](https://groups.google.com/g/ripple-server) + + + +## Action Required + +If you run an XRP Ledger validator, upgrade to version 2.2.1 as soon as possible to ensure stable and uninterrupted network behavior. + +Additionally, five amendments introduced in version 2.2.0 are open for voting according to the XRP Ledger's [amendment process](https://xrpl.org/amendments.html), which enables protocol changes following two weeks of >80% support from trusted validators. If you operate an XRP Ledger server older than version 2.2.0, upgrade by August 14, 2024 to ensure service continuity. The exact time that protocol changes take effect depends on the voting decisions of the decentralized network. Version 2.2.1 is recommended because of known bugs affecting stability of versions 2.2.0. + +If you operate a Clio server, Clio needs to be updated to 2.2.2 before updating to rippled 2.2.1. Clio will be blocked if it is not updated. + +## Changelog + +### Amendments and New Features + +- None + +### Bug Fixes and Performance Improvements + +- Improve error handling in some RPC commands. [#5078](https://github.com/XRPLF/rippled/pull/5078) + +- Use error codes throughout fast Base58 implementation. [#5078](https://github.com/XRPLF/rippled/pull/5078) + +### Docs and Build System + +- None + +### GitHub + +The public source code repository for `rippled` is hosted on GitHub at . + +We welcome all contributions and invite everyone to join the community of XRP Ledger developers to help build the Internet of Value. + + +## Credits + +The following people contributed directly to this release: + +John Freeman +Mayukha Vadari + +Bug Bounties and Responsible Disclosures: + +We welcome reviews of the `rippled` code and urge researchers to responsibly disclose any issues they may find. + +To report a bug, please send a detailed report to: + + # Version 2.2.0 Version 2.2.0 of `rippled`, the reference server implementation of the XRP Ledger protocol, is now available. This release adds performance optimizations, several bug fixes, and introduces the `featurePriceOracle`, `fixEmptyDID`, `fixXChainRewardRounding`, `fixPreviousTxnID`, and `fixAMMv1_1` amendments. 
diff --git a/src/ripple/protocol/impl/BuildInfo.cpp b/src/ripple/protocol/impl/BuildInfo.cpp index 0086824b7e2..fb86609f026 100644 --- a/src/ripple/protocol/impl/BuildInfo.cpp +++ b/src/ripple/protocol/impl/BuildInfo.cpp @@ -33,7 +33,7 @@ namespace BuildInfo { // and follow the format described at http://semver.org/ //------------------------------------------------------------------------------ // clang-format off -char const* const versionString = "2.2.2" +char const* const versionString = "2.2.3" // clang-format on #if defined(DEBUG) || defined(SANITIZER) From 9a6af9c431808c182778bb641c03e092ff7151d4 Mon Sep 17 00:00:00 2001 From: luozexuan Date: Tue, 17 Sep 2024 04:53:19 +0800 Subject: [PATCH 26/26] chore: fix typos in comments (#5094) Signed-off-by: luozexuan --- src/libxrpl/protocol/tokens.cpp | 2 +- src/test/csf/README.md | 2 +- src/test/csf/random.h | 2 +- src/test/ledger/Directory_test.cpp | 2 +- src/xrpld/app/tx/detail/Transactor.cpp | 2 +- 5 files changed, 5 insertions(+), 5 deletions(-) diff --git a/src/libxrpl/protocol/tokens.cpp b/src/libxrpl/protocol/tokens.cpp index 454ed803f75..ccae1fb8ed2 100644 --- a/src/libxrpl/protocol/tokens.cpp +++ b/src/libxrpl/protocol/tokens.cpp @@ -91,7 +91,7 @@ algorithm that converts a number to coefficients from base B2. There is a useful shortcut that can be used if one of the bases is a power of the other base. If B1 == B2^G, then each coefficient from base B1 can be -converted to base B2 independently to create a a group of "G" B2 coefficient. +converted to base B2 independently to create a group of "G" B2 coefficient. These coefficients can be simply concatenated together. Since 16 == 2^4, this property is what makes base 16 useful when dealing with binary numbers. For example consider converting the base 16 number "93" to binary. The base 16 diff --git a/src/test/csf/README.md b/src/test/csf/README.md index ff6bdc5dfac..a4b69abab57 100644 --- a/src/test/csf/README.md +++ b/src/test/csf/README.md @@ -144,7 +144,7 @@ sim.collectors.add(simDur); ``` The next lines add a single collector to the simulation. The -`SimDurationCollector` is a a simple example collector which tracks the total +`SimDurationCollector` is a simple example collector which tracks the total duration of the simulation. More generally, a collector is any class that implements `void on(NodeID, SimTime, Event)` for all [Events](./events.h) emitted by a Peer. 
Events are arbitrary types used to indicate some action or diff --git a/src/test/csf/random.h b/src/test/csf/random.h index 5a512400704..e78bbf515be 100644 --- a/src/test/csf/random.h +++ b/src/test/csf/random.h @@ -121,7 +121,7 @@ makeSelector(Iter first, Iter last, std::vector const& w, Generator& g) } //------------------------------------------------------------------------------ -// Additional distributions of interest not defined in in +// Additional distributions of interest not defined in /** Constant "distribution" that always returns the same value */ diff --git a/src/test/ledger/Directory_test.cpp b/src/test/ledger/Directory_test.cpp index 4904b6e6fbf..bea394f2f36 100644 --- a/src/test/ledger/Directory_test.cpp +++ b/src/test/ledger/Directory_test.cpp @@ -29,7 +29,7 @@ namespace test { struct Directory_test : public beast::unit_test::suite { - // Map [0-15576] into a a unique 3 letter currency code + // Map [0-15576] into a unique 3 letter currency code std::string currcode(std::size_t i) { diff --git a/src/xrpld/app/tx/detail/Transactor.cpp b/src/xrpld/app/tx/detail/Transactor.cpp index 6ae8be8a67f..18e11415c0a 100644 --- a/src/xrpld/app/tx/detail/Transactor.cpp +++ b/src/xrpld/app/tx/detail/Transactor.cpp @@ -635,7 +635,7 @@ Transactor::checkMultiSign(PreclaimContext const& ctx) calcAccountID(PublicKey(makeSlice(spk))); // Verify that the signingAcctID and the signingAcctIDFromPubKey - // belong together. Here is are the rules: + // belong together. Here are the rules: // // 1. "Phantom account": an account that is not in the ledger // A. If signingAcctID == signingAcctIDFromPubKey and the