From 11271c2344da9fd6eba35e65d9c61f3fb2350bdb Mon Sep 17 00:00:00 2001 From: Gregory Tsipenyuk Date: Mon, 13 Apr 2020 10:26:14 -0400 Subject: [PATCH 01/19] Improve compression support: * Optimize parsing of compressed message headers * Enforce protocol-defined message size maxima * Update comments --- src/ripple/overlay/Compression.h | 4 +- src/ripple/overlay/Message.h | 4 +- src/ripple/overlay/Peer.h | 3 + src/ripple/overlay/impl/Message.cpp | 50 +++++++++--- src/ripple/overlay/impl/PeerImp.h | 6 ++ src/ripple/overlay/impl/ProtocolMessage.h | 99 ++++++++++++++++++----- src/test/overlay/compression_test.cpp | 40 +++------ 7 files changed, 138 insertions(+), 68 deletions(-) diff --git a/src/ripple/overlay/Compression.h b/src/ripple/overlay/Compression.h index 5d45dbda888..6bb94792b43 100644 --- a/src/ripple/overlay/Compression.h +++ b/src/ripple/overlay/Compression.h @@ -31,7 +31,9 @@ namespace compression { std::size_t constexpr headerBytes = 6; std::size_t constexpr headerBytesCompressed = 10; -enum class Algorithm : std::uint8_t { None = 0x00, LZ4 = 0x01 }; +// All values other than 'none' must have the high bit. The low order four bits +// must be 0. +enum class Algorithm : std::uint8_t { None = 0x00, LZ4 = 0x90 }; enum class Compressed : std::uint8_t { On, Off }; diff --git a/src/ripple/overlay/Message.h b/src/ripple/overlay/Message.h index af7a168ad0b..e2c081d123f 100644 --- a/src/ripple/overlay/Message.h +++ b/src/ripple/overlay/Message.h @@ -84,7 +84,7 @@ class Message : public std::enable_shared_from_this * @param in Pointer to the payload * @param payloadBytes Size of the payload excluding the header size * @param type Protocol message type - * @param comprAlgorithm Compression algorithm used in compression, + * @param compression Compression algorithm used in compression, * currently LZ4 only. If None then the message is uncompressed. 
* @param uncompressedBytes Size of the uncompressed message */ @@ -93,7 +93,7 @@ class Message : public std::enable_shared_from_this std::uint8_t* in, std::uint32_t payloadBytes, int type, - Algorithm comprAlgorithm, + Algorithm compression, std::uint32_t uncompressedBytes); /** Try to compress the payload. diff --git a/src/ripple/overlay/Peer.h b/src/ripple/overlay/Peer.h index d16433c1d0f..1f5aad376d2 100644 --- a/src/ripple/overlay/Peer.h +++ b/src/ripple/overlay/Peer.h @@ -118,6 +118,9 @@ class Peer cycleStatus() = 0; virtual bool hasRange(std::uint32_t uMin, std::uint32_t uMax) = 0; + + virtual bool + compressionEnabled() const = 0; }; } // namespace ripple diff --git a/src/ripple/overlay/impl/Message.cpp b/src/ripple/overlay/impl/Message.cpp index e2c1f2fb548..29440c44e48 100644 --- a/src/ripple/overlay/impl/Message.cpp +++ b/src/ripple/overlay/impl/Message.cpp @@ -109,22 +109,46 @@ Message::compress() } /** Set payload header - * Uncompressed message header - * 47-42 Set to 0 - * 41-16 Payload size - * 15-0 Message Type - * Compressed message header - * 79 Set to 0, indicates the message is compressed - * 78-76 Compression algorithm, value 1-7. Set to 1 to indicate LZ4 - * compression 75-74 Set to 0 73-48 Payload size 47-32 Message Type - * 31-0 Uncompressed message size - */ + + The header is a variable-sized structure that contains information about + the type of the message and the length and encoding of the payload. + + The first bit determines whether a message is compressed or uncompressed; + for compressed messages, the next three bits identify the compression + algorithm. + + All multi-byte values are represented in big endian. + + For uncompressed messages (6 bytes), numbering bits from left to right: + + - The first 6 bits are set to 0. + - The next 26 bits represent the payload size. + - The remaining 16 bits represent the message type. 
+ + For compressed messages (10 bytes), numbering bits from left to right: + + - The first 32 bits, together, represent the compression algorithm + and payload size: + - The first bit is set to 1 to indicate the message is compressed. + - The next 3 bits indicate the compression algorithm. + - The next 2 bits are reserved at this time and set to 0. + - The remaining 26 bits represent the payload size. + - The next 16 bits represent the message type. + - The remaining 32 bits are the uncompressed message size. + + The maximum size of a message at this time is 64 MB. Messages larger than + this will be dropped and the recipient may, at its option, sever the link. + + @note While nominally a part of the wire protocol, the framing is subject + to change; future versions of the code may negotiate the use of + substantially different framing. +*/ void Message::setHeader( std::uint8_t* in, std::uint32_t payloadBytes, int type, - Algorithm comprAlgorithm, + Algorithm compression, std::uint32_t uncompressedBytes) { auto h = in; @@ -142,10 +166,10 @@ Message::setHeader( *in++ = static_cast((type >> 8) & 0xFF); *in++ = static_cast(type & 0xFF); - if (comprAlgorithm != Algorithm::None) + if (compression != Algorithm::None) { pack(in, uncompressedBytes); - *h |= 0x80 | (static_cast(comprAlgorithm) << 4); + *h |= static_cast(compression); } } diff --git a/src/ripple/overlay/impl/PeerImp.h b/src/ripple/overlay/impl/PeerImp.h index e005fcf846b..4b279dea658 100644 --- a/src/ripple/overlay/impl/PeerImp.h +++ b/src/ripple/overlay/impl/PeerImp.h @@ -428,6 +428,12 @@ class PeerImp : public Peer, boost::optional> getPeerShardInfo() const; + bool + compressionEnabled() const override + { + return compressionEnabled_ == Compressed::On; + } + private: void close(); diff --git a/src/ripple/overlay/impl/ProtocolMessage.h b/src/ripple/overlay/impl/ProtocolMessage.h index 0f97cdd11b9..b929f91ba40 100644 --- a/src/ripple/overlay/impl/ProtocolMessage.h +++ 
b/src/ripple/overlay/impl/ProtocolMessage.h @@ -120,51 +120,94 @@ buffersBegin(BufferSequence const& bufs) bufs); } +/** Parse a message header + * @return a seated optional if the message header was successfully + * parsed. An unseated optional otherwise, in which case + * @param ec contains more information: + * - set to `errc::success` if not enough bytes were present + * - set to `errc::no_message` if a valid header was not present + */ template boost::optional -parseMessageHeader(BufferSequence const& bufs, std::size_t size) +parseMessageHeader( + boost::system::error_code& ec, + BufferSequence const& bufs, + std::size_t size) { using namespace ripple::compression; - auto iter = buffersBegin(bufs); MessageHeader hdr; - auto const compressed = (*iter & 0x80) == 0x80; + auto iter = buffersBegin(bufs); // Check valid header - if ((*iter & 0xFC) == 0 || compressed) + if (*iter & 0x80) { - hdr.header_size = compressed ? headerBytesCompressed : headerBytes; + hdr.header_size = headerBytesCompressed; + // not enough bytes to parse the header if (size < hdr.header_size) - return {}; + { + ec = make_error_code(boost::system::errc::success); + return boost::none; + } + + if (*iter & 0x0C) + { + ec = make_error_code(boost::system::errc::protocol_error); + return boost::none; + } + + hdr.algorithm = static_cast(*iter); - if (compressed) + if (hdr.algorithm != compression::Algorithm::LZ4) { - uint8_t algorithm = (*iter & 0x70) >> 4; - if (algorithm != - static_cast(compression::Algorithm::LZ4)) - return {}; - hdr.algorithm = compression::Algorithm::LZ4; + ec = make_error_code(boost::system::errc::protocol_error); + return boost::none; } for (int i = 0; i != 4; ++i) hdr.payload_wire_size = (hdr.payload_wire_size << 8) + *iter++; - // clear the compression bits - hdr.payload_wire_size &= 0x03FFFFFF; + + // clear the top four bits (the compression bits). 
+ hdr.payload_wire_size &= 0x0FFFFFFF; hdr.total_wire_size = hdr.header_size + hdr.payload_wire_size; for (int i = 0; i != 2; ++i) hdr.message_type = (hdr.message_type << 8) + *iter++; - if (compressed) - for (int i = 0; i != 4; ++i) - hdr.uncompressed_size = (hdr.uncompressed_size << 8) + *iter++; + for (int i = 0; i != 4; ++i) + hdr.uncompressed_size = (hdr.uncompressed_size << 8) + *iter++; + + return hdr; + } + + if ((*iter & 0xFC) == 0) + { + hdr.header_size = headerBytes; + + if (size < hdr.header_size) + { + ec = make_error_code(boost::system::errc::success); + return boost::none; + } + + hdr.algorithm = Algorithm::None; + + for (int i = 0; i != 4; ++i) + hdr.payload_wire_size = (hdr.payload_wire_size << 8) + *iter++; + + hdr.uncompressed_size = hdr.payload_wire_size; + hdr.total_wire_size = hdr.header_size + hdr.payload_wire_size; + + for (int i = 0; i != 2; ++i) + hdr.message_type = (hdr.message_type << 8) + *iter++; return hdr; } - return {}; + ec = make_error_code(boost::system::errc::no_message); + return boost::none; } template < @@ -186,7 +229,7 @@ invoke(MessageHeader const& header, Buffers const& buffers, Handler& handler) std::vector payload; payload.resize(header.uncompressed_size); - auto payloadSize = ripple::compression::decompress( + auto const payloadSize = ripple::compression::decompress( stream, header.payload_wire_size, payload.data(), @@ -226,10 +269,13 @@ invokeProtocolMessage(Buffers const& buffers, Handler& handler) if (size == 0) return result; - auto header = detail::parseMessageHeader(buffers, size); + auto header = detail::parseMessageHeader(result.second, buffers, size); // If we can't parse the header then it may be that we don't have enough - // bytes yet, or because the message was cut off. + // bytes yet, or because the message was cut off (if error_code is success). 
+ // Otherwise we failed to match the header's marker (error_code is set to + // no_message) or the compression algorithm is invalid (error_code is + // protocol_error) and signal an error. if (!header) return result; @@ -237,12 +283,21 @@ invokeProtocolMessage(Buffers const& buffers, Handler& handler) // whose size exceeds this may result in the connection being dropped. A // larger message size may be supported in the future or negotiated as // part of a protocol upgrade. - if (header->payload_wire_size > megabytes(64)) + if (header->payload_wire_size > megabytes(64) || + header->uncompressed_size > megabytes(64)) { result.second = make_error_code(boost::system::errc::message_size); return result; } + // We requested uncompressed messages from the peer but received compressed. + if (!handler.compressionEnabled() && + header->algorithm != compression::Algorithm::None) + { + result.second = make_error_code(boost::system::errc::protocol_error); + return result; + } + // We don't have the whole message yet. This isn't an error but we have // nothing to do. 
if (header->total_wire_size > size) diff --git a/src/test/overlay/compression_test.cpp b/src/test/overlay/compression_test.cpp index a73225eb3b0..454b10136f5 100644 --- a/src/test/overlay/compression_test.cpp +++ b/src/test/overlay/compression_test.cpp @@ -84,21 +84,14 @@ class compression_test : public beast::unit_test::suite std::shared_ptr proto, protocol::MessageType mt, uint16_t nbuffers, - const char* msg, - bool log = false) + std::string msg) { - if (log) - printf("=== compress/decompress %s ===\n", msg); + testcase("Compress/Decompress: " + msg); + Message m(*proto, mt); auto& buffer = m.getBuffer(Compressed::On); - if (log) - printf( - "==> compressed, original %d bytes, compressed %d bytes\n", - (int)m.getBuffer(Compressed::Off).size(), - (int)m.getBuffer(Compressed::On).size()); - boost::beast::multi_buffer buffers; // simulate multi-buffer @@ -112,26 +105,15 @@ class compression_test : public beast::unit_test::suite buffers.commit(boost::asio::buffer_copy( buffers.prepare(slice.size()), boost::asio::buffer(slice))); } - auto header = - ripple::detail::parseMessageHeader(buffers.data(), buffer.size()); - - if (log) - printf( - "==> parsed header: buffers size %d, compressed %d, algorithm " - "%d, header size %d, payload size %d, buffer size %d\n", - (int)buffers.size(), - header->algorithm != Algorithm::None, - (int)header->algorithm, - (int)header->header_size, - (int)header->payload_wire_size, - (int)buffer.size()); + + boost::system::error_code ec; + auto header = ripple::detail::parseMessageHeader( + ec, buffers.data(), buffer.size()); + + BEAST_EXPECT(header); if (header->algorithm == Algorithm::None) - { - if (log) - printf("==> NOT COMPRESSED\n"); return; - } std::vector decompressed; decompressed.resize(header->uncompressed_size); @@ -157,8 +139,6 @@ class compression_test : public beast::unit_test::suite uncompressed.begin() + ripple::compression::headerBytes, uncompressed.end(), decompressed.begin())); - if (log) - printf("\n"); } 
std::shared_ptr @@ -460,4 +440,4 @@ class compression_test : public beast::unit_test::suite BEAST_DEFINE_TESTSUITE_MANUAL_PRIO(compression, ripple_data, ripple, 20); } // namespace test -} // namespace ripple \ No newline at end of file +} // namespace ripple From ea0bcdd1f51c42cf505912fbf693c78d7b012a91 Mon Sep 17 00:00:00 2001 From: Gregory Tsipenyuk Date: Mon, 18 May 2020 17:40:53 -0400 Subject: [PATCH 02/19] Use base 10 for majority vote calculation: *Add majority timer configuration *FIXES: #3396 --- src/ripple/app/main/Application.cpp | 6 +- src/ripple/app/misc/AmendmentTable.h | 3 +- src/ripple/app/misc/impl/AmendmentTable.cpp | 135 +++++++++------ src/ripple/core/Config.h | 4 + src/ripple/core/ConfigSections.h | 1 + src/ripple/core/impl/Config.cpp | 31 ++++ src/ripple/protocol/Feature.h | 4 +- src/ripple/protocol/SystemParameters.h | 13 ++ src/ripple/protocol/impl/Feature.cpp | 6 +- src/test/app/AmendmentTable_test.cpp | 174 ++++++++++++++------ src/test/core/Config_test.cpp | 52 ++++++ src/test/rpc/Feature_test.cpp | 7 +- 12 files changed, 318 insertions(+), 118 deletions(-) diff --git a/src/ripple/app/main/Application.cpp b/src/ripple/app/main/Application.cpp index 3b37c736963..5d4b7c88573 100644 --- a/src/ripple/app/main/Application.cpp +++ b/src/ripple/app/main/Application.cpp @@ -77,9 +77,6 @@ namespace ripple { -// 204/256 about 80% -static int const MAJORITY_FRACTION(204); - //------------------------------------------------------------------------------ namespace detail { @@ -1533,8 +1530,7 @@ ApplicationImp::setup() Section enabledAmendments = config_->section(SECTION_AMENDMENTS); m_amendmentTable = make_AmendmentTable( - weeks{2}, - MAJORITY_FRACTION, + config().AMENDMENT_MAJORITY_TIME, supportedAmendments, enabledAmendments, config_->section(SECTION_VETO_AMENDMENTS), diff --git a/src/ripple/app/misc/AmendmentTable.h b/src/ripple/app/misc/AmendmentTable.h index 0ac55858074..bcd21f763b5 100644 --- a/src/ripple/app/misc/AmendmentTable.h +++ 
b/src/ripple/app/misc/AmendmentTable.h @@ -99,6 +99,7 @@ class AmendmentTable // inject pseudo-transactions virtual std::map doVoting( + Rules const& rules, NetClock::time_point closeTime, std::set const& enabledAmendments, majorityAmendments_t const& majorityAmendments, @@ -130,6 +131,7 @@ class AmendmentTable { // Ask implementation what to do auto actions = doVoting( + lastClosedLedger->rules(), lastClosedLedger->parentCloseTime(), getEnabledAmendments(*lastClosedLedger), getMajorityAmendments(*lastClosedLedger), @@ -164,7 +166,6 @@ class AmendmentTable std::unique_ptr make_AmendmentTable( std::chrono::seconds majorityTime, - int majorityFraction, Section const& supported, Section const& enabled, Section const& vetoed, diff --git a/src/ripple/app/misc/impl/AmendmentTable.cpp b/src/ripple/app/misc/impl/AmendmentTable.cpp index 4d499ea9172..2f29a2f5788 100644 --- a/src/ripple/app/misc/impl/AmendmentTable.cpp +++ b/src/ripple/app/misc/impl/AmendmentTable.cpp @@ -21,6 +21,7 @@ #include #include #include +#include #include #include #include @@ -92,28 +93,74 @@ struct AmendmentState }; /** The status of all amendments requested in a given window. 
*/ -struct AmendmentSet +class AmendmentSet { private: // How many yes votes each amendment received hash_map votes_; - -public: + Rules const& rules_; // number of trusted validations - int mTrustedValidations = 0; - + int trustedValidations_ = 0; // number of votes needed - int mThreshold = 0; + int threshold_ = 0; - AmendmentSet() = default; +public: + AmendmentSet( + Rules const& rules, + std::vector> const& valSet) + : rules_(rules) + { + // process validations for ledger before flag ledger + for (auto const& val : valSet) + { + if (val->isTrusted()) + { + if (val->isFieldPresent(sfAmendments)) + { + auto const choices = val->getFieldV256(sfAmendments); + std::for_each( + choices.begin(), + choices.end(), + [&](auto const& amendment) { ++votes_[amendment]; }); + } + + ++trustedValidations_; + } + } - void - tally(std::set const& amendments) + threshold_ = !rules_.enabled(fixAmendmentMajorityCalc) + ? std::max( + 1L, + static_cast( + (trustedValidations_ * + preFixAmendmentMajorityCalcThreshold.num) / + preFixAmendmentMajorityCalcThreshold.den)) + : std::max( + 1L, + static_cast( + (trustedValidations_ * + postFixAmendmentMajorityCalcThreshold.num) / + postFixAmendmentMajorityCalcThreshold.den)); + } + + bool + passes(uint256 const& amendment) const { - ++mTrustedValidations; + auto const& it = votes_.find(amendment); - for (auto const& amendment : amendments) - ++votes_[amendment]; + if (it == votes_.end()) + return false; + + // Before this fix, it was possible for an amendment to activate with a + // percentage slightly less than 80% because we compared for "greater + // than or equal to" instead of strictly "greater than". + // One validator is an exception, otherwise it is not possible + // to gain majority. 
+ if (!rules_.enabled(fixAmendmentMajorityCalc) || + trustedValidations_ == 1) + return it->second >= threshold_; + + return it->second > threshold_; } int @@ -126,6 +173,18 @@ struct AmendmentSet return it->second; } + + int + trustedValidations() const + { + return trustedValidations_; + } + + int + threshold() const + { + return threshold_; + } }; //------------------------------------------------------------------------------ @@ -138,7 +197,7 @@ struct AmendmentSet */ class AmendmentTableImpl final : public AmendmentTable { -protected: +private: mutable std::mutex mutex_; hash_map amendmentMap_; @@ -147,10 +206,6 @@ class AmendmentTableImpl final : public AmendmentTable // Time that an amendment must hold a majority for std::chrono::seconds const majorityTime_; - // The amount of support that an amendment must receive - // 0 = 0% and 256 = 100% - int const majorityFraction_; - // The results of the last voting round - may be empty if // we haven't participated in one yet. std::unique_ptr lastVote_; @@ -187,7 +242,6 @@ class AmendmentTableImpl final : public AmendmentTable public: AmendmentTableImpl( std::chrono::seconds majorityTime, - int majorityFraction, Section const& supported, Section const& enabled, Section const& vetoed, @@ -237,6 +291,7 @@ class AmendmentTableImpl final : public AmendmentTable std::map doVoting( + Rules const& rules, NetClock::time_point closeTime, std::set const& enabledAmendments, majorityAmendments_t const& majorityAmendments, @@ -247,19 +302,15 @@ class AmendmentTableImpl final : public AmendmentTable AmendmentTableImpl::AmendmentTableImpl( std::chrono::seconds majorityTime, - int majorityFraction, Section const& supported, Section const& enabled, Section const& vetoed, beast::Journal journal) : lastUpdateSeq_(0) , majorityTime_(majorityTime) - , majorityFraction_(majorityFraction) , unsupportedEnabled_(false) , j_(journal) { - assert(majorityFraction_ != 0); - std::lock_guard sl(mutex_); for (auto const& a : 
parseSection(supported)) @@ -461,6 +512,7 @@ AmendmentTableImpl::getDesired() const std::map AmendmentTableImpl::doVoting( + Rules const& rules, NetClock::time_point closeTime, std::set const& enabledAmendments, majorityAmendments_t const& majorityAmendments, @@ -470,31 +522,11 @@ AmendmentTableImpl::doVoting( << ": " << enabledAmendments.size() << ", " << majorityAmendments.size() << ", " << valSet.size(); - auto vote = std::make_unique(); - - // process validations for ledger before flag ledger - for (auto const& val : valSet) - { - if (val->isTrusted()) - { - std::set ballot; + auto vote = std::make_unique(rules, valSet); - if (val->isFieldPresent(sfAmendments)) - { - auto const choices = val->getFieldV256(sfAmendments); - ballot.insert(choices.begin(), choices.end()); - } - - vote->tally(ballot); - } - } - - vote->mThreshold = - std::max(1, (vote->mTrustedValidations * majorityFraction_) / 256); - - JLOG(j_.debug()) << "Received " << vote->mTrustedValidations + JLOG(j_.debug()) << "Received " << vote->trustedValidations() << " trusted validations, threshold is: " - << vote->mThreshold; + << vote->threshold(); // Map of amendments to the action to be taken for each one. 
The action is // the value of the flags in the pseudo-transaction @@ -507,8 +539,7 @@ AmendmentTableImpl::doVoting( { NetClock::time_point majorityTime = {}; - bool const hasValMajority = - (vote->votes(entry.first) >= vote->mThreshold); + bool const hasValMajority = vote->passes(entry.first); { auto const it = majorityAmendments.find(entry.first); @@ -614,18 +645,15 @@ AmendmentTableImpl::injectJson( if (!fs.enabled && lastVote_) { - auto const votesTotal = lastVote_->mTrustedValidations; - auto const votesNeeded = lastVote_->mThreshold; + auto const votesTotal = lastVote_->trustedValidations(); + auto const votesNeeded = lastVote_->threshold(); auto const votesFor = lastVote_->votes(id); v[jss::count] = votesFor; v[jss::validations] = votesTotal; if (votesNeeded) - { - v[jss::vote] = votesFor * 256 / votesNeeded; v[jss::threshold] = votesNeeded; - } } } @@ -666,14 +694,13 @@ AmendmentTableImpl::getJson(uint256 const& amendmentID) const std::unique_ptr make_AmendmentTable( std::chrono::seconds majorityTime, - int majorityFraction, Section const& supported, Section const& enabled, Section const& vetoed, beast::Journal journal) { return std::make_unique( - majorityTime, majorityFraction, supported, enabled, vetoed, journal); + majorityTime, supported, enabled, vetoed, journal); } } // namespace ripple diff --git a/src/ripple/core/Config.h b/src/ripple/core/Config.h index 7943906fdae..7eec3bc0764 100644 --- a/src/ripple/core/Config.h +++ b/src/ripple/core/Config.h @@ -31,6 +31,7 @@ #include #include #include +#include #include #include #include @@ -172,6 +173,9 @@ class Config : public BasicConfig // Compression bool COMPRESSION = false; + // Amendment majority time + std::chrono::seconds AMENDMENT_MAJORITY_TIME = defaultAmendmentMajorityTime; + // Thread pool configuration std::size_t WORKERS = 0; diff --git a/src/ripple/core/ConfigSections.h b/src/ripple/core/ConfigSections.h index c9b61c2cb2b..3aae9774d10 100644 --- a/src/ripple/core/ConfigSections.h +++ 
b/src/ripple/core/ConfigSections.h @@ -60,6 +60,7 @@ struct ConfigSection #define SECTION_INSIGHT "insight" #define SECTION_IPS "ips" #define SECTION_IPS_FIXED "ips_fixed" +#define SECTION_AMENDMENT_MAJORITY_TIME "amendment_majority_time" #define SECTION_NETWORK_QUORUM "network_quorum" #define SECTION_NODE_SEED "node_seed" #define SECTION_NODE_SIZE "node_size" diff --git a/src/ripple/core/impl/Config.cpp b/src/ripple/core/impl/Config.cpp index f12ba7dbcee..cd272fd885c 100644 --- a/src/ripple/core/impl/Config.cpp +++ b/src/ripple/core/impl/Config.cpp @@ -480,6 +480,37 @@ Config::loadFromString(std::string const& fileContents) if (getSingleSection(secConfig, SECTION_COMPRESSION, strTemp, j_)) COMPRESSION = beast::lexicalCastThrow(strTemp); + if (getSingleSection( + secConfig, SECTION_AMENDMENT_MAJORITY_TIME, strTemp, j_)) + { + using namespace std::chrono; + boost::regex const re( + "^\\s*(\\d+)\\s*(minutes|hours|days|weeks)\\s*(\\s+.*)?$"); + boost::smatch match; + if (!boost::regex_match(strTemp, match, re)) + Throw( + "Invalid " SECTION_AMENDMENT_MAJORITY_TIME + ", must be: [0-9]+ [minutes|hours|days|weeks]"); + + std::uint32_t duration = + beast::lexicalCastThrow(match[1].str()); + + if (boost::iequals(match[2], "minutes")) + AMENDMENT_MAJORITY_TIME = minutes(duration); + else if (boost::iequals(match[2], "hours")) + AMENDMENT_MAJORITY_TIME = hours(duration); + else if (boost::iequals(match[2], "days")) + AMENDMENT_MAJORITY_TIME = days(duration); + else if (boost::iequals(match[2], "weeks")) + AMENDMENT_MAJORITY_TIME = weeks(duration); + + if (AMENDMENT_MAJORITY_TIME < minutes(15)) + Throw( + "Invalid " SECTION_AMENDMENT_MAJORITY_TIME + ", the minimum amount of time an amendment must hold a " + "majority is 15 minutes"); + } + // Do not load trusted validator configuration for standalone mode if (!RUN_STANDALONE) { diff --git a/src/ripple/protocol/Feature.h b/src/ripple/protocol/Feature.h index e47778a03a4..ae05c8c1d11 100644 --- a/src/ripple/protocol/Feature.h 
+++ b/src/ripple/protocol/Feature.h @@ -111,7 +111,8 @@ class FeatureCollections "RequireFullyCanonicalSig", "fix1781", // XRPEndpointSteps should be included in the circular // payment check - "HardenedValidations"}; + "HardenedValidations", + "fixAmendmentMajorityCalc"}; // Fix Amendment majority calculation std::vector features; boost::container::flat_map featureToIndex; @@ -367,6 +368,7 @@ extern uint256 const fixQualityUpperBound; extern uint256 const featureRequireFullyCanonicalSig; extern uint256 const fix1781; extern uint256 const featureHardenedValidations; +extern uint256 const fixAmendmentMajorityCalc; } // namespace ripple diff --git a/src/ripple/protocol/SystemParameters.h b/src/ripple/protocol/SystemParameters.h index a74155a6a32..2a59de656d6 100644 --- a/src/ripple/protocol/SystemParameters.h +++ b/src/ripple/protocol/SystemParameters.h @@ -21,6 +21,7 @@ #define RIPPLE_PROTOCOL_SYSTEMPARAMETERS_H_INCLUDED #include +#include #include #include @@ -59,6 +60,18 @@ systemCurrencyCode() /** The XRP ledger network's earliest allowed sequence */ static std::uint32_t constexpr XRP_LEDGER_EARLIEST_SEQ{32570}; +/** The minimum amount of support an amendment should have. + + @note This value is used by legacy code and will become obsolete + once the fixAmendmentMajorityCalc amendment activates. 
+*/ +constexpr std::ratio<204, 256> preFixAmendmentMajorityCalcThreshold; + +constexpr std::ratio<80, 100> postFixAmendmentMajorityCalcThreshold; + +/** The minimum amount of time an amendment must hold a majority */ +constexpr std::chrono::seconds const defaultAmendmentMajorityTime = weeks{2}; + } // namespace ripple /** Default peer port (IANA registered) */ diff --git a/src/ripple/protocol/impl/Feature.cpp b/src/ripple/protocol/impl/Feature.cpp index b8cac9b8d68..9d88c9d211a 100644 --- a/src/ripple/protocol/impl/Feature.cpp +++ b/src/ripple/protocol/impl/Feature.cpp @@ -130,7 +130,8 @@ detail::supportedAmendments() "fixQualityUpperBound", "RequireFullyCanonicalSig", "fix1781", - "HardenedValidations"}; + "HardenedValidations", + "fixAmendmentMajorityCalc"}; return supported; } @@ -181,7 +182,8 @@ uint256 const fixQualityUpperBound = *getRegisteredFeature("fixQualityUpperBound"), featureRequireFullyCanonicalSig = *getRegisteredFeature("RequireFullyCanonicalSig"), fix1781 = *getRegisteredFeature("fix1781"), - featureHardenedValidations = *getRegisteredFeature("HardenedValidations"); + featureHardenedValidations = *getRegisteredFeature("HardenedValidations"), + fixAmendmentMajorityCalc = *getRegisteredFeature("fixAmendmentMajorityCalc"); // The following amendments have been active for at least two years. Their // pre-amendment code has been removed and the identifiers are deprecated. 
diff --git a/src/test/app/AmendmentTable_test.cpp b/src/test/app/AmendmentTable_test.cpp index ec9293281b0..3b4ec47d143 100644 --- a/src/test/app/AmendmentTable_test.cpp +++ b/src/test/app/AmendmentTable_test.cpp @@ -38,9 +38,6 @@ namespace ripple { class AmendmentTable_test final : public beast::unit_test::suite { private: - // 204/256 about 80% (we round down because the implementation rounds up) - static int const majorityFraction{204}; - static uint256 amendmentId(std::string in) { @@ -100,12 +97,7 @@ class AmendmentTable_test final : public beast::unit_test::suite Section const vetoed) { return make_AmendmentTable( - majorityTime, - majorityFraction, - supported, - enabled, - vetoed, - journal); + majorityTime, supported, enabled, vetoed, journal); } std::unique_ptr @@ -373,6 +365,7 @@ class AmendmentTable_test final : public beast::unit_test::suite // Execute a pretend consensus round for a flag ledger void doRound( + uint256 const& feat, AmendmentTable& table, weeks week, std::vector> const& validators, @@ -399,25 +392,25 @@ class AmendmentTable_test final : public beast::unit_test::suite validations.reserve(validators.size()); int i = 0; - for (auto const& val : validators) + for (auto const& [pub, sec] : validators) { ++i; std::vector field; - for (auto const& amendment : votes) + for (auto const& [hash, nVotes] : votes) { - if ((256 * i) < (validators.size() * amendment.second)) + if (feat == fixAmendmentMajorityCalc ? 
nVotes >= i : nVotes > i) { // We vote yes on this amendment - field.push_back(amendment.first); + field.push_back(hash); } } auto v = std::make_shared( ripple::NetClock::time_point{}, - val.first, - val.second, - calcNodeID(val.first), + pub, + sec, + calcNodeID(pub), [&field](STValidation& v) { if (!field.empty()) v.setFieldV256( @@ -430,14 +423,13 @@ class AmendmentTable_test final : public beast::unit_test::suite ourVotes = table.doValidation(enabled); - auto actions = - table.doVoting(roundTime, enabled, majority, validations); - for (auto const& action : actions) + auto actions = table.doVoting( + Rules({feat}), roundTime, enabled, majority, validations); + for (auto const& [hash, action] : actions) { // This code assumes other validators do as we do - auto const& hash = action.first; - switch (action.second) + switch (action) { case 0: // amendment goes from majority to enabled @@ -471,7 +463,7 @@ class AmendmentTable_test final : public beast::unit_test::suite // No vote on unknown amendment void - testNoOnUnknown() + testNoOnUnknown(uint256 const& feat) { testcase("Vote NO on unknown"); @@ -487,15 +479,29 @@ class AmendmentTable_test final : public beast::unit_test::suite majorityAmendments_t majority; doRound( - *table, weeks{1}, validators, votes, ourVotes, enabled, majority); + feat, + *table, + weeks{1}, + validators, + votes, + ourVotes, + enabled, + majority); BEAST_EXPECT(ourVotes.empty()); BEAST_EXPECT(enabled.empty()); BEAST_EXPECT(majority.empty()); - votes.emplace_back(testAmendment, 256); + votes.emplace_back(testAmendment, validators.size()); doRound( - *table, weeks{2}, validators, votes, ourVotes, enabled, majority); + feat, + *table, + weeks{2}, + validators, + votes, + ourVotes, + enabled, + majority); BEAST_EXPECT(ourVotes.empty()); BEAST_EXPECT(enabled.empty()); @@ -504,14 +510,21 @@ class AmendmentTable_test final : public beast::unit_test::suite // Note that the simulation code assumes others behave as we do, // so the amendment won't 
get enabled doRound( - *table, weeks{5}, validators, votes, ourVotes, enabled, majority); + feat, + *table, + weeks{5}, + validators, + votes, + ourVotes, + enabled, + majority); BEAST_EXPECT(ourVotes.empty()); BEAST_EXPECT(enabled.empty()); } // No vote on vetoed amendment void - testNoOnVetoed() + testNoOnVetoed(uint256 const& feat) { testcase("Vote NO on vetoed"); @@ -528,29 +541,50 @@ class AmendmentTable_test final : public beast::unit_test::suite majorityAmendments_t majority; doRound( - *table, weeks{1}, validators, votes, ourVotes, enabled, majority); + feat, + *table, + weeks{1}, + validators, + votes, + ourVotes, + enabled, + majority); BEAST_EXPECT(ourVotes.empty()); BEAST_EXPECT(enabled.empty()); BEAST_EXPECT(majority.empty()); - votes.emplace_back(testAmendment, 256); + votes.emplace_back(testAmendment, validators.size()); doRound( - *table, weeks{2}, validators, votes, ourVotes, enabled, majority); + feat, + *table, + weeks{2}, + validators, + votes, + ourVotes, + enabled, + majority); BEAST_EXPECT(ourVotes.empty()); BEAST_EXPECT(enabled.empty()); majority[testAmendment] = weekTime(weeks{1}); doRound( - *table, weeks{5}, validators, votes, ourVotes, enabled, majority); + feat, + *table, + weeks{5}, + validators, + votes, + ourVotes, + enabled, + majority); BEAST_EXPECT(ourVotes.empty()); BEAST_EXPECT(enabled.empty()); } // Vote on and enable known, not-enabled amendment void - testVoteEnable() + testVoteEnable(uint256 const& feat) { testcase("voteEnable"); @@ -565,7 +599,14 @@ class AmendmentTable_test final : public beast::unit_test::suite // Week 1: We should vote for all known amendments not enabled doRound( - *table, weeks{1}, validators, votes, ourVotes, enabled, majority); + feat, + *table, + weeks{1}, + validators, + votes, + ourVotes, + enabled, + majority); BEAST_EXPECT(ourVotes.size() == supported_.size()); BEAST_EXPECT(enabled.empty()); for (auto const& i : supported_) @@ -573,11 +614,18 @@ class AmendmentTable_test final : public 
beast::unit_test::suite // Now, everyone votes for this feature for (auto const& i : supported_) - votes.emplace_back(amendmentId(i), 256); + votes.emplace_back(amendmentId(i), validators.size()); // Week 2: We should recognize a majority doRound( - *table, weeks{2}, validators, votes, ourVotes, enabled, majority); + feat, + *table, + weeks{2}, + validators, + votes, + ourVotes, + enabled, + majority); BEAST_EXPECT(ourVotes.size() == supported_.size()); BEAST_EXPECT(enabled.empty()); @@ -586,12 +634,26 @@ class AmendmentTable_test final : public beast::unit_test::suite // Week 5: We should enable the amendment doRound( - *table, weeks{5}, validators, votes, ourVotes, enabled, majority); + feat, + *table, + weeks{5}, + validators, + votes, + ourVotes, + enabled, + majority); BEAST_EXPECT(enabled.size() == supported_.size()); // Week 6: We should remove it from our votes and from having a majority doRound( - *table, weeks{6}, validators, votes, ourVotes, enabled, majority); + feat, + *table, + weeks{6}, + validators, + votes, + ourVotes, + enabled, + majority); BEAST_EXPECT(enabled.size() == supported_.size()); BEAST_EXPECT(ourVotes.empty()); for (auto const& i : supported_) @@ -600,7 +662,7 @@ class AmendmentTable_test final : public beast::unit_test::suite // Detect majority at 80%, enable later void - testDetectMajority() + testDetectMajority(uint256 const& feat) { testcase("detectMajority"); @@ -619,9 +681,10 @@ class AmendmentTable_test final : public beast::unit_test::suite std::vector ourVotes; if ((i > 0) && (i < 17)) - votes.emplace_back(testAmendment, i * 16); + votes.emplace_back(testAmendment, i); doRound( + feat, *table, weeks{i}, validators, @@ -630,7 +693,7 @@ class AmendmentTable_test final : public beast::unit_test::suite enabled, majority); - if (i < 13) + if (i < 13) // 13 => 13/16 = 0.8125 => > 80% { // We are voting yes, not enabled, no majority BEAST_EXPECT(!ourVotes.empty()); @@ -663,7 +726,7 @@ class AmendmentTable_test final : public 
beast::unit_test::suite // Detect loss of majority void - testLostMajority() + testLostMajority(uint256 const& feat) { testcase("lostMajority"); @@ -681,9 +744,10 @@ class AmendmentTable_test final : public beast::unit_test::suite std::vector> votes; std::vector ourVotes; - votes.emplace_back(testAmendment, 250); + votes.emplace_back(testAmendment, validators.size()); doRound( + feat, *table, weeks{1}, validators, @@ -696,15 +760,16 @@ class AmendmentTable_test final : public beast::unit_test::suite BEAST_EXPECT(!majority.empty()); } - for (int i = 1; i < 16; ++i) + for (int i = 1; i < 8; ++i) { std::vector> votes; std::vector ourVotes; // Gradually reduce support - votes.emplace_back(testAmendment, 256 - i * 8); + votes.emplace_back(testAmendment, validators.size() - i); doRound( + feat, *table, weeks{i + 1}, validators, @@ -713,8 +778,8 @@ class AmendmentTable_test final : public beast::unit_test::suite enabled, majority); - if (i < 8) - { + if (i < 4) // 16 - 3 = 13 => 13/16 = 0.8125 => > 80% + { // 16 - 4 = 12 => 12/16 = 0.75 => < 80% // We are voting yes, not enabled, majority BEAST_EXPECT(!ourVotes.empty()); BEAST_EXPECT(enabled.empty()); @@ -775,6 +840,16 @@ class AmendmentTable_test final : public beast::unit_test::suite BEAST_EXPECT(table->needValidatedLedger(257)); } + void + testFeature(uint256 const& feat) + { + testNoOnUnknown(feat); + testNoOnVetoed(feat); + testVoteEnable(feat); + testDetectMajority(feat); + testLostMajority(feat); + } + void run() override { @@ -782,12 +857,9 @@ class AmendmentTable_test final : public beast::unit_test::suite testGet(); testBadConfig(); testEnableVeto(); - testNoOnUnknown(); - testNoOnVetoed(); - testVoteEnable(); - testDetectMajority(); - testLostMajority(); testHasUnsupported(); + testFeature({}); + testFeature(fixAmendmentMajorityCalc); } }; diff --git a/src/test/core/Config_test.cpp b/src/test/core/Config_test.cpp index 0e829642af8..03282fd59de 100644 --- a/src/test/core/Config_test.cpp +++ 
b/src/test/core/Config_test.cpp @@ -1014,6 +1014,57 @@ r.ripple.com 51235 } } + void + testAmendment() + { + testcase("amendment"); + struct ConfigUnit + { + std::string unit; + std::uint32_t numSeconds; + std::uint32_t configVal; + bool shouldPass; + }; + + std::vector units = { + {"seconds", 1, 15 * 60, false}, + {"minutes", 60, 14, false}, + {"minutes", 60, 15, true}, + {"hours", 3600, 10, true}, + {"days", 86400, 10, true}, + {"weeks", 604800, 2, true}, + {"months", 2592000, 1, false}, + {"years", 31536000, 1, false}}; + + std::string space = ""; + for (auto& [unit, sec, val, shouldPass] : units) + { + Config c; + std::string toLoad(R"rippleConfig( +[amendment_majority_time] +)rippleConfig"); + toLoad += std::to_string(val) + space + unit; + space = space == "" ? " " : ""; + + try + { + c.loadFromString(toLoad); + if (shouldPass) + BEAST_EXPECT( + c.AMENDMENT_MAJORITY_TIME.count() == val * sec); + else + fail(); + } + catch (std::runtime_error&) + { + if (!shouldPass) + pass(); + else + fail(); + } + } + } + void run() override { @@ -1027,6 +1078,7 @@ r.ripple.com 51235 testWhitespace(); testComments(); testGetters(); + testAmendment(); } }; diff --git a/src/test/rpc/Feature_test.cpp b/src/test/rpc/Feature_test.cpp index 731e3560dfc..5773f756667 100644 --- a/src/test/rpc/Feature_test.cpp +++ b/src/test/rpc/Feature_test.cpp @@ -218,10 +218,9 @@ class Feature_test : public beast::unit_test::suite BEAST_EXPECTS( feature.isMember(jss::validations), feature[jss::name].asString() + " validations"); - BEAST_EXPECTS( - feature.isMember(jss::vote), - feature[jss::name].asString() + " vote"); - BEAST_EXPECT(feature[jss::vote] == 256); + BEAST_EXPECT(feature[jss::count] == 1); + BEAST_EXPECT(feature[jss::threshold] == 1); + BEAST_EXPECT(feature[jss::validations] == 1); BEAST_EXPECT(feature[jss::majority] == 2740); } } From 69c75233738eb6db5fd1da8ca52fdc84a612b683 Mon Sep 17 00:00:00 2001 From: Nik Bougalis Date: Sat, 9 May 2020 20:53:48 -0700 Subject: [PATCH 03/19] 
Improve Slice: Slice should, eventually, be replaced by std::string_view so begin adding some helpful functions to align its interface. --- src/ripple/basics/Slice.h | 48 ++++++++++++++++++++++++++++++++++++++- 1 file changed, 47 insertions(+), 1 deletion(-) diff --git a/src/ripple/basics/Slice.h b/src/ripple/basics/Slice.h index 126e8ab1ce0..67c954bb723 100644 --- a/src/ripple/basics/Slice.h +++ b/src/ripple/basics/Slice.h @@ -27,6 +27,7 @@ #include #include #include +#include #include #include #include @@ -63,7 +64,7 @@ class Slice } /** Return `true` if the byte range is empty. */ - bool + [[nodiscard]] bool empty() const noexcept { return size_ == 0; @@ -73,12 +74,20 @@ class Slice This may be zero for an empty range. */ + /** @{ */ std::size_t size() const noexcept { return size_; } + std::size_t + length() const noexcept + { + return size_; + } + /** @} */ + /** Return a pointer to beginning of the storage. @note The return type is guaranteed to be a pointer to a single byte, to facilitate pointer arithmetic. @@ -117,6 +126,21 @@ class Slice } /** @} */ + /** Shrinks the slice by moving its start forward by n characters. */ + void + remove_prefix(std::size_t n) + { + data_ += n; + size_ -= n; + } + + /** Shrinks the slice by moving its end backward by n characters. */ + void + remove_suffix(std::size_t n) + { + size_ -= n; + } + const_iterator begin() const noexcept { @@ -140,6 +164,28 @@ class Slice { return data_ + size_; } + + /** Return a "sub slice" of given length starting at the given position + + Note that the subslice encompasses the range [pos, pos + rcount), + where rcount is the smaller of count and size() - pos. + + @param pos position of the first character + @count requested length + + @returns The requested subslice, if the request is valid. 
+ @throws std::out_of_range if pos > size() + */ + Slice + substr( + std::size_t pos, + std::size_t count = std::numeric_limits::max()) const + { + if (pos > size()) + throw std::out_of_range("Requested sub-slice is out of bounds"); + + return {data_ + pos, std::min(count, size() - pos)}; + } }; //------------------------------------------------------------------------------ From 9be4b3e2bc90b79412f243d32bbe50fe3d7de797 Mon Sep 17 00:00:00 2001 From: Nik Bougalis Date: Mon, 4 May 2020 11:14:01 -0700 Subject: [PATCH 04/19] Cleanup SHAMap and simplify interfaces: * Improve error reporting (more readable exception messages) * Reduce function complexity (split oversized function to smaller pieces) * Reduce code duplication * Reduce buffer copying --- src/ripple/app/ledger/LedgerMaster.h | 7 +- src/ripple/app/ledger/impl/InboundLedger.cpp | 159 ++++--- src/ripple/app/ledger/impl/InboundLedgers.cpp | 8 +- src/ripple/app/ledger/impl/LedgerMaster.cpp | 37 +- .../app/ledger/impl/TransactionAcquire.cpp | 1 - src/ripple/overlay/impl/PeerImp.cpp | 35 +- src/ripple/shamap/SHAMap.h | 1 - src/ripple/shamap/SHAMapTreeNode.h | 55 ++- src/ripple/shamap/impl/SHAMap.cpp | 48 ++- src/ripple/shamap/impl/SHAMapSync.cpp | 9 +- src/ripple/shamap/impl/SHAMapTreeNode.cpp | 395 +++++++++--------- src/test/shamap/SHAMapSync_test.cpp | 1 - 12 files changed, 397 insertions(+), 359 deletions(-) diff --git a/src/ripple/app/ledger/LedgerMaster.h b/src/ripple/app/ledger/LedgerMaster.h index b82fce0bd12..5fa835a9ba3 100644 --- a/src/ripple/app/ledger/LedgerMaster.h +++ b/src/ripple/app/ledger/LedgerMaster.h @@ -183,11 +183,6 @@ class LedgerMaster : public Stoppable, public AbstractFetchPackContainer void setLedgerRangePresent(std::uint32_t minV, std::uint32_t maxV); - boost::optional - getLedgerHash( - std::uint32_t desiredSeq, - std::shared_ptr const& knownGoodLedger); - boost::optional getCloseTimeBySeq(LedgerIndex ledgerIndex); @@ -264,7 +259,7 @@ class LedgerMaster : public Stoppable, public 
AbstractFetchPackContainer gotFetchPack(bool progress, std::uint32_t seq); void - addFetchPack(uint256 const& hash, std::shared_ptr& data); + addFetchPack(uint256 const& hash, std::shared_ptr data); boost::optional getFetchPack(uint256 const& hash) override; diff --git a/src/ripple/app/ledger/impl/InboundLedger.cpp b/src/ripple/app/ledger/impl/InboundLedger.cpp index 3131e3304cf..e8a1de81ff8 100644 --- a/src/ripple/app/ledger/impl/InboundLedger.cpp +++ b/src/ripple/app/ledger/impl/InboundLedger.cpp @@ -898,33 +898,40 @@ InboundLedger::takeTxNode( return true; } - auto nodeIDit = nodeIDs.cbegin(); - auto nodeDatait = data.begin(); - TransactionStateSF filter( - mLedger->txMap().family().db(), app_.getLedgerMaster()); - - while (nodeIDit != nodeIDs.cend()) + try { - if (nodeIDit->isRoot()) - { - san += mLedger->txMap().addRootNode( - SHAMapHash{mLedger->info().txHash}, - makeSlice(*nodeDatait), - snfWIRE, - &filter); - if (!san.isGood()) - return false; - } - else + auto nodeIDit = nodeIDs.cbegin(); + auto nodeDatait = data.begin(); + TransactionStateSF filter( + mLedger->txMap().family().db(), app_.getLedgerMaster()); + + while (nodeIDit != nodeIDs.cend()) { - san += mLedger->txMap().addKnownNode( - *nodeIDit, makeSlice(*nodeDatait), &filter); - if (!san.isGood()) - return false; - } + if (nodeIDit->isRoot()) + { + san += mLedger->txMap().addRootNode( + SHAMapHash{mLedger->info().txHash}, + makeSlice(*nodeDatait), + &filter); + if (!san.isGood()) + return false; + } + else + { + san += mLedger->txMap().addKnownNode( + *nodeIDit, makeSlice(*nodeDatait), &filter); + if (!san.isGood()) + return false; + } - ++nodeIDit; - ++nodeDatait; + ++nodeIDit; + ++nodeDatait; + } + } + catch (std::exception const& ex) + { + JLOG(m_journal.error()) << "Peer sent bad tx node data: " << ex.what(); + return false; } if (!mLedger->txMap().isSynching()) @@ -972,39 +979,47 @@ InboundLedger::takeAsNode( return true; } - auto nodeIDit = nodeIDs.cbegin(); - auto nodeDatait = data.begin(); - 
AccountStateSF filter( - mLedger->stateMap().family().db(), app_.getLedgerMaster()); - - while (nodeIDit != nodeIDs.cend()) + try { - if (nodeIDit->isRoot()) + auto nodeIDit = nodeIDs.cbegin(); + auto nodeDatait = data.begin(); + AccountStateSF filter( + mLedger->stateMap().family().db(), app_.getLedgerMaster()); + + while (nodeIDit != nodeIDs.cend()) { - san += mLedger->stateMap().addRootNode( - SHAMapHash{mLedger->info().accountHash}, - makeSlice(*nodeDatait), - snfWIRE, - &filter); - if (!san.isGood()) + if (nodeIDit->isRoot()) { - JLOG(m_journal.warn()) << "Bad ledger header"; - return false; + san += mLedger->stateMap().addRootNode( + SHAMapHash{mLedger->info().accountHash}, + makeSlice(*nodeDatait), + &filter); + if (!san.isGood()) + { + JLOG(m_journal.warn()) << "Unable to add AS root node"; + return false; + } } - } - else - { - san += mLedger->stateMap().addKnownNode( - *nodeIDit, makeSlice(*nodeDatait), &filter); - if (!san.isGood()) + else { - JLOG(m_journal.warn()) << "Unable to add AS node"; - return false; + san += mLedger->stateMap().addKnownNode( + *nodeIDit, makeSlice(*nodeDatait), &filter); + if (!san.isGood()) + { + JLOG(m_journal.warn()) << "Unable to add AS node"; + return false; + } } - } - ++nodeIDit; - ++nodeDatait; + ++nodeIDit; + ++nodeDatait; + } + } + catch (std::exception const& ex) + { + JLOG(m_journal.error()) + << "Peer sent bad account state node data: " << ex.what(); + return false; } if (!mLedger->stateMap().isSynching()) @@ -1042,7 +1057,7 @@ InboundLedger::takeAsRootNode(Slice const& data, SHAMapAddNode& san) AccountStateSF filter( mLedger->stateMap().family().db(), app_.getLedgerMaster()); san += mLedger->stateMap().addRootNode( - SHAMapHash{mLedger->info().accountHash}, data, snfWIRE, &filter); + SHAMapHash{mLedger->info().accountHash}, data, &filter); return san.isGood(); } @@ -1067,7 +1082,7 @@ InboundLedger::takeTxRootNode(Slice const& data, SHAMapAddNode& san) TransactionStateSF filter( mLedger->txMap().family().db(), 
app_.getLedgerMaster()); san += mLedger->txMap().addRootNode( - SHAMapHash{mLedger->info().txHash}, data, snfWIRE, &filter); + SHAMapHash{mLedger->info().txHash}, data, &filter); return san.isGood(); } @@ -1156,28 +1171,38 @@ InboundLedger::processData( SHAMapAddNode san; - if (!mHaveHeader) + try { - if (takeHeader(packet.nodes(0).nodedata())) + if (!mHaveHeader) + { + if (!takeHeader(packet.nodes(0).nodedata())) + { + JLOG(m_journal.warn()) << "Got invalid header data"; + peer->charge(Resource::feeInvalidRequest); + return -1; + } + san.incUseful(); - else + } + + if (!mHaveState && (packet.nodes().size() > 1) && + !takeAsRootNode(makeSlice(packet.nodes(1).nodedata()), san)) { - JLOG(m_journal.warn()) << "Got invalid header data"; - peer->charge(Resource::feeInvalidRequest); - return -1; + JLOG(m_journal.warn()) << "Included AS root invalid"; } - } - if (!mHaveState && (packet.nodes().size() > 1) && - !takeAsRootNode(makeSlice(packet.nodes(1).nodedata()), san)) - { - JLOG(m_journal.warn()) << "Included AS root invalid"; + if (!mHaveTransactions && (packet.nodes().size() > 2) && + !takeTxRootNode(makeSlice(packet.nodes(2).nodedata()), san)) + { + JLOG(m_journal.warn()) << "Included TX root invalid"; + } } - - if (!mHaveTransactions && (packet.nodes().size() > 2) && - !takeTxRootNode(makeSlice(packet.nodes(2).nodedata()), san)) + catch (std::exception const& ex) { - JLOG(m_journal.warn()) << "Included TX root invalid"; + JLOG(m_journal.warn()) + << "Included AS/TX root invalid: " << ex.what(); + peer->charge(Resource::feeBadData); + return -1; } if (san.isUseful()) diff --git a/src/ripple/app/ledger/impl/InboundLedgers.cpp b/src/ripple/app/ledger/impl/InboundLedgers.cpp index 6eb80c36edf..ae81fde9155 100644 --- a/src/ripple/app/ledger/impl/InboundLedgers.cpp +++ b/src/ripple/app/ledger/impl/InboundLedgers.cpp @@ -248,10 +248,9 @@ class InboundLedgersImp : public InboundLedgers, public Stoppable return; auto id_string = node.nodeid(); - auto newNode = 
SHAMapAbstractNode::make( + auto newNode = SHAMapAbstractNode::makeFromWire( makeSlice(node.nodedata()), 0, - snfWIRE, SHAMapHash{uZero}, false, app_.journal("SHAMapNodeID"), @@ -263,10 +262,9 @@ class InboundLedgersImp : public InboundLedgers, public Stoppable s.erase(); newNode->addRaw(s, snfPREFIX); - auto blob = std::make_shared(s.begin(), s.end()); - app_.getLedgerMaster().addFetchPack( - newNode->getNodeHash().as_uint256(), blob); + newNode->getNodeHash().as_uint256(), + std::make_shared(s.begin(), s.end())); } } catch (std::exception const&) diff --git a/src/ripple/app/ledger/impl/LedgerMaster.cpp b/src/ripple/app/ledger/impl/LedgerMaster.cpp index 9a8f7dfe382..f189340b94f 100644 --- a/src/ripple/app/ledger/impl/LedgerMaster.cpp +++ b/src/ripple/app/ledger/impl/LedgerMaster.cpp @@ -1303,41 +1303,6 @@ LedgerMaster::tryAdvance() } } -// Return the hash of the valid ledger with a particular sequence, given a -// subsequent ledger known valid. -boost::optional -LedgerMaster::getLedgerHash( - std::uint32_t desiredSeq, - std::shared_ptr const& knownGoodLedger) -{ - assert(desiredSeq < knownGoodLedger->info().seq); - - auto hash = hashOfSeq(*knownGoodLedger, desiredSeq, m_journal); - - // Not directly in the given ledger - if (!hash) - { - std::uint32_t seq = (desiredSeq + 255) % 256; - assert(seq < desiredSeq); - - hash = hashOfSeq(*knownGoodLedger, seq, m_journal); - if (hash) - { - if (auto l = getLedgerByHash(*hash)) - { - hash = hashOfSeq(*l, desiredSeq, m_journal); - assert(hash); - } - } - else - { - assert(false); - } - } - - return hash; -} - void LedgerMaster::updatePaths(Job& job) { @@ -1948,7 +1913,7 @@ LedgerMaster::doAdvance(std::unique_lock& sl) } void -LedgerMaster::addFetchPack(uint256 const& hash, std::shared_ptr& data) +LedgerMaster::addFetchPack(uint256 const& hash, std::shared_ptr data) { fetch_packs_.canonicalize_replace_client(hash, data); } diff --git a/src/ripple/app/ledger/impl/TransactionAcquire.cpp 
b/src/ripple/app/ledger/impl/TransactionAcquire.cpp index 1608a38f14f..3abb88e7f41 100644 --- a/src/ripple/app/ledger/impl/TransactionAcquire.cpp +++ b/src/ripple/app/ledger/impl/TransactionAcquire.cpp @@ -212,7 +212,6 @@ TransactionAcquire::takeNodes( else if (!mMap->addRootNode( SHAMapHash{mHash}, makeSlice(*nodeDatait), - snfWIRE, nullptr) .isGood()) { diff --git a/src/ripple/overlay/impl/PeerImp.cpp b/src/ripple/overlay/impl/PeerImp.cpp index cfcc8ef0c5e..b3c00894d8f 100644 --- a/src/ripple/overlay/impl/PeerImp.cpp +++ b/src/ripple/overlay/impl/PeerImp.cpp @@ -1184,7 +1184,15 @@ PeerImp::onMessage(std::shared_ptr const& m) reply.set_lastlink(true); if (m->peerchain_size() > 0) + { + for (int i = 0; i < m->peerchain_size(); ++i) + { + if (!publicKeyType(makeSlice(m->peerchain(i).nodepubkey()))) + return badData("Invalid peer chain public key"); + } + *reply.mutable_peerchain() = m->peerchain(); + } send(std::make_shared(reply, protocol::mtPEER_SHARD_INFO)); @@ -2209,7 +2217,15 @@ PeerImp::onMessage(std::shared_ptr const& m) reply.set_type(packet.type()); if (packet.has_ledgerhash()) + { + if (!stringIsUint256Sized(packet.ledgerhash())) + { + fee_ = Resource::feeInvalidRequest; + return; + } + reply.set_ledgerhash(packet.ledgerhash()); + } // This is a very minimal implementation for (int i = 0; i < packet.objects_size(); ++i) @@ -2290,10 +2306,10 @@ PeerImp::onMessage(std::shared_ptr const& m) { uint256 const hash{obj.hash()}; - std::shared_ptr data(std::make_shared( - obj.data().begin(), obj.data().end())); - - app_.getLedgerMaster().addFetchPack(hash, data); + app_.getLedgerMaster().addFetchPack( + hash, + std::make_shared( + obj.data().begin(), obj.data().end())); } } } @@ -2587,16 +2603,15 @@ PeerImp::getLedger(std::shared_ptr const& m) { JLOG(p_journal_.debug()) << "GetLedger: Routing Tx set request"; - auto const v = getPeerWithTree(overlay_, txHash, this); - if (!v) + if (auto const v = getPeerWithTree(overlay_, txHash, this)) { - JLOG(p_journal_.info()) 
<< "GetLedger: Route TX set failed"; + packet.set_requestcookie(id()); + v->send(std::make_shared( + packet, protocol::mtGET_LEDGER)); return; } - packet.set_requestcookie(id()); - v->send( - std::make_shared(packet, protocol::mtGET_LEDGER)); + JLOG(p_journal_.info()) << "GetLedger: Route TX set failed"; return; } diff --git a/src/ripple/shamap/SHAMap.h b/src/ripple/shamap/SHAMap.h index 67b3c3c5605..86adbd05470 100644 --- a/src/ripple/shamap/SHAMap.h +++ b/src/ripple/shamap/SHAMap.h @@ -246,7 +246,6 @@ class SHAMap addRootNode( SHAMapHash const& hash, Slice const& rootNode, - SHANodeFormat format, SHAMapSyncFilter* filter); SHAMapAddNode addKnownNode( diff --git a/src/ripple/shamap/SHAMapTreeNode.h b/src/ripple/shamap/SHAMapTreeNode.h index 93cb91fe032..a08428cb8a3 100644 --- a/src/ripple/shamap/SHAMapTreeNode.h +++ b/src/ripple/shamap/SHAMapTreeNode.h @@ -176,14 +176,43 @@ class SHAMapAbstractNode invariants(bool is_root = false) const = 0; static std::shared_ptr - make( - Slice const& rawNode, + makeFromPrefix( + Slice rawNode, + std::uint32_t seq, + SHAMapHash const& hash, + bool hashValid, + beast::Journal j); + + static std::shared_ptr + makeFromWire( + Slice rawNode, std::uint32_t seq, - SHANodeFormat format, SHAMapHash const& hash, bool hashValid, beast::Journal j, - SHAMapNodeID const& id = SHAMapNodeID{}); + SHAMapNodeID const& id); + +private: + static std::shared_ptr + makeTransaction( + Slice data, + std::uint32_t seq, + SHAMapHash const& hash, + bool hashValid); + + static std::shared_ptr + makeAccountState( + Slice data, + std::uint32_t seq, + SHAMapHash const& hash, + bool hashValid); + + static std::shared_ptr + makeTransactionWithMeta( + Slice data, + std::uint32_t seq, + SHAMapHash const& hash, + bool hashValid); }; class SHAMapInnerNode : public SHAMapAbstractNode @@ -239,15 +268,19 @@ class SHAMapInnerNode : public SHAMapAbstractNode void invariants(bool is_root = false) const override; - friend std::shared_ptr - SHAMapAbstractNode::make( - 
Slice const& rawNode, + static std::shared_ptr + makeFullInner( + Slice data, std::uint32_t seq, - SHANodeFormat format, SHAMapHash const& hash, - bool hashValid, - beast::Journal j, - SHAMapNodeID const& id); + bool hashValid); + + static std::shared_ptr + makeCompressedInner( + Slice data, + std::uint32_t seq, + SHAMapHash const& hash, + bool hashValid); }; // SHAMapTreeNode represents a leaf, and may eventually be renamed to reflect diff --git a/src/ripple/shamap/impl/SHAMap.cpp b/src/ripple/shamap/impl/SHAMap.cpp index b1d390b1ccc..70329585a96 100644 --- a/src/ripple/shamap/impl/SHAMap.cpp +++ b/src/ripple/shamap/impl/SHAMap.cpp @@ -151,13 +151,8 @@ SHAMap::fetchNodeFromDB(SHAMapHash const& hash) const { try { - node = SHAMapAbstractNode::make( - makeSlice(obj->getData()), - 0, - snfPREFIX, - hash, - true, - f_.journal()); + node = SHAMapAbstractNode::makeFromPrefix( + makeSlice(obj->getData()), 0, hash, true, f_.journal()); if (node) canonicalize(hash, node); } @@ -181,20 +176,32 @@ SHAMap::fetchNodeFromDB(SHAMapHash const& hash) const std::shared_ptr SHAMap::checkFilter(SHAMapHash const& hash, SHAMapSyncFilter* filter) const { - std::shared_ptr node; if (auto nodeData = filter->getNode(hash)) { - node = SHAMapAbstractNode::make( - makeSlice(*nodeData), 0, snfPREFIX, hash, true, f_.journal()); - if (node) + try { - filter->gotNode( - true, hash, ledgerSeq_, std::move(*nodeData), node->getType()); - if (backed_) - canonicalize(hash, node); + auto node = SHAMapAbstractNode::makeFromPrefix( + makeSlice(*nodeData), 0, hash, true, f_.journal()); + if (node) + { + filter->gotNode( + true, + hash, + ledgerSeq_, + std::move(*nodeData), + node->getType()); + if (backed_) + canonicalize(hash, node); + } + return node; + } + catch (std::exception const& x) + { + JLOG(f_.journal().warn()) + << "Invalid node/data, hash=" << hash << ": " << x.what(); } } - return node; + return {}; } // Get a node without throwing @@ -374,13 +381,8 @@ SHAMap::descendAsync( if (!obj) return 
nullptr; - ptr = SHAMapAbstractNode::make( - makeSlice(obj->getData()), - 0, - snfPREFIX, - hash, - true, - f_.journal()); + ptr = SHAMapAbstractNode::makeFromPrefix( + makeSlice(obj->getData()), 0, hash, true, f_.journal()); if (ptr && backed_) canonicalize(hash, ptr); } diff --git a/src/ripple/shamap/impl/SHAMapSync.cpp b/src/ripple/shamap/impl/SHAMapSync.cpp index b62ca050b63..fbaced398a8 100644 --- a/src/ripple/shamap/impl/SHAMapSync.cpp +++ b/src/ripple/shamap/impl/SHAMapSync.cpp @@ -546,7 +546,6 @@ SHAMapAddNode SHAMap::addRootNode( SHAMapHash const& hash, Slice const& rootNode, - SHANodeFormat format, SHAMapSyncFilter* filter) { // we already have a root_ node @@ -558,8 +557,8 @@ SHAMap::addRootNode( } assert(seq_ >= 1); - auto node = SHAMapAbstractNode::make( - rootNode, 0, format, SHAMapHash{}, false, f_.journal()); + auto node = SHAMapAbstractNode::makeFromWire( + rootNode, 0, SHAMapHash{}, false, f_.journal(), {}); if (!node || !node->isValid() || node->getNodeHash() != hash) return SHAMapAddNode::invalid(); @@ -602,8 +601,8 @@ SHAMap::addKnownNode( } std::uint32_t generation = f_.fullbelow().getGeneration(); - auto newNode = SHAMapAbstractNode::make( - rawNode, 0, snfWIRE, SHAMapHash{}, false, f_.journal(), node); + auto newNode = SHAMapAbstractNode::makeFromWire( + rawNode, 0, SHAMapHash{}, false, f_.journal(), node); SHAMapNodeID iNodeID; auto iNode = root_.get(); diff --git a/src/ripple/shamap/impl/SHAMapTreeNode.cpp b/src/ripple/shamap/impl/SHAMapTreeNode.cpp index cbf5a3745e9..0aacfcd7c07 100644 --- a/src/ripple/shamap/impl/SHAMapTreeNode.cpp +++ b/src/ripple/shamap/impl/SHAMapTreeNode.cpp @@ -76,220 +76,229 @@ SHAMapTreeNode::SHAMapTreeNode( } std::shared_ptr -SHAMapAbstractNode::make( - Slice const& rawNode, +SHAMapAbstractNode::makeTransaction( + Slice data, + std::uint32_t seq, + SHAMapHash const& hash, + bool hashValid) +{ + // FIXME: using a Serializer results in a copy; avoid it? 
+ Serializer s(data.begin(), data.size()); + + auto item = std::make_shared( + sha512Half(HashPrefix::transactionID, data), s); + + if (hashValid) + return std::make_shared( + std::move(item), tnTRANSACTION_NM, seq, hash); + + return std::make_shared( + std::move(item), tnTRANSACTION_NM, seq); +} + +std::shared_ptr +SHAMapAbstractNode::makeTransactionWithMeta( + Slice data, + std::uint32_t seq, + SHAMapHash const& hash, + bool hashValid) +{ + Serializer s(data.data(), data.size()); + + uint256 tag; + + if (s.size() < tag.bytes) + Throw("Short TXN+MD node"); + + // FIXME: improve this interface so that the above check isn't needed + if (!s.getBitString(tag, s.size() - tag.bytes)) + Throw( + "Short TXN+MD node (" + std::to_string(s.size()) + ")"); + + s.chop(tag.bytes); + + auto item = std::make_shared(tag, s.peekData()); + + if (hashValid) + return std::make_shared( + std::move(item), tnTRANSACTION_MD, seq, hash); + + return std::make_shared( + std::move(item), tnTRANSACTION_MD, seq); +} + +std::shared_ptr +SHAMapAbstractNode::makeAccountState( + Slice data, + std::uint32_t seq, + SHAMapHash const& hash, + bool hashValid) +{ + Serializer s(data.data(), data.size()); + + uint256 tag; + + if (s.size() < tag.bytes) + Throw("short AS node"); + + // FIXME: improve this interface so that the above check isn't needed + if (!s.getBitString(tag, s.size() - tag.bytes)) + Throw( + "Short AS node (" + std::to_string(s.size()) + ")"); + + s.chop(tag.bytes); + + if (tag.isZero()) + Throw("Invalid AS node"); + + auto item = std::make_shared(tag, s.peekData()); + + if (hashValid) + return std::make_shared( + std::move(item), tnACCOUNT_STATE, seq, hash); + + return std::make_shared( + std::move(item), tnACCOUNT_STATE, seq); +} + +std::shared_ptr +SHAMapInnerNode::makeFullInner( + Slice data, + std::uint32_t seq, + SHAMapHash const& hash, + bool hashValid) +{ + if (data.size() != 512) + Throw("Invalid FI node"); + + auto ret = std::make_shared(seq); + + Serializer s(data.data(), 
data.size()); + + for (int i = 0; i < 16; ++i) + { + s.getBitString(ret->mHashes[i].as_uint256(), i * 32); + + if (ret->mHashes[i].isNonZero()) + ret->mIsBranch |= (1 << i); + } + + if (hashValid) + ret->mHash = hash; + else + ret->updateHash(); + return ret; +} + +std::shared_ptr +SHAMapInnerNode::makeCompressedInner( + Slice data, + std::uint32_t seq, + SHAMapHash const& hash, + bool hashValid) +{ + Serializer s(data.data(), data.size()); + + int len = s.getLength(); + + auto ret = std::make_shared(seq); + + for (int i = 0; i < (len / 33); ++i) + { + int pos; + + if (!s.get8(pos, 32 + (i * 33))) + Throw("short CI node"); + + if ((pos < 0) || (pos >= 16)) + Throw("invalid CI node"); + + s.getBitString(ret->mHashes[pos].as_uint256(), i * 33); + + if (ret->mHashes[pos].isNonZero()) + ret->mIsBranch |= (1 << pos); + } + + if (hashValid) + ret->mHash = hash; + else + ret->updateHash(); + return ret; +} + +std::shared_ptr +SHAMapAbstractNode::makeFromWire( + Slice rawNode, std::uint32_t seq, - SHANodeFormat format, SHAMapHash const& hash, bool hashValid, beast::Journal j, SHAMapNodeID const& id) { - if (format == snfWIRE) - { - if (rawNode.empty()) - return {}; + if (rawNode.empty()) + return {}; - Serializer s(rawNode.data(), rawNode.size() - 1); - int type = rawNode[rawNode.size() - 1]; - int len = s.getLength(); + auto const type = rawNode[rawNode.size() - 1]; - if ((type < 0) || (type > 6)) - return {}; - if (type == 0) - { - // transaction - auto item = std::make_shared( - sha512Half( - HashPrefix::transactionID, Slice(s.data(), s.size())), - s.peekData()); - if (hashValid) - return std::make_shared( - std::move(item), tnTRANSACTION_NM, seq, hash); - return std::make_shared( - std::move(item), tnTRANSACTION_NM, seq); - } - else if (type == 1) - { - // account state - if (len < (256 / 8)) - Throw("short AS node"); - - uint256 u; - s.getBitString(u, len - (256 / 8)); - s.chop(256 / 8); - - if (u.isZero()) - Throw("invalid AS node"); - - auto item = 
std::make_shared(u, s.peekData()); - if (hashValid) - return std::make_shared( - std::move(item), tnACCOUNT_STATE, seq, hash); - return std::make_shared( - std::move(item), tnACCOUNT_STATE, seq); - } - else if (type == 2) - { - // full inner - if (len != 512) - Throw("invalid FI node"); + rawNode.remove_suffix(1); - auto ret = std::make_shared(seq); - for (int i = 0; i < 16; ++i) - { - s.getBitString(ret->mHashes[i].as_uint256(), i * 32); + if (type == 0) + return makeTransaction(rawNode, seq, hash, hashValid); - if (ret->mHashes[i].isNonZero()) - ret->mIsBranch |= (1 << i); - } - if (hashValid) - ret->mHash = hash; - else - ret->updateHash(); - return ret; - } - else if (type == 3) - { - auto ret = std::make_shared(seq); - // compressed inner - for (int i = 0; i < (len / 33); ++i) - { - int pos; - if (!s.get8(pos, 32 + (i * 33))) - Throw("short CI node"); - if ((pos < 0) || (pos >= 16)) - Throw("invalid CI node"); - s.getBitString(ret->mHashes[pos].as_uint256(), i * 33); - if (ret->mHashes[pos].isNonZero()) - ret->mIsBranch |= (1 << pos); - } - if (hashValid) - ret->mHash = hash; - else - ret->updateHash(); - return ret; - } - else if (type == 4) - { - // transaction with metadata - if (len < (256 / 8)) - Throw("short TM node"); - - uint256 u; - s.getBitString(u, len - (256 / 8)); - s.chop(256 / 8); - - if (u.isZero()) - Throw("invalid TM node"); - - auto item = std::make_shared(u, s.peekData()); - if (hashValid) - return std::make_shared( - std::move(item), tnTRANSACTION_MD, seq, hash); - return std::make_shared( - std::move(item), tnTRANSACTION_MD, seq); - } - } + if (type == 1) + return makeAccountState(rawNode, seq, hash, hashValid); - else if (format == snfPREFIX) - { - if (rawNode.size() < 4) - { - JLOG(j.info()) << "size < 4"; - Throw("invalid P node"); - } + if (type == 2) + return SHAMapInnerNode::makeFullInner(rawNode, seq, hash, hashValid); - std::uint32_t prefix = rawNode[0]; - prefix <<= 8; - prefix |= rawNode[1]; - prefix <<= 8; - prefix |= 
rawNode[2]; - prefix <<= 8; - prefix |= rawNode[3]; - Serializer s(rawNode.data() + 4, rawNode.size() - 4); + if (type == 3) + return SHAMapInnerNode::makeCompressedInner( + rawNode, seq, hash, hashValid); - if (safe_cast(prefix) == HashPrefix::transactionID) - { - auto item = std::make_shared( - sha512Half(rawNode), s.peekData()); - if (hashValid) - return std::make_shared( - std::move(item), tnTRANSACTION_NM, seq, hash); - return std::make_shared( - std::move(item), tnTRANSACTION_NM, seq); - } - else if (safe_cast(prefix) == HashPrefix::leafNode) - { - if (s.getLength() < 32) - Throw("short PLN node"); + if (type == 4) + return makeTransactionWithMeta(rawNode, seq, hash, hashValid); - uint256 u; - s.getBitString(u, s.getLength() - 32); - s.chop(32); + Throw( + "wire: Unknown type (" + std::to_string(type) + ")"); +} - if (u.isZero()) - { - JLOG(j.info()) << "invalid PLN node"; - Throw("invalid PLN node"); - } +std::shared_ptr +SHAMapAbstractNode::makeFromPrefix( + Slice rawNode, + std::uint32_t seq, + SHAMapHash const& hash, + bool hashValid, + beast::Journal j) +{ + if (rawNode.size() < 4) + Throw("prefix: short node"); - auto item = std::make_shared(u, s.peekData()); - if (hashValid) - return std::make_shared( - std::move(item), tnACCOUNT_STATE, seq, hash); - return std::make_shared( - std::move(item), tnACCOUNT_STATE, seq); - } - else if (safe_cast(prefix) == HashPrefix::innerNode) - { - auto len = s.getLength(); + // FIXME: Use SerialIter::get32? 
+ // Extract the prefix + auto const type = safe_cast( + (safe_cast(rawNode[0]) << 24) + + (safe_cast(rawNode[1]) << 16) + + (safe_cast(rawNode[2]) << 8) + + (safe_cast(rawNode[3]))); - if (len != 512) - Throw("invalid PIN node"); + rawNode.remove_prefix(4); - auto ret = std::make_shared(seq); + if (type == HashPrefix::transactionID) + return makeTransaction(rawNode, seq, hash, hashValid); - for (int i = 0; i < 16; ++i) - { - s.getBitString(ret->mHashes[i].as_uint256(), i * 32); + if (type == HashPrefix::leafNode) + return makeAccountState(rawNode, seq, hash, hashValid); - if (ret->mHashes[i].isNonZero()) - ret->mIsBranch |= (1 << i); - } + if (type == HashPrefix::innerNode) + return SHAMapInnerNode::makeFullInner(rawNode, seq, hash, hashValid); - if (hashValid) - ret->mHash = hash; - else - ret->updateHash(); - return ret; - } - else if (safe_cast(prefix) == HashPrefix::txNode) - { - // transaction with metadata - if (s.getLength() < 32) - Throw("short TXN node"); - - uint256 txID; - s.getBitString(txID, s.getLength() - 32); - s.chop(32); - auto item = std::make_shared(txID, s.peekData()); - if (hashValid) - return std::make_shared( - std::move(item), tnTRANSACTION_MD, seq, hash); - return std::make_shared( - std::move(item), tnTRANSACTION_MD, seq); - } - else - { - JLOG(j.info()) << "Unknown node prefix " << std::hex << prefix - << std::dec; - Throw("invalid node prefix"); - } - } - assert(false); - Throw("Unknown format"); - return {}; // Silence compiler warning. 
+ if (type == HashPrefix::txNode) + return makeTransactionWithMeta(rawNode, seq, hash, hashValid); + + Throw( + "prefix: unknown type (" + + std::to_string(safe_cast>(type)) + + ")"); } bool diff --git a/src/test/shamap/SHAMapSync_test.cpp b/src/test/shamap/SHAMapSync_test.cpp index 78f295d2fc5..7ebdc99a12a 100644 --- a/src/test/shamap/SHAMapSync_test.cpp +++ b/src/test/shamap/SHAMapSync_test.cpp @@ -140,7 +140,6 @@ class SHAMapSync_test : public beast::unit_test::suite .addRootNode( source.getHash(), makeSlice(*gotNodes_a.begin()), - snfWIRE, nullptr) .isGood()); } From 230a2f8ffbc8776014c16515613a5582fe128dc3 Mon Sep 17 00:00:00 2001 From: Mark Travis Date: Sat, 28 Mar 2020 17:39:48 -0700 Subject: [PATCH 05/19] Make server_info report consistent with internal evaluations for validated ledger age. --- src/ripple/app/ledger/LedgerMaster.h | 4 ++++ src/ripple/app/ledger/impl/LedgerMaster.cpp | 2 +- src/ripple/app/misc/NetworkOPs.cpp | 26 ++++++++++++++------- 3 files changed, 22 insertions(+), 10 deletions(-) diff --git a/src/ripple/app/ledger/LedgerMaster.h b/src/ripple/app/ledger/LedgerMaster.h index 5fa835a9ba3..67a49712ae3 100644 --- a/src/ripple/app/ledger/LedgerMaster.h +++ b/src/ripple/app/ledger/LedgerMaster.h @@ -54,6 +54,10 @@ class Transaction; class LedgerMaster : public Stoppable, public AbstractFetchPackContainer { public: + // Age for last validated ledger if the process has yet to validate. 
+ static constexpr std::chrono::seconds NO_VALIDATED_LEDGER_AGE = + std::chrono::hours{24 * 14}; + explicit LedgerMaster( Application& app, Stopwatch& stopwatch, diff --git a/src/ripple/app/ledger/impl/LedgerMaster.cpp b/src/ripple/app/ledger/impl/LedgerMaster.cpp index f189340b94f..da23bf12ddc 100644 --- a/src/ripple/app/ledger/impl/LedgerMaster.cpp +++ b/src/ripple/app/ledger/impl/LedgerMaster.cpp @@ -269,7 +269,7 @@ LedgerMaster::getValidatedLedgerAge() if (valClose == 0s) { JLOG(m_journal.debug()) << "No validated ledger"; - return weeks{2}; + return NO_VALIDATED_LEDGER_AGE; } std::chrono::seconds ret = app_.timeKeeper().closeTime().time_since_epoch(); diff --git a/src/ripple/app/misc/NetworkOPs.cpp b/src/ripple/app/misc/NetworkOPs.cpp index 66d01b791e0..5c3aadc0ec0 100644 --- a/src/ripple/app/misc/NetworkOPs.cpp +++ b/src/ripple/app/misc/NetworkOPs.cpp @@ -2757,16 +2757,24 @@ NetworkOPsImp::getServerInfo(bool human, bool admin, bool counters) if (std::abs(closeOffset.count()) >= 60) l[jss::close_time_offset] = closeOffset.count(); - auto lCloseTime = lpClosed->info().closeTime; - auto closeTime = app_.timeKeeper().closeTime(); - if (lCloseTime <= closeTime) + constexpr std::chrono::seconds HIGH_AGE_THRESHOLD{1000000}; + if (m_ledgerMaster.haveValidated()) { - using namespace std::chrono_literals; - auto age = closeTime - lCloseTime; - if (age < 1000000s) - l[jss::age] = Json::UInt(age.count()); - else - l[jss::age] = 0; + auto const age = m_ledgerMaster.getValidatedLedgerAge(); + l[jss::age] = + Json::UInt(age < HIGH_AGE_THRESHOLD ? age.count() : 0); + } + else + { + auto lCloseTime = lpClosed->info().closeTime; + auto closeTime = app_.timeKeeper().closeTime(); + if (lCloseTime <= closeTime) + { + using namespace std::chrono_literals; + auto age = closeTime - lCloseTime; + l[jss::age] = + Json::UInt(age < HIGH_AGE_THRESHOLD ? 
age.count() : 0); + } } } From 1555a58faa623cac9d8098b7ddcaaa88cb908eff Mon Sep 17 00:00:00 2001 From: Edward Hennis Date: Mon, 11 May 2020 16:48:34 -0400 Subject: [PATCH 06/19] Improve online_delete configuration and DB tuning: * Document delete_batch, back_off_milliseconds, age_threshold_seconds. * Convert those time values to chrono types. * Fix bug that ignored age_threshold_seconds. * Add a "recovery buffer" to the config that gives the node a chance to recover before aborting online delete. * Add begin/end log messages around the SQL queries. * Add a new configuration section: [sqlite] to allow tuning the sqlite database operations. Ignored on full/large history servers. * Update documentation of [node_db] and [sqlite] in the rippled-example.cfg file. * Resolves #3321 --- cfg/rippled-example.cfg | 181 +++++++++-- src/ripple/app/ledger/Ledger.cpp | 4 +- src/ripple/app/main/Application.cpp | 5 +- src/ripple/app/main/DBInit.h | 43 +-- src/ripple/app/main/Main.cpp | 32 +- src/ripple/app/misc/NetworkOPs.cpp | 6 +- src/ripple/app/misc/SHAMapStoreImp.cpp | 119 ++++--- src/ripple/app/misc/SHAMapStoreImp.h | 24 +- src/ripple/core/DatabaseCon.h | 51 ++- src/ripple/core/impl/Config.cpp | 5 +- src/ripple/core/impl/DatabaseCon.cpp | 129 +++++++- src/ripple/net/impl/DatabaseBody.ipp | 2 + src/ripple/nodestore/impl/Shard.cpp | 13 +- src/test/app/LedgerHistory_test.cpp | 59 +--- src/test/app/Manifest_test.cpp | 1 + src/test/jtx/CaptureLogs.h | 80 +++++ src/test/jtx/CheckMessageLogs.h | 75 +++++ src/test/jtx/Env.h | 18 +- src/test/jtx/impl/Env.cpp | 18 +- src/test/nodestore/Database_test.cpp | 409 +++++++++++++++++++++++++ src/test/server/Server_test.cpp | 67 +--- 21 files changed, 1078 insertions(+), 263 deletions(-) create mode 100644 src/test/jtx/CaptureLogs.h create mode 100644 src/test/jtx/CheckMessageLogs.h diff --git a/cfg/rippled-example.cfg b/cfg/rippled-example.cfg index c4917d65044..b2b3c03f346 100644 --- a/cfg/rippled-example.cfg +++ b/cfg/rippled-example.cfg @@ 
-36,7 +36,7 @@ # For more information on where the rippled server instance searches for the # file, visit: # -# https://developers.ripple.com/commandline-usage.html#generic-options +# https://xrpl.org/commandline-usage.html#generic-options # # This file should be named rippled.cfg. This file is UTF-8 with DOS, UNIX, # or Mac style end of lines. Blank lines and lines beginning with '#' are @@ -869,18 +869,65 @@ # # These keys are possible for any type of backend: # +# earliest_seq The default is 32570 to match the XRP ledger +# network's earliest allowed sequence. Alternate +# networks may set this value. Minimum value of 1. +# If a [shard_db] section is defined, and this +# value is present either [node_db] or [shard_db], +# it must be defined with the same value in both +# sections. +# # online_delete Minimum value of 256. Enable automatic purging # of older ledger information. Maintain at least this # number of ledger records online. Must be greater # than or equal to ledger_history. # -# advisory_delete 0 for disabled, 1 for enabled. If set, then -# require administrative RPC call "can_delete" -# to enable online deletion of ledger records. +# These keys modify the behavior of online_delete, and thus are only +# relevant if online_delete is defined and non-zero: # -# earliest_seq The default is 32570 to match the XRP ledger -# network's earliest allowed sequence. Alternate -# networks may set this value. Minimum value of 1. +# advisory_delete 0 for disabled, 1 for enabled. If set, the +# administrative RPC call "can_delete" is required +# to enable online deletion of ledger records. +# Online deletion does not run automatically if +# non-zero and the last deletion was on a ledger +# greater than the current "can_delete" setting. +# Default is 0. +# +# delete_batch When automatically purging, SQLite database +# records are deleted in batches. This value +# controls the maximum size of each batch. 
Larger +# batches keep the databases locked for more time, +# which may cause other functions to fall behind, +# and thus cause the node to lose sync. +# Default is 100. +# +# back_off_milliseconds +# Number of milliseconds to wait between +# online_delete batches to allow other functions +# to catch up. +# Default is 100. +# +# age_threshold_seconds +# The online delete process will only run if the +# latest validated ledger is younger than this +# number of seconds. +# Default is 60. +# +# recovery_wait_seconds +# The online delete process checks periodically +# that rippled is still in sync with the network, +# and that the validated ledger is less than +# 'age_threshold_seconds' old. By default, if it +# is not the online delete process aborts and +# tries again later. If 'recovery_wait_seconds' +# is set and rippled is out of sync, but likely to +# recover quickly, then online delete will wait +# this number of seconds for rippled to get back +# into sync before it aborts. +# Set this value if the node is otherwise staying +# in sync, or recovering quickly, but the online +# delete process is unable to finish. +# Default is unset. # # Notes: # The 'node_db' entry configures the primary, persistent storage. @@ -892,6 +939,12 @@ # [import_db] Settings for performing a one-time import (optional) # [database_path] Path to the book-keeping databases. # +# The server creates and maintains 4 to 5 bookkeeping SQLite databases in +# the 'database_path' location. If you omit this configuration setting, +# the server creates a directory called "db" located in the same place as +# your rippled.cfg file. +# Partial pathnames are relative to the location of the rippled executable. 
+# # [shard_db] Settings for the Shard Database (optional) # # Format (without spaces): @@ -907,12 +960,84 @@ # # max_size_gb Maximum disk space the database will utilize (in gigabytes) # +# [sqlite] Tuning settings for the SQLite databases (optional) +# +# Format (without spaces): +# One or more lines of case-insensitive key / value pairs: +# '=' +# ... +# +# Example 1: +# sync_level=low +# +# Example 2: +# journal_mode=off +# synchronous=off +# +# WARNING: These settings can have significant effects on data integrity, +# particularly in systemic failure scenarios. It is strongly recommended +# that they be left at their defaults unless the server is having +# performance issues during normal operation or during automatic purging +# (online_delete) operations. A warning will be logged on startup if +# 'ledger_history' is configured to store more than 10,000,000 ledgers and +# any of these settings are less safe than the default. This is due to the +# inordinate amount of time and bandwidth it will take to safely rebuild a +# corrupted database of that size from other peers. +# +# Optional keys: # -# There are 4 bookkeeping SQLite database that the server creates and -# maintains. If you omit this configuration setting, it will default to -# creating a directory called "db" located in the same place as your -# rippled.cfg file. Partial pathnames will be considered relative to -# the location of the rippled executable. +# safety_level Valid values: high, low +# The default is "high", which tunes the SQLite +# databases in the most reliable mode, and is +# equivalent to: +# journal_mode=wal +# synchronous=normal +# temp_store=file +# "low" is equivalent to: +# journal_mode=memory +# synchronous=off +# temp_store=memory +# These "low" settings trade speed and reduced I/O +# for a higher risk of data loss. See the +# individual settings below for more information. 
+# This setting may not be combined with any of the +# other tuning settings: "journal_mode", +# "synchronous", or "temp_store". +# +# journal_mode Valid values: delete, truncate, persist, memory, wal, off +# The default is "wal", which uses a write-ahead +# log to implement database transactions. +# Alternately, "memory" saves disk I/O, but if +# rippled crashes during a transaction, the +# database is likely to be corrupted. +# See https://www.sqlite.org/pragma.html#pragma_journal_mode +# for more details about the available options. +# This setting may not be combined with the +# "safety_level" setting. +# +# synchronous Valid values: off, normal, full, extra +# The default is "normal", which works well with +# the "wal" journal mode. Alternatively, "off" +# allows rippled to continue as soon as data is +# passed to the OS, which can significantly +# increase speed, but risks data corruption if +# the host computer crashes before writing that +# data to disk. +# See https://www.sqlite.org/pragma.html#pragma_synchronous +# for more details about the available options. +# This setting may not be combined with the +# "safety_level" setting. +# +# temp_store Valid values: default, file, memory +# The default is "file", which will use files +# for temporary database tables and indices. +# Alternatively, "memory" may save I/O, but +# rippled does not currently use many, if any, +# of these temporary objects. +# See https://www.sqlite.org/pragma.html#pragma_temp_store +# for more details about the available options. +# This setting may not be combined with the +# "safety_level" setting. # # # @@ -1212,24 +1337,27 @@ medium # This is primary persistent datastore for rippled. This includes transaction # metadata, account states, and ledger headers. Helpful information can be -# found here: https://ripple.com/wiki/NodeBackEnd -# delete old ledgers while maintaining at least 2000. Do not require an -# external administrative command to initiate deletion. 
+# found at https://xrpl.org/capacity-planning.html#node-db-type +# type=NuDB is recommended for non-validators with fast SSDs. Validators or +# slow / spinning disks should use RocksDB. Caution: Spinning disks are +# not recommended. They do not perform well enough to consistently remain +# synced to the network. +# online_delete=512 is recommended to delete old ledgers while maintaining at +# least 512. +# advisory_delete=0 allows the online delete process to run automatically +# when the node has approximately two times the "online_delete" value of +# ledgers. No external administrative command is required to initiate +# deletion. [node_db] -type=RocksDB -path=/var/lib/rippled/db/rocksdb -open_files=2000 -filter_bits=12 -cache_mb=256 -file_size_mb=8 -file_size_mult=2 -online_delete=2000 +type=NuDB +path=/var/lib/rippled/db/nudb +online_delete=512 advisory_delete=0 # This is the persistent datastore for shards. It is important for the health # of the ripple network that rippled operators shard as much as practical. -# NuDB requires SSD storage. Helpful information can be found here -# https://ripple.com/build/history-sharding +# NuDB requires SSD storage. 
Helpful information can be found at +# https://xrpl.org/history-sharding.html #[shard_db] #path=/var/lib/rippled/db/shards/nudb #max_size_gb=500 @@ -1248,7 +1376,8 @@ time.apple.com time.nist.gov pool.ntp.org -# To use the XRP test network (see https://ripple.com/build/xrp-test-net/), +# To use the XRP test network +# (see https://xrpl.org/connect-your-rippled-to-the-xrp-test-net.html), # use the following [ips] section: # [ips] # r.altnet.rippletest.net 51235 diff --git a/src/ripple/app/ledger/Ledger.cpp b/src/ripple/app/ledger/Ledger.cpp index 3a6c43376c5..b583b540633 100644 --- a/src/ripple/app/ledger/Ledger.cpp +++ b/src/ripple/app/ledger/Ledger.cpp @@ -228,14 +228,14 @@ Ledger::Ledger( !txMap_->fetchRoot(SHAMapHash{info_.txHash}, nullptr)) { loaded = false; - JLOG(j.warn()) << "Don't have TX root for ledger"; + JLOG(j.warn()) << "Don't have transaction root for ledger" << info_.seq; } if (info_.accountHash.isNonZero() && !stateMap_->fetchRoot(SHAMapHash{info_.accountHash}, nullptr)) { loaded = false; - JLOG(j.warn()) << "Don't have AS root for ledger"; + JLOG(j.warn()) << "Don't have state data root for ledger" << info_.seq; } txMap_->setImmutable(); diff --git a/src/ripple/app/main/Application.cpp b/src/ripple/app/main/Application.cpp index 5d4b7c88573..15713a6dc3d 100644 --- a/src/ripple/app/main/Application.cpp +++ b/src/ripple/app/main/Application.cpp @@ -1019,7 +1019,7 @@ class ApplicationImp : public Application, public RootStoppable, public BasicApp try { - auto const setup = setup_DatabaseCon(*config_); + auto setup = setup_DatabaseCon(*config_, m_journal); // transaction database mTxnDB = std::make_unique( @@ -1069,6 +1069,7 @@ class ApplicationImp : public Application, public RootStoppable, public BasicApp mLedgerDB->setupCheckpointing(m_jobQueue.get(), logs()); // wallet database + setup.useGlobalPragma = false; mWalletDB = std::make_unique( setup, WalletDBName, @@ -1360,7 +1361,7 @@ class ApplicationImp : public Application, public RootStoppable, 
public BasicApp JLOG(m_journal.fatal()) << "Free SQLite space for transaction db is less than " "512MB. To fix this, rippled must be executed with the " - "vacuum parameter before restarting. " + "\"--vacuum\" parameter before restarting. " "Note that this activity can take multiple days, " "depending on database size."; signalStop(); diff --git a/src/ripple/app/main/DBInit.h b/src/ripple/app/main/DBInit.h index 2aa183d4e70..0a561be8834 100644 --- a/src/ripple/app/main/DBInit.h +++ b/src/ripple/app/main/DBInit.h @@ -26,13 +26,23 @@ namespace ripple { //////////////////////////////////////////////////////////////////////////////// +// These pragmas are built at startup and applied to all database +// connections, unless otherwise noted. +inline constexpr char const* CommonDBPragmaJournal{"PRAGMA journal_mode=%s;"}; +inline constexpr char const* CommonDBPragmaSync{"PRAGMA synchronous=%s;"}; +inline constexpr char const* CommonDBPragmaTemp{"PRAGMA temp_store=%s;"}; +// A warning will be logged if any lower-safety sqlite tuning settings +// are used and at least this much ledger history is configured. This +// includes full history nodes. This is because such a large amount of +// data will be more difficult to recover if a rare failure occurs, +// which are more likely with some of the other available tuning settings. 
+inline constexpr std::uint32_t SQLITE_TUNING_CUTOFF = 10'000'000; + // Ledger database holds ledgers and ledger confirmations inline constexpr auto LgrDBName{"ledger.db"}; -inline constexpr std::array LgrDBPragma{ - {"PRAGMA synchronous=NORMAL;", - "PRAGMA journal_mode=WAL;", - "PRAGMA journal_size_limit=1582080;"}}; +inline constexpr std::array LgrDBPragma{ + {"PRAGMA journal_size_limit=1582080;"}}; inline constexpr std::array LgrDBInit{ {"BEGIN TRANSACTION;", @@ -61,22 +71,13 @@ inline constexpr std::array LgrDBInit{ // Transaction database holds transactions and public keys inline constexpr auto TxDBName{"transaction.db"}; -inline constexpr -#if (ULONG_MAX > UINT_MAX) && !defined(NO_SQLITE_MMAP) - std::array - TxDBPragma +inline constexpr std::array TxDBPragma { - { -#else - std::array TxDBPragma {{ -#endif - "PRAGMA page_size=4096;", "PRAGMA synchronous=NORMAL;", - "PRAGMA journal_mode=WAL;", "PRAGMA journal_size_limit=1582080;", - "PRAGMA max_page_count=2147483646;", + "PRAGMA page_size=4096;", "PRAGMA journal_size_limit=1582080;", + "PRAGMA max_page_count=2147483646;", #if (ULONG_MAX > UINT_MAX) && !defined(NO_SQLITE_MMAP) - "PRAGMA mmap_size=17179869184;" + "PRAGMA mmap_size=17179869184;" #endif - } }; inline constexpr std::array TxDBInit{ @@ -115,10 +116,8 @@ inline constexpr std::array TxDBInit{ // Temporary database used with an incomplete shard that is being acquired inline constexpr auto AcquireShardDBName{"acquire.db"}; -inline constexpr std::array AcquireShardDBPragma{ - {"PRAGMA synchronous=NORMAL;", - "PRAGMA journal_mode=WAL;", - "PRAGMA journal_size_limit=1582080;"}}; +inline constexpr std::array AcquireShardDBPragma{ + {"PRAGMA journal_size_limit=1582080;"}}; inline constexpr std::array AcquireShardDBInit{ {"CREATE TABLE IF NOT EXISTS Shard ( \ @@ -130,6 +129,7 @@ inline constexpr std::array AcquireShardDBInit{ //////////////////////////////////////////////////////////////////////////////// // Pragma for Ledger and Transaction databases with 
complete shards +// These override the CommonDBPragma values defined above. inline constexpr std::array CompleteShardDBPragma{ {"PRAGMA synchronous=OFF;", "PRAGMA journal_mode=OFF;"}}; @@ -172,6 +172,7 @@ inline constexpr std::array WalletDBInit{ static constexpr auto stateDBName{"state.db"}; +// These override the CommonDBPragma values defined above. static constexpr std::array DownloaderDBPragma{ {"PRAGMA synchronous=FULL;", "PRAGMA journal_mode=DELETE;"}}; diff --git a/src/ripple/app/main/Main.cpp b/src/ripple/app/main/Main.cpp index ccc3f2c773e..e8ed917587f 100644 --- a/src/ripple/app/main/Main.cpp +++ b/src/ripple/app/main/Main.cpp @@ -354,10 +354,7 @@ run(int argc, char** argv) "nodetoshard", "Import node store into shards")( "replay", "Replay a ledger close.")( "start", "Start from a fresh Ledger.")( - "vacuum", - po::value(), - "VACUUM the transaction db. Mandatory string argument specifies " - "temporary directory path.")( + "vacuum", "VACUUM the transaction db.")( "valid", "Consider the initial ledger a valid network ledger.")( "validateShards", shardsText.c_str()); @@ -520,24 +517,22 @@ run(int argc, char** argv) } using namespace boost::filesystem; - DatabaseCon::Setup dbSetup = setup_DatabaseCon(*config); + DatabaseCon::Setup const dbSetup = setup_DatabaseCon(*config); path dbPath = dbSetup.dataDir / TxDBName; - path tmpPath = vm["vacuum"].as(); try { uintmax_t const dbSize = file_size(dbPath); assert(dbSize != static_cast(-1)); - if (space(tmpPath).available < dbSize) + if (auto available = space(dbPath.parent_path()).available; + available < dbSize) { - std::cerr << "A valid directory for vacuuming must be " - "specified on a filesystem with at least " - "as much free space as the size of " + std::cerr << "The database filesystem must have at least as " + "much free space as the size of " << dbPath.string() << ", which is " << dbSize - << " bytes. 
The filesystem for " << tmpPath.string() - << " only has " << space(tmpPath).available - << " bytes.\n"; + << " bytes. Only " << available + << " bytes are available.\n"; return -1; } @@ -546,16 +541,19 @@ run(int argc, char** argv) auto& session = txnDB->getSession(); std::uint32_t pageSize; + // Only the most trivial databases will fit in memory on typical + // (recommended) software. Force temp files to be written to disk + // regardless of the config settings. + session << boost::format(CommonDBPragmaTemp) % "file"; session << "PRAGMA page_size;", soci::into(pageSize); std::cout << "VACUUM beginning. page_size: " << pageSize << std::endl; - session << "PRAGMA journal_mode=OFF;"; - session << "PRAGMA temp_store_directory=\"" << tmpPath.string() - << "\";"; session << "VACUUM;"; - session << "PRAGMA journal_mode=WAL;"; + assert(dbSetup.globalPragma); + for (auto const& p : *dbSetup.globalPragma) + session << p; session << "PRAGMA page_size;", soci::into(pageSize); std::cout << "VACUUM finished. page_size: " << pageSize diff --git a/src/ripple/app/misc/NetworkOPs.cpp b/src/ripple/app/misc/NetworkOPs.cpp index 5c3aadc0ec0..0dc0771d244 100644 --- a/src/ripple/app/misc/NetworkOPs.cpp +++ b/src/ripple/app/misc/NetworkOPs.cpp @@ -2757,12 +2757,12 @@ NetworkOPsImp::getServerInfo(bool human, bool admin, bool counters) if (std::abs(closeOffset.count()) >= 60) l[jss::close_time_offset] = closeOffset.count(); - constexpr std::chrono::seconds HIGH_AGE_THRESHOLD{1000000}; + constexpr std::chrono::seconds highAgeThreshold{1000000}; if (m_ledgerMaster.haveValidated()) { auto const age = m_ledgerMaster.getValidatedLedgerAge(); l[jss::age] = - Json::UInt(age < HIGH_AGE_THRESHOLD ? age.count() : 0); + Json::UInt(age < highAgeThreshold ? age.count() : 0); } else { @@ -2773,7 +2773,7 @@ NetworkOPsImp::getServerInfo(bool human, bool admin, bool counters) using namespace std::chrono_literals; auto age = closeTime - lCloseTime; l[jss::age] = - Json::UInt(age < HIGH_AGE_THRESHOLD ? 
age.count() : 0); + Json::UInt(age < highAgeThreshold ? age.count() : 0); } } } diff --git a/src/ripple/app/misc/SHAMapStoreImp.cpp b/src/ripple/app/misc/SHAMapStoreImp.cpp index 94deae5d276..590decf1924 100644 --- a/src/ripple/app/misc/SHAMapStoreImp.cpp +++ b/src/ripple/app/misc/SHAMapStoreImp.cpp @@ -180,13 +180,24 @@ SHAMapStoreImp::SHAMapStoreImp( section.set("filter_bits", "10"); } - get_if_exists(section, "delete_batch", deleteBatch_); - get_if_exists(section, "backOff", backOff_); - get_if_exists(section, "age_threshold", ageThreshold_); get_if_exists(section, "online_delete", deleteInterval_); if (deleteInterval_) { + // Configuration that affects the behavior of online delete + get_if_exists(section, "delete_batch", deleteBatch_); + std::uint32_t temp; + if (get_if_exists(section, "back_off_milliseconds", temp) || + // Included for backward compaibility with an undocumented setting + get_if_exists(section, "backOff", temp)) + { + backOff_ = std::chrono::milliseconds{temp}; + } + if (get_if_exists(section, "age_threshold_seconds", temp)) + ageThreshold_ = std::chrono::seconds{temp}; + if (get_if_exists(section, "recovery_wait_seconds", temp)) + recoveryWaitTime_.emplace(std::chrono::seconds{temp}); + get_if_exists(section, "advisory_delete", advisoryDelete_); auto const minInterval = config.standalone() @@ -348,23 +359,14 @@ SHAMapStoreImp::run() // will delete up to (not including) lastRotated if (validatedSeq >= lastRotated + deleteInterval_ && - canDelete_ >= lastRotated - 1) + canDelete_ >= lastRotated - 1 && !health()) { JLOG(journal_.warn()) << "rotating validatedSeq " << validatedSeq << " lastRotated " << lastRotated << " deleteInterval " << deleteInterval_ - << " canDelete_ " << canDelete_; - - switch (health()) - { - case Health::stopping: - stopped(); - return; - case Health::unhealthy: - continue; - case Health::ok: - default:; - } + << " canDelete_ " << canDelete_ << " state " + << app_.getOPs().strOperatingMode(false) << " age " + << 
ledgerMaster_->getValidatedLedgerAge().count() << 's'; clearPrior(lastRotated); switch (health()) @@ -378,14 +380,13 @@ SHAMapStoreImp::run() default:; } + JLOG(journal_.debug()) << "copying ledger " << validatedSeq; std::uint64_t nodeCount = 0; validatedLedger->stateMap().snapShot(false)->visitNodes(std::bind( &SHAMapStoreImp::copyNode, this, std::ref(nodeCount), std::placeholders::_1)); - JLOG(journal_.debug()) << "copied ledger " << validatedSeq - << " nodecount " << nodeCount; switch (health()) { case Health::stopping: @@ -396,9 +397,12 @@ SHAMapStoreImp::run() case Health::ok: default:; } + // Only log if we completed without a "health" abort + JLOG(journal_.debug()) << "copied ledger " << validatedSeq + << " nodecount " << nodeCount; + JLOG(journal_.debug()) << "freshening caches"; freshenCaches(); - JLOG(journal_.debug()) << validatedSeq << " freshened caches"; switch (health()) { case Health::stopping: @@ -409,7 +413,10 @@ SHAMapStoreImp::run() case Health::ok: default:; } + // Only log if we completed without a "health" abort + JLOG(journal_.debug()) << validatedSeq << " freshened caches"; + JLOG(journal_.trace()) << "Making a new backend"; auto newBackend = makeBackendRotating(); JLOG(journal_.debug()) << validatedSeq << " new backend " << newBackend->getName(); @@ -559,26 +566,38 @@ SHAMapStoreImp::makeBackendRotating(std::string path) return backend; } -bool +void SHAMapStoreImp::clearSql( DatabaseCon& database, LedgerIndex lastRotated, std::string const& minQuery, std::string const& deleteQuery) { + assert(deleteInterval_); LedgerIndex min = std::numeric_limits::max(); { - auto db = database.checkoutDb(); boost::optional m; - *db << minQuery, soci::into(m); + JLOG(journal_.trace()) + << "Begin: Look up lowest value of: " << minQuery; + { + auto db = database.checkoutDb(); + *db << minQuery, soci::into(m); + } + JLOG(journal_.trace()) << "End: Look up lowest value of: " << minQuery; if (!m) - return false; + return; min = *m; } if (min > lastRotated || 
health() != Health::ok) - return false; + return; + if (min == lastRotated) + { + // Micro-optimization mainly to clarify logs + JLOG(journal_.trace()) << "Nothing to delete from " << deleteQuery; + return; + } boost::format formattedDeleteQuery(deleteQuery); @@ -587,17 +606,24 @@ SHAMapStoreImp::clearSql( while (min < lastRotated) { min = std::min(lastRotated, min + deleteBatch_); + JLOG(journal_.trace()) << "Begin: Delete up to " << deleteBatch_ + << " rows with LedgerSeq < " << min + << " using query: " << deleteQuery; { auto db = database.checkoutDb(); *db << boost::str(formattedDeleteQuery % min); } + JLOG(journal_.trace()) + << "End: Delete up to " << deleteBatch_ << " rows with LedgerSeq < " + << min << " using query: " << deleteQuery; if (health()) - return true; + return; if (min < lastRotated) - std::this_thread::sleep_for(std::chrono::milliseconds(backOff_)); + std::this_thread::sleep_for(backOff_); + if (health()) + return; } JLOG(journal_.debug()) << "finished: " << deleteQuery; - return true; } void @@ -621,13 +647,14 @@ SHAMapStoreImp::freshenCaches() void SHAMapStoreImp::clearPrior(LedgerIndex lastRotated) { - if (health()) - return; - // Do not allow ledgers to be acquired from the network // that are about to be deleted. minimumOnline_ = lastRotated + 1; + JLOG(journal_.trace()) << "Begin: Clear internal ledgers up to " + << lastRotated; ledgerMaster_->clearPriorLedgers(lastRotated); + JLOG(journal_.trace()) << "End: Clear internal ledgers up to " + << lastRotated; if (health()) return; @@ -666,16 +693,32 @@ SHAMapStoreImp::health() } if (!netOPs_) return Health::ok; + assert(deleteInterval_); - constexpr static std::chrono::seconds age_threshold(60); - auto age = ledgerMaster_->getValidatedLedgerAge(); - OperatingMode mode = netOPs_->getOperatingMode(); - if (mode != OperatingMode::FULL || age > age_threshold) + if (healthy_) { - JLOG(journal_.warn()) << "Not deleting. state: " - << app_.getOPs().strOperatingMode(mode, false) - << ". 
age " << age.count() << 's'; - healthy_ = false; + auto age = ledgerMaster_->getValidatedLedgerAge(); + OperatingMode mode = netOPs_->getOperatingMode(); + if (recoveryWaitTime_ && mode == OperatingMode::SYNCING && + age < ageThreshold_) + { + JLOG(journal_.warn()) + << "Waiting " << recoveryWaitTime_->count() + << "s for node to get back into sync with network. state: " + << app_.getOPs().strOperatingMode(mode, false) << ". age " + << age.count() << 's'; + std::this_thread::sleep_for(*recoveryWaitTime_); + + age = ledgerMaster_->getValidatedLedgerAge(); + mode = netOPs_->getOperatingMode(); + } + if (mode != OperatingMode::FULL || age > ageThreshold_) + { + JLOG(journal_.warn()) << "Not deleting. state: " + << app_.getOPs().strOperatingMode(mode, false) + << ". age " << age.count() << 's'; + healthy_ = false; + } } if (healthy_) diff --git a/src/ripple/app/misc/SHAMapStoreImp.h b/src/ripple/app/misc/SHAMapStoreImp.h index 2fabf1a6996..6145cb48dfd 100644 --- a/src/ripple/app/misc/SHAMapStoreImp.h +++ b/src/ripple/app/misc/SHAMapStoreImp.h @@ -25,6 +25,7 @@ #include #include #include +#include #include #include @@ -106,8 +107,14 @@ class SHAMapStoreImp : public SHAMapStore std::uint32_t deleteInterval_ = 0; bool advisoryDelete_ = false; std::uint32_t deleteBatch_ = 100; - std::uint32_t backOff_ = 100; - std::int32_t ageThreshold_ = 60; + std::chrono::milliseconds backOff_{100}; + std::chrono::seconds ageThreshold_{60}; + /// If set, and the node is out of sync during an + /// online_delete health check, sleep the thread + /// for this time and check again so the node can + /// recover. 
+ /// See also: "recovery_wait_seconds" in rippled-example.cfg + boost::optional recoveryWaitTime_; // these do not exist upon SHAMapStore creation, but do exist // as of onPrepare() or before @@ -212,13 +219,11 @@ class SHAMapStoreImp : public SHAMapStore return false; } - /** delete from sqlite table in batches to not lock the db excessively - * pause briefly to extend access time to other users - * call with mutex object unlocked - * @return true if any deletable rows were found (though not - * necessarily deleted. + /** delete from sqlite table in batches to not lock the db excessively. + * Pause briefly to extend access time to other users. + * Call with mutex object unlocked. */ - bool + void clearSql( DatabaseCon& database, LedgerIndex lastRotated, @@ -236,6 +241,9 @@ class SHAMapStoreImp : public SHAMapStore // Assume that, once unhealthy, a necessary step has been // aborted, so the online-delete process needs to restart // at next ledger. + // If recoveryWaitTime_ is set, this may sleep to give rippled + // time to recover, so never call it from any thread other than + // the main "run()". Health health(); // diff --git a/src/ripple/core/DatabaseCon.h b/src/ripple/core/DatabaseCon.h index d79ecef2071..5cdabb08f08 100644 --- a/src/ripple/core/DatabaseCon.h +++ b/src/ripple/core/DatabaseCon.h @@ -24,6 +24,7 @@ #include #include #include +#include #include #include @@ -89,6 +90,19 @@ class DatabaseCon Config::StartUpType startUp = Config::NORMAL; bool standAlone = false; boost::filesystem::path dataDir; + // Indicates whether or not to return the `globalPragma` + // from commonPragma() + bool useGlobalPragma = false; + + std::vector const* + commonPragma() const + { + assert(!useGlobalPragma || globalPragma); + return useGlobalPragma && globalPragma ? 
globalPragma.get() + : nullptr; + } + + static std::unique_ptr const> globalPragma; }; template @@ -97,16 +111,17 @@ class DatabaseCon std::string const& DBName, std::array const& pragma, std::array const& initSQL) - { // Use temporary files or regular DB files? - auto const useTempFiles = setup.standAlone && - setup.startUp != Config::LOAD && - setup.startUp != Config::LOAD_FILE && - setup.startUp != Config::REPLAY; - boost::filesystem::path pPath = - useTempFiles ? "" : (setup.dataDir / DBName); - - init(pPath, pragma, initSQL); + : DatabaseCon( + setup.standAlone && setup.startUp != Config::LOAD && + setup.startUp != Config::LOAD_FILE && + setup.startUp != Config::REPLAY + ? "" + : (setup.dataDir / DBName), + setup.commonPragma(), + pragma, + initSQL) + { } template @@ -115,8 +130,8 @@ class DatabaseCon std::string const& DBName, std::array const& pragma, std::array const& initSQL) + : DatabaseCon(dataDir / DBName, nullptr, pragma, initSQL) { - init((dataDir / DBName), pragma, initSQL); } soci::session& @@ -136,14 +151,22 @@ class DatabaseCon private: template - void - init( + DatabaseCon( boost::filesystem::path const& pPath, + std::vector const* commonPragma, std::array const& pragma, std::array const& initSQL) { open(session_, "sqlite", pPath.string()); + if (commonPragma) + { + for (auto const& p : *commonPragma) + { + soci::statement st = session_.prepare << p; + st.execute(true); + } + } for (auto const& p : pragma) { soci::statement st = session_.prepare << p; @@ -163,7 +186,9 @@ class DatabaseCon }; DatabaseCon::Setup -setup_DatabaseCon(Config const& c); +setup_DatabaseCon( + Config const& c, + boost::optional j = boost::none); } // namespace ripple diff --git a/src/ripple/core/impl/Config.cpp b/src/ripple/core/impl/Config.cpp index cd272fd885c..a7aeca8617e 100644 --- a/src/ripple/core/impl/Config.cpp +++ b/src/ripple/core/impl/Config.cpp @@ -442,7 +442,8 @@ Config::loadFromString(std::string const& fileContents) if (getSingleSection(secConfig, 
SECTION_LEDGER_HISTORY, strTemp, j_)) { if (boost::iequals(strTemp, "full")) - LEDGER_HISTORY = 1000000000u; + LEDGER_HISTORY = + std::numeric_limits::max(); else if (boost::iequals(strTemp, "none")) LEDGER_HISTORY = 0; else @@ -454,7 +455,7 @@ Config::loadFromString(std::string const& fileContents) if (boost::iequals(strTemp, "none")) FETCH_DEPTH = 0; else if (boost::iequals(strTemp, "full")) - FETCH_DEPTH = 1000000000u; + FETCH_DEPTH = std::numeric_limits::max(); else FETCH_DEPTH = beast::lexicalCastThrow(strTemp); diff --git a/src/ripple/core/impl/DatabaseCon.cpp b/src/ripple/core/impl/DatabaseCon.cpp index 3a4489b2f94..89c4ee1f291 100644 --- a/src/ripple/core/impl/DatabaseCon.cpp +++ b/src/ripple/core/impl/DatabaseCon.cpp @@ -21,12 +21,14 @@ #include #include #include +#include +#include #include namespace ripple { DatabaseCon::Setup -setup_DatabaseCon(Config const& c) +setup_DatabaseCon(Config const& c, boost::optional j) { DatabaseCon::Setup setup; @@ -38,9 +40,134 @@ setup_DatabaseCon(Config const& c) Throw("database_path must be set."); } + if (!setup.globalPragma) + { + setup.globalPragma = [&c, &j]() { + auto const& sqlite = c.section("sqlite"); + auto result = std::make_unique>(); + result->reserve(3); + + // defaults + std::string safety_level; + std::string journal_mode = "wal"; + std::string synchronous = "normal"; + std::string temp_store = "file"; + bool showRiskWarning = false; + + if (set(safety_level, "safety_level", sqlite)) + { + if (boost::iequals(safety_level, "low")) + { + // low safety defaults + journal_mode = "memory"; + synchronous = "off"; + temp_store = "memory"; + showRiskWarning = true; + } + else if (!boost::iequals(safety_level, "high")) + { + Throw( + "Invalid safety_level value: " + safety_level); + } + } + + { + // #journal_mode Valid values : delete, truncate, persist, + // memory, wal, off + if (set(journal_mode, "journal_mode", sqlite) && + !safety_level.empty()) + { + Throw( + "Configuration file may not define both " + 
"\"safety_level\" and \"journal_mode\""); + } + bool higherRisk = boost::iequals(journal_mode, "memory") || + boost::iequals(journal_mode, "off"); + showRiskWarning = showRiskWarning || higherRisk; + if (higherRisk || boost::iequals(journal_mode, "delete") || + boost::iequals(journal_mode, "truncate") || + boost::iequals(journal_mode, "persist") || + boost::iequals(journal_mode, "wal")) + { + result->emplace_back(boost::str( + boost::format(CommonDBPragmaJournal) % journal_mode)); + } + else + { + Throw( + "Invalid journal_mode value: " + journal_mode); + } + } + + { + //#synchronous Valid values : off, normal, full, extra + if (set(synchronous, "synchronous", sqlite) && + !safety_level.empty()) + { + Throw( + "Configuration file may not define both " + "\"safety_level\" and \"synchronous\""); + } + bool higherRisk = boost::iequals(synchronous, "off"); + showRiskWarning = showRiskWarning || higherRisk; + if (higherRisk || boost::iequals(synchronous, "normal") || + boost::iequals(synchronous, "full") || + boost::iequals(synchronous, "extra")) + { + result->emplace_back(boost::str( + boost::format(CommonDBPragmaSync) % synchronous)); + } + else + { + Throw( + "Invalid synchronous value: " + synchronous); + } + } + + { + // #temp_store Valid values : default, file, memory + if (set(temp_store, "temp_store", sqlite) && + !safety_level.empty()) + { + Throw( + "Configuration file may not define both " + "\"safety_level\" and \"temp_store\""); + } + bool higherRisk = boost::iequals(temp_store, "memory"); + showRiskWarning = showRiskWarning || higherRisk; + if (higherRisk || boost::iequals(temp_store, "default") || + boost::iequals(temp_store, "file")) + { + result->emplace_back(boost::str( + boost::format(CommonDBPragmaTemp) % temp_store)); + } + else + { + Throw( + "Invalid temp_store value: " + temp_store); + } + } + + if (showRiskWarning && j && c.LEDGER_HISTORY > SQLITE_TUNING_CUTOFF) + { + JLOG(j->warn()) + << "reducing the data integrity guarantees from the " + 
"default [sqlite] behavior is not recommended for " + "nodes storing large amounts of history, because of the " + "difficulty inherent in rebuilding corrupted data."; + } + assert(result->size() == 3); + return result; + }(); + } + setup.useGlobalPragma = true; + return setup; } +std::unique_ptr const> + DatabaseCon::Setup::globalPragma; + void DatabaseCon::setupCheckpointing(JobQueue* q, Logs& l) { diff --git a/src/ripple/net/impl/DatabaseBody.ipp b/src/ripple/net/impl/DatabaseBody.ipp index d6bae7b47f7..5a1bd7e6185 100644 --- a/src/ripple/net/impl/DatabaseBody.ipp +++ b/src/ripple/net/impl/DatabaseBody.ipp @@ -50,7 +50,9 @@ DatabaseBody::value_type::open( auto setup = setup_DatabaseCon(config); setup.dataDir = path.parent_path(); + setup.useGlobalPragma = false; + // Downloader ignores the "CommonPragma" conn_ = std::make_unique( setup, "Download", DownloaderDBPragma, DatabaseBodyDBInit); diff --git a/src/ripple/nodestore/impl/Shard.cpp b/src/ripple/nodestore/impl/Shard.cpp index 1701206fe4d..f8799ff3d27 100644 --- a/src/ripple/nodestore/impl/Shard.cpp +++ b/src/ripple/nodestore/impl/Shard.cpp @@ -124,6 +124,7 @@ Shard::open(Scheduler& scheduler, nudb::context& ctx) setup.startUp = config.START_UP; setup.standAlone = config.standalone(); setup.dataDir = dir_; + setup.useGlobalPragma = true; acquireInfo_->SQLiteDB = std::make_unique( setup, @@ -668,10 +669,14 @@ bool Shard::initSQLite(std::lock_guard const&) { Config const& config{app_.config()}; - DatabaseCon::Setup setup; - setup.startUp = config.START_UP; - setup.standAlone = config.standalone(); - setup.dataDir = dir_; + DatabaseCon::Setup const setup = [&]() { + DatabaseCon::Setup result; + result.startUp = config.START_UP; + result.standAlone = config.standalone(); + result.dataDir = dir_; + result.useGlobalPragma = !backendComplete_; + return result; + }(); try { diff --git a/src/test/app/LedgerHistory_test.cpp b/src/test/app/LedgerHistory_test.cpp index ac2dcda61b2..cbc9c95b325 100644 --- 
a/src/test/app/LedgerHistory_test.cpp +++ b/src/test/app/LedgerHistory_test.cpp @@ -27,6 +27,7 @@ #include #include #include +#include namespace ripple { namespace test { @@ -34,56 +35,6 @@ namespace test { class LedgerHistory_test : public beast::unit_test::suite { public: - /** Log manager that searches for a specific message substring - */ - class CheckMessageLogs : public Logs - { - std::string msg_; - bool& found_; - - class CheckMessageSink : public beast::Journal::Sink - { - CheckMessageLogs& owner_; - - public: - CheckMessageSink( - beast::severities::Severity threshold, - CheckMessageLogs& owner) - : beast::Journal::Sink(threshold, false), owner_(owner) - { - } - - void - write(beast::severities::Severity level, std::string const& text) - override - { - if (text.find(owner_.msg_) != std::string::npos) - owner_.found_ = true; - } - }; - - public: - /** Constructor - - @param msg The message string to search for - @param found The variable to set to true if the message is found - */ - CheckMessageLogs(std::string msg, bool& found) - : Logs{beast::severities::kDebug} - , msg_{std::move(msg)} - , found_{found} - { - } - - std::unique_ptr - makeSink( - std::string const& partition, - beast::severities::Severity threshold) override - { - return std::make_unique(threshold, *this); - } - }; - /** Generate a new ledger by hand, applying a specific close time offset and optionally inserting a transaction. 
@@ -149,7 +100,7 @@ class LedgerHistory_test : public beast::unit_test::suite Env env{ *this, envconfig(), - std::make_unique("MISMATCH ", found)}; + std::make_unique("MISMATCH ", &found)}; LedgerHistory lh{beast::insight::NullCollector::New(), env.app()}; auto const genesis = makeLedger({}, env, lh, 0s); uint256 const dummyTxHash{1}; @@ -166,7 +117,7 @@ class LedgerHistory_test : public beast::unit_test::suite *this, envconfig(), std::make_unique( - "MISMATCH on close time", found)}; + "MISMATCH on close time", &found)}; LedgerHistory lh{beast::insight::NullCollector::New(), env.app()}; auto const genesis = makeLedger({}, env, lh, 0s); auto const ledgerA = makeLedger(genesis, env, lh, 4s); @@ -186,7 +137,7 @@ class LedgerHistory_test : public beast::unit_test::suite *this, envconfig(), std::make_unique( - "MISMATCH on prior ledger", found)}; + "MISMATCH on prior ledger", &found)}; LedgerHistory lh{beast::insight::NullCollector::New(), env.app()}; auto const genesis = makeLedger({}, env, lh, 0s); auto const ledgerA = makeLedger(genesis, env, lh, 4s); @@ -212,7 +163,7 @@ class LedgerHistory_test : public beast::unit_test::suite Env env{ *this, envconfig(), - std::make_unique(msg, found)}; + std::make_unique(msg, &found)}; LedgerHistory lh{beast::insight::NullCollector::New(), env.app()}; Account alice{"A1"}; diff --git a/src/test/app/Manifest_test.cpp b/src/test/app/Manifest_test.cpp index 063b4383281..18460ce3689 100644 --- a/src/test/app/Manifest_test.cpp +++ b/src/test/app/Manifest_test.cpp @@ -256,6 +256,7 @@ class Manifest_test : public beast::unit_test::suite { DatabaseCon::Setup setup; setup.dataDir = getDatabasePath(); + BEAST_EXPECT(!setup.useGlobalPragma); DatabaseCon dbCon( setup, dbName.data(), diff --git a/src/test/jtx/CaptureLogs.h b/src/test/jtx/CaptureLogs.h new file mode 100644 index 00000000000..30a562e99d0 --- /dev/null +++ b/src/test/jtx/CaptureLogs.h @@ -0,0 +1,80 @@ 
+//------------------------------------------------------------------------------ +/* + This file is part of rippled: https://github.com/ripple/rippled + Copyright (c) 2020 Ripple Labs Inc. + + Permission to use, copy, modify, and/or distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. +*/ +//============================================================================== + +#include + +namespace ripple { +namespace test { + +/** + * @brief Log manager for CaptureSinks. This class holds the stream + * instance that is written to by the sinks. 
Upon destruction, all + * contents of the stream are assigned to the string specified in the + * ctor + */ +class CaptureLogs : public Logs +{ + std::stringstream strm_; + std::string* pResult_; + + /** + * @brief sink for writing all log messages to a stringstream + */ + class CaptureSink : public beast::Journal::Sink + { + std::stringstream& strm_; + + public: + CaptureSink( + beast::severities::Severity threshold, + std::stringstream& strm) + : beast::Journal::Sink(threshold, false), strm_(strm) + { + } + + void + write(beast::severities::Severity level, std::string const& text) + override + { + strm_ << text; + } + }; + +public: + explicit CaptureLogs(std::string* pResult) + : Logs(beast::severities::kInfo), pResult_(pResult) + { + } + + ~CaptureLogs() override + { + *pResult_ = strm_.str(); + } + + std::unique_ptr + makeSink( + std::string const& partition, + beast::severities::Severity threshold) override + { + return std::make_unique(threshold, strm_); + } +}; + +} // namespace test +} // namespace ripple diff --git a/src/test/jtx/CheckMessageLogs.h b/src/test/jtx/CheckMessageLogs.h new file mode 100644 index 00000000000..66f5f7e106c --- /dev/null +++ b/src/test/jtx/CheckMessageLogs.h @@ -0,0 +1,75 @@ +//------------------------------------------------------------------------------ +/* + This file is part of rippled: https://github.com/ripple/rippled + Copyright (c) 2020 Ripple Labs Inc. + + Permission to use, copy, modify, and/or distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. +*/ +//============================================================================== + +#include + +namespace ripple { +namespace test { + +/** Log manager that searches for a specific message substring + */ +class CheckMessageLogs : public Logs +{ + std::string msg_; + bool* pFound_; + + class CheckMessageSink : public beast::Journal::Sink + { + CheckMessageLogs& owner_; + + public: + CheckMessageSink( + beast::severities::Severity threshold, + CheckMessageLogs& owner) + : beast::Journal::Sink(threshold, false), owner_(owner) + { + } + + void + write(beast::severities::Severity level, std::string const& text) + override + { + if (text.find(owner_.msg_) != std::string::npos) + *owner_.pFound_ = true; + } + }; + +public: + /** Constructor + + @param msg The message string to search for + @param pFound Pointer to the variable to set to true if the message is + found + */ + CheckMessageLogs(std::string msg, bool* pFound) + : Logs{beast::severities::kDebug}, msg_{std::move(msg)}, pFound_{pFound} + { + } + + std::unique_ptr + makeSink( + std::string const& partition, + beast::severities::Severity threshold) override + { + return std::make_unique(threshold, *this); + } +}; + +} // namespace test +} // namespace ripple diff --git a/src/test/jtx/Env.h b/src/test/jtx/Env.h index f06cfbf7a9c..f2934bb5002 100644 --- a/src/test/jtx/Env.h +++ b/src/test/jtx/Env.h @@ -27,6 +27,7 @@ #include #include #include // +#include #include #include #include @@ -131,7 +132,8 @@ class Env AppBundle( beast::unit_test::suite& suite, std::unique_ptr config, - std::unique_ptr logs); + std::unique_ptr logs, + beast::severities::Severity thresh); ~AppBundle(); }; @@ -163,12 
+165,10 @@ class Env Env(beast::unit_test::suite& suite_, std::unique_ptr config, FeatureBitset features, - std::unique_ptr logs = nullptr) + std::unique_ptr logs = nullptr, + beast::severities::Severity thresh = beast::severities::kError) : test(suite_) - , bundle_( - suite_, - std::move(config), - logs ? std::move(logs) : std::make_unique(suite_)) + , bundle_(suite_, std::move(config), std::move(logs), thresh) , journal{bundle_.app->journal("Env")} { memoize(Account::master); @@ -211,11 +211,13 @@ class Env */ Env(beast::unit_test::suite& suite_, std::unique_ptr config, - std::unique_ptr logs = nullptr) + std::unique_ptr logs = nullptr, + beast::severities::Severity thresh = beast::severities::kError) : Env(suite_, std::move(config), supported_amendments(), - std::move(logs)) + std::move(logs), + thresh) { } diff --git a/src/test/jtx/impl/Env.cpp b/src/test/jtx/impl/Env.cpp index a9b7c3430ff..855dfe7bbf0 100644 --- a/src/test/jtx/impl/Env.cpp +++ b/src/test/jtx/impl/Env.cpp @@ -59,12 +59,22 @@ namespace jtx { Env::AppBundle::AppBundle( beast::unit_test::suite& suite, std::unique_ptr config, - std::unique_ptr logs) + std::unique_ptr logs, + beast::severities::Severity thresh) : AppBundle() { using namespace beast::severities; - // Use kFatal threshold to reduce noise from STObject. - setDebugLogSink(std::make_unique("Debug", kFatal, suite)); + if (logs) + { + setDebugLogSink(logs->makeSink("Debug", kFatal)); + } + else + { + logs = std::make_unique(suite); + // Use kFatal threshold to reduce noise from STObject. 
+ setDebugLogSink( + std::make_unique("Debug", kFatal, suite)); + } auto timeKeeper_ = std::make_unique(); timeKeeper = timeKeeper_.get(); // Hack so we don't have to call Config::setup @@ -72,7 +82,7 @@ Env::AppBundle::AppBundle( owned = make_Application( std::move(config), std::move(logs), std::move(timeKeeper_)); app = owned.get(); - app->logs().threshold(kError); + app->logs().threshold(thresh); if (!app->setup()) Throw("Env::AppBundle: setup failed"); timeKeeper->set(app->getLedgerMaster().getClosedLedger()->info().closeTime); diff --git a/src/test/nodestore/Database_test.cpp b/src/test/nodestore/Database_test.cpp index b1a88bea557..826f5ccf5bf 100644 --- a/src/test/nodestore/Database_test.cpp +++ b/src/test/nodestore/Database_test.cpp @@ -18,8 +18,12 @@ //============================================================================== #include +#include #include #include +#include +#include +#include #include #include @@ -35,6 +39,409 @@ class Database_test : public TestBase { } + void + testConfig() + { + testcase("Config"); + + using namespace ripple::test; + using namespace ripple::test::jtx; + + auto const integrityWarning = + "reducing the data integrity guarantees from the " + "default [sqlite] behavior is not recommended for " + "nodes storing large amounts of history, because of the " + "difficulty inherent in rebuilding corrupted data."; + { + // defaults + Env env(*this); + + auto const s = setup_DatabaseCon(env.app().config()); + + if (BEAST_EXPECT(s.globalPragma->size() == 3)) + { + BEAST_EXPECT( + s.globalPragma->at(0) == "PRAGMA journal_mode=wal;"); + BEAST_EXPECT( + s.globalPragma->at(1) == "PRAGMA synchronous=normal;"); + BEAST_EXPECT( + s.globalPragma->at(2) == "PRAGMA temp_store=file;"); + } + } + { + // High safety level + DatabaseCon::Setup::globalPragma.reset(); + + bool found = false; + Env env = [&]() { + auto p = test::jtx::envconfig(); + { + auto& section = p->section("sqlite"); + section.set("safety_level", "high"); + } + 
p->LEDGER_HISTORY = 100'000'000; + + return Env( + *this, + std::move(p), + std::make_unique( + integrityWarning, &found), + beast::severities::kWarning); + }(); + + BEAST_EXPECT(!found); + auto const s = setup_DatabaseCon(env.app().config()); + if (BEAST_EXPECT(s.globalPragma->size() == 3)) + { + BEAST_EXPECT( + s.globalPragma->at(0) == "PRAGMA journal_mode=wal;"); + BEAST_EXPECT( + s.globalPragma->at(1) == "PRAGMA synchronous=normal;"); + BEAST_EXPECT( + s.globalPragma->at(2) == "PRAGMA temp_store=file;"); + } + } + { + // Low safety level + DatabaseCon::Setup::globalPragma.reset(); + + bool found = false; + Env env = [&]() { + auto p = test::jtx::envconfig(); + { + auto& section = p->section("sqlite"); + section.set("safety_level", "low"); + } + p->LEDGER_HISTORY = 100'000'000; + + return Env( + *this, + std::move(p), + std::make_unique( + integrityWarning, &found), + beast::severities::kWarning); + }(); + + BEAST_EXPECT(found); + auto const s = setup_DatabaseCon(env.app().config()); + if (BEAST_EXPECT(s.globalPragma->size() == 3)) + { + BEAST_EXPECT( + s.globalPragma->at(0) == "PRAGMA journal_mode=memory;"); + BEAST_EXPECT( + s.globalPragma->at(1) == "PRAGMA synchronous=off;"); + BEAST_EXPECT( + s.globalPragma->at(2) == "PRAGMA temp_store=memory;"); + } + } + { + // Override individual settings + DatabaseCon::Setup::globalPragma.reset(); + + bool found = false; + Env env = [&]() { + auto p = test::jtx::envconfig(); + { + auto& section = p->section("sqlite"); + section.set("journal_mode", "off"); + section.set("synchronous", "extra"); + section.set("temp_store", "default"); + } + + return Env( + *this, + std::move(p), + std::make_unique( + integrityWarning, &found), + beast::severities::kWarning); + }(); + + // No warning, even though higher risk settings were used because + // LEDGER_HISTORY is small + BEAST_EXPECT(!found); + auto const s = setup_DatabaseCon(env.app().config()); + if (BEAST_EXPECT(s.globalPragma->size() == 3)) + { + BEAST_EXPECT( + 
s.globalPragma->at(0) == "PRAGMA journal_mode=off;"); + BEAST_EXPECT( + s.globalPragma->at(1) == "PRAGMA synchronous=extra;"); + BEAST_EXPECT( + s.globalPragma->at(2) == "PRAGMA temp_store=default;"); + } + } + { + // Override individual settings with large history + DatabaseCon::Setup::globalPragma.reset(); + + bool found = false; + Env env = [&]() { + auto p = test::jtx::envconfig(); + { + auto& section = p->section("sqlite"); + section.set("journal_mode", "off"); + section.set("synchronous", "extra"); + section.set("temp_store", "default"); + } + p->LEDGER_HISTORY = 50'000'000; + + return Env( + *this, + std::move(p), + std::make_unique( + integrityWarning, &found), + beast::severities::kWarning); + }(); + + // No warning, even though higher risk settings were used because + // LEDGER_HISTORY is small + BEAST_EXPECT(found); + auto const s = setup_DatabaseCon(env.app().config()); + if (BEAST_EXPECT(s.globalPragma->size() == 3)) + { + BEAST_EXPECT( + s.globalPragma->at(0) == "PRAGMA journal_mode=off;"); + BEAST_EXPECT( + s.globalPragma->at(1) == "PRAGMA synchronous=extra;"); + BEAST_EXPECT( + s.globalPragma->at(2) == "PRAGMA temp_store=default;"); + } + } + { + // Error: Mix safety_level and individual settings + DatabaseCon::Setup::globalPragma.reset(); + auto const expected = + "Failed to initialize SQLite databases: " + "Configuration file may not define both \"safety_level\" and " + "\"journal_mode\""; + bool found = false; + + auto p = test::jtx::envconfig(); + { + auto& section = p->section("sqlite"); + section.set("safety_level", "low"); + section.set("journal_mode", "off"); + section.set("synchronous", "extra"); + section.set("temp_store", "default"); + } + + try + { + Env env( + *this, + std::move(p), + std::make_unique(expected, &found), + beast::severities::kWarning); + fail(); + } + catch (...) 
+ { + BEAST_EXPECT(found); + } + } + { + // Error: Mix safety_level and one setting (gotta catch 'em all) + DatabaseCon::Setup::globalPragma.reset(); + auto const expected = + "Failed to initialize SQLite databases: Configuration file may " + "not define both \"safety_level\" and \"journal_mode\""; + bool found = false; + + auto p = test::jtx::envconfig(); + { + auto& section = p->section("sqlite"); + section.set("safety_level", "high"); + section.set("journal_mode", "off"); + } + + try + { + Env env( + *this, + std::move(p), + std::make_unique(expected, &found), + beast::severities::kWarning); + fail(); + } + catch (...) + { + BEAST_EXPECT(found); + } + } + { + // Error: Mix safety_level and one setting (gotta catch 'em all) + DatabaseCon::Setup::globalPragma.reset(); + auto const expected = + "Failed to initialize SQLite databases: Configuration file may " + "not define both \"safety_level\" and \"synchronous\""; + bool found = false; + + auto p = test::jtx::envconfig(); + { + auto& section = p->section("sqlite"); + section.set("safety_level", "low"); + section.set("synchronous", "extra"); + } + + try + { + Env env( + *this, + std::move(p), + std::make_unique(expected, &found), + beast::severities::kWarning); + fail(); + } + catch (...) + { + BEAST_EXPECT(found); + } + } + { + // Error: Mix safety_level and one setting (gotta catch 'em all) + DatabaseCon::Setup::globalPragma.reset(); + auto const expected = + "Failed to initialize SQLite databases: Configuration file may " + "not define both \"safety_level\" and \"temp_store\""; + bool found = false; + + auto p = test::jtx::envconfig(); + { + auto& section = p->section("sqlite"); + section.set("safety_level", "high"); + section.set("temp_store", "default"); + } + + try + { + Env env( + *this, + std::move(p), + std::make_unique(expected, &found), + beast::severities::kWarning); + fail(); + } + catch (...) 
+ { + BEAST_EXPECT(found); + } + } + { + // Error: Invalid value + DatabaseCon::Setup::globalPragma.reset(); + auto const expected = + "Failed to initialize SQLite databases: Invalid safety_level " + "value: slow"; + bool found = false; + + auto p = test::jtx::envconfig(); + { + auto& section = p->section("sqlite"); + section.set("safety_level", "slow"); + } + + try + { + Env env( + *this, + std::move(p), + std::make_unique(expected, &found), + beast::severities::kWarning); + fail(); + } + catch (...) + { + BEAST_EXPECT(found); + } + } + { + // Error: Invalid value + DatabaseCon::Setup::globalPragma.reset(); + auto const expected = + "Failed to initialize SQLite databases: Invalid journal_mode " + "value: fast"; + bool found = false; + + auto p = test::jtx::envconfig(); + { + auto& section = p->section("sqlite"); + section.set("journal_mode", "fast"); + } + + try + { + Env env( + *this, + std::move(p), + std::make_unique(expected, &found), + beast::severities::kWarning); + fail(); + } + catch (...) + { + BEAST_EXPECT(found); + } + } + { + // Error: Invalid value + DatabaseCon::Setup::globalPragma.reset(); + auto const expected = + "Failed to initialize SQLite databases: Invalid synchronous " + "value: instant"; + bool found = false; + + auto p = test::jtx::envconfig(); + { + auto& section = p->section("sqlite"); + section.set("synchronous", "instant"); + } + + try + { + Env env( + *this, + std::move(p), + std::make_unique(expected, &found), + beast::severities::kWarning); + fail(); + } + catch (...) 
+ { + BEAST_EXPECT(found); + } + } + { + // Error: Invalid value + DatabaseCon::Setup::globalPragma.reset(); + auto const expected = + "Failed to initialize SQLite databases: Invalid temp_store " + "value: network"; + bool found = false; + + auto p = test::jtx::envconfig(); + { + auto& section = p->section("sqlite"); + section.set("temp_store", "network"); + } + + try + { + Env env( + *this, + std::move(p), + std::make_unique(expected, &found), + beast::severities::kWarning); + fail(); + } + catch (...) + { + BEAST_EXPECT(found); + } + } + } + + //-------------------------------------------------------------------------- + void testImport( std::string const& destBackendType, @@ -221,6 +628,8 @@ class Database_test : public TestBase { std::int64_t const seedValue = 50; + testConfig(); + testNodeStore("memory", false, seedValue); // Persistent backend tests diff --git a/src/test/server/Server_test.cpp b/src/test/server/Server_test.cpp index ef132d2eb0c..521661c5895 100644 --- a/src/test/server/Server_test.cpp +++ b/src/test/server/Server_test.cpp @@ -31,6 +31,7 @@ #include #include #include +#include #include #include #include @@ -375,60 +376,6 @@ class Server_test : public beast::unit_test::suite pass(); } - /** - * @brief sink for writing all log messages to a stringstream - */ - class CaptureSink : public beast::Journal::Sink - { - std::stringstream& strm_; - - public: - CaptureSink( - beast::severities::Severity threshold, - std::stringstream& strm) - : beast::Journal::Sink(threshold, false), strm_(strm) - { - } - - void - write(beast::severities::Severity level, std::string const& text) - override - { - strm_ << text; - } - }; - - /** - * @brief Log manager for CaptureSinks. This class holds the stream - * instance that is written to by the sinks. 
Upon destruction, all - * contents of the stream are assigned to the string specified in the - * ctor - */ - class CaptureLogs : public Logs - { - std::stringstream strm_; - std::string& result_; - - public: - explicit CaptureLogs(std::string& result) - : Logs(beast::severities::kInfo), result_(result) - { - } - - ~CaptureLogs() override - { - result_ = strm_.str(); - } - - std::unique_ptr - makeSink( - std::string const& partition, - beast::severities::Severity threshold) override - { - return std::make_unique(threshold, strm_); - } - }; - void testBadConfig() { @@ -444,7 +391,7 @@ class Server_test : public beast::unit_test::suite (*cfg).deprecatedClearSection("port_rpc"); return cfg; }), - std::make_unique(messages)}; + std::make_unique(&messages)}; }); BEAST_EXPECT( messages.find("Missing 'ip' in [port_rpc]") != std::string::npos); @@ -457,7 +404,7 @@ class Server_test : public beast::unit_test::suite (*cfg)["port_rpc"].set("ip", getEnvLocalhostAddr()); return cfg; }), - std::make_unique(messages)}; + std::make_unique(&messages)}; }); BEAST_EXPECT( messages.find("Missing 'port' in [port_rpc]") != std::string::npos); @@ -471,7 +418,7 @@ class Server_test : public beast::unit_test::suite (*cfg)["port_rpc"].set("port", "0"); return cfg; }), - std::make_unique(messages)}; + std::make_unique(&messages)}; }); BEAST_EXPECT( messages.find("Invalid value '0' for key 'port' in [port_rpc]") != @@ -487,7 +434,7 @@ class Server_test : public beast::unit_test::suite (*cfg)["port_rpc"].set("protocol", ""); return cfg; }), - std::make_unique(messages)}; + std::make_unique(&messages)}; }); BEAST_EXPECT( messages.find("Missing 'protocol' in [port_rpc]") != @@ -522,7 +469,7 @@ class Server_test : public beast::unit_test::suite (*cfg)["port_ws"].set("admin", getEnvLocalhostAddr()); return cfg; }), - std::make_unique(messages)}; + std::make_unique(&messages)}; }); BEAST_EXPECT( messages.find("Required section [server] is missing") != @@ -548,7 +495,7 @@ class Server_test : public 
beast::unit_test::suite (*cfg)["server"].append("port_ws"); return cfg; }), - std::make_unique(messages)}; + std::make_unique(&messages)}; }); BEAST_EXPECT( messages.find("Missing section: [port_peer]") != std::string::npos); From be0b23b80483c1b573507a46a85e920dfd5c26a0 Mon Sep 17 00:00:00 2001 From: Edward Hennis Date: Wed, 10 Jun 2020 20:50:50 -0400 Subject: [PATCH 07/19] Update cmake so that rippled can build as a submodule --- Builds/CMake/CMakeFuncs.cmake | 8 ++++---- Builds/CMake/RippledCov.cmake | 6 +++--- Builds/CMake/RippledDocs.cmake | 2 +- Builds/CMake/RippledNIH.cmake | 2 +- Builds/CMake/RippledRelease.cmake | 4 ++-- Builds/CMake/deps/Rocksdb.cmake | 4 ++-- Builds/CMake/deps/Soci.cmake | 2 +- Builds/CMake/deps/Sqlite.cmake | 2 +- Builds/CMake/deps/gRPC.cmake | 2 +- 9 files changed, 16 insertions(+), 16 deletions(-) diff --git a/Builds/CMake/CMakeFuncs.cmake b/Builds/CMake/CMakeFuncs.cmake index bb24bdc31f9..fb60fd9b4eb 100644 --- a/Builds/CMake/CMakeFuncs.cmake +++ b/Builds/CMake/CMakeFuncs.cmake @@ -35,10 +35,10 @@ function (print_ep_logs _target) COMMENT "${_target} BUILD OUTPUT" COMMAND ${CMAKE_COMMAND} -DIN_FILE=${STAMP_DIR}/${_target}-build-out.log - -P ${CMAKE_SOURCE_DIR}/Builds/CMake/echo_file.cmake + -P ${CMAKE_CURRENT_SOURCE_DIR}/Builds/CMake/echo_file.cmake COMMAND ${CMAKE_COMMAND} -DIN_FILE=${STAMP_DIR}/${_target}-build-err.log - -P ${CMAKE_SOURCE_DIR}/Builds/CMake/echo_file.cmake) + -P ${CMAKE_CURRENT_SOURCE_DIR}/Builds/CMake/echo_file.cmake) endfunction () #[=========================================================[ @@ -177,7 +177,7 @@ function (git_hash hash_val) endif () endif () execute_process (COMMAND ${GIT_EXECUTABLE} "log" "--pretty=${_format}" "-n1" - WORKING_DIRECTORY ${CMAKE_SOURCE_DIR} + WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR} RESULT_VARIABLE _git_exit_code OUTPUT_VARIABLE _temp_hash OUTPUT_STRIP_TRAILING_WHITESPACE @@ -194,7 +194,7 @@ function (git_branch branch_val) endif () set (_branch "") execute_process (COMMAND 
${GIT_EXECUTABLE} "rev-parse" "--abbrev-ref" "HEAD" - WORKING_DIRECTORY ${CMAKE_SOURCE_DIR} + WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR} RESULT_VARIABLE _git_exit_code OUTPUT_VARIABLE _temp_branch OUTPUT_STRIP_TRAILING_WHITESPACE diff --git a/Builds/CMake/RippledCov.cmake b/Builds/CMake/RippledCov.cmake index b9d93f41af8..e177aa52ae2 100644 --- a/Builds/CMake/RippledCov.cmake +++ b/Builds/CMake/RippledCov.cmake @@ -28,7 +28,7 @@ if (coverage) set (extract_pattern "") if (coverage_core_only) - set (extract_pattern "${CMAKE_SOURCE_DIR}/src/ripple/") + set (extract_pattern "${CMAKE_CURRENT_SOURCE_DIR}/src/ripple/") endif () if (LLVM_COV AND LLVM_PROFDATA) @@ -72,14 +72,14 @@ if (coverage) COMMAND ${CMAKE_COMMAND} -E echo "Generating coverage- results will be in ${CMAKE_BINARY_DIR}/coverage/index.html." # create baseline info file COMMAND ${LCOV} - --no-external -d "${CMAKE_SOURCE_DIR}" -c -d . -i -o baseline.info + --no-external -d "${CMAKE_CURRENT_SOURCE_DIR}" -c -d . -i -o baseline.info | grep -v "ignoring data for external file" # run tests COMMAND ${CMAKE_COMMAND} -E echo "Running rippled tests for coverage report." COMMAND rippled --unittest$<$:=${coverage_test}> --quiet --unittest-log # Create test coverage data file COMMAND ${LCOV} - --no-external -d "${CMAKE_SOURCE_DIR}" -c -d . -o tests.info + --no-external -d "${CMAKE_CURRENT_SOURCE_DIR}" -c -d . 
-o tests.info | grep -v "ignoring data for external file" # Combine baseline and test coverage data COMMAND ${LCOV} diff --git a/Builds/CMake/RippledDocs.cmake b/Builds/CMake/RippledDocs.cmake index d0440f5ff8b..a3e2ae1ba5a 100644 --- a/Builds/CMake/RippledDocs.cmake +++ b/Builds/CMake/RippledDocs.cmake @@ -9,7 +9,7 @@ if (NOT TARGET Doxygen::doxygen) endif () set (doxygen_output_directory "${CMAKE_BINARY_DIR}/docs") -set (doxygen_include_path "${CMAKE_SOURCE_DIR}/src") +set (doxygen_include_path "${CMAKE_CURRENT_SOURCE_DIR}/src") set (doxygen_index_file "${doxygen_output_directory}/html/index.html") set (doxyfile "${CMAKE_CURRENT_SOURCE_DIR}/docs/Doxyfile") diff --git a/Builds/CMake/RippledNIH.cmake b/Builds/CMake/RippledNIH.cmake index e0c161aba78..60ab3e4bf85 100644 --- a/Builds/CMake/RippledNIH.cmake +++ b/Builds/CMake/RippledNIH.cmake @@ -13,7 +13,7 @@ if (NOT DEFINED NIH_CACHE_ROOT) if (DEFINED ENV{NIH_CACHE_ROOT}) set (NIH_CACHE_ROOT $ENV{NIH_CACHE_ROOT}) else () - set (NIH_CACHE_ROOT "${CMAKE_SOURCE_DIR}/.nih_c") + set (NIH_CACHE_ROOT "${CMAKE_CURRENT_SOURCE_DIR}/.nih_c") endif () endif () set (nih_cache_path diff --git a/Builds/CMake/RippledRelease.cmake b/Builds/CMake/RippledRelease.cmake index eb08566b2ff..b10bf6cf023 100644 --- a/Builds/CMake/RippledRelease.cmake +++ b/Builds/CMake/RippledRelease.cmake @@ -61,7 +61,7 @@ if (is_root_project) docker run -e NIH_CACHE_ROOT=/opt/rippled_bld/pkg/.nih_c -v ${NIH_CACHE_ROOT}/pkgbuild:/opt/rippled_bld/pkg/.nih_c - -v ${CMAKE_SOURCE_DIR}:/opt/rippled_bld/pkg/rippled + -v ${CMAKE_CURRENT_SOURCE_DIR}:/opt/rippled_bld/pkg/rippled -v ${CMAKE_CURRENT_BINARY_DIR}/packages:/opt/rippled_bld/pkg/out "$<$:--volume=/etc/passwd:/etc/passwd;--volume=/etc/group:/etc/group;--user=${DOCKER_USER_ID}:${DOCKER_GROUP_ID}>" -t rippled-rpm-builder:${container_label} @@ -124,7 +124,7 @@ if (is_root_project) docker run -e NIH_CACHE_ROOT=/opt/rippled_bld/pkg/.nih_c -v ${NIH_CACHE_ROOT}/pkgbuild:/opt/rippled_bld/pkg/.nih_c - -v 
${CMAKE_SOURCE_DIR}:/opt/rippled_bld/pkg/rippled + -v ${CMAKE_CURRENT_SOURCE_DIR}:/opt/rippled_bld/pkg/rippled -v ${CMAKE_CURRENT_BINARY_DIR}/packages:/opt/rippled_bld/pkg/out "$<$:--volume=/etc/passwd:/etc/passwd;--volume=/etc/group:/etc/group;--user=${DOCKER_USER_ID}:${DOCKER_GROUP_ID}>" -t rippled-dpkg-builder:${container_label} diff --git a/Builds/CMake/deps/Rocksdb.cmake b/Builds/CMake/deps/Rocksdb.cmake index f61cab3f740..eed6cefe162 100644 --- a/Builds/CMake/deps/Rocksdb.cmake +++ b/Builds/CMake/deps/Rocksdb.cmake @@ -64,13 +64,13 @@ if (local_rocksdb) PATCH_COMMAND # only used by windows build ${CMAKE_COMMAND} -E copy - ${CMAKE_SOURCE_DIR}/Builds/CMake/rocks_thirdparty.inc + ${CMAKE_CURRENT_SOURCE_DIR}/Builds/CMake/rocks_thirdparty.inc /thirdparty.inc COMMAND # fixup their build version file to keep the values # from changing always ${CMAKE_COMMAND} -E copy_if_different - ${CMAKE_SOURCE_DIR}/Builds/CMake/rocksdb_build_version.cc.in + ${CMAKE_CURRENT_SOURCE_DIR}/Builds/CMake/rocksdb_build_version.cc.in /util/build_version.cc.in CMAKE_ARGS -DCMAKE_CXX_COMPILER=${CMAKE_CXX_COMPILER} diff --git a/Builds/CMake/deps/Soci.cmake b/Builds/CMake/deps/Soci.cmake index 0bf022bea26..4015a3f2dea 100644 --- a/Builds/CMake/deps/Soci.cmake +++ b/Builds/CMake/deps/Soci.cmake @@ -51,7 +51,7 @@ else() # This patch process is likely fragile and should be reviewed carefully # whenever we update the GIT_TAG above. PATCH_COMMAND - ${CMAKE_COMMAND} -P ${CMAKE_SOURCE_DIR}/Builds/CMake/soci_patch.cmake + ${CMAKE_COMMAND} -P ${CMAKE_CURRENT_SOURCE_DIR}/Builds/CMake/soci_patch.cmake CMAKE_ARGS -DCMAKE_CXX_COMPILER=${CMAKE_CXX_COMPILER} -DCMAKE_C_COMPILER=${CMAKE_C_COMPILER} diff --git a/Builds/CMake/deps/Sqlite.cmake b/Builds/CMake/deps/Sqlite.cmake index be2a7904e97..73760f34389 100644 --- a/Builds/CMake/deps/Sqlite.cmake +++ b/Builds/CMake/deps/Sqlite.cmake @@ -31,7 +31,7 @@ else() # for the single amalgamation source file. 
PATCH_COMMAND ${CMAKE_COMMAND} -E copy_if_different - ${CMAKE_SOURCE_DIR}/Builds/CMake/CMake_sqlite3.txt + ${CMAKE_CURRENT_SOURCE_DIR}/Builds/CMake/CMake_sqlite3.txt /CMakeLists.txt CMAKE_ARGS -DCMAKE_CXX_COMPILER=${CMAKE_CXX_COMPILER} diff --git a/Builds/CMake/deps/gRPC.cmake b/Builds/CMake/deps/gRPC.cmake index f1d2adbae13..5195ff1979c 100644 --- a/Builds/CMake/deps/gRPC.cmake +++ b/Builds/CMake/deps/gRPC.cmake @@ -312,7 +312,7 @@ set (GRPC_GEN_DIR "${CMAKE_BINARY_DIR}/proto_gen_grpc") file (MAKE_DIRECTORY ${GRPC_GEN_DIR}) set (GRPC_PROTO_SRCS) set (GRPC_PROTO_HDRS) -set (GRPC_PROTO_ROOT "${CMAKE_SOURCE_DIR}/src/ripple/proto/org") +set (GRPC_PROTO_ROOT "${CMAKE_CURRENT_SOURCE_DIR}/src/ripple/proto/org") file(GLOB_RECURSE GRPC_DEFINITION_FILES LIST_DIRECTORIES false "${GRPC_PROTO_ROOT}/*.proto") foreach(file ${GRPC_DEFINITION_FILES}) get_filename_component(_abs_file ${file} ABSOLUTE) From 46bb01182785caeeab473d65df06214aa7a201d8 Mon Sep 17 00:00:00 2001 From: manojsdoshi Date: Thu, 11 Jun 2020 17:30:25 -0700 Subject: [PATCH 08/19] Fix a build issue caused by pip3 being unavailable --- Builds/containers/gitlab-ci/docker_alpine_setup.sh | 1 + 1 file changed, 1 insertion(+) diff --git a/Builds/containers/gitlab-ci/docker_alpine_setup.sh b/Builds/containers/gitlab-ci/docker_alpine_setup.sh index 43eeed8a914..00cf6eb5fa5 100644 --- a/Builds/containers/gitlab-ci/docker_alpine_setup.sh +++ b/Builds/containers/gitlab-ci/docker_alpine_setup.sh @@ -5,6 +5,7 @@ set -ex echo $(nproc) docker login -u rippled \ -p ${ARTIFACTORY_DEPLOY_KEY_RIPPLED} ${ARTIFACTORY_HUB} +apk add --update py-pip apk add \ bash util-linux coreutils binutils grep \ make ninja cmake build-base gcc g++ abuild git \ From d12f92b9b6fd8db5837873f587d03be2adfb6dd1 Mon Sep 17 00:00:00 2001 From: seelabs Date: Mon, 15 Jun 2020 06:29:34 -0700 Subject: [PATCH 09/19] Remove CryptoConditionsSuite amendment: * This was always a stub and enabled no functional changes --- src/ripple/app/tx/impl/Escrow.cpp | 5 +---- 
src/ripple/protocol/Feature.h | 3 +-- src/ripple/protocol/impl/Feature.cpp | 3 +-- 3 files changed, 3 insertions(+), 8 deletions(-) diff --git a/src/ripple/app/tx/impl/Escrow.cpp b/src/ripple/app/tx/impl/Escrow.cpp index 30a74c138c6..07d72f7875a 100644 --- a/src/ripple/app/tx/impl/Escrow.cpp +++ b/src/ripple/app/tx/impl/Escrow.cpp @@ -147,10 +147,7 @@ EscrowCreate::preflight(PreflightContext const& ctx) return temMALFORMED; } - // Conditions other than PrefixSha256 require the - // "CryptoConditionsSuite" amendment: - if (condition->type != Type::preimageSha256 && - !ctx.rules.enabled(featureCryptoConditionsSuite)) + if (condition->type != Type::preimageSha256) return temDISABLED; } diff --git a/src/ripple/protocol/Feature.h b/src/ripple/protocol/Feature.h index ae05c8c1d11..e378ec49ee2 100644 --- a/src/ripple/protocol/Feature.h +++ b/src/ripple/protocol/Feature.h @@ -83,7 +83,7 @@ class FeatureCollections "TickSize", "fix1368", "Escrow", - "CryptoConditionsSuite", + // "CryptoConditionsSuite", "fix1373", "EnforceInvariants", "SortedDirectories", @@ -348,7 +348,6 @@ extern uint256 const featureOwnerPaysFee; extern uint256 const featureFlow; extern uint256 const featureCompareTakerFlowCross; extern uint256 const featureFlowCross; -extern uint256 const featureCryptoConditionsSuite; extern uint256 const fix1513; extern uint256 const featureDepositAuth; extern uint256 const featureChecks; diff --git a/src/ripple/protocol/impl/Feature.cpp b/src/ripple/protocol/impl/Feature.cpp index 9d88c9d211a..b9c95673938 100644 --- a/src/ripple/protocol/impl/Feature.cpp +++ b/src/ripple/protocol/impl/Feature.cpp @@ -102,7 +102,7 @@ detail::supportedAmendments() "TickSize", "fix1368", "Escrow", - "CryptoConditionsSuite", + // "CryptoConditionsSuite", DO NOT REUSE "fix1373", "EnforceInvariants", "FlowCross", @@ -163,7 +163,6 @@ uint256 const featureFlow = *getRegisteredFeature("Flow"), featureCompareTakerFlowCross = *getRegisteredFeature("CompareTakerFlowCross"), featureFlowCross = 
*getRegisteredFeature("FlowCross"), - featureCryptoConditionsSuite = *getRegisteredFeature("CryptoConditionsSuite"), fix1513 = *getRegisteredFeature("fix1513"), featureDepositAuth = *getRegisteredFeature("DepositAuth"), featureChecks = *getRegisteredFeature("Checks"), From f006fbf0b6b0fd0081eea88ea1d97ca09d8a2674 Mon Sep 17 00:00:00 2001 From: Rome Reginelli Date: Thu, 11 Jun 2020 13:41:53 -0700 Subject: [PATCH 10/19] crawl_shards comment fix Corrects the public_key parameter name in the comment. See https://github.com/ripple/xrpl-dev-portal/pull/854 for context. --- src/ripple/rpc/handlers/CrawlShards.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/ripple/rpc/handlers/CrawlShards.cpp b/src/ripple/rpc/handlers/CrawlShards.cpp index bc235a3a05a..b28aa0e9596 100644 --- a/src/ripple/rpc/handlers/CrawlShards.cpp +++ b/src/ripple/rpc/handlers/CrawlShards.cpp @@ -32,7 +32,7 @@ namespace ripple { { // Determines if the result includes node public key. // optional, default is false - pubkey: + public_key: // The maximum number of peer hops to attempt. // optional, default is zero, maximum is 3 From 87d661c4bec65f368bf52a6d7a4a6fc66d899a1e Mon Sep 17 00:00:00 2001 From: mDuo13 Date: Thu, 18 Jun 2020 16:35:32 -0700 Subject: [PATCH 11/19] Update tecUNFUNDED & tecUNFUNDED_ADD messages: *The tecUNFUNDED code is actively used when attempting to create payment channels; the messages incorrectly list it as deprecated. Meanwhile, the tecUNFUNDED_ADD code actually is an unused legacy code, dating back to when there was a WalletAdd transactor. *Engine result messages are not part of the binary format and are documented as subject to change without notice, so this should not require an amendment nor a new API version. 
*Mark terLAST, terFUNDS_SPENT deprecated --- src/ripple/protocol/TER.h | 8 +- src/ripple/protocol/impl/TER.cpp | 358 ++++++++++++------------------- 2 files changed, 140 insertions(+), 226 deletions(-) diff --git a/src/ripple/protocol/TER.h b/src/ripple/protocol/TER.h index dcb31bfd8aa..a30bd794a20 100644 --- a/src/ripple/protocol/TER.h +++ b/src/ripple/protocol/TER.h @@ -185,7 +185,7 @@ enum TERcodes : TERUnderlyingType { // - Hold // - Makes hole in sequence which jams transactions. terRETRY = -99, - terFUNDS_SPENT, // This is a free transaction, so don't burden network. + terFUNDS_SPENT, // DEPRECATED. terINSUF_FEE_B, // Can't pay fee, therefore don't burden network. terNO_ACCOUNT, // Can't pay fee, therefore don't burden network. terNO_AUTH, // Not authorized to hold IOUs. @@ -193,7 +193,7 @@ enum TERcodes : TERUnderlyingType { terOWNERS, // Can't succeed with non-zero owner count. terPRE_SEQ, // Can't pay fee, no point in forwarding, so don't // burden network. - terLAST, // Process after all other transactions + terLAST, // DEPRECATED. terNO_RIPPLE, // Rippling not allowed terQUEUED // Transaction is being held in TxQ until fee drops }; @@ -238,7 +238,7 @@ enum TECcodes : TERUnderlyingType { // DO NOT CHANGE THESE NUMBERS: They appear in ledger meta data. tecCLAIM = 100, tecPATH_PARTIAL = 101, - tecUNFUNDED_ADD = 102, + tecUNFUNDED_ADD = 102, // Unused legacy code tecUNFUNDED_OFFER = 103, tecUNFUNDED_PAYMENT = 104, tecFAILED_PROCESSING = 105, @@ -250,7 +250,7 @@ enum TECcodes : TERUnderlyingType { tecNO_LINE_INSUF_RESERVE = 126, tecNO_LINE_REDUNDANT = 127, tecPATH_DRY = 128, - tecUNFUNDED = 129, // Deprecated, old ambiguous unfunded. 
+ tecUNFUNDED = 129, tecNO_ALTERNATIVE_KEY = 130, tecNO_REGULAR_KEY = 131, tecOWNERS = 132, diff --git a/src/ripple/protocol/impl/TER.cpp b/src/ripple/protocol/impl/TER.cpp index 7f0e795cf3f..f496711dd92 100644 --- a/src/ripple/protocol/impl/TER.cpp +++ b/src/ripple/protocol/impl/TER.cpp @@ -31,230 +31,144 @@ static std::unordered_map< std::pair> const& transResults() { - static std::unordered_map< - TERUnderlyingType, - std::pair> const results{ - {tecCLAIM, {"tecCLAIM", "Fee claimed. Sequence used. No action."}}, - {tecDIR_FULL, {"tecDIR_FULL", "Can not add entry to full directory."}}, - {tecFAILED_PROCESSING, - {"tecFAILED_PROCESSING", "Failed to correctly process transaction."}}, - {tecINSUF_RESERVE_LINE, - {"tecINSUF_RESERVE_LINE", "Insufficient reserve to add trust line."}}, - {tecINSUF_RESERVE_OFFER, - {"tecINSUF_RESERVE_OFFER", "Insufficient reserve to create offer."}}, - {tecNO_DST, - {"tecNO_DST", "Destination does not exist. Send XRP to create it."}}, - {tecNO_DST_INSUF_XRP, - {"tecNO_DST_INSUF_XRP", - "Destination does not exist. Too little XRP sent to create it."}}, - {tecNO_LINE_INSUF_RESERVE, - {"tecNO_LINE_INSUF_RESERVE", - "No such line. Too little reserve to create it."}}, - {tecNO_LINE_REDUNDANT, - {"tecNO_LINE_REDUNDANT", "Can't set non-existent line to default."}}, - {tecPATH_DRY, {"tecPATH_DRY", "Path could not send partial amount."}}, - {tecPATH_PARTIAL, - {"tecPATH_PARTIAL", "Path could not send full amount."}}, - {tecNO_ALTERNATIVE_KEY, - {"tecNO_ALTERNATIVE_KEY", - "The operation would remove the ability to sign transactions with " - "the account."}}, - {tecNO_REGULAR_KEY, {"tecNO_REGULAR_KEY", "Regular key is not set."}}, - {tecOVERSIZE, {"tecOVERSIZE", "Object exceeded serialization limits."}}, - {tecUNFUNDED, - {"tecUNFUNDED", "One of _ADD, _OFFER, or _SEND. 
Deprecated."}}, - {tecUNFUNDED_ADD, - {"tecUNFUNDED_ADD", "Insufficient XRP balance for WalletAdd."}}, - {tecUNFUNDED_OFFER, - {"tecUNFUNDED_OFFER", "Insufficient balance to fund created offer."}}, - {tecUNFUNDED_PAYMENT, - {"tecUNFUNDED_PAYMENT", "Insufficient XRP balance to send."}}, - {tecOWNERS, {"tecOWNERS", "Non-zero owner count."}}, - {tecNO_ISSUER, {"tecNO_ISSUER", "Issuer account does not exist."}}, - {tecNO_AUTH, {"tecNO_AUTH", "Not authorized to hold asset."}}, - {tecNO_LINE, {"tecNO_LINE", "No such line."}}, - {tecINSUFF_FEE, {"tecINSUFF_FEE", "Insufficient balance to pay fee."}}, - {tecFROZEN, {"tecFROZEN", "Asset is frozen."}}, - {tecNO_TARGET, {"tecNO_TARGET", "Target account does not exist."}}, - {tecNO_PERMISSION, - {"tecNO_PERMISSION", "No permission to perform requested operation."}}, - {tecNO_ENTRY, {"tecNO_ENTRY", "No matching entry found."}}, - {tecINSUFFICIENT_RESERVE, - {"tecINSUFFICIENT_RESERVE", - "Insufficient reserve to complete requested operation."}}, - {tecNEED_MASTER_KEY, - {"tecNEED_MASTER_KEY", - "The operation requires the use of the Master Key."}}, - {tecDST_TAG_NEEDED, - {"tecDST_TAG_NEEDED", "A destination tag is required."}}, - {tecINTERNAL, - {"tecINTERNAL", "An internal error has occurred during processing."}}, - {tecCRYPTOCONDITION_ERROR, - {"tecCRYPTOCONDITION_ERROR", - "Malformed, invalid, or mismatched conditional or fulfillment."}}, - {tecINVARIANT_FAILED, - {"tecINVARIANT_FAILED", - "One or more invariants for the transaction were not satisfied."}}, - {tecEXPIRED, {"tecEXPIRED", "Expiration time is passed."}}, - {tecDUPLICATE, {"tecDUPLICATE", "Ledger object already exists."}}, - {tecKILLED, {"tecKILLED", "FillOrKill offer killed."}}, - {tecHAS_OBLIGATIONS, - {"tecHAS_OBLIGATIONS", - "The account cannot be deleted since it has obligations."}}, - {tecTOO_SOON, - {"tecTOO_SOON", - "It is too early to attempt the requested operation. 
Please wait."}}, - - {tefALREADY, - {"tefALREADY", "The exact transaction was already in this ledger."}}, - {tefBAD_ADD_AUTH, - {"tefBAD_ADD_AUTH", "Not authorized to add account."}}, - {tefBAD_AUTH, - {"tefBAD_AUTH", "Transaction's public key is not authorized."}}, - {tefBAD_LEDGER, {"tefBAD_LEDGER", "Ledger in unexpected state."}}, - {tefBAD_QUORUM, - {"tefBAD_QUORUM", "Signatures provided do not meet the quorum."}}, - {tefBAD_SIGNATURE, - {"tefBAD_SIGNATURE", "A signature is provided for a non-signer."}}, - {tefCREATED, {"tefCREATED", "Can't add an already created account."}}, - {tefEXCEPTION, {"tefEXCEPTION", "Unexpected program state."}}, - {tefFAILURE, {"tefFAILURE", "Failed to apply."}}, - {tefINTERNAL, {"tefINTERNAL", "Internal error."}}, - {tefMASTER_DISABLED, {"tefMASTER_DISABLED", "Master key is disabled."}}, - {tefMAX_LEDGER, {"tefMAX_LEDGER", "Ledger sequence too high."}}, - {tefNO_AUTH_REQUIRED, {"tefNO_AUTH_REQUIRED", "Auth is not required."}}, - {tefNOT_MULTI_SIGNING, - {"tefNOT_MULTI_SIGNING", - "Account has no appropriate list of multi-signers."}}, - {tefPAST_SEQ, - {"tefPAST_SEQ", "This sequence number has already passed."}}, - {tefWRONG_PRIOR, - {"tefWRONG_PRIOR", "This previous transaction does not match."}}, - {tefBAD_AUTH_MASTER, - {"tefBAD_AUTH_MASTER", - "Auth for unclaimed account needs correct master key."}}, - {tefINVARIANT_FAILED, - {"tefINVARIANT_FAILED", - "Fee claim violated invariants for the transaction."}}, - {tefTOO_BIG, {"tefTOO_BIG", "Transaction affects too many items."}}, - - {telLOCAL_ERROR, {"telLOCAL_ERROR", "Local failure."}}, - {telBAD_DOMAIN, {"telBAD_DOMAIN", "Domain too long."}}, - {telBAD_PATH_COUNT, - {"telBAD_PATH_COUNT", "Malformed: Too many paths."}}, - {telBAD_PUBLIC_KEY, {"telBAD_PUBLIC_KEY", "Public key too long."}}, - {telFAILED_PROCESSING, - {"telFAILED_PROCESSING", "Failed to correctly process transaction."}}, - {telINSUF_FEE_P, {"telINSUF_FEE_P", "Fee insufficient."}}, - {telNO_DST_PARTIAL, - 
{"telNO_DST_PARTIAL", - "Partial payment to create account not allowed."}}, - {telCAN_NOT_QUEUE, {"telCAN_NOT_QUEUE", "Can not queue at this time."}}, - {telCAN_NOT_QUEUE_BALANCE, - {"telCAN_NOT_QUEUE_BALANCE", - "Can not queue at this time: insufficient balance to pay all queued " - "fees."}}, - {telCAN_NOT_QUEUE_BLOCKS, - {"telCAN_NOT_QUEUE_BLOCKS", - "Can not queue at this time: would block later queued " - "transaction(s)."}}, - {telCAN_NOT_QUEUE_BLOCKED, - {"telCAN_NOT_QUEUE_BLOCKED", - "Can not queue at this time: blocking transaction in queue."}}, - {telCAN_NOT_QUEUE_FEE, - {"telCAN_NOT_QUEUE_FEE", - "Can not queue at this time: fee insufficient to replace queued " - "transaction."}}, - {telCAN_NOT_QUEUE_FULL, - {"telCAN_NOT_QUEUE_FULL", - "Can not queue at this time: queue is full."}}, - - {temMALFORMED, {"temMALFORMED", "Malformed transaction."}}, - {temBAD_AMOUNT, {"temBAD_AMOUNT", "Can only send positive amounts."}}, - {temBAD_CURRENCY, {"temBAD_CURRENCY", "Malformed: Bad currency."}}, - {temBAD_EXPIRATION, - {"temBAD_EXPIRATION", "Malformed: Bad expiration."}}, - {temBAD_FEE, {"temBAD_FEE", "Invalid fee, negative or not XRP."}}, - {temBAD_ISSUER, {"temBAD_ISSUER", "Malformed: Bad issuer."}}, - {temBAD_LIMIT, {"temBAD_LIMIT", "Limits must be non-negative."}}, - {temBAD_OFFER, {"temBAD_OFFER", "Malformed: Bad offer."}}, - {temBAD_PATH, {"temBAD_PATH", "Malformed: Bad path."}}, - {temBAD_PATH_LOOP, {"temBAD_PATH_LOOP", "Malformed: Loop in path."}}, - {temBAD_QUORUM, {"temBAD_QUORUM", "Malformed: Quorum is unreachable."}}, - {temBAD_REGKEY, - {"temBAD_REGKEY", - "Malformed: Regular key cannot be same as master key."}}, - {temBAD_SEND_XRP_LIMIT, - {"temBAD_SEND_XRP_LIMIT", - "Malformed: Limit quality is not allowed for XRP to XRP."}}, - {temBAD_SEND_XRP_MAX, - {"temBAD_SEND_XRP_MAX", - "Malformed: Send max is not allowed for XRP to XRP."}}, - {temBAD_SEND_XRP_NO_DIRECT, - {"temBAD_SEND_XRP_NO_DIRECT", - "Malformed: No Ripple direct is not allowed for XRP to 
XRP."}}, - {temBAD_SEND_XRP_PARTIAL, - {"temBAD_SEND_XRP_PARTIAL", - "Malformed: Partial payment is not allowed for XRP to XRP."}}, - {temBAD_SEND_XRP_PATHS, - {"temBAD_SEND_XRP_PATHS", - "Malformed: Paths are not allowed for XRP to XRP."}}, - {temBAD_SEQUENCE, - {"temBAD_SEQUENCE", "Malformed: Sequence is not in the past."}}, - {temBAD_SIGNATURE, {"temBAD_SIGNATURE", "Malformed: Bad signature."}}, - {temBAD_SIGNER, - {"temBAD_SIGNER", - "Malformed: No signer may duplicate account or other signers."}}, - {temBAD_SRC_ACCOUNT, - {"temBAD_SRC_ACCOUNT", "Malformed: Bad source account."}}, - {temBAD_TRANSFER_RATE, - {"temBAD_TRANSFER_RATE", - "Malformed: Transfer rate must be >= 1.0 and <= 2.0"}}, - {temBAD_WEIGHT, - {"temBAD_WEIGHT", "Malformed: Weight must be a positive value."}}, - {temDST_IS_SRC, {"temDST_IS_SRC", "Destination may not be source."}}, - {temDST_NEEDED, {"temDST_NEEDED", "Destination not specified."}}, - {temINVALID, {"temINVALID", "The transaction is ill-formed."}}, - {temINVALID_FLAG, - {"temINVALID_FLAG", "The transaction has an invalid flag."}}, - {temREDUNDANT, {"temREDUNDANT", "Sends same currency to self."}}, - {temRIPPLE_EMPTY, {"temRIPPLE_EMPTY", "PathSet with no paths."}}, - {temUNCERTAIN, - {"temUNCERTAIN", "In process of determining result. Never returned."}}, - {temUNKNOWN, - {"temUNKNOWN", - "The transaction requires logic that is not implemented yet."}}, - {temDISABLED, - {"temDISABLED", - "The transaction requires logic that is currently disabled."}}, - {temBAD_TICK_SIZE, - {"temBAD_TICK_SIZE", "Malformed: Tick size out of range."}}, - {temINVALID_ACCOUNT_ID, - {"temINVALID_ACCOUNT_ID", - "Malformed: A field contains an invalid account ID."}}, - {temCANNOT_PREAUTH_SELF, - {"temCANNOT_PREAUTH_SELF", - "Malformed: An account may not preauthorize itself."}}, + // clang-format off + + // Macros are generally ugly, but they can help make code readable to + // humans without affecting the compiler. 
+#define MAKE_ERROR(code, desc) { code, { #code, desc } } + + static + std::unordered_map< + TERUnderlyingType, + std::pair> const results + { + MAKE_ERROR(tecCLAIM, "Fee claimed. Sequence used. No action."), + MAKE_ERROR(tecDIR_FULL, "Can not add entry to full directory."), + MAKE_ERROR(tecFAILED_PROCESSING, "Failed to correctly process transaction."), + MAKE_ERROR(tecINSUF_RESERVE_LINE, "Insufficient reserve to add trust line."), + MAKE_ERROR(tecINSUF_RESERVE_OFFER, "Insufficient reserve to create offer."), + MAKE_ERROR(tecNO_DST, "Destination does not exist. Send XRP to create it."), + MAKE_ERROR(tecNO_DST_INSUF_XRP, "Destination does not exist. Too little XRP sent to create it."), + MAKE_ERROR(tecNO_LINE_INSUF_RESERVE, "No such line. Too little reserve to create it."), + MAKE_ERROR(tecNO_LINE_REDUNDANT, "Can't set non-existent line to default."), + MAKE_ERROR(tecPATH_DRY, "Path could not send partial amount."), + MAKE_ERROR(tecPATH_PARTIAL, "Path could not send full amount."), + MAKE_ERROR(tecNO_ALTERNATIVE_KEY, "The operation would remove the ability to sign transactions with the account."), + MAKE_ERROR(tecNO_REGULAR_KEY, "Regular key is not set."), + MAKE_ERROR(tecOVERSIZE, "Object exceeded serialization limits."), + MAKE_ERROR(tecUNFUNDED, "Not enough XRP to satisfy the reserve requirement."), + MAKE_ERROR(tecUNFUNDED_ADD, "DEPRECATED."), + MAKE_ERROR(tecUNFUNDED_OFFER, "Insufficient balance to fund created offer."), + MAKE_ERROR(tecUNFUNDED_PAYMENT, "Insufficient XRP balance to send."), + MAKE_ERROR(tecOWNERS, "Non-zero owner count."), + MAKE_ERROR(tecNO_ISSUER, "Issuer account does not exist."), + MAKE_ERROR(tecNO_AUTH, "Not authorized to hold asset."), + MAKE_ERROR(tecNO_LINE, "No such line."), + MAKE_ERROR(tecINSUFF_FEE, "Insufficient balance to pay fee."), + MAKE_ERROR(tecFROZEN, "Asset is frozen."), + MAKE_ERROR(tecNO_TARGET, "Target account does not exist."), + MAKE_ERROR(tecNO_PERMISSION, "No permission to perform requested operation."), + 
MAKE_ERROR(tecNO_ENTRY, "No matching entry found."), + MAKE_ERROR(tecINSUFFICIENT_RESERVE, "Insufficient reserve to complete requested operation."), + MAKE_ERROR(tecNEED_MASTER_KEY, "The operation requires the use of the Master Key."), + MAKE_ERROR(tecDST_TAG_NEEDED, "A destination tag is required."), + MAKE_ERROR(tecINTERNAL, "An internal error has occurred during processing."), + MAKE_ERROR(tecCRYPTOCONDITION_ERROR, "Malformed, invalid, or mismatched conditional or fulfillment."), + MAKE_ERROR(tecINVARIANT_FAILED, "One or more invariants for the transaction were not satisfied."), + MAKE_ERROR(tecEXPIRED, "Expiration time is passed."), + MAKE_ERROR(tecDUPLICATE, "Ledger object already exists."), + MAKE_ERROR(tecKILLED, "FillOrKill offer killed."), + MAKE_ERROR(tecHAS_OBLIGATIONS, "The account cannot be deleted since it has obligations."), + MAKE_ERROR(tecTOO_SOON, "It is too early to attempt the requested operation. Please wait."), + + MAKE_ERROR(tefALREADY, "The exact transaction was already in this ledger."), + MAKE_ERROR(tefBAD_ADD_AUTH, "Not authorized to add account."), + MAKE_ERROR(tefBAD_AUTH, "Transaction's public key is not authorized."), + MAKE_ERROR(tefBAD_LEDGER, "Ledger in unexpected state."), + MAKE_ERROR(tefBAD_QUORUM, "Signatures provided do not meet the quorum."), + MAKE_ERROR(tefBAD_SIGNATURE, "A signature is provided for a non-signer."), + MAKE_ERROR(tefCREATED, "Can't add an already created account."), + MAKE_ERROR(tefEXCEPTION, "Unexpected program state."), + MAKE_ERROR(tefFAILURE, "Failed to apply."), + MAKE_ERROR(tefINTERNAL, "Internal error."), + MAKE_ERROR(tefMASTER_DISABLED, "Master key is disabled."), + MAKE_ERROR(tefMAX_LEDGER, "Ledger sequence too high."), + MAKE_ERROR(tefNO_AUTH_REQUIRED, "Auth is not required."), + MAKE_ERROR(tefNOT_MULTI_SIGNING, "Account has no appropriate list of multi-signers."), + MAKE_ERROR(tefPAST_SEQ, "This sequence number has already passed."), + MAKE_ERROR(tefWRONG_PRIOR, "This previous transaction does not 
match."), + MAKE_ERROR(tefBAD_AUTH_MASTER, "Auth for unclaimed account needs correct master key."), + MAKE_ERROR(tefINVARIANT_FAILED, "Fee claim violated invariants for the transaction."), + MAKE_ERROR(tefTOO_BIG, "Transaction affects too many items."), + + MAKE_ERROR(telLOCAL_ERROR, "Local failure."), + MAKE_ERROR(telBAD_DOMAIN, "Domain too long."), + MAKE_ERROR(telBAD_PATH_COUNT, "Malformed: Too many paths."), + MAKE_ERROR(telBAD_PUBLIC_KEY, "Public key too long."), + MAKE_ERROR(telFAILED_PROCESSING, "Failed to correctly process transaction."), + MAKE_ERROR(telINSUF_FEE_P, "Fee insufficient."), + MAKE_ERROR(telNO_DST_PARTIAL, "Partial payment to create account not allowed."), + MAKE_ERROR(telCAN_NOT_QUEUE, "Can not queue at this time."), + MAKE_ERROR(telCAN_NOT_QUEUE_BALANCE, "Can not queue at this time: insufficient balance to pay all queued fees."), + MAKE_ERROR(telCAN_NOT_QUEUE_BLOCKS, "Can not queue at this time: would block later queued transaction(s)."), + MAKE_ERROR(telCAN_NOT_QUEUE_BLOCKED, "Can not queue at this time: blocking transaction in queue."), + MAKE_ERROR(telCAN_NOT_QUEUE_FEE, "Can not queue at this time: fee insufficient to replace queued transaction."), + MAKE_ERROR(telCAN_NOT_QUEUE_FULL, "Can not queue at this time: queue is full."), + + MAKE_ERROR(temMALFORMED, "Malformed transaction."), + MAKE_ERROR(temBAD_AMOUNT, "Can only send positive amounts."), + MAKE_ERROR(temBAD_CURRENCY, "Malformed: Bad currency."), + MAKE_ERROR(temBAD_EXPIRATION, "Malformed: Bad expiration."), + MAKE_ERROR(temBAD_FEE, "Invalid fee, negative or not XRP."), + MAKE_ERROR(temBAD_ISSUER, "Malformed: Bad issuer."), + MAKE_ERROR(temBAD_LIMIT, "Limits must be non-negative."), + MAKE_ERROR(temBAD_OFFER, "Malformed: Bad offer."), + MAKE_ERROR(temBAD_PATH, "Malformed: Bad path."), + MAKE_ERROR(temBAD_PATH_LOOP, "Malformed: Loop in path."), + MAKE_ERROR(temBAD_QUORUM, "Malformed: Quorum is unreachable."), + MAKE_ERROR(temBAD_REGKEY, "Malformed: Regular key cannot be same as 
master key."), + MAKE_ERROR(temBAD_SEND_XRP_LIMIT, "Malformed: Limit quality is not allowed for XRP to XRP."), + MAKE_ERROR(temBAD_SEND_XRP_MAX, "Malformed: Send max is not allowed for XRP to XRP."), + MAKE_ERROR(temBAD_SEND_XRP_NO_DIRECT, "Malformed: No Ripple direct is not allowed for XRP to XRP."), + MAKE_ERROR(temBAD_SEND_XRP_PARTIAL, "Malformed: Partial payment is not allowed for XRP to XRP."), + MAKE_ERROR(temBAD_SEND_XRP_PATHS, "Malformed: Paths are not allowed for XRP to XRP."), + MAKE_ERROR(temBAD_SEQUENCE, "Malformed: Sequence is not in the past."), + MAKE_ERROR(temBAD_SIGNATURE, "Malformed: Bad signature."), + MAKE_ERROR(temBAD_SIGNER, "Malformed: No signer may duplicate account or other signers."), + MAKE_ERROR(temBAD_SRC_ACCOUNT, "Malformed: Bad source account."), + MAKE_ERROR(temBAD_TRANSFER_RATE, "Malformed: Transfer rate must be >= 1.0 and <= 2.0"), + MAKE_ERROR(temBAD_WEIGHT, "Malformed: Weight must be a positive value."), + MAKE_ERROR(temDST_IS_SRC, "Destination may not be source."), + MAKE_ERROR(temDST_NEEDED, "Destination not specified."), + MAKE_ERROR(temINVALID, "The transaction is ill-formed."), + MAKE_ERROR(temINVALID_FLAG, "The transaction has an invalid flag."), + MAKE_ERROR(temREDUNDANT, "Sends same currency to self."), + MAKE_ERROR(temRIPPLE_EMPTY, "PathSet with no paths."), + MAKE_ERROR(temUNCERTAIN, "In process of determining result. 
Never returned."), + MAKE_ERROR(temUNKNOWN, "The transaction requires logic that is not implemented yet."), + MAKE_ERROR(temDISABLED, "The transaction requires logic that is currently disabled."), + MAKE_ERROR(temBAD_TICK_SIZE, "Malformed: Tick size out of range."), + MAKE_ERROR(temINVALID_ACCOUNT_ID, "Malformed: A field contains an invalid account ID."), + MAKE_ERROR(temCANNOT_PREAUTH_SELF, "Malformed: An account may not preauthorize itself."), + + MAKE_ERROR(terRETRY, "Retry transaction."), + MAKE_ERROR(terFUNDS_SPENT, "DEPRECATED."), + MAKE_ERROR(terINSUF_FEE_B, "Account balance can't pay fee."), + MAKE_ERROR(terLAST, "DEPRECATED."), + MAKE_ERROR(terNO_RIPPLE, "Path does not permit rippling."), + MAKE_ERROR(terNO_ACCOUNT, "The source account does not exist."), + MAKE_ERROR(terNO_AUTH, "Not authorized to hold IOUs."), + MAKE_ERROR(terNO_LINE, "No such line."), + MAKE_ERROR(terPRE_SEQ, "Missing/inapplicable prior transaction."), + MAKE_ERROR(terOWNERS, "Non-zero owner count."), + MAKE_ERROR(terQUEUED, "Held until escalated fee drops."), + + MAKE_ERROR(tesSUCCESS, "The transaction was applied. Only final in a validated ledger."), + }; + // clang-format on - {terRETRY, {"terRETRY", "Retry transaction."}}, - {terFUNDS_SPENT, - {"terFUNDS_SPENT", - "Can't set password, password set funds already spent."}}, - {terINSUF_FEE_B, {"terINSUF_FEE_B", "Account balance can't pay fee."}}, - {terLAST, {"terLAST", "Process last."}}, - {terNO_RIPPLE, {"terNO_RIPPLE", "Path does not permit rippling."}}, - {terNO_ACCOUNT, - {"terNO_ACCOUNT", "The source account does not exist."}}, - {terNO_AUTH, {"terNO_AUTH", "Not authorized to hold IOUs."}}, - {terNO_LINE, {"terNO_LINE", "No such line."}}, - {terPRE_SEQ, {"terPRE_SEQ", "Missing/inapplicable prior transaction."}}, - {terOWNERS, {"terOWNERS", "Non-zero owner count."}}, - {terQUEUED, {"terQUEUED", "Held until escalated fee drops."}}, +#undef MAKE_ERROR - {tesSUCCESS, - {"tesSUCCESS", - "The transaction was applied. 
Only final in a validated ledger."}}, - }; return results; } From 5563d59153866459fac8f9692ced7354587e25d8 Mon Sep 17 00:00:00 2001 From: CJ Cobb <46455409+cjcobb23@users.noreply.github.com> Date: Tue, 12 May 2020 14:50:02 -0700 Subject: [PATCH 12/19] Add comments to protobuf files --- src/ripple/proto/org/xrpl/rpc/v1/get_account_info.proto | 5 +++++ .../org/xrpl/rpc/v1/get_account_transaction_history.proto | 7 +++++++ src/ripple/proto/org/xrpl/rpc/v1/get_transaction.proto | 6 +++++- src/ripple/proto/org/xrpl/rpc/v1/xrp_ledger.proto | 2 ++ 4 files changed, 19 insertions(+), 1 deletion(-) diff --git a/src/ripple/proto/org/xrpl/rpc/v1/get_account_info.proto b/src/ripple/proto/org/xrpl/rpc/v1/get_account_info.proto index b533ebe6a69..9a2a877cd6f 100644 --- a/src/ripple/proto/org/xrpl/rpc/v1/get_account_info.proto +++ b/src/ripple/proto/org/xrpl/rpc/v1/get_account_info.proto @@ -19,6 +19,11 @@ message GetAccountInfoRequest bool strict = 2; + // Which ledger to use to retrieve data. + // If this field is not set, the server will use the open ledger. + // The open ledger includes data that is not validated or final. + // To retrieve the most up to date and validated data, use + // SHORTCUT_VALIDATED LedgerSpecifier ledger = 3; bool queue = 4; diff --git a/src/ripple/proto/org/xrpl/rpc/v1/get_account_transaction_history.proto b/src/ripple/proto/org/xrpl/rpc/v1/get_account_transaction_history.proto index 5b9e677c4a4..c4889a6bdbe 100644 --- a/src/ripple/proto/org/xrpl/rpc/v1/get_account_transaction_history.proto +++ b/src/ripple/proto/org/xrpl/rpc/v1/get_account_transaction_history.proto @@ -16,6 +16,13 @@ message GetAccountTransactionHistoryRequest // What ledger to include results from. Specifying a not yet validated // ledger results in an error. Not specifying a ledger uses the entire // range of validated ledgers available to the server. + // Note, this parameter acts as a filter, and can only reduce the number of + // results. 
Specifying a single ledger will return only transactions from + // that ledger. This includes specifying a ledger with a Shortcut. For + // example, specifying SHORTCUT_VALIDATED will result in only transactions + // that were part of the most recently validated ledger being returned. + // Specifying a range of ledgers results in only transactions that were + // included in a ledger within the specified range being returned. oneof ledger { LedgerSpecifier ledger_specifier = 2; diff --git a/src/ripple/proto/org/xrpl/rpc/v1/get_transaction.proto b/src/ripple/proto/org/xrpl/rpc/v1/get_transaction.proto index f872619ad7d..e0c21c598a2 100644 --- a/src/ripple/proto/org/xrpl/rpc/v1/get_transaction.proto +++ b/src/ripple/proto/org/xrpl/rpc/v1/get_transaction.proto @@ -23,7 +23,11 @@ message GetTransactionRequest { // if true, return data in binary format. defaults to false bool binary = 2; - // search only specified range. optional + // If the transaction was not found, server will report whether the entire + // specified range was searched. The value is contained in the error message. + // The error message is of the form: + // "txn not found. searched_all = [true,false]" + // If the transaction was found, this parameter is ignored. LedgerRange ledger_range = 3; } diff --git a/src/ripple/proto/org/xrpl/rpc/v1/xrp_ledger.proto b/src/ripple/proto/org/xrpl/rpc/v1/xrp_ledger.proto index 444f97d12be..7cb52605016 100644 --- a/src/ripple/proto/org/xrpl/rpc/v1/xrp_ledger.proto +++ b/src/ripple/proto/org/xrpl/rpc/v1/xrp_ledger.proto @@ -12,6 +12,7 @@ import "org/xrpl/rpc/v1/get_account_transaction_history.proto"; // RPCs available to interact with the XRP Ledger. +// The gRPC API mimics the JSON API. Refer to xrpl.org for documentation service XRPLedgerAPIService { // Get account info for an account on the XRP Ledger. 
@@ -26,5 +27,6 @@ service XRPLedgerAPIService { // Get the status of a transaction rpc GetTransaction(GetTransactionRequest) returns (GetTransactionResponse); + // Get all validated transactions associated with a given account rpc GetAccountTransactionHistory(GetAccountTransactionHistoryRequest) returns (GetAccountTransactionHistoryResponse); } From 96c4da85792f8a2f0d7d6b4704e02dcad691f6a2 Mon Sep 17 00:00:00 2001 From: Howard Hinnant Date: Thu, 20 Feb 2020 15:51:31 -0500 Subject: [PATCH 13/19] Improve handling of empty buffer in varint parsing (RIPD-1683) --- src/ripple/nodestore/impl/varint.h | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/ripple/nodestore/impl/varint.h b/src/ripple/nodestore/impl/varint.h index e74f53afd5c..75d8e58a751 100644 --- a/src/ripple/nodestore/impl/varint.h +++ b/src/ripple/nodestore/impl/varint.h @@ -55,6 +55,8 @@ template std::size_t read_varint(void const* buf, std::size_t buflen, std::size_t& t) { + if (buflen == 0) + return 0; t = 0; std::uint8_t const* p = reinterpret_cast(buf); std::size_t n = 0; From 9076f20b63020e763c28522d21144c975087c9f7 Mon Sep 17 00:00:00 2001 From: Howard Hinnant Date: Wed, 27 May 2020 17:44:20 -0400 Subject: [PATCH 14/19] Consolidate "Not Synced" Error Messages * Fixes #3269 * Work on a version 2 of the XRP Network API has begun. The new API returns the code `notSynced` in place of `noClosed`, `noCurrent`, and `noNetwork`. And `invalidLgrRange` is returned in place of `lgrIdxInvalid`. * The version 2 API can be specified by adding "api_version" : 2 to your json request. The default version remains 1 (if unspecified), except for the command line interface which tracks version 2. * It can be re-enabled by setting ApiMaximumSupportedVersion to 2. 
--- RELEASENOTES.md | 5 ++++ src/ripple/app/main/Application.cpp | 8 ++++--- src/ripple/app/main/GRPCServer.cpp | 3 ++- src/ripple/app/main/GRPCServer.h | 2 ++ src/ripple/net/impl/RPCCall.cpp | 10 ++++++-- src/ripple/protocol/ErrorCodes.h | 2 +- src/ripple/protocol/impl/ErrorCodes.cpp | 3 ++- src/ripple/rpc/Context.h | 2 +- src/ripple/rpc/handlers/AccountTx.cpp | 14 +++++++++-- src/ripple/rpc/handlers/AccountTxOld.cpp | 8 +++++-- src/ripple/rpc/handlers/LedgerRequest.cpp | 6 ++++- src/ripple/rpc/handlers/RipplePathFind.cpp | 4 +++- src/ripple/rpc/impl/Handler.h | 16 +++++++++---- src/ripple/rpc/impl/RPCHandler.cpp | 22 +++++++++++++---- src/ripple/rpc/impl/RPCHelpers.cpp | 26 ++++++++++++++++---- src/ripple/rpc/impl/ServerHandlerImp.cpp | 8 +++---- src/ripple/rpc/impl/TransactionSign.cpp | 14 +++++++---- src/test/app/Path_test.cpp | 12 ++++++---- src/test/rpc/AccountTx_test.cpp | 7 ++++-- src/test/rpc/LedgerRequestRPC_test.cpp | 16 ++++++++++--- src/test/rpc/RPCCall_test.cpp | 28 ++++++++++++++++++++-- 21 files changed, 169 insertions(+), 47 deletions(-) diff --git a/RELEASENOTES.md b/RELEASENOTES.md index 1cc76688909..1b80640a5af 100644 --- a/RELEASENOTES.md +++ b/RELEASENOTES.md @@ -7,6 +7,11 @@ This document contains the release notes for `rippled`, the reference server imp Have new ideas? Need help with setting up your node? Come visit us [here](https://github.com/ripple/rippled/issues/new/choose) +# Change Log + +- Work on a version 2 of the XRP Network API has begun. The new API returns the code `notSynced` in place of `noClosed`, `noCurrent`, and `noNetwork`. And `invalidLgrRange` is returned in place of `lgrIdxInvalid`. +- The version 2 API can be specified by adding "api_version" : 2 to your json request. The default version remains 1 (if unspecified), except for the command line interface which always uses the latest version. 
+ # Releases ## Version 1.5.0 diff --git a/src/ripple/app/main/Application.cpp b/src/ripple/app/main/Application.cpp index 15713a6dc3d..a755017e407 100644 --- a/src/ripple/app/main/Application.cpp +++ b/src/ripple/app/main/Application.cpp @@ -1756,9 +1756,11 @@ ApplicationImp::setup() getOPs(), getLedgerMaster(), c, - Role::ADMIN}, - jvCommand, - RPC::ApiMaximumSupportedVersion}; + Role::ADMIN, + {}, + {}, + RPC::ApiMaximumSupportedVersion}, + jvCommand}; Json::Value jvResult; RPC::doCommand(context, jvResult); diff --git a/src/ripple/app/main/GRPCServer.cpp b/src/ripple/app/main/GRPCServer.cpp index acf33784f38..007e5f0c499 100644 --- a/src/ripple/app/main/GRPCServer.cpp +++ b/src/ripple/app/main/GRPCServer.cpp @@ -142,7 +142,8 @@ GRPCServerImpl::CallData::process( usage, role, coro, - InfoSub::pointer()}, + InfoSub::pointer(), + apiVersion}, request_}; // Make sure we can currently handle the rpc diff --git a/src/ripple/app/main/GRPCServer.h b/src/ripple/app/main/GRPCServer.h index 5175e2e256d..bb06784c24f 100644 --- a/src/ripple/app/main/GRPCServer.h +++ b/src/ripple/app/main/GRPCServer.h @@ -105,6 +105,8 @@ class GRPCServerImpl final template using Handler = std::function( RPC::GRPCContext&)>; + // This implementation is currently limited to v1 of the API + static unsigned constexpr apiVersion = 1; public: explicit GRPCServerImpl(Application& app); diff --git a/src/ripple/net/impl/RPCCall.cpp b/src/ripple/net/impl/RPCCall.cpp index a565d4bc321..3b956f8cba3 100644 --- a/src/ripple/net/impl/RPCCall.cpp +++ b/src/ripple/net/impl/RPCCall.cpp @@ -314,7 +314,10 @@ class RPCParser if (uLedgerMax != -1 && uLedgerMax < uLedgerMin) { - return rpcError(rpcLGR_IDXS_INVALID); + // The command line always follows ApiMaximumSupportedVersion + if (RPC::ApiMaximumSupportedVersion == 1) + return rpcError(rpcLGR_IDXS_INVALID); + return rpcError(rpcNOT_SYNCED); } jvRequest[jss::ledger_index_min] = jvParams[1u].asInt(); @@ -384,7 +387,10 @@ class RPCParser if (uLedgerMax != -1 && 
uLedgerMax < uLedgerMin) { - return rpcError(rpcLGR_IDXS_INVALID); + // The command line always follows ApiMaximumSupportedVersion + if (RPC::ApiMaximumSupportedVersion == 1) + return rpcError(rpcLGR_IDXS_INVALID); + return rpcError(rpcNOT_SYNCED); } jvRequest[jss::ledger_index_min] = jvParams[1u].asInt(); diff --git a/src/ripple/protocol/ErrorCodes.h b/src/ripple/protocol/ErrorCodes.h index a3fb9e590bb..68c6c4395f5 100644 --- a/src/ripple/protocol/ErrorCodes.h +++ b/src/ripple/protocol/ErrorCodes.h @@ -64,9 +64,9 @@ enum error_code_i { rpcNO_CLOSED = 15, rpcNO_CURRENT = 16, rpcNO_NETWORK = 17, + rpcNOT_SYNCED = 18, // Ledger state - // unused 18, rpcACT_NOT_FOUND = 19, // unused 20, rpcLGR_NOT_FOUND = 21, diff --git a/src/ripple/protocol/impl/ErrorCodes.cpp b/src/ripple/protocol/impl/ErrorCodes.cpp index 9110daf40f7..3df10624655 100644 --- a/src/ripple/protocol/impl/ErrorCodes.cpp +++ b/src/ripple/protocol/impl/ErrorCodes.cpp @@ -90,8 +90,9 @@ constexpr static ErrorInfo unorderedErrorInfos[]{ {rpcNOT_SUPPORTED, "notSupported", "Operation not supported."}, {rpcNO_CLOSED, "noClosed", "Closed ledger is unavailable."}, {rpcNO_CURRENT, "noCurrent", "Current ledger is unavailable."}, + {rpcNOT_SYNCED, "notSynced", "Not synced to the network."}, {rpcNO_EVENTS, "noEvents", "Current transport does not support events."}, - {rpcNO_NETWORK, "noNetwork", "Not synced to Ripple network."}, + {rpcNO_NETWORK, "noNetwork", "Not synced to the network."}, {rpcNO_PERMISSION, "noPermission", "You don't have permission for this command."}, diff --git a/src/ripple/rpc/Context.h b/src/ripple/rpc/Context.h index 1188006362e..7a22ed9fe0c 100644 --- a/src/ripple/rpc/Context.h +++ b/src/ripple/rpc/Context.h @@ -47,6 +47,7 @@ struct Context Role role; std::shared_ptr coro{}; InfoSub::pointer infoSub{}; + unsigned int apiVersion; }; struct JsonContext : public Context @@ -62,7 +63,6 @@ struct JsonContext : public Context Json::Value params; - unsigned int apiVersion; Headers headers{}; }; diff 
--git a/src/ripple/rpc/handlers/AccountTx.cpp b/src/ripple/rpc/handlers/AccountTx.cpp index f4e3a6b9d3f..ffcbe145fbb 100644 --- a/src/ripple/rpc/handlers/AccountTx.cpp +++ b/src/ripple/rpc/handlers/AccountTx.cpp @@ -214,7 +214,9 @@ getLedgerRange( if (!bValidated) { // Don't have a validated ledger range. - return rpcLGR_IDXS_INVALID; + if (context.apiVersion == 1) + return rpcLGR_IDXS_INVALID; + return rpcNOT_SYNCED; } std::uint32_t uLedgerMin = uValidatedMin; @@ -236,7 +238,11 @@ getLedgerRange( uLedgerMax = ls.max; } if (uLedgerMax < uLedgerMin) - return rpcLGR_IDXS_INVALID; + { + if (context.apiVersion == 1) + return rpcLGR_IDXS_INVALID; + return rpcINVALID_LGR_RANGE; + } } else { @@ -330,6 +336,10 @@ populateProtoResponse( { status = {grpc::StatusCode::NOT_FOUND, error.message()}; } + else if (error.toErrorCode() == rpcNOT_SYNCED) + { + status = {grpc::StatusCode::FAILED_PRECONDITION, error.message()}; + } else { status = {grpc::StatusCode::INVALID_ARGUMENT, error.message()}; diff --git a/src/ripple/rpc/handlers/AccountTxOld.cpp b/src/ripple/rpc/handlers/AccountTxOld.cpp index 472f999b621..5950f474d36 100644 --- a/src/ripple/rpc/handlers/AccountTxOld.cpp +++ b/src/ripple/rpc/handlers/AccountTxOld.cpp @@ -105,7 +105,9 @@ doAccountTxOld(RPC::JsonContext& context) if (!bValidated && (iLedgerMin == -1 || iLedgerMax == -1)) { // Don't have a validated ledger range. - return rpcError(rpcLGR_IDXS_INVALID); + if (context.apiVersion == 1) + return rpcError(rpcLGR_IDXS_INVALID); + return rpcError(rpcNOT_SYNCED); } uLedgerMin = iLedgerMin == -1 ? 
uValidatedMin : iLedgerMin; @@ -113,7 +115,9 @@ doAccountTxOld(RPC::JsonContext& context) if (uLedgerMax < uLedgerMin) { - return rpcError(rpcLGR_IDXS_INVALID); + if (context.apiVersion == 1) + return rpcError(rpcLGR_IDXS_INVALID); + return rpcError(rpcNOT_SYNCED); } } else diff --git a/src/ripple/rpc/handlers/LedgerRequest.cpp b/src/ripple/rpc/handlers/LedgerRequest.cpp index f7b12e95eb9..5d2d11b00aa 100644 --- a/src/ripple/rpc/handlers/LedgerRequest.cpp +++ b/src/ripple/rpc/handlers/LedgerRequest.cpp @@ -67,7 +67,11 @@ doLedgerRequest(RPC::JsonContext& context) // We need a validated ledger to get the hash from the sequence if (ledgerMaster.getValidatedLedgerAge() > RPC::Tuning::maxValidatedLedgerAge) - return rpcError(rpcNO_CURRENT); + { + if (context.apiVersion == 1) + return rpcError(rpcNO_CURRENT); + return rpcError(rpcNOT_SYNCED); + } ledgerIndex = jsonIndex.asInt(); auto ledger = ledgerMaster.getValidatedLedger(); diff --git a/src/ripple/rpc/handlers/RipplePathFind.cpp b/src/ripple/rpc/handlers/RipplePathFind.cpp index 4b03bb93f53..5e23a47bd52 100644 --- a/src/ripple/rpc/handlers/RipplePathFind.cpp +++ b/src/ripple/rpc/handlers/RipplePathFind.cpp @@ -49,7 +49,9 @@ doRipplePathFind(RPC::JsonContext& context) if (context.app.getLedgerMaster().getValidatedLedgerAge() > RPC::Tuning::maxValidatedLedgerAge) { - return rpcError(rpcNO_NETWORK); + if (context.apiVersion == 1) + return rpcError(rpcNO_NETWORK); + return rpcError(rpcNOT_SYNCED); } PathRequest::pointer request; diff --git a/src/ripple/rpc/impl/Handler.h b/src/ripple/rpc/impl/Handler.h index 09acd6ba165..6b6fa71e7cb 100644 --- a/src/ripple/rpc/impl/Handler.h +++ b/src/ripple/rpc/impl/Handler.h @@ -83,7 +83,9 @@ conditionMet(Condition condition_required, T& context) JLOG(context.j.info()) << "Insufficient network mode for RPC: " << context.netOps.strOperatingMode(); - return rpcNO_NETWORK; + if (context.apiVersion == 1) + return rpcNO_NETWORK; + return rpcNOT_SYNCED; } if 
(context.app.getOPs().isAmendmentBlocked() && @@ -99,7 +101,9 @@ conditionMet(Condition condition_required, T& context) if (context.ledgerMaster.getValidatedLedgerAge() > Tuning::maxValidatedLedgerAge) { - return rpcNO_CURRENT; + if (context.apiVersion == 1) + return rpcNO_CURRENT; + return rpcNOT_SYNCED; } auto const cID = context.ledgerMaster.getCurrentLedgerIndex(); @@ -110,14 +114,18 @@ conditionMet(Condition condition_required, T& context) JLOG(context.j.debug()) << "Current ledger ID(" << cID << ") is less than validated ledger ID(" << vID << ")"; - return rpcNO_CURRENT; + if (context.apiVersion == 1) + return rpcNO_CURRENT; + return rpcNOT_SYNCED; } } if ((condition_required & NEEDS_CLOSED_LEDGER) && !context.ledgerMaster.getClosedLedger()) { - return rpcNO_CLOSED; + if (context.apiVersion == 1) + return rpcNO_CLOSED; + return rpcNOT_SYNCED; } return rpcSUCCESS; diff --git a/src/ripple/rpc/impl/RPCHandler.cpp b/src/ripple/rpc/impl/RPCHandler.cpp index 9d4885b1ebf..cd15e91e0a7 100644 --- a/src/ripple/rpc/impl/RPCHandler.cpp +++ b/src/ripple/rpc/impl/RPCHandler.cpp @@ -65,9 +65,16 @@ namespace { Failure: { "result" : { + // api_version == 1 "error" : "noNetwork", - "error_code" : 16, - "error_message" : "Not synced to Ripple network.", + "error_code" : 17, + "error_message" : "Not synced to the network.", + + // api_version == 2 + "error" : "notSynced", + "error_code" : 18, + "error_message" : "Not synced to the network.", + "request" : { "command" : "ledger", "ledger_index" : 10300865 @@ -95,9 +102,16 @@ namespace { Failure: { + // api_version == 1 "error" : "noNetwork", - "error_code" : 16, - "error_message" : "Not synced to Ripple network.", + "error_code" : 17, + "error_message" : "Not synced to the network.", + + // api_version == 2 + "error" : "notSynced", + "error_code" : 18, + "error_message" : "Not synced to the network.", + "request" : { "command" : "ledger", "ledger_index" : 10300865 diff --git a/src/ripple/rpc/impl/RPCHelpers.cpp 
b/src/ripple/rpc/impl/RPCHelpers.cpp index 1104caa59f2..060862bede9 100644 --- a/src/ripple/rpc/impl/RPCHelpers.cpp +++ b/src/ripple/rpc/impl/RPCHelpers.cpp @@ -347,7 +347,9 @@ getLedger(T& ledger, uint32_t ledgerIndex, Context& context) isValidatedOld(context.ledgerMaster, context.app.config().standalone())) { ledger.reset(); - return {rpcNO_NETWORK, "InsufficientNetworkMode"}; + if (context.apiVersion == 1) + return {rpcNO_NETWORK, "InsufficientNetworkMode"}; + return {rpcNOT_SYNCED, "notSynced"}; } return Status::OK; @@ -358,13 +360,21 @@ Status getLedger(T& ledger, LedgerShortcut shortcut, Context& context) { if (isValidatedOld(context.ledgerMaster, context.app.config().standalone())) - return {rpcNO_NETWORK, "InsufficientNetworkMode"}; + { + if (context.apiVersion == 1) + return {rpcNO_NETWORK, "InsufficientNetworkMode"}; + return {rpcNOT_SYNCED, "notSynced"}; + } if (shortcut == LedgerShortcut::VALIDATED) { ledger = context.ledgerMaster.getValidatedLedger(); if (ledger == nullptr) - return {rpcNO_NETWORK, "InsufficientNetworkMode"}; + { + if (context.apiVersion == 1) + return {rpcNO_NETWORK, "InsufficientNetworkMode"}; + return {rpcNOT_SYNCED, "notSynced"}; + } assert(!ledger->open()); } @@ -386,7 +396,11 @@ getLedger(T& ledger, LedgerShortcut shortcut, Context& context) } if (ledger == nullptr) - return {rpcNO_NETWORK, "InsufficientNetworkMode"}; + { + if (context.apiVersion == 1) + return {rpcNO_NETWORK, "InsufficientNetworkMode"}; + return {rpcNOT_SYNCED, "notSynced"}; + } static auto const minSequenceGap = 10; @@ -394,7 +408,9 @@ getLedger(T& ledger, LedgerShortcut shortcut, Context& context) context.ledgerMaster.getValidLedgerIndex()) { ledger.reset(); - return {rpcNO_NETWORK, "InsufficientNetworkMode"}; + if (context.apiVersion == 1) + return {rpcNO_NETWORK, "InsufficientNetworkMode"}; + return {rpcNOT_SYNCED, "notSynced"}; } } return Status::OK; diff --git a/src/ripple/rpc/impl/ServerHandlerImp.cpp b/src/ripple/rpc/impl/ServerHandlerImp.cpp index 
3f404321bd0..0fb0018f390 100644 --- a/src/ripple/rpc/impl/ServerHandlerImp.cpp +++ b/src/ripple/rpc/impl/ServerHandlerImp.cpp @@ -443,9 +443,9 @@ ServerHandlerImp::processSession( is->getConsumer(), role, coro, - is}, + is, + apiVersion}, jv, - apiVersion, {is->user(), is->forwarded_for()}}; RPC::doCommand(context, jr[jss::result]); @@ -829,9 +829,9 @@ ServerHandlerImp::processRequest( usage, role, coro, - InfoSub::pointer()}, + InfoSub::pointer(), + apiVersion}, params, - apiVersion, {user, forwardedFor}}; Json::Value result; RPC::doCommand(context, result); diff --git a/src/ripple/rpc/impl/TransactionSign.cpp b/src/ripple/rpc/impl/TransactionSign.cpp index 0ebeef36326..0cceca14223 100644 --- a/src/ripple/rpc/impl/TransactionSign.cpp +++ b/src/ripple/rpc/impl/TransactionSign.cpp @@ -270,7 +270,8 @@ checkTxJsonFields( bool const verify, std::chrono::seconds validatedLedgerAge, Config const& config, - LoadFeeTrack const& feeTrack) + LoadFeeTrack const& feeTrack, + unsigned apiVersion) { std::pair ret; @@ -308,7 +309,10 @@ checkTxJsonFields( if (verify && !config.standalone() && (validatedLedgerAge > Tuning::maxValidatedLedgerAge)) { - ret.first = rpcError(rpcNO_CURRENT); + if (apiVersion == 1) + ret.first = rpcError(rpcNO_CURRENT); + else + ret.first = rpcError(rpcNOT_SYNCED); return ret; } @@ -384,7 +388,8 @@ transactionPreProcessImpl( verify, validatedLedgerAge, app.config(), - app.getFeeTrack()); + app.getFeeTrack(), + getAPIVersionNumber(params)); if (RPC::contains_error(txJsonResult)) return std::move(txJsonResult); @@ -1068,7 +1073,8 @@ transactionSubmitMultiSigned( true, validatedLedgerAge, app.config(), - app.getFeeTrack()); + app.getFeeTrack(), + getAPIVersionNumber(jvRequest)); if (RPC::contains_error(txJsonResult)) return std::move(txJsonResult); diff --git a/src/test/app/Path_test.cpp b/src/test/app/Path_test.cpp index 7b273666ac5..17e15c95043 100644 --- a/src/test/app/Path_test.cpp +++ b/src/test/app/Path_test.cpp @@ -223,9 +223,11 @@ class Path_test : 
public beast::unit_test::suite app.getOPs(), app.getLedgerMaster(), c, - Role::USER}, + Role::USER, + {}, + {}, + RPC::APIVersionIfUnspecified}, {}, - RPC::APIVersionIfUnspecified, {}}; Json::Value params = Json::objectValue; @@ -329,9 +331,11 @@ class Path_test : public beast::unit_test::suite app.getOPs(), app.getLedgerMaster(), c, - Role::USER}, + Role::USER, + {}, + {}, + RPC::APIVersionIfUnspecified}, {}, - RPC::APIVersionIfUnspecified, {}}; Json::Value result; gate g; diff --git a/src/test/rpc/AccountTx_test.cpp b/src/test/rpc/AccountTx_test.cpp index 534dd8235f8..aa40d6c0b5d 100644 --- a/src/test/rpc/AccountTx_test.cpp +++ b/src/test/rpc/AccountTx_test.cpp @@ -20,6 +20,7 @@ #include #include #include +#include #include #include @@ -175,7 +176,8 @@ class AccountTx_test : public beast::unit_test::suite p[jss::ledger_index_max] = 1; BEAST_EXPECT(isErr( env.rpc("json", "account_tx", to_string(p)), - rpcLGR_IDXS_INVALID)); + (RPC::ApiMaximumSupportedVersion == 1 ? rpcLGR_IDXS_INVALID + : rpcINVALID_LGR_RANGE))); } // Ledger index min only @@ -190,7 +192,8 @@ class AccountTx_test : public beast::unit_test::suite p[jss::ledger_index_min] = env.current()->info().seq; BEAST_EXPECT(isErr( env.rpc("json", "account_tx", to_string(p)), - rpcLGR_IDXS_INVALID)); + (RPC::ApiMaximumSupportedVersion == 1 ? 
rpcLGR_IDXS_INVALID + : rpcINVALID_LGR_RANGE))); } // Ledger index max only diff --git a/src/test/rpc/LedgerRequestRPC_test.cpp b/src/test/rpc/LedgerRequestRPC_test.cpp index c7d84009969..eab8280517b 100644 --- a/src/test/rpc/LedgerRequestRPC_test.cpp +++ b/src/test/rpc/LedgerRequestRPC_test.cpp @@ -21,6 +21,7 @@ #include #include #include +#include #include namespace ripple { @@ -297,10 +298,19 @@ class LedgerRequestRPC_test : public beast::unit_test::suite // date check to trigger env.timeKeeper().adjustCloseTime(weeks{3}); result = env.rpc("ledger_request", "1")[jss::result]; - BEAST_EXPECT(result[jss::error] == "noCurrent"); BEAST_EXPECT(result[jss::status] == "error"); - BEAST_EXPECT( - result[jss::error_message] == "Current ledger is unavailable."); + if (RPC::ApiMaximumSupportedVersion == 1) + { + BEAST_EXPECT(result[jss::error] == "noCurrent"); + BEAST_EXPECT( + result[jss::error_message] == "Current ledger is unavailable."); + } + else + { + BEAST_EXPECT(result[jss::error] == "notSynced"); + BEAST_EXPECT( + result[jss::error_message] == "Not synced to the network."); + } } void diff --git a/src/test/rpc/RPCCall_test.cpp b/src/test/rpc/RPCCall_test.cpp index 174feb0b7f8..b0292020398 100644 --- a/src/test/rpc/RPCCall_test.cpp +++ b/src/test/rpc/RPCCall_test.cpp @@ -1437,7 +1437,8 @@ static RPCCallTestData const rpcCallTestArray[] = { __LINE__, {"account_tx", "rHb9CJAWyB4rj91VRWn96DkukG4bwdtyTh", "580", "579"}, RPCCallTestData::no_exception, - R"({ + RPC::ApiMaximumSupportedVersion == 1 ? + R"({ "method" : "account_tx", "params" : [ { @@ -1446,6 +1447,17 @@ static RPCCallTestData const rpcCallTestArray[] = { "error_message" : "Ledger indexes invalid." } ] + })" + : + R"({ + "method" : "account_tx", + "params" : [ + { + "error" : "notSynced", + "error_code" : 55, + "error_message" : "Not synced to the network." 
+ } + ] })", }, { @@ -5905,7 +5917,8 @@ static RPCCallTestData const rpcCallTestArray[] = { __LINE__, {"tx_account", "rHb9CJAWyB4rj91VRWn96DkukG4bwdtyTh", "580", "579"}, RPCCallTestData::no_exception, - R"({ + RPC::ApiMaximumSupportedVersion == 1 ? + R"({ "method" : "tx_account", "params" : [ { @@ -5914,6 +5927,17 @@ static RPCCallTestData const rpcCallTestArray[] = { "error_message" : "Ledger indexes invalid." } ] + })" + : + R"({ + "method" : "tx_account", + "params" : [ + { + "error" : "notSynced", + "error_code" : 55, + "error_message" : "Not synced to the network." + } + ] })", }, { From 6acf35df6bfc2b6ac04b88a1aab09867da948938 Mon Sep 17 00:00:00 2001 From: Miguel Portilla Date: Thu, 11 Jun 2020 14:50:21 -0400 Subject: [PATCH 15/19] Improve received node process --- src/ripple/app/ledger/InboundLedger.h | 18 +- src/ripple/app/ledger/impl/InboundLedger.cpp | 191 +++++------------- src/ripple/app/ledger/impl/InboundLedgers.cpp | 8 +- src/ripple/shamap/SHAMapTreeNode.h | 31 +-- src/ripple/shamap/impl/SHAMap.cpp | 8 +- src/ripple/shamap/impl/SHAMapSync.cpp | 6 +- src/ripple/shamap/impl/SHAMapTreeNode.cpp | 38 ++-- 7 files changed, 79 insertions(+), 221 deletions(-) diff --git a/src/ripple/app/ledger/InboundLedger.h b/src/ripple/app/ledger/InboundLedger.h index 009bd627a02..c94204d81bd 100644 --- a/src/ripple/app/ledger/InboundLedger.h +++ b/src/ripple/app/ledger/InboundLedger.h @@ -174,23 +174,13 @@ class InboundLedger final : public PeerSet, bool takeHeader(std::string const& data); - bool - takeTxNode( - const std::vector& IDs, - const std::vector& data, - SHAMapAddNode&); + + void + receiveNode(protocol::TMLedgerData& packet, SHAMapAddNode&); + bool takeTxRootNode(Slice const& data, SHAMapAddNode&); - // VFALCO TODO Rename to receiveAccountStateNode - // Don't use acronyms, but if we are going to use them at least - // capitalize them correctly. 
- // - bool - takeAsNode( - const std::vector& IDs, - const std::vector& data, - SHAMapAddNode&); bool takeAsRootNode(Slice const& data, SHAMapAddNode&); diff --git a/src/ripple/app/ledger/impl/InboundLedger.cpp b/src/ripple/app/ledger/impl/InboundLedger.cpp index e8a1de81ff8..cbc635cb73c 100644 --- a/src/ripple/app/ledger/impl/InboundLedger.cpp +++ b/src/ripple/app/ledger/impl/InboundLedger.cpp @@ -47,7 +47,7 @@ enum { , peerCountAdd = 2 - // how many timeouts before we giveup + // how many timeouts before we give up , ledgerTimeoutRetriesMax = 10 @@ -876,164 +876,87 @@ InboundLedger::takeHeader(std::string const& data) return true; } -/** Process TX data received from a peer +/** Process node data received from a peer Call with a lock */ -bool -InboundLedger::takeTxNode( - const std::vector& nodeIDs, - const std::vector& data, - SHAMapAddNode& san) +void +InboundLedger::receiveNode(protocol::TMLedgerData& packet, SHAMapAddNode& san) { if (!mHaveHeader) { - JLOG(m_journal.warn()) << "TX node without header"; + JLOG(m_journal.warn()) << "Missing ledger header"; san.incInvalid(); - return false; - } - - if (mHaveTransactions || mFailed) - { - san.incDuplicate(); - return true; - } - - try - { - auto nodeIDit = nodeIDs.cbegin(); - auto nodeDatait = data.begin(); - TransactionStateSF filter( - mLedger->txMap().family().db(), app_.getLedgerMaster()); - - while (nodeIDit != nodeIDs.cend()) - { - if (nodeIDit->isRoot()) - { - san += mLedger->txMap().addRootNode( - SHAMapHash{mLedger->info().txHash}, - makeSlice(*nodeDatait), - &filter); - if (!san.isGood()) - return false; - } - else - { - san += mLedger->txMap().addKnownNode( - *nodeIDit, makeSlice(*nodeDatait), &filter); - if (!san.isGood()) - return false; - } - - ++nodeIDit; - ++nodeDatait; - } - } - catch (std::exception const& ex) - { - JLOG(m_journal.error()) << "Peer sent bad tx node data: " << ex.what(); - return false; + return; } - - if (!mLedger->txMap().isSynching()) + if (packet.type() == 
protocol::liTX_NODE) { - mHaveTransactions = true; - - if (mHaveState) + if (mHaveTransactions || mFailed) { - mComplete = true; - done(); + san.incDuplicate(); + return; } } - - return true; -} - -/** Process AS data received from a peer - Call with a lock -*/ -bool -InboundLedger::takeAsNode( - const std::vector& nodeIDs, - const std::vector& data, - SHAMapAddNode& san) -{ - JLOG(m_journal.trace()) - << "got ASdata (" << nodeIDs.size() << ") acquiring ledger " << mHash; - if (nodeIDs.size() == 1) - { - JLOG(m_journal.trace()) << "got AS node: " << nodeIDs.front(); - } - - ScopedLockType sl(mLock); - - if (!mHaveHeader) - { - JLOG(m_journal.warn()) << "Don't have ledger header"; - san.incInvalid(); - return false; - } - - if (mHaveState || mFailed) + else if (mHaveState || mFailed) { san.incDuplicate(); - return true; + return; } + auto [map, filter] = + [&]() -> std::pair> { + if (packet.type() == protocol::liTX_NODE) + return { + mLedger->txMap(), + std::make_unique( + mLedger->txMap().family().db(), app_.getLedgerMaster())}; + return { + mLedger->stateMap(), + std::make_unique( + mLedger->stateMap().family().db(), app_.getLedgerMaster())}; + }(); + try { - auto nodeIDit = nodeIDs.cbegin(); - auto nodeDatait = data.begin(); - AccountStateSF filter( - mLedger->stateMap().family().db(), app_.getLedgerMaster()); - - while (nodeIDit != nodeIDs.cend()) + for (auto const& node : packet.nodes()) { - if (nodeIDit->isRoot()) - { - san += mLedger->stateMap().addRootNode( + SHAMapNodeID const nodeID( + node.nodeid().data(), node.nodeid().size()); + if (nodeID.isRoot()) + san += map.addRootNode( SHAMapHash{mLedger->info().accountHash}, - makeSlice(*nodeDatait), - &filter); - if (!san.isGood()) - { - JLOG(m_journal.warn()) << "Unable to add AS root node"; - return false; - } - } + makeSlice(node.nodedata()), + filter.get()); else + san += map.addKnownNode( + nodeID, makeSlice(node.nodedata()), filter.get()); + + if (!san.isGood()) { - san += mLedger->stateMap().addKnownNode( 
- *nodeIDit, makeSlice(*nodeDatait), &filter); - if (!san.isGood()) - { - JLOG(m_journal.warn()) << "Unable to add AS node"; - return false; - } + JLOG(m_journal.warn()) << "Received bad node data"; + return; } - - ++nodeIDit; - ++nodeDatait; } } - catch (std::exception const& ex) + catch (std::exception const& e) { - JLOG(m_journal.error()) - << "Peer sent bad account state node data: " << ex.what(); - return false; + JLOG(m_journal.error()) << "Received bad node data: " << e.what(); + san.incInvalid(); + return; } - if (!mLedger->stateMap().isSynching()) + if (!map.isSynching()) { - mHaveState = true; + if (packet.type() == protocol::liTX_NODE) + mHaveTransactions = true; + else + mHaveState = true; - if (mHaveTransactions) + if (mHaveTransactions && mHaveState) { mComplete = true; done(); } } - - return true; } /** Process AS root node received from a peer @@ -1222,38 +1145,26 @@ InboundLedger::processData( return -1; } - std::vector nodeIDs; - nodeIDs.reserve(packet.nodes().size()); - std::vector nodeData; - nodeData.reserve(packet.nodes().size()); - - for (int i = 0; i < packet.nodes().size(); ++i) + // Verify node IDs and data are complete + for (auto const& node : packet.nodes()) { - const protocol::TMLedgerNode& node = packet.nodes(i); - if (!node.has_nodeid() || !node.has_nodedata()) { JLOG(m_journal.warn()) << "Got bad node"; peer->charge(Resource::feeInvalidRequest); return -1; } - - nodeIDs.push_back( - SHAMapNodeID(node.nodeid().data(), node.nodeid().size())); - nodeData.push_back( - Blob(node.nodedata().begin(), node.nodedata().end())); } SHAMapAddNode san; + receiveNode(packet, san); if (packet.type() == protocol::liTX_NODE) { - takeTxNode(nodeIDs, nodeData, san); JLOG(m_journal.debug()) << "Ledger TX node stats: " << san.get(); } else { - takeAsNode(nodeIDs, nodeData, san); JLOG(m_journal.debug()) << "Ledger AS node stats: " << san.get(); } diff --git a/src/ripple/app/ledger/impl/InboundLedgers.cpp b/src/ripple/app/ledger/impl/InboundLedgers.cpp 
index ae81fde9155..91bb735086c 100644 --- a/src/ripple/app/ledger/impl/InboundLedgers.cpp +++ b/src/ripple/app/ledger/impl/InboundLedgers.cpp @@ -247,14 +247,8 @@ class InboundLedgersImp : public InboundLedgers, public Stoppable if (!node.has_nodeid() || !node.has_nodedata()) return; - auto id_string = node.nodeid(); auto newNode = SHAMapAbstractNode::makeFromWire( - makeSlice(node.nodedata()), - 0, - SHAMapHash{uZero}, - false, - app_.journal("SHAMapNodeID"), - SHAMapNodeID(id_string.data(), id_string.size())); + makeSlice(node.nodedata())); if (!newNode) return; diff --git a/src/ripple/shamap/SHAMapTreeNode.h b/src/ripple/shamap/SHAMapTreeNode.h index a08428cb8a3..f453d7cd86f 100644 --- a/src/ripple/shamap/SHAMapTreeNode.h +++ b/src/ripple/shamap/SHAMapTreeNode.h @@ -176,21 +176,10 @@ class SHAMapAbstractNode invariants(bool is_root = false) const = 0; static std::shared_ptr - makeFromPrefix( - Slice rawNode, - std::uint32_t seq, - SHAMapHash const& hash, - bool hashValid, - beast::Journal j); + makeFromPrefix(Slice rawNode, SHAMapHash const& hash); static std::shared_ptr - makeFromWire( - Slice rawNode, - std::uint32_t seq, - SHAMapHash const& hash, - bool hashValid, - beast::Journal j, - SHAMapNodeID const& id); + makeFromWire(Slice rawNode); private: static std::shared_ptr @@ -276,11 +265,7 @@ class SHAMapInnerNode : public SHAMapAbstractNode bool hashValid); static std::shared_ptr - makeCompressedInner( - Slice data, - std::uint32_t seq, - SHAMapHash const& hash, - bool hashValid); + makeCompressedInner(Slice data, std::uint32_t seq); }; // SHAMapTreeNode represents a leaf, and may eventually be renamed to reflect @@ -315,10 +300,6 @@ class SHAMapTreeNode : public SHAMapAbstractNode invariants(bool is_root = false) const override; public: // public only to SHAMap - // inner node functions - bool - isInnerNode() const; - // item node function bool hasItem() const; @@ -432,12 +413,6 @@ SHAMapInnerNode::setFullBelowGen(std::uint32_t gen) // SHAMapTreeNode 
-inline bool -SHAMapTreeNode::isInnerNode() const -{ - return !mItem; -} - inline bool SHAMapTreeNode::hasItem() const { diff --git a/src/ripple/shamap/impl/SHAMap.cpp b/src/ripple/shamap/impl/SHAMap.cpp index 70329585a96..326ea23f1c4 100644 --- a/src/ripple/shamap/impl/SHAMap.cpp +++ b/src/ripple/shamap/impl/SHAMap.cpp @@ -152,7 +152,7 @@ SHAMap::fetchNodeFromDB(SHAMapHash const& hash) const try { node = SHAMapAbstractNode::makeFromPrefix( - makeSlice(obj->getData()), 0, hash, true, f_.journal()); + makeSlice(obj->getData()), hash); if (node) canonicalize(hash, node); } @@ -180,8 +180,8 @@ SHAMap::checkFilter(SHAMapHash const& hash, SHAMapSyncFilter* filter) const { try { - auto node = SHAMapAbstractNode::makeFromPrefix( - makeSlice(*nodeData), 0, hash, true, f_.journal()); + auto node = + SHAMapAbstractNode::makeFromPrefix(makeSlice(*nodeData), hash); if (node) { filter->gotNode( @@ -382,7 +382,7 @@ SHAMap::descendAsync( return nullptr; ptr = SHAMapAbstractNode::makeFromPrefix( - makeSlice(obj->getData()), 0, hash, true, f_.journal()); + makeSlice(obj->getData()), hash); if (ptr && backed_) canonicalize(hash, ptr); } diff --git a/src/ripple/shamap/impl/SHAMapSync.cpp b/src/ripple/shamap/impl/SHAMapSync.cpp index fbaced398a8..5fa9594e1e3 100644 --- a/src/ripple/shamap/impl/SHAMapSync.cpp +++ b/src/ripple/shamap/impl/SHAMapSync.cpp @@ -557,8 +557,7 @@ SHAMap::addRootNode( } assert(seq_ >= 1); - auto node = SHAMapAbstractNode::makeFromWire( - rootNode, 0, SHAMapHash{}, false, f_.journal(), {}); + auto node = SHAMapAbstractNode::makeFromWire(rootNode); if (!node || !node->isValid() || node->getNodeHash() != hash) return SHAMapAddNode::invalid(); @@ -601,8 +600,7 @@ SHAMap::addKnownNode( } std::uint32_t generation = f_.fullbelow().getGeneration(); - auto newNode = SHAMapAbstractNode::makeFromWire( - rawNode, 0, SHAMapHash{}, false, f_.journal(), node); + auto newNode = SHAMapAbstractNode::makeFromWire(rawNode); SHAMapNodeID iNodeID; auto iNode = root_.get(); diff 
--git a/src/ripple/shamap/impl/SHAMapTreeNode.cpp b/src/ripple/shamap/impl/SHAMapTreeNode.cpp index 0aacfcd7c07..147787ae929 100644 --- a/src/ripple/shamap/impl/SHAMapTreeNode.cpp +++ b/src/ripple/shamap/impl/SHAMapTreeNode.cpp @@ -191,11 +191,7 @@ SHAMapInnerNode::makeFullInner( } std::shared_ptr -SHAMapInnerNode::makeCompressedInner( - Slice data, - std::uint32_t seq, - SHAMapHash const& hash, - bool hashValid) +SHAMapInnerNode::makeCompressedInner(Slice data, std::uint32_t seq) { Serializer s(data.data(), data.size()); @@ -219,21 +215,13 @@ SHAMapInnerNode::makeCompressedInner( ret->mIsBranch |= (1 << pos); } - if (hashValid) - ret->mHash = hash; - else - ret->updateHash(); + ret->updateHash(); + return ret; } std::shared_ptr -SHAMapAbstractNode::makeFromWire( - Slice rawNode, - std::uint32_t seq, - SHAMapHash const& hash, - bool hashValid, - beast::Journal j, - SHAMapNodeID const& id) +SHAMapAbstractNode::makeFromWire(Slice rawNode) { if (rawNode.empty()) return {}; @@ -242,6 +230,11 @@ SHAMapAbstractNode::makeFromWire( rawNode.remove_suffix(1); + bool const hashValid = false; + SHAMapHash const hash; + + std::uint32_t const seq = 0; + if (type == 0) return makeTransaction(rawNode, seq, hash, hashValid); @@ -252,8 +245,7 @@ SHAMapAbstractNode::makeFromWire( return SHAMapInnerNode::makeFullInner(rawNode, seq, hash, hashValid); if (type == 3) - return SHAMapInnerNode::makeCompressedInner( - rawNode, seq, hash, hashValid); + return SHAMapInnerNode::makeCompressedInner(rawNode, seq); if (type == 4) return makeTransactionWithMeta(rawNode, seq, hash, hashValid); @@ -263,12 +255,7 @@ SHAMapAbstractNode::makeFromWire( } std::shared_ptr -SHAMapAbstractNode::makeFromPrefix( - Slice rawNode, - std::uint32_t seq, - SHAMapHash const& hash, - bool hashValid, - beast::Journal j) +SHAMapAbstractNode::makeFromPrefix(Slice rawNode, SHAMapHash const& hash) { if (rawNode.size() < 4) Throw("prefix: short node"); @@ -283,6 +270,9 @@ SHAMapAbstractNode::makeFromPrefix( 
rawNode.remove_prefix(4); + bool const hashValid = true; + std::uint32_t const seq = 0; + if (type == HashPrefix::transactionID) return makeTransaction(rawNode, seq, hash, hashValid); From 50ce9b22fe32940185c334bc726648fe09478656 Mon Sep 17 00:00:00 2001 From: Miguel Portilla Date: Wed, 3 Jun 2020 15:24:57 -0400 Subject: [PATCH 16/19] Use NuDB version 2.0.3 --- Builds/CMake/deps/Nudb.cmake | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Builds/CMake/deps/Nudb.cmake b/Builds/CMake/deps/Nudb.cmake index b8b9a73cd9f..750b940bfd5 100644 --- a/Builds/CMake/deps/Nudb.cmake +++ b/Builds/CMake/deps/Nudb.cmake @@ -12,7 +12,7 @@ if (is_root_project) # NuDB not needed in the case of xrpl_core inclusion build FetchContent_Declare( nudb_src GIT_REPOSITORY https://github.com/CPPAlliance/NuDB.git - GIT_TAG 2.0.1 + GIT_TAG 2.0.3 ) FetchContent_GetProperties(nudb_src) if(NOT nudb_src_POPULATED) @@ -23,7 +23,7 @@ if (is_root_project) # NuDB not needed in the case of xrpl_core inclusion build ExternalProject_Add (nudb_src PREFIX ${nih_cache_path} GIT_REPOSITORY https://github.com/CPPAlliance/NuDB.git - GIT_TAG 2.0.1 + GIT_TAG 2.0.3 CONFIGURE_COMMAND "" BUILD_COMMAND "" TEST_COMMAND "" From 9c046f5a295269b79b04c842d41359712848d298 Mon Sep 17 00:00:00 2001 From: p2peer Date: Wed, 1 Apr 2020 18:30:40 -0400 Subject: [PATCH 17/19] Add support for deterministic database shards (#2688): This commit, if merged, adds support to allow multiple indepedent nodes to produce a binary identical shard for a given range of ledgers. The advantage is that servers can use content-addressable storage, and can more efficiently retrieve shards by downloading from multiple peers at once and then verifying the integrity of a shard by cross-checking its checksum with the checksum other servers report. 
--- Builds/CMake/RippledCore.cmake | 1 + src/ripple/nodestore/Backend.h | 19 ++ src/ripple/nodestore/DeterministicShard.md | 103 ++++++++ src/ripple/nodestore/backend/NuDBFactory.cpp | 35 ++- .../nodestore/impl/DeterministicShard.cpp | 206 ++++++++++++++++ .../nodestore/impl/DeterministicShard.h | 144 +++++++++++ src/ripple/nodestore/impl/Shard.cpp | 57 ++++- src/ripple/nodestore/impl/Shard.h | 14 +- src/test/nodestore/DatabaseShard_test.cpp | 224 +++++++++++++++++- 9 files changed, 780 insertions(+), 23 deletions(-) create mode 100644 src/ripple/nodestore/DeterministicShard.md create mode 100644 src/ripple/nodestore/impl/DeterministicShard.cpp create mode 100644 src/ripple/nodestore/impl/DeterministicShard.h diff --git a/Builds/CMake/RippledCore.cmake b/Builds/CMake/RippledCore.cmake index 5445b092540..339376588ec 100644 --- a/Builds/CMake/RippledCore.cmake +++ b/Builds/CMake/RippledCore.cmake @@ -508,6 +508,7 @@ target_sources (rippled PRIVATE src/ripple/nodestore/impl/DatabaseNodeImp.cpp src/ripple/nodestore/impl/DatabaseRotatingImp.cpp src/ripple/nodestore/impl/DatabaseShardImp.cpp + src/ripple/nodestore/impl/DeterministicShard.cpp src/ripple/nodestore/impl/DecodedBlob.cpp src/ripple/nodestore/impl/DummyScheduler.cpp src/ripple/nodestore/impl/EncodedBlob.cpp diff --git a/src/ripple/nodestore/Backend.h b/src/ripple/nodestore/Backend.h index d3abc33142f..4c3ae2cdbd2 100644 --- a/src/ripple/nodestore/Backend.h +++ b/src/ripple/nodestore/Backend.h @@ -58,6 +58,25 @@ class Backend virtual void open(bool createIfMissing = true) = 0; + /** Open the backend. + @param createIfMissing Create the database files if necessary. + @param appType Deterministic appType used to create a backend. + @param uid Deterministic uid used to create a backend. + @param salt Deterministic salt used to create a backend. + This allows the caller to catch exceptions. 
+ */ + virtual void + open( + bool createIfMissing, + boost::optional appType, + boost::optional uid, + boost::optional salt) + { + Throw(std::string( + "Deterministic appType/uid/salt not supported by backend " + + getName())); + } + /** Close the backend. This allows the caller to catch exceptions. */ diff --git a/src/ripple/nodestore/DeterministicShard.md b/src/ripple/nodestore/DeterministicShard.md new file mode 100644 index 00000000000..10fd6465327 --- /dev/null +++ b/src/ripple/nodestore/DeterministicShard.md @@ -0,0 +1,103 @@ +# Deterministic Database Shards + +This doc describes the standard way to assemble the database shard. A shard assembled using this approach becomes deterministic i.e. if two independent sides assemble the shard consists of the same ledgers, accounts and transactions, then they will obtain the same shard files `nudb.dat` and `nudb.key`. The approach deals with the `NuDB` database format only, refer to `https://github.com/vinniefalco/NuDB`. + + +## Headers + +Due to NuDB database definition, the following headers are used for database files: + +nudb.key: +``` +char[8] Type The characters "nudb.key" +uint16 Version Holds the version number +uint64 UID Unique ID generated on creation +uint64 Appnum Application defined constant +uint16 KeySize Key size in bytes +uint64 Salt A random seed +uint64 Pepper The salt hashed +uint16 BlockSize Size of a file block in bytes +uint16 LoadFactor Target fraction in 65536ths +uint8[56] Reserved Zeroes +uint8[] Reserved Zero-pad to block size +``` + +nudb.dat: +``` +char[8] Type The characters "nudb.dat" +uint16 Version Holds the version number +uint64 UID Unique ID generated on creation +uint64 Appnum Application defined constant +uint16 KeySize Key size in bytes +uint8[64] (reserved) Zeroes +``` +there all fields are saved using network byte order (most significant byte first). 
+ +To make the shard deterministic the following parameters are used as values of header field both for `nudb.key` and `nudb.dat` files. +``` +Version 2 +UID digest(0) +Appnum digest(2) | 0x5348524400000000 /* 'SHRD' */ +KeySize 32 +Salt digest(1) +Pepper XXH64(Salt) +BlockSize 0x1000 (4096 bytes) +LoadFactor 0.5 (numeric 0x8000) +``` +Note: XXH64() is well-known hash algorithm. + +The `digest(i)` mentioned above defined as the follows: + +First, RIPEMD160 hash `H` calculated of the following structure +``` +uint256 lastHash Hash of last ledger in shard +uint32 index Index of the shard +uint32 firstSeq Sequence number of first ledger in the shard +uint32 lastSeq Sequence number of last ledger in the shard +uint32 version Version of shard, 2 at the present +``` +there all 32-bit integers are hashed in network byte order. + +Then, `digest(i)` is defined as the following portion of the above hash `H`: +``` +digest(0) = H[0] << 56 | H[2] << 48 | ... | H[14] << 0, +digest(1) = H[1] << 56 | H[3] << 48 | ... | H[15] << 0, +digest(2) = H[19] << 24 | H[18] << 16 | ... | H[16] << 0, +``` +where `H[i]` denotes `i`-th byte of hash `H`. + + +## Contents + +After deterministic shard is created using the above mentioned headers, it filled with objects. First, all objects of the shard are collected and sorted in according to their hashes. Here the objects are: ledgers, SHAmap tree nodes including accounts and transactions, and final key object with hash 0. Objects are sorted by increasing of their hashes, precisely, by increasing of hex representations of hashes in lexicographic order. 
+ +For example, the following is an example of sorted hashes in their hex representation: +``` +0000000000000000000000000000000000000000000000000000000000000000 +154F29A919B30F50443A241C466691B046677C923EE7905AB97A4DBE8A5C2423 +2231553FC01D37A66C61BBEEACBB8C460994493E5659D118E19A8DDBB1444273 +272DCBFD8E4D5D786CF11A5444B30FB35435933B5DE6C660AA46E68CF0F5C447 +3C062FD9F0BCDCA31ACEBCD8E530D0BDAD1F1D1257B89C435616506A3EE6CB9E +58A0E5AE427CDDC1C7C06448E8C3E4BF718DE036D827881624B20465C3E1334F +... +``` + +Finally, objects added to the shard one by one in the sorted order from low to high hashes. + + +## Tests + +To perform test to deterministic shards implementation one can enter the following command: +``` +rippled --unittest ripple.NodeStore.DatabaseShard +``` + +The following is the right output of deterministic shards test: +``` +ripple.NodeStore.DatabaseShard DatabaseShard deterministic_shard with backend nudb +Iteration 0: RIPEMD160[nudb.key] = 4CFA8985836B549EC99D2E9705707F488DC91E4E +Iteration 0: RIPEMD160[nudb.dat] = 8CC61F503C36339803F8C2FC652C1102DDB889F1 +Iteration 1: RIPEMD160[nudb.key] = 4CFA8985836B549EC99D2E9705707F488DC91E4E +Iteration 1: RIPEMD160[nudb.dat] = 8CC61F503C36339803F8C2FC652C1102DDB889F1 +``` + diff --git a/src/ripple/nodestore/backend/NuDBFactory.cpp b/src/ripple/nodestore/backend/NuDBFactory.cpp index 8147f218cc0..04dfa208551 100644 --- a/src/ripple/nodestore/backend/NuDBFactory.cpp +++ b/src/ripple/nodestore/backend/NuDBFactory.cpp @@ -38,7 +38,10 @@ namespace NodeStore { class NuDBBackend : public Backend { public: - static constexpr std::size_t currentType = 1; + static constexpr std::uint64_t currentType = 1; + static constexpr std::uint64_t deterministicType = 0x5348524400000000ull; + /* "SHRD" in ASCII */ + static constexpr std::uint64_t deterministicMask = 0xFFFFFFFF00000000ull; beast::Journal const j_; size_t const keyBytes_; @@ -93,7 +96,11 @@ class NuDBBackend : public Backend } void - open(bool createIfMissing) override + open( + 
bool createIfMissing, + boost::optional appType, + boost::optional uid, + boost::optional salt) override { using namespace boost::filesystem; if (db_.is_open()) @@ -114,8 +121,9 @@ class NuDBBackend : public Backend dp, kp, lp, - currentType, - nudb::make_salt(), + appType.value_or(currentType), + uid.value_or(nudb::make_uid()), + salt.value_or(nudb::make_salt()), keyBytes_, nudb::block_size(kp), 0.50, @@ -128,10 +136,27 @@ class NuDBBackend : public Backend db_.open(dp, kp, lp, ec); if (ec) Throw(ec); - if (db_.appnum() != currentType) + + /** Old value currentType is accepted for appnum in traditional + * databases, new value is used for deterministic shard databases. + * New 64-bit value is constructed from fixed and random parts. + * Fixed part is bounded by bitmask deterministicMask, + * and the value of fixed part is deterministicType. + * Random part depends on the contents of the shard and may be any. + * The contents of appnum field should match either old or new rule. + */ + if (db_.appnum() != appType.value_or(currentType) && + (appType || + (db_.appnum() & deterministicMask) != deterministicType)) Throw("nodestore: unknown appnum"); } + void + open(bool createIfMissing) override + { + open(createIfMissing, boost::none, boost::none, boost::none); + } + void close() override { diff --git a/src/ripple/nodestore/impl/DeterministicShard.cpp b/src/ripple/nodestore/impl/DeterministicShard.cpp new file mode 100644 index 00000000000..151752cfcac --- /dev/null +++ b/src/ripple/nodestore/impl/DeterministicShard.cpp @@ -0,0 +1,206 @@ +//------------------------------------------------------------------------------ +/* + This file is part of rippled: https://github.com/ripple/rippled + Copyright (c) 2020 Ripple Labs Inc. + + Permission to use, copy, modify, and/or distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. 
+ + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. +*/ +//============================================================================== + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace ripple { +namespace NodeStore { + +DeterministicShard::DeterministicShard( + Application& app, + DatabaseShard const& db, + std::uint32_t index, + uint256 const& lastHash, + beast::Journal j) + : inited_(false) + , nodeset_([](nodeptr l, nodeptr r) { return l->getHash() < r->getHash(); }) + , app_(app) + , db_(db) + , index_(index) + , hash_(hash(lastHash)) + , tempdir_(db.getRootDir() / (std::to_string(index_) + ".tmp")) + , finaldir_(db.getRootDir() / std::to_string(index_)) + , ctx_(std::make_unique()) + , j_(j) +{ +} + +DeterministicShard::~DeterministicShard() +{ + close(true); +} + +uint160 +DeterministicShard::hash(uint256 const& lastHash) const +{ + using beast::hash_append; + ripemd160_hasher h; + + hash_append(h, lastHash); + hash_append(h, index_); + hash_append(h, db_.firstLedgerSeq(index_)); + hash_append(h, db_.lastLedgerSeq(index_)); + hash_append(h, Shard::version); + + auto const result = static_cast(h); + return uint160::fromVoid(result.data()); +} + +std::uint64_t +DeterministicShard::digest(int n) const +{ + auto const data = hash_.data(); + + if (n == 2) + { // Extract 32 bits: + return (static_cast(data[19]) << 24) + + (static_cast(data[18]) << 16) + + (static_cast(data[17]) << 8) + + (static_cast(data[16])); + } + + std::uint64_t ret = 0; + + 
if (n == 0 || n == 1) + { // Extract 64 bits + for (int i = n; i < 16; i += 2) + ret = (ret << 8) + data[i]; + } + + return ret; +} + +bool +DeterministicShard::init() +{ + if (index_ < db_.earliestShardIndex()) + { + JLOG(j_.error()) << "shard " << index_ << " is illegal"; + return false; + } + + Config const& config{app_.config()}; + + Section section{config.section(ConfigSection::shardDatabase())}; + std::string const type{get(section, "type", "nudb")}; + + if (type != "nudb") + { + JLOG(j_.error()) << "shard " << index_ << " backend type " << type + << " not supported"; + return false; + } + + auto factory{Manager::instance().find(type)}; + if (!factory) + { + JLOG(j_.error()) << "shard " << index_ + << " failed to create factory for backend type " + << type; + return false; + } + + ctx_->start(); + + section.set("path", tempdir_.string()); + backend_ = factory->createInstance( + NodeObject::keyBytes, section, scheduler_, *ctx_, j_); + + if (!backend_) + { + JLOG(j_.error()) << "shard " << index_ + << " failed to create backend type " << type; + return false; + } + + // Open or create the NuDB key/value store + bool preexist = exists(tempdir_); + if (preexist) + { + remove_all(tempdir_); + preexist = false; + } + + backend_->open( + !preexist, + digest(2) | 0x5348524400000000ll, /* appType */ + digest(0), /* uid */ + digest(1) /* salt */ + ); + + inited_ = true; + + return true; +} + +void +DeterministicShard::close(bool cancel) +{ + if (!inited_) + return; + + backend_->close(); + if (cancel) + { + remove_all(tempdir_); + } + else + { + flush(); + remove_all(finaldir_); + rename(tempdir_, finaldir_); + } + inited_ = false; +} + +void +DeterministicShard::store(nodeptr nObj) +{ + if (!inited_) + return; + + nodeset_.insert(nObj); +} + +void +DeterministicShard::flush() +{ + if (!inited_) + return; + + for (auto nObj : nodeset_) + { + backend_->store(nObj); + } + + nodeset_.clear(); +} + +} // namespace NodeStore +} // namespace ripple diff --git 
a/src/ripple/nodestore/impl/DeterministicShard.h b/src/ripple/nodestore/impl/DeterministicShard.h new file mode 100644 index 00000000000..91bdf1e867a --- /dev/null +++ b/src/ripple/nodestore/impl/DeterministicShard.h @@ -0,0 +1,144 @@ +//------------------------------------------------------------------------------ +/* + This file is part of rippled: https://github.com/ripple/rippled + Copyright (c) 2020 Ripple Labs Inc. + + Permission to use, copy, modify, and/or distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. +*/ +//============================================================================== + +#ifndef RIPPLE_NODESTORE_DETERMINISTICSHARD_H_INCLUDED +#define RIPPLE_NODESTORE_DETERMINISTICSHARD_H_INCLUDED + +#include +#include +#include +#include + +namespace ripple { +namespace NodeStore { + +/** DeterministicShard class. + * + * 1. The init() method creates temporary folder tempdir_, + * and the deterministic shard is initialized in that folder. + * 2. The store() method adds object to memory pool. + * 3. The flush() method stores all objects from memory pool to the shard + * located in tempdir_ in sorted order. + * 4. The close(true) method finalizes the shard and moves it from tempdir_ + * temporary folder to filandir_ permanent folder, + * deleting old (non-deterministic) shard located in finaldir_. 
+ */ +class DeterministicShard +{ +public: + using nodeptr = std::shared_ptr; + + DeterministicShard(DeterministicShard const&) = delete; + DeterministicShard& + operator=(DeterministicShard const&) = delete; + + /** Creates the object for shard database + * + * @param app Application object + * @param db Shard Database which deterministic shard belongs to + * @param index Index of the shard + * @param lastHash Hash of last ledger in the shard + * @param j Journal to logging + */ + DeterministicShard( + Application& app, + DatabaseShard const& db, + std::uint32_t index, + uint256 const& lastHash, + beast::Journal j); + + ~DeterministicShard(); + + /** Initializes the deterministic shard. + * + * @return true is success, false if errored + */ + bool + init(); + + /** Finalizes and closes the shard. + * + * @param cancel True if reject the shard and delete all files, + * false if finalize the shard and store them + */ + void + close(bool cancel = false); + + /** Store the object into memory pool + * + * @param nobj Object to store. 
+ */ + void + store(nodeptr nobj); + + /** Flush all objects from memory pool to shard + */ + void + flush(); + +private: + // Count hash of shard parameters: lashHash, firstSeq, lastSeq, index + uint160 + hash(const uint256& lastHash) const; + + // Get n-th 64-bit portion of shard parameters's hash + std::uint64_t + digest(int n) const; + + // If database inited + bool inited_; + + // Sorted set of stored and not flushed objects + std::set> nodeset_; + + // Application reference + Application& app_; + + // Shard database + DatabaseShard const& db_; + + // Shard Index + std::uint32_t const index_; + + // Hash used for digests + uint160 const hash_; + + // Path to temporary database files + boost::filesystem::path const tempdir_; + + // Path to final database files + boost::filesystem::path const finaldir_; + + // Dummy scheduler for deterministic write + DummyScheduler scheduler_; + + // NuDB context + std::unique_ptr ctx_; + + // NuDB key/value store for node objects + std::shared_ptr backend_; + + // Journal + beast::Journal const j_; +}; + +} // namespace NodeStore +} // namespace ripple + +#endif diff --git a/src/ripple/nodestore/impl/Shard.cpp b/src/ripple/nodestore/impl/Shard.cpp index f8799ff3d27..6f8696997c6 100644 --- a/src/ripple/nodestore/impl/Shard.cpp +++ b/src/ripple/nodestore/impl/Shard.cpp @@ -40,6 +40,7 @@ Shard::Shard( std::uint32_t index, beast::Journal j) : app_(app) + , db_(db) , index_(index) , firstSeq_(db.firstLedgerSeq(index)) , lastSeq_(std::max(firstSeq_, db.lastLedgerSeq(index))) @@ -397,7 +398,8 @@ Shard::isLegacy() const bool Shard::finalize( bool const writeSQLite, - boost::optional const& expectedHash) + boost::optional const& expectedHash, + const bool writeDeterministicShard) { assert(backend_); @@ -511,6 +513,17 @@ Shard::finalize( std::shared_ptr next; auto const lastLedgerHash{hash}; + std::shared_ptr dsh; + if (writeDeterministicShard) + { + dsh = std::make_shared( + app_, db_, index_, lastLedgerHash, j_); + if (!dsh->init()) + 
{ + return fail("can't create deterministic shard"); + } + } + // Start with the last ledger in the shard and walk backwards from // child to parent until we reach the first ledger seq = lastSeq_; @@ -547,8 +560,11 @@ Shard::finalize( return fail("missing root TXN node"); } - if (!valLedger(ledger, next)) - return fail("failed to validate ledger"); + if (dsh) + dsh->store(nObj); + + if (!verifyLedger(ledger, next, dsh)) + return fail("verification check failed"); if (writeSQLite) { @@ -609,6 +625,12 @@ Shard::finalize( { backend_->store(nObj); + if (dsh) + { + dsh->store(nObj); + dsh->flush(); + } + std::lock_guard lock(mutex_); final_ = true; @@ -628,6 +650,23 @@ Shard::finalize( std::string("exception ") + e.what() + " in function " + __func__); } + if (dsh) + { + /* Close non-deterministic shard database. */ + backend_->close(); + /* Replace non-deterministic shard by deterministic one. */ + dsh->close(); + /* Re-open deterministic shard database. */ + backend_->open(false); + /** The finalize() function verifies the shard and, if third parameter + * is true, then replaces the shard by deterministic copy of the shard. + * After deterministic shard is created it verifies again, + * the finalize() function called here to verify deterministic shard, + * third parameter is false. + */ + return finalize(false, expectedHash, false); + } + return true; } @@ -922,9 +961,10 @@ Shard::setFileStats(std::lock_guard const&) } bool -Shard::valLedger( +Shard::verifyLedger( std::shared_ptr const& ledger, - std::shared_ptr const& next) const + std::shared_ptr const& next, + std::shared_ptr dsh) const { auto fail = [j = j_, index = index_, &ledger](std::string const& msg) { JLOG(j.fatal()) << "shard " << index << ". 
" << msg @@ -943,11 +983,14 @@ Shard::valLedger( return fail("Invalid ledger account hash"); bool error{false}; - auto visit = [this, &error](SHAMapAbstractNode& node) { + auto visit = [this, &error, dsh](SHAMapAbstractNode& node) { if (stop_) return false; - if (!valFetch(node.getNodeHash().as_uint256())) + auto nObj = valFetch(node.getNodeHash().as_uint256()); + if (!nObj) error = true; + else if (dsh) + dsh->store(nObj); return !error; }; diff --git a/src/ripple/nodestore/impl/Shard.h b/src/ripple/nodestore/impl/Shard.h index d43fe22a892..bf511a6f2ef 100644 --- a/src/ripple/nodestore/impl/Shard.h +++ b/src/ripple/nodestore/impl/Shard.h @@ -26,6 +26,7 @@ #include #include #include +#include #include #include @@ -128,11 +129,14 @@ class Shard final verified backend data. @param referenceHash If present, this hash must match the hash of the last ledger in the shard. + @param writeDeterministicShard If true, shard will be rewritten + deterministically. */ bool finalize( bool const writeSQLite, - boost::optional const& referenceHash); + boost::optional const& referenceHash, + const bool writeDeterministicShard = true); void stop() @@ -170,6 +174,8 @@ class Shard final Application& app_; mutable std::recursive_mutex mutex_; + DatabaseShard const& db_; + // Shard Index std::uint32_t const index_; @@ -252,10 +258,12 @@ class Shard final setFileStats(std::lock_guard const& lock); // Validate this ledger by walking its SHAMaps and verifying Merkle trees + // If dsh != NULL then save all walking SHAMaps to deterministic shard dsh bool - valLedger( + verifyLedger( std::shared_ptr const& ledger, - std::shared_ptr const& next) const; + std::shared_ptr const& next, + std::shared_ptr dsh = {}) const; // Fetches from backend and log errors based on status codes std::shared_ptr diff --git a/src/test/nodestore/DatabaseShard_test.cpp b/src/test/nodestore/DatabaseShard_test.cpp index 7e0b746cb62..c4606ecd7b4 100644 --- a/src/test/nodestore/DatabaseShard_test.cpp +++ 
b/src/test/nodestore/DatabaseShard_test.cpp @@ -19,6 +19,7 @@ #include #include +#include #include #include #include @@ -27,13 +28,135 @@ #include #include #include +#include +#include #include +#include +#include +#include #include #include namespace ripple { namespace NodeStore { +/** std::uniform_int_distribution is platform dependent. + * Unit test for deterministic shards is the following: it generates + * predictable accounts and transactions, packs them into ledgers + * and makes the shard. The hash of this shard should be equal to the + * given value. On different platforms (precisely, Linux and Mac) + * hashes of the resulting shard was different. It was unvestigated + * that the problem is in the class std::uniform_int_distribution + * which generates different pseudorandom sequences on different + * platforms, but we need predictable sequence. + */ +template +struct uniformIntDistribution +{ + using resultType = IntType; + + const resultType A, B; + + struct paramType + { + const resultType A, B; + + paramType(resultType aa, resultType bb) : A(aa), B(bb) + { + } + }; + + explicit uniformIntDistribution( + const resultType a = 0, + const resultType b = std::numeric_limits::max()) + : A(a), B(b) + { + } + + explicit uniformIntDistribution(const paramType& params) + : A(params.A), B(params.B) + { + } + + template + resultType + operator()(Generator& g) const + { + return rnd(g, A, B); + } + + template + resultType + operator()(Generator& g, const paramType& params) const + { + return rnd(g, params.A, params.B); + } + + resultType + a() const + { + return A; + } + + resultType + b() const + { + return B; + } + + resultType + min() const + { + return A; + } + + resultType + max() const + { + return B; + } + +private: + template + resultType + rnd(Generator& g, const resultType a, const resultType b) const + { + static_assert( + std::is_convertible:: + value, + "Ups..."); + static_assert( + Generator::min() == 0, "If non-zero we have handle the offset"); + 
const resultType range = b - a + 1; + assert(Generator::max() >= range); // Just for safety + const resultType rejectLim = g.max() % range; + resultType n; + do + n = g(); + while (n <= rejectLim); + return (n % range) + a; + } +}; + +template +Integral +randInt(Engine& engine, Integral min, Integral max) +{ + assert(max > min); + + // This should have no state and constructing it should + // be very cheap. If that turns out not to be the case + // it could be hand-optimized. + return uniformIntDistribution(min, max)(engine); +} + +template +Integral +randInt(Engine& engine, Integral max) +{ + return randInt(engine, Integral(0), max); +} + // Tests DatabaseShard class // class DatabaseShard_test : public TestBase @@ -87,7 +210,7 @@ class DatabaseShard_test : public TestBase { int p; if (n >= 2) - p = rand_int(rng_, 2 * dataSize); + p = randInt(rng_, 2 * dataSize); else p = 0; @@ -99,27 +222,27 @@ class DatabaseShard_test : public TestBase int from, to; do { - from = rand_int(rng_, n - 1); - to = rand_int(rng_, n - 1); + from = randInt(rng_, n - 1); + to = randInt(rng_, n - 1); } while (from == to); pay.push_back(std::make_pair(from, to)); } - n += !rand_int(rng_, nLedgers / dataSize); + n += !randInt(rng_, nLedgers / dataSize); if (n > accounts_.size()) { char str[9]; for (int j = 0; j < 8; ++j) - str[j] = 'a' + rand_int(rng_, 'z' - 'a'); + str[j] = 'a' + randInt(rng_, 'z' - 'a'); str[8] = 0; accounts_.emplace_back(str); } nAccounts_.push_back(n); payAccounts_.push_back(std::move(pay)); - xrpAmount_.push_back(rand_int(rng_, 90) + 10); + xrpAmount_.push_back(randInt(rng_, 90) + 10); } } @@ -495,7 +618,7 @@ class DatabaseShard_test : public TestBase } std::optional - createShard(TestData& data, DatabaseShard& db, int maxShardNumber) + createShard(TestData& data, DatabaseShard& db, int maxShardNumber = 1) { int shardNumber = -1; @@ -669,7 +792,7 @@ class DatabaseShard_test : public TestBase for (std::uint32_t i = 0; i < nTestShards * 2; ++i) { - std::uint32_t n = 
rand_int(data.rng_, nTestShards - 1) + 1; + std::uint32_t n = randInt(data.rng_, nTestShards - 1) + 1; if (bitMask & (1ll << n)) { db->removePreShard(n); @@ -978,6 +1101,90 @@ class DatabaseShard_test : public TestBase } } + std::string + ripemd160File(std::string filename) + { + using beast::hash_append; + std::ifstream input(filename, std::ios::in | std::ios::binary); + char buf[4096]; + ripemd160_hasher h; + + while (input.read(buf, 4096), input.gcount() > 0) + hash_append(h, buf, input.gcount()); + + auto const binResult = static_cast(h); + const auto charDigest = binResult.data(); + std::string result; + boost::algorithm::hex( + charDigest, + charDigest + sizeof(binResult), + std::back_inserter(result)); + + return result; + } + + void + testDeterministicShard( + std::string const& backendType, + std::uint64_t const seedValue) + { + using namespace test::jtx; + + std::string ripemd160Key("4CFA8985836B549EC99D2E9705707F488DC91E4E"), + ripemd160Dat("8CC61F503C36339803F8C2FC652C1102DDB889F1"); + + for (int i = 0; i < 2; i++) + { + beast::temp_dir shardDir; + { + Env env{ + *this, + testConfig( + (i ? 
"" : "deterministicShard"), + backendType, + shardDir.path())}; + DatabaseShard* db = env.app().getShardStore(); + BEAST_EXPECT(db); + + TestData data(seedValue, 4); + if (!BEAST_EXPECT(data.makeLedgers(env))) + return; + + if (createShard(data, *db) < 0) + return; + } + { + Env env{*this, testConfig("", backendType, shardDir.path())}; + DatabaseShard* db = env.app().getShardStore(); + BEAST_EXPECT(db); + + TestData data(seedValue, 4); + if (!BEAST_EXPECT(data.makeLedgers(env))) + return; + + waitShard(*db, 1); + + for (std::uint32_t j = 0; j < ledgersPerShard; ++j) + checkLedger(data, *db, *data.ledgers_[j]); + } + + boost::filesystem::path path(shardDir.path()); + path /= "1"; + boost::filesystem::path keypath = path / (backendType + ".key"); + std::string key = ripemd160File(keypath.string()); + boost::filesystem::path datpath = path / (backendType + ".dat"); + std::string dat = ripemd160File(datpath.string()); + + std::cerr << "Iteration " << i << ": RIPEMD160[" << backendType + << ".key] = " << key << std::endl; + std::cerr << "Iteration " << i << ": RIPEMD160[" << backendType + << ".dat] = " << dat << std::endl; + + BEAST_EXPECT(key == ripemd160Key); + BEAST_EXPECT(dat == ripemd160Dat); + } + } + void testAll(std::string const& backendType) { @@ -991,6 +1198,7 @@ class DatabaseShard_test : public TestBase testCorruptedDatabase(backendType, seedValue + 40); testIllegalFinalKey(backendType, seedValue + 50); testImport(backendType, seedValue + 60); + testDeterministicShard(backendType, seedValue + 70); } public: From cc34b096eaa8f6d93803fb5c6971aff4b1190fd1 Mon Sep 17 00:00:00 2001 From: Miguel Portilla Date: Thu, 21 May 2020 18:19:21 -0400 Subject: [PATCH 18/19] Improve ledgermaster shard acquire --- src/ripple/app/ledger/InboundLedger.h | 3 +- src/ripple/app/ledger/LedgerMaster.h | 5 +- src/ripple/app/ledger/impl/InboundLedger.cpp | 65 +++++++++++++------ src/ripple/app/ledger/impl/LedgerMaster.cpp | 26 ++++++-- .../nodestore/impl/DatabaseShardImp.cpp | 4 +- 
src/ripple/nodestore/impl/Shard.cpp | 6 +- src/ripple/rpc/impl/RPCHelpers.cpp | 3 +- src/ripple/rpc/impl/ShardArchiveHandler.cpp | 3 +- 8 files changed, 76 insertions(+), 39 deletions(-) diff --git a/src/ripple/app/ledger/InboundLedger.h b/src/ripple/app/ledger/InboundLedger.h index c94204d81bd..6688d0dc358 100644 --- a/src/ripple/app/ledger/InboundLedger.h +++ b/src/ripple/app/ledger/InboundLedger.h @@ -142,8 +142,9 @@ class InboundLedger final : public PeerSet, void addPeers(); + void - tryDB(Family& f); + tryDB(NodeStore::Database& srcDB); void done(); diff --git a/src/ripple/app/ledger/LedgerMaster.h b/src/ripple/app/ledger/LedgerMaster.h index 67a49712ae3..7ce8d16273e 100644 --- a/src/ripple/app/ledger/LedgerMaster.h +++ b/src/ripple/app/ledger/LedgerMaster.h @@ -164,7 +164,7 @@ class LedgerMaster : public Stoppable, public AbstractFetchPackContainer /** Walk to a ledger's hash using the skip list */ boost::optional - walkHashBySeq(std::uint32_t index); + walkHashBySeq(std::uint32_t index, InboundLedger::Reason reason); /** Walk the chain of ledger hashes to determine the hash of the ledger with the specified index. 
The referenceLedger is used as @@ -176,7 +176,8 @@ class LedgerMaster : public Stoppable, public AbstractFetchPackContainer boost::optional walkHashBySeq( std::uint32_t index, - std::shared_ptr const& referenceLedger); + std::shared_ptr const& referenceLedger, + InboundLedger::Reason reason); std::shared_ptr getLedgerBySeq(std::uint32_t index); diff --git a/src/ripple/app/ledger/impl/InboundLedger.cpp b/src/ripple/app/ledger/impl/InboundLedger.cpp index cbc635cb73c..3e7bdc6dce9 100644 --- a/src/ripple/app/ledger/impl/InboundLedger.cpp +++ b/src/ripple/app/ledger/impl/InboundLedger.cpp @@ -97,9 +97,11 @@ InboundLedger::init(ScopedLockType& collectionLock) { ScopedLockType sl(mLock); collectionLock.unlock(); - tryDB(app_.family()); + + tryDB(app_.family().db()); if (mFailed) return; + if (!mComplete) { auto shardStore = app_.getShardStore(); @@ -112,11 +114,13 @@ InboundLedger::init(ScopedLockType& collectionLock) mFailed = true; return; } + mHaveHeader = false; mHaveTransactions = false; mHaveState = false; mLedger.reset(); - tryDB(*app_.shardFamily()); + + tryDB(app_.shardFamily()->db()); if (mFailed) return; } @@ -197,11 +201,11 @@ InboundLedger::checkLocal() if (!isDone()) { if (mLedger) - tryDB(mLedger->stateMap().family()); + tryDB(mLedger->stateMap().family().db()); else if (mReason == Reason::SHARD) - tryDB(*app_.shardFamily()); + tryDB(app_.shardFamily()->db()); else - tryDB(app_.family()); + tryDB(app_.family().db()); if (mFailed || mComplete) { done(); @@ -293,14 +297,17 @@ deserializePrefixedHeader(Slice data) // See how much of the ledger data is stored locally // Data found in a fetch pack will be stored void -InboundLedger::tryDB(Family& f) +InboundLedger::tryDB(NodeStore::Database& srcDB) { if (!mHaveHeader) { auto makeLedger = [&, this](Blob const& data) { JLOG(m_journal.trace()) << "Ledger header found in fetch pack"; mLedger = std::make_shared( - deserializePrefixedHeader(makeSlice(data)), app_.config(), f); + 
deserializePrefixedHeader(makeSlice(data)), + app_.config(), + mReason == Reason::SHARD ? *app_.shardFamily() + : app_.family()); if (mLedger->info().hash != mHash || (mSeq != 0 && mSeq != mLedger->info().seq)) { @@ -314,25 +321,41 @@ InboundLedger::tryDB(Family& f) }; // Try to fetch the ledger header from the DB - auto node = f.db().fetch(mHash, mSeq); - if (!node) + if (auto node = srcDB.fetch(mHash, mSeq)) + { + JLOG(m_journal.trace()) << "Ledger header found in local store"; + + makeLedger(node->getData()); + if (mFailed) + return; + + // Store the ledger header if the source and destination differ + auto& dstDB{mLedger->stateMap().family().db()}; + if (std::addressof(dstDB) != std::addressof(srcDB)) + { + Blob blob{node->getData()}; + dstDB.store( + hotLEDGER, std::move(blob), mHash, mLedger->info().seq); + } + } + else { + // Try to fetch the ledger header from a fetch pack auto data = app_.getLedgerMaster().getFetchPack(mHash); if (!data) return; + JLOG(m_journal.trace()) << "Ledger header found in fetch pack"; + makeLedger(*data); - if (mLedger) - f.db().store( - hotLEDGER, std::move(*data), mHash, mLedger->info().seq); - } - else - { - JLOG(m_journal.trace()) << "Ledger header found in node store"; - makeLedger(node->getData()); + if (mFailed) + return; + + // Store the ledger header in the ledger's database + mLedger->stateMap().family().db().store( + hotLEDGER, std::move(*data), mHash, mLedger->info().seq); } - if (mFailed) - return; + if (mSeq == 0) mSeq = mLedger->info().seq; mLedger->stateMap().setLedgerSeq(mSeq); @@ -540,7 +563,9 @@ InboundLedger::trigger(std::shared_ptr const& peer, TriggerReason reason) if (!mHaveHeader) { - tryDB(mReason == Reason::SHARD ? *app_.shardFamily() : app_.family()); + tryDB( + mReason == Reason::SHARD ? 
app_.shardFamily()->db() + : app_.family().db()); if (mFailed) { JLOG(m_journal.warn()) << " failed local for " << mHash; diff --git a/src/ripple/app/ledger/impl/LedgerMaster.cpp b/src/ripple/app/ledger/impl/LedgerMaster.cpp index da23bf12ddc..2cc9d525da5 100644 --- a/src/ripple/app/ledger/impl/LedgerMaster.cpp +++ b/src/ripple/app/ledger/impl/LedgerMaster.cpp @@ -735,7 +735,18 @@ LedgerMaster::tryFill(Job& job, std::shared_ptr ledger) void LedgerMaster::getFetchPack(LedgerIndex missing, InboundLedger::Reason reason) { - auto haveHash{getLedgerHashForHistory(missing + 1, reason)}; + LedgerIndex ledgerIndex{missing + 1}; + if (reason == InboundLedger::Reason::SHARD) + { + // Do not acquire a ledger sequence greater + // than the last ledger in the shard + auto const shardStore{app_.getShardStore()}; + auto const shardIndex{shardStore->seqToShardIndex(missing)}; + ledgerIndex = + std::min(ledgerIndex, shardStore->lastLedgerSeq(shardIndex)); + } + + auto haveHash{getLedgerHashForHistory(ledgerIndex, reason)}; if (!haveHash || haveHash->isZero()) { if (reason == InboundLedger::Reason::SHARD) @@ -1175,11 +1186,11 @@ LedgerMaster::getLedgerHashForHistory( { ret = hashOfSeq(*l, index, m_journal); if (!ret) - ret = walkHashBySeq(index, l); + ret = walkHashBySeq(index, l, reason); } if (!ret) - ret = walkHashBySeq(index); + ret = walkHashBySeq(index, reason); return ret; } @@ -1512,12 +1523,12 @@ LedgerMaster::getHashBySeq(std::uint32_t index) } boost::optional -LedgerMaster::walkHashBySeq(std::uint32_t index) +LedgerMaster::walkHashBySeq(std::uint32_t index, InboundLedger::Reason reason) { boost::optional ledgerHash; if (auto referenceLedger = mValidLedger.get()) - ledgerHash = walkHashBySeq(index, referenceLedger); + ledgerHash = walkHashBySeq(index, referenceLedger, reason); return ledgerHash; } @@ -1525,7 +1536,8 @@ LedgerMaster::walkHashBySeq(std::uint32_t index) boost::optional LedgerMaster::walkHashBySeq( std::uint32_t index, - std::shared_ptr const& referenceLedger) 
+ std::shared_ptr const& referenceLedger, + InboundLedger::Reason reason) { if (!referenceLedger || (referenceLedger->info().seq < index)) { @@ -1564,7 +1576,7 @@ LedgerMaster::walkHashBySeq( if (!ledger) { if (auto const l = app_.getInboundLedgers().acquire( - *refHash, refIndex, InboundLedger::Reason::GENERIC)) + *refHash, refIndex, reason)) { ledgerHash = hashOfSeq(*l, index, m_journal); assert(ledgerHash); diff --git a/src/ripple/nodestore/impl/DatabaseShardImp.cpp b/src/ripple/nodestore/impl/DatabaseShardImp.cpp index d9a771d66d0..2ac011f4a47 100644 --- a/src/ripple/nodestore/impl/DatabaseShardImp.cpp +++ b/src/ripple/nodestore/impl/DatabaseShardImp.cpp @@ -355,8 +355,8 @@ DatabaseShardImp::importShard( return false; } - auto expectedHash = - app_.getLedgerMaster().walkHashBySeq(lastLedgerSeq(shardIndex)); + auto expectedHash = app_.getLedgerMaster().walkHashBySeq( + lastLedgerSeq(shardIndex), InboundLedger::Reason::GENERIC); if (!expectedHash) { diff --git a/src/ripple/nodestore/impl/Shard.cpp b/src/ripple/nodestore/impl/Shard.cpp index 6f8696997c6..5d81ada4cf2 100644 --- a/src/ripple/nodestore/impl/Shard.cpp +++ b/src/ripple/nodestore/impl/Shard.cpp @@ -184,11 +184,8 @@ Shard::open(Scheduler& scheduler, nudb::context& ctx) } if (boost::icl::length(storedSeqs) == maxLedgers_) - { // All ledgers have been acquired, shard is complete - acquireInfo_.reset(); backendComplete_ = true; - } } } else @@ -292,7 +289,6 @@ Shard::store(std::shared_ptr const& ledger) if (!initSQLite(lock)) return false; - acquireInfo_.reset(); backendComplete_ = true; setBackendCache(lock); } @@ -574,8 +570,8 @@ Shard::finalize( } hash = ledger->info().parentHash; + next = std::move(ledger); --seq; - next = ledger; } JLOG(j_.debug()) << "shard " << index_ << " is valid"; diff --git a/src/ripple/rpc/impl/RPCHelpers.cpp b/src/ripple/rpc/impl/RPCHelpers.cpp index 060862bede9..dbc86774d19 100644 --- a/src/ripple/rpc/impl/RPCHelpers.cpp +++ b/src/ripple/rpc/impl/RPCHelpers.cpp @@ -444,7 
+444,8 @@ isValidated( // Use the skip list in the last validated ledger to see if ledger // comes before the last validated ledger (and thus has been // validated). - auto hash = ledgerMaster.walkHashBySeq(seq); + auto hash = + ledgerMaster.walkHashBySeq(seq, InboundLedger::Reason::GENERIC); if (!hash || ledger.info().hash != *hash) { diff --git a/src/ripple/rpc/impl/ShardArchiveHandler.cpp b/src/ripple/rpc/impl/ShardArchiveHandler.cpp index dac964a3022..39cfdd90b51 100644 --- a/src/ripple/rpc/impl/ShardArchiveHandler.cpp +++ b/src/ripple/rpc/impl/ShardArchiveHandler.cpp @@ -328,7 +328,8 @@ ShardArchiveHandler::next(std::lock_guard const& l) if (auto const seq = app_.getShardStore()->lastLedgerSeq(shardIndex); (shouldHaveHash = app_.getLedgerMaster().getValidLedgerIndex() > seq)) { - expectedHash = app_.getLedgerMaster().walkHashBySeq(seq); + expectedHash = app_.getLedgerMaster().walkHashBySeq( + seq, InboundLedger::Reason::GENERIC); } if (!expectedHash) From d4f91d38bcd0231c9116351a6786366891fe859b Mon Sep 17 00:00:00 2001 From: Miguel Portilla Date: Tue, 2 Jun 2020 16:52:13 -0400 Subject: [PATCH 19/19] Add Shard Family --- Builds/CMake/RippledCore.cmake | 2 + src/ripple/app/consensus/RCLConsensus.cpp | 2 +- src/ripple/app/ledger/Ledger.cpp | 4 +- src/ripple/app/ledger/impl/InboundLedger.cpp | 21 +- .../app/ledger/impl/InboundTransactions.cpp | 2 +- src/ripple/app/ledger/impl/LedgerMaster.cpp | 23 +- .../app/ledger/impl/TransactionAcquire.cpp | 4 +- src/ripple/app/main/Application.cpp | 215 ++---------------- src/ripple/app/main/Application.h | 4 +- src/ripple/app/misc/SHAMapStoreImp.cpp | 4 +- .../nodestore/impl/DatabaseShardImp.cpp | 5 +- src/ripple/nodestore/impl/Shard.cpp | 8 +- src/ripple/nodestore/impl/Shard.h | 2 +- src/ripple/rpc/handlers/GetCounts.cpp | 21 +- src/ripple/shamap/Family.h | 47 ++-- src/ripple/shamap/NodeFamily.h | 112 +++++++++ src/ripple/shamap/ShardFamily.h | 124 ++++++++++ src/ripple/shamap/TreeNodeCache.h | 3 - 
src/ripple/shamap/impl/NodeFamily.cpp | 108 +++++++++ src/ripple/shamap/impl/SHAMap.cpp | 9 +- src/ripple/shamap/impl/SHAMapSync.cpp | 18 +- src/ripple/shamap/impl/ShardFamily.cpp | 195 ++++++++++++++++ src/test/app/LedgerHistory_test.cpp | 2 +- src/test/app/RCLValidations_test.cpp | 10 +- src/test/app/Regression_test.cpp | 2 +- src/test/ledger/SkipList_test.cpp | 2 +- src/test/ledger/View_test.cpp | 12 +- src/test/shamap/FetchPack_test.cpp | 2 +- src/test/shamap/SHAMapSync_test.cpp | 2 +- src/test/shamap/SHAMap_test.cpp | 4 +- src/test/shamap/common.h | 92 ++++---- 31 files changed, 732 insertions(+), 329 deletions(-) create mode 100644 src/ripple/shamap/NodeFamily.h create mode 100644 src/ripple/shamap/ShardFamily.h create mode 100644 src/ripple/shamap/impl/NodeFamily.cpp create mode 100644 src/ripple/shamap/impl/ShardFamily.cpp diff --git a/Builds/CMake/RippledCore.cmake b/Builds/CMake/RippledCore.cmake index 339376588ec..4bd48d72388 100644 --- a/Builds/CMake/RippledCore.cmake +++ b/Builds/CMake/RippledCore.cmake @@ -637,12 +637,14 @@ target_sources (rippled PRIVATE main sources: subdir: shamap #]===============================] + src/ripple/shamap/impl/NodeFamily.cpp src/ripple/shamap/impl/SHAMap.cpp src/ripple/shamap/impl/SHAMapDelta.cpp src/ripple/shamap/impl/SHAMapItem.cpp src/ripple/shamap/impl/SHAMapNodeID.cpp src/ripple/shamap/impl/SHAMapSync.cpp src/ripple/shamap/impl/SHAMapTreeNode.cpp + src/ripple/shamap/impl/ShardFamily.cpp #[===============================[ test sources: subdir: app diff --git a/src/ripple/app/consensus/RCLConsensus.cpp b/src/ripple/app/consensus/RCLConsensus.cpp index f05497a5332..f999ff4bbba 100644 --- a/src/ripple/app/consensus/RCLConsensus.cpp +++ b/src/ripple/app/consensus/RCLConsensus.cpp @@ -300,7 +300,7 @@ RCLConsensus::Adaptor::onClose( auto initialLedger = app_.openLedger().current(); auto initialSet = - std::make_shared(SHAMapType::TRANSACTION, app_.family()); + std::make_shared(SHAMapType::TRANSACTION, 
app_.getNodeFamily()); initialSet->setUnbacked(); // Build SHAMap containing all transactions in our open ledger diff --git a/src/ripple/app/ledger/Ledger.cpp b/src/ripple/app/ledger/Ledger.cpp index b583b540633..55ffb36baa3 100644 --- a/src/ripple/app/ledger/Ledger.cpp +++ b/src/ripple/app/ledger/Ledger.cpp @@ -248,7 +248,7 @@ Ledger::Ledger( { info_.hash = calculateLedgerHash(info_); if (acquire) - family.missing_node(info_.hash, info_.seq); + family.missingNode(info_.hash, info_.seq); } } @@ -1077,7 +1077,7 @@ loadLedgerHelper(std::string const& sqlSuffix, Application& app, bool acquire) loaded, acquire, app.config(), - app.family(), + app.getNodeFamily(), app.journal("Ledger")); if (!loaded) diff --git a/src/ripple/app/ledger/impl/InboundLedger.cpp b/src/ripple/app/ledger/impl/InboundLedger.cpp index 3e7bdc6dce9..fdfc9739140 100644 --- a/src/ripple/app/ledger/impl/InboundLedger.cpp +++ b/src/ripple/app/ledger/impl/InboundLedger.cpp @@ -98,7 +98,7 @@ InboundLedger::init(ScopedLockType& collectionLock) ScopedLockType sl(mLock); collectionLock.unlock(); - tryDB(app_.family().db()); + tryDB(app_.getNodeFamily().db()); if (mFailed) return; @@ -107,7 +107,7 @@ InboundLedger::init(ScopedLockType& collectionLock) auto shardStore = app_.getShardStore(); if (mReason == Reason::SHARD) { - if (!shardStore || !app_.shardFamily()) + if (!shardStore) { JLOG(m_journal.error()) << "Acquiring shard with no shard store available"; @@ -120,7 +120,7 @@ InboundLedger::init(ScopedLockType& collectionLock) mHaveState = false; mLedger.reset(); - tryDB(app_.shardFamily()->db()); + tryDB(app_.getShardFamily()->db()); if (mFailed) return; } @@ -203,9 +203,9 @@ InboundLedger::checkLocal() if (mLedger) tryDB(mLedger->stateMap().family().db()); else if (mReason == Reason::SHARD) - tryDB(app_.shardFamily()->db()); + tryDB(app_.getShardFamily()->db()); else - tryDB(app_.family().db()); + tryDB(app_.getNodeFamily().db()); if (mFailed || mComplete) { done(); @@ -306,8 +306,8 @@ 
InboundLedger::tryDB(NodeStore::Database& srcDB) mLedger = std::make_shared( deserializePrefixedHeader(makeSlice(data)), app_.config(), - mReason == Reason::SHARD ? *app_.shardFamily() - : app_.family()); + mReason == Reason::SHARD ? *app_.getShardFamily() + : app_.getNodeFamily()); if (mLedger->info().hash != mHash || (mSeq != 0 && mSeq != mLedger->info().seq)) { @@ -564,8 +564,8 @@ InboundLedger::trigger(std::shared_ptr const& peer, TriggerReason reason) if (!mHaveHeader) { tryDB( - mReason == Reason::SHARD ? app_.shardFamily()->db() - : app_.family().db()); + mReason == Reason::SHARD ? app_.getShardFamily()->db() + : app_.getNodeFamily().db()); if (mFailed) { JLOG(m_journal.warn()) << " failed local for " << mHash; @@ -866,7 +866,8 @@ InboundLedger::takeHeader(std::string const& data) if (mComplete || mFailed || mHaveHeader) return true; - auto* f = mReason == Reason::SHARD ? app_.shardFamily() : &app_.family(); + auto* f = mReason == Reason::SHARD ? app_.getShardFamily() + : &app_.getNodeFamily(); mLedger = std::make_shared( deserializeHeader(makeSlice(data)), app_.config(), *f); if (mLedger->info().hash != mHash || diff --git a/src/ripple/app/ledger/impl/InboundTransactions.cpp b/src/ripple/app/ledger/impl/InboundTransactions.cpp index 4487caca69a..b4c2cf734b0 100644 --- a/src/ripple/app/ledger/impl/InboundTransactions.cpp +++ b/src/ripple/app/ledger/impl/InboundTransactions.cpp @@ -75,7 +75,7 @@ class InboundTransactionsImp : public InboundTransactions, public Stoppable , m_gotSet(std::move(gotSet)) { m_zeroSet.mSet = std::make_shared( - SHAMapType::TRANSACTION, uint256(), app_.family()); + SHAMapType::TRANSACTION, uint256(), app_.getNodeFamily()); m_zeroSet.mSet->setUnbacked(); } diff --git a/src/ripple/app/ledger/impl/LedgerMaster.cpp b/src/ripple/app/ledger/impl/LedgerMaster.cpp index 2cc9d525da5..f7db824e55d 100644 --- a/src/ripple/app/ledger/impl/LedgerMaster.cpp +++ b/src/ripple/app/ledger/impl/LedgerMaster.cpp @@ -735,18 +735,19 @@ 
LedgerMaster::tryFill(Job& job, std::shared_ptr ledger) void LedgerMaster::getFetchPack(LedgerIndex missing, InboundLedger::Reason reason) { - LedgerIndex ledgerIndex{missing + 1}; - if (reason == InboundLedger::Reason::SHARD) - { - // Do not acquire a ledger sequence greater - // than the last ledger in the shard - auto const shardStore{app_.getShardStore()}; - auto const shardIndex{shardStore->seqToShardIndex(missing)}; - ledgerIndex = - std::min(ledgerIndex, shardStore->lastLedgerSeq(shardIndex)); - } + LedgerIndex const ledgerIndex([&]() { + if (reason == InboundLedger::Reason::SHARD) + { + // Do not acquire a ledger sequence greater + // than the last ledger in the shard + auto const shardStore{app_.getShardStore()}; + auto const shardIndex{shardStore->seqToShardIndex(missing)}; + return std::min(missing + 1, shardStore->lastLedgerSeq(shardIndex)); + } + return missing + 1; + }()); - auto haveHash{getLedgerHashForHistory(ledgerIndex, reason)}; + auto const haveHash{getLedgerHashForHistory(ledgerIndex, reason)}; if (!haveHash || haveHash->isZero()) { if (reason == InboundLedger::Reason::SHARD) diff --git a/src/ripple/app/ledger/impl/TransactionAcquire.cpp b/src/ripple/app/ledger/impl/TransactionAcquire.cpp index 3abb88e7f41..fa71ca4daba 100644 --- a/src/ripple/app/ledger/impl/TransactionAcquire.cpp +++ b/src/ripple/app/ledger/impl/TransactionAcquire.cpp @@ -43,8 +43,8 @@ TransactionAcquire::TransactionAcquire(Application& app, uint256 const& hash) : PeerSet(app, hash, TX_ACQUIRE_TIMEOUT, app.journal("TransactionAcquire")) , mHaveRoot(false) { - mMap = - std::make_shared(SHAMapType::TRANSACTION, hash, app_.family()); + mMap = std::make_shared( + SHAMapType::TRANSACTION, hash, app_.getNodeFamily()); mMap->setUnbacked(); } diff --git a/src/ripple/app/main/Application.cpp b/src/ripple/app/main/Application.cpp index a755017e407..8c1a47c6f58 100644 --- a/src/ripple/app/main/Application.cpp +++ b/src/ripple/app/main/Application.cpp @@ -29,10 +29,10 @@ #include 
#include #include +#include #include #include #include -#include #include #include #include @@ -64,8 +64,9 @@ #include #include #include +#include +#include -#include #include #include #include @@ -77,174 +78,6 @@ namespace ripple { -//------------------------------------------------------------------------------ - -namespace detail { - -class AppFamily : public Family -{ -private: - Application& app_; - TreeNodeCache treecache_; - FullBelowCache fullbelow_; - NodeStore::Database& db_; - bool const shardBacked_; - beast::Journal const j_; - - // missing node handler - LedgerIndex maxSeq = 0; - std::mutex maxSeqLock; - - void - acquire(uint256 const& hash, std::uint32_t seq) - { - if (hash.isNonZero()) - { - auto j = app_.journal("Ledger"); - - JLOG(j.error()) << "Missing node in " << to_string(hash); - - app_.getInboundLedgers().acquire( - hash, - seq, - shardBacked_ ? InboundLedger::Reason::SHARD - : InboundLedger::Reason::GENERIC); - } - } - -public: - AppFamily(AppFamily const&) = delete; - AppFamily& - operator=(AppFamily const&) = delete; - - AppFamily( - Application& app, - NodeStore::Database& db, - CollectorManager& collectorManager) - : app_(app) - , treecache_( - "TreeNodeCache", - 65536, - std::chrono::minutes{1}, - stopwatch(), - app.journal("TaggedCache")) - , fullbelow_( - "full_below", - stopwatch(), - collectorManager.collector(), - fullBelowTargetSize, - fullBelowExpiration) - , db_(db) - , shardBacked_(dynamic_cast(&db) != nullptr) - , j_(app.journal("SHAMap")) - { - } - - beast::Journal const& - journal() override - { - return j_; - } - - FullBelowCache& - fullbelow() override - { - return fullbelow_; - } - - FullBelowCache const& - fullbelow() const override - { - return fullbelow_; - } - - TreeNodeCache& - treecache() override - { - return treecache_; - } - - TreeNodeCache const& - treecache() const override - { - return treecache_; - } - - NodeStore::Database& - db() override - { - return db_; - } - - NodeStore::Database const& - db() const 
override - { - return db_; - } - - bool - isShardBacked() const override - { - return shardBacked_; - } - - void - missing_node(std::uint32_t seq) override - { - auto j = app_.journal("Ledger"); - - JLOG(j.error()) << "Missing node in " << seq; - - // prevent recursive invocation - std::unique_lock lock(maxSeqLock); - - if (maxSeq == 0) - { - maxSeq = seq; - - do - { - // Try to acquire the most recent missing ledger - seq = maxSeq; - - lock.unlock(); - - // This can invoke the missing node handler - acquire(app_.getLedgerMaster().getHashBySeq(seq), seq); - - lock.lock(); - } while (maxSeq != seq); - } - else if (maxSeq < seq) - { - // We found a more recent ledger with a - // missing node - maxSeq = seq; - } - } - - void - missing_node(uint256 const& hash, std::uint32_t seq) override - { - acquire(hash, seq); - } - - void - reset() override - { - { - std::lock_guard lock(maxSeqLock); - maxSeq = 0; - } - fullbelow_.reset(); - treecache_.reset(); - } -}; - -} // namespace detail - -//------------------------------------------------------------------------------ - // VFALCO TODO Move the function definitions into the class declaration class ApplicationImp : public Application, public RootStoppable, public BasicApp { @@ -343,9 +176,9 @@ class ApplicationImp : public Application, public RootStoppable, public BasicApp // These are Stoppable-related std::unique_ptr m_jobQueue; std::unique_ptr m_nodeStore; - detail::AppFamily family_; + NodeFamily nodeFamily_; std::unique_ptr shardStore_; - std::unique_ptr shardFamily_; + std::unique_ptr shardFamily_; std::unique_ptr shardArchiveHandler_; // VFALCO TODO Make OrderBookDB abstract OrderBookDB m_orderBookDB; @@ -476,7 +309,7 @@ class ApplicationImp : public Application, public RootStoppable, public BasicApp , m_nodeStore(m_shaMapStore->makeNodeStore("NodeStore.main", 4)) - , family_(*this, *m_nodeStore, *m_collectorManager) + , nodeFamily_(*this, *m_collectorManager) // The shard store is optional and make_ShardStore can 
return null. , shardStore_(make_ShardStore( @@ -671,13 +504,15 @@ class ApplicationImp : public Application, public RootStoppable, public BasicApp } Family& - family() override + getNodeFamily() override { - return family_; + return nodeFamily_; } + // The shard store is an optional feature. If the sever is configured for + // shards, this function will return a valid pointer, otherwise a nullptr. Family* - shardFamily() override + getShardFamily() override { return shardFamily_.get(); } @@ -779,6 +614,8 @@ class ApplicationImp : public Application, public RootStoppable, public BasicApp return *m_nodeStore; } + // The shard store is an optional feature. If the sever is configured for + // shards, this function will return a valid pointer, otherwise a nullptr. NodeStore::DatabaseShard* getShardStore() override { @@ -1127,11 +964,6 @@ class ApplicationImp : public Application, public RootStoppable, public BasicApp config_->getValueFor(SizedItem::ledgerSize), seconds{config_->getValueFor(SizedItem::ledgerAge)}); - family().treecache().setTargetSize( - config_->getValueFor(SizedItem::treeCacheSize)); - family().treecache().setTargetAge( - seconds{config_->getValueFor(SizedItem::treeCacheAge)}); - return true; } @@ -1372,9 +1204,9 @@ class ApplicationImp : public Application, public RootStoppable, public BasicApp // VFALCO TODO fix the dependency inversion using an observer, // have listeners register for "onSweep ()" notification. - family().fullbelow().sweep(); + nodeFamily_.sweep(); if (shardFamily_) - shardFamily_->fullbelow().sweep(); + shardFamily_->sweep(); getMasterTransaction().sweep(); getNodeStore().sweep(); if (shardStore_) @@ -1384,9 +1216,6 @@ class ApplicationImp : public Application, public RootStoppable, public BasicApp getValidations().expire(); getInboundLedgers().sweep(); m_acceptedLedgerCache.sweep(); - family().treecache().sweep(); - if (shardFamily_) - shardFamily_->treecache().sweep(); cachedSLEs_.expire(); // Set timer to do another sweep later. 
@@ -1491,14 +1320,8 @@ ApplicationImp::setup() if (shardStore_) { - shardFamily_ = std::make_unique( - *this, *shardStore_, *m_collectorManager); - - using namespace std::chrono; - shardFamily_->treecache().setTargetSize( - config_->getValueFor(SizedItem::treeCacheSize)); - shardFamily_->treecache().setTargetAge( - seconds{config_->getValueFor(SizedItem::treeCacheAge)}); + shardFamily_ = + std::make_unique(*this, *m_collectorManager); if (!shardStore_->init()) return false; @@ -1906,7 +1729,7 @@ ApplicationImp::startGenesisLedger() : std::vector{}; std::shared_ptr const genesis = std::make_shared( - create_genesis, *config_, initialAmendments, family()); + create_genesis, *config_, initialAmendments, nodeFamily_); m_ledgerMaster->storeLedger(genesis); auto const next = @@ -2037,7 +1860,7 @@ ApplicationImp::loadLedgerFromFile(std::string const& name) } auto loadLedger = - std::make_shared(seq, closeTime, *config_, family()); + std::make_shared(seq, closeTime, *config_, nodeFamily_); loadLedger->setTotalDrops(totalDrops); for (Json::UInt index = 0; index < ledger.get().size(); ++index) diff --git a/src/ripple/app/main/Application.h b/src/ripple/app/main/Application.h index 71746cdbd8a..f2c33aa2adc 100644 --- a/src/ripple/app/main/Application.h +++ b/src/ripple/app/main/Application.h @@ -147,9 +147,9 @@ class Application : public beast::PropertyStream::Source virtual CollectorManager& getCollectorManager() = 0; virtual Family& - family() = 0; + getNodeFamily() = 0; virtual Family* - shardFamily() = 0; + getShardFamily() = 0; virtual TimeKeeper& timeKeeper() = 0; virtual JobQueue& diff --git a/src/ripple/app/misc/SHAMapStoreImp.cpp b/src/ripple/app/misc/SHAMapStoreImp.cpp index 590decf1924..da1c29452bb 100644 --- a/src/ripple/app/misc/SHAMapStoreImp.cpp +++ b/src/ripple/app/misc/SHAMapStoreImp.cpp @@ -319,8 +319,8 @@ SHAMapStoreImp::run() LedgerIndex lastRotated = state_db_.getState().lastRotated; netOPs_ = &app_.getOPs(); ledgerMaster_ = &app_.getLedgerMaster(); - 
fullBelowCache_ = &app_.family().fullbelow(); - treeNodeCache_ = &app_.family().treecache(); + fullBelowCache_ = &(*app_.getNodeFamily().getFullBelowCache(0)); + treeNodeCache_ = &(*app_.getNodeFamily().getTreeNodeCache(0)); transactionDb_ = &app_.getTxnDB(); ledgerDb_ = &app_.getLedgerDB(); diff --git a/src/ripple/nodestore/impl/DatabaseShardImp.cpp b/src/ripple/nodestore/impl/DatabaseShardImp.cpp index 2ac011f4a47..7b6478c53d1 100644 --- a/src/ripple/nodestore/impl/DatabaseShardImp.cpp +++ b/src/ripple/nodestore/impl/DatabaseShardImp.cpp @@ -475,7 +475,7 @@ DatabaseShardImp::fetchLedger(uint256 const& hash, std::uint32_t seq) auto ledger{std::make_shared( deserializePrefixedHeader(makeSlice(nObj->getData())), app_.config(), - *app_.shardFamily())}; + *app_.getShardFamily())}; if (ledger->info().seq != seq) { @@ -600,7 +600,7 @@ DatabaseShardImp::validate() shard->finalize(true, boost::none); } - app_.shardFamily()->reset(); + app_.getShardFamily()->reset(); } void @@ -742,7 +742,6 @@ DatabaseShardImp::import(Database& source) } // Create the new shard - app_.shardFamily()->reset(); auto shard{std::make_unique(app_, *this, shardIndex, j_)}; if (!shard->open(scheduler_, *ctx_)) continue; diff --git a/src/ripple/nodestore/impl/Shard.cpp b/src/ripple/nodestore/impl/Shard.cpp index 5d81ada4cf2..1bf7877c752 100644 --- a/src/ripple/nodestore/impl/Shard.cpp +++ b/src/ripple/nodestore/impl/Shard.cpp @@ -184,7 +184,7 @@ Shard::open(Scheduler& scheduler, nudb::context& ctx) } if (boost::icl::length(storedSeqs) == maxLedgers_) - // All ledgers have been acquired, shard is complete + // All ledgers have been acquired, shard backend is complete backendComplete_ = true; } } @@ -238,7 +238,7 @@ Shard::prepare() if (backendComplete_) { JLOG(j_.warn()) << "shard " << index_ - << " prepare called when shard is complete"; + << " prepare called when shard backend is complete"; return {}; } @@ -417,7 +417,7 @@ Shard::finalize( { std::unique_lock lock(mutex_); if (!backendComplete_) - 
return fail("incomplete"); + return fail("backend incomplete"); /* TODO MP @@ -535,7 +535,7 @@ Shard::finalize( ledger = std::make_shared( deserializePrefixedHeader(makeSlice(nObj->getData())), app_.config(), - *app_.shardFamily()); + *app_.getShardFamily()); if (ledger->info().seq != seq) return fail("invalid ledger sequence"); if (ledger->info().hash != hash) diff --git a/src/ripple/nodestore/impl/Shard.h b/src/ripple/nodestore/impl/Shard.h index bf511a6f2ef..0685942636d 100644 --- a/src/ripple/nodestore/impl/Shard.h +++ b/src/ripple/nodestore/impl/Shard.h @@ -214,7 +214,7 @@ class Shard final std::unique_ptr txSQLiteDB_; // Tracking information used only when acquiring a shard from the network. - // If the shard is complete, this member will be null. + // If the shard is final, this member will be null. std::unique_ptr acquireInfo_; beast::Journal const j_; diff --git a/src/ripple/rpc/handlers/GetCounts.cpp b/src/ripple/rpc/handlers/GetCounts.cpp index 45e21b7d64c..314ad190ae3 100644 --- a/src/ripple/rpc/handlers/GetCounts.cpp +++ b/src/ripple/rpc/handlers/GetCounts.cpp @@ -32,6 +32,7 @@ #include #include #include +#include namespace ripple { @@ -103,9 +104,11 @@ getCountsJson(Application& app, int minObjectCount) ret[jss::AL_hit_rate] = app.getAcceptedLedgerCache().getHitRate(); ret[jss::fullbelow_size] = - static_cast(app.family().fullbelow().size()); - ret[jss::treenode_cache_size] = app.family().treecache().getCacheSize(); - ret[jss::treenode_track_size] = app.family().treecache().getTrackSize(); + static_cast(app.getNodeFamily().getFullBelowCache(0)->size()); + ret[jss::treenode_cache_size] = + app.getNodeFamily().getTreeNodeCache(0)->getCacheSize(); + ret[jss::treenode_track_size] = + app.getNodeFamily().getTreeNodeCache(0)->getTrackSize(); std::string uptime; auto s = UptimeClock::now(); @@ -125,13 +128,13 @@ getCountsJson(Application& app, int minObjectCount) if (auto shardStore = app.getShardStore()) { + auto 
shardFamily{dynamic_cast(app.getShardFamily())}; + auto const [cacheSz, trackSz] = shardFamily->getTreeNodeCacheSize(); Json::Value& jv = (ret[jss::shards] = Json::objectValue); - jv[jss::fullbelow_size] = - static_cast(app.shardFamily()->fullbelow().size()); - jv[jss::treenode_cache_size] = - app.shardFamily()->treecache().getCacheSize(); - jv[jss::treenode_track_size] = - app.shardFamily()->treecache().getTrackSize(); + + jv[jss::fullbelow_size] = shardFamily->getFullBelowCacheSize(); + jv[jss::treenode_cache_size] = cacheSz; + jv[jss::treenode_track_size] = trackSz; ret[jss::write_load] = shardStore->getWriteLoad(); ret[jss::node_hit_rate] = shardStore->getCacheHitRate(); jv[jss::node_writes] = shardStore->getStoreCount(); diff --git a/src/ripple/shamap/Family.h b/src/ripple/shamap/Family.h index 897ea6a9938..72c9a6cb07a 100644 --- a/src/ripple/shamap/Family.h +++ b/src/ripple/shamap/Family.h @@ -32,22 +32,17 @@ namespace ripple { class Family { public: - virtual ~Family() = default; - - virtual beast::Journal const& - journal() = 0; + Family(Family const&) = delete; + Family(Family&&) = delete; - virtual FullBelowCache& - fullbelow() = 0; + Family& + operator=(Family const&) = delete; - virtual FullBelowCache const& - fullbelow() const = 0; + Family& + operator=(Family&&) = delete; - virtual TreeNodeCache& - treecache() = 0; - - virtual TreeNodeCache const& - treecache() const = 0; + explicit Family() = default; + virtual ~Family() = default; virtual NodeStore::Database& db() = 0; @@ -55,14 +50,36 @@ class Family virtual NodeStore::Database const& db() const = 0; + virtual beast::Journal const& + journal() = 0; + + /** Return a pointer to the Family Full Below Cache + + @param ledgerSeq ledger sequence determines a corresponding shard cache + @note ledgerSeq is used by ShardFamily and ignored by NodeFamily + */ + virtual std::shared_ptr + getFullBelowCache(std::uint32_t ledgerSeq) = 0; + + /** Return a pointer to the Family Tree Node Cache + + @param ledgerSeq 
ledger sequence determines a corresponding shard cache + @note ledgerSeq is used by ShardFamily and ignored by NodeFamily + */ + virtual std::shared_ptr + getTreeNodeCache(std::uint32_t ledgerSeq) = 0; + + virtual void + sweep() = 0; + virtual bool isShardBacked() const = 0; virtual void - missing_node(std::uint32_t refNum) = 0; + missingNode(std::uint32_t refNum) = 0; virtual void - missing_node(uint256 const& refHash, std::uint32_t refNum) = 0; + missingNode(uint256 const& refHash, std::uint32_t refNum) = 0; virtual void reset() = 0; diff --git a/src/ripple/shamap/NodeFamily.h b/src/ripple/shamap/NodeFamily.h new file mode 100644 index 00000000000..2d8236705b5 --- /dev/null +++ b/src/ripple/shamap/NodeFamily.h @@ -0,0 +1,112 @@ +//------------------------------------------------------------------------------ +/* + This file is part of rippled: https://github.com/ripple/rippled + Copyright (c) 2020 Ripple Labs Inc. + + Permission to use, copy, modify, and/or distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+*/ +//============================================================================== + +#ifndef RIPPLE_SHAMAP_NODEFAMILY_H_INCLUDED +#define RIPPLE_SHAMAP_NODEFAMILY_H_INCLUDED + +#include +#include + +namespace ripple { + +class Application; + +class NodeFamily : public Family +{ +public: + NodeFamily() = delete; + NodeFamily(NodeFamily const&) = delete; + NodeFamily(NodeFamily&&) = delete; + + NodeFamily& + operator=(NodeFamily const&) = delete; + + NodeFamily& + operator=(NodeFamily&&) = delete; + + NodeFamily(Application& app, CollectorManager& cm); + + NodeStore::Database& + db() override + { + return db_; + } + + NodeStore::Database const& + db() const override + { + return db_; + } + + beast::Journal const& + journal() override + { + return j_; + } + + bool + isShardBacked() const override + { + return false; + } + + std::shared_ptr getFullBelowCache(std::uint32_t) override + { + return fbCache_; + } + + std::shared_ptr getTreeNodeCache(std::uint32_t) override + { + return tnCache_; + } + + void + sweep() override; + + void + reset() override; + + void + missingNode(std::uint32_t seq) override; + + void + missingNode(uint256 const& hash, std::uint32_t seq) override + { + acquire(hash, seq); + } + +private: + Application& app_; + NodeStore::Database& db_; + beast::Journal const j_; + + std::shared_ptr fbCache_; + std::shared_ptr tnCache_; + + // Missing node handler + LedgerIndex maxSeq_{0}; + std::mutex maxSeqMutex_; + + void + acquire(uint256 const& hash, std::uint32_t seq); +}; + +} // namespace ripple + +#endif diff --git a/src/ripple/shamap/ShardFamily.h b/src/ripple/shamap/ShardFamily.h new file mode 100644 index 00000000000..550efeb5b81 --- /dev/null +++ b/src/ripple/shamap/ShardFamily.h @@ -0,0 +1,124 @@ +//------------------------------------------------------------------------------ +/* + This file is part of rippled: https://github.com/ripple/rippled + Copyright (c) 2020 Ripple Labs Inc. 
+ + Permission to use, copy, modify, and/or distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. +*/ +//============================================================================== + +#ifndef RIPPLE_SHAMAP_SHARDFAMILY_H_INCLUDED +#define RIPPLE_SHAMAP_SHARDFAMILY_H_INCLUDED + +#include +#include + +namespace ripple { + +class Application; + +class ShardFamily : public Family +{ +public: + ShardFamily() = delete; + ShardFamily(ShardFamily const&) = delete; + ShardFamily(ShardFamily&&) = delete; + + ShardFamily& + operator=(ShardFamily const&) = delete; + + ShardFamily& + operator=(ShardFamily&&) = delete; + + ShardFamily(Application& app, CollectorManager& cm); + + NodeStore::Database& + db() override + { + return db_; + } + + NodeStore::Database const& + db() const override + { + return db_; + } + + beast::Journal const& + journal() override + { + return j_; + } + + bool + isShardBacked() const override + { + return true; + } + + std::shared_ptr + getFullBelowCache(std::uint32_t ledgerSeq) override; + + /** Return the number of entries in the cache */ + int + getFullBelowCacheSize(); + + std::shared_ptr + getTreeNodeCache(std::uint32_t ledgerSeq) override; + + /** Return a pair where the first item is the number of items cached + and the second item is the number of entries in the cache + */ + std::pair + getTreeNodeCacheSize(); + + void + sweep() 
override; + + void + reset() override; + + void + missingNode(std::uint32_t seq) override; + + void + missingNode(uint256 const& hash, std::uint32_t seq) override + { + acquire(hash, seq); + } + +private: + Application& app_; + NodeStore::Database& db_; + CollectorManager& cm_; + beast::Journal const j_; + + std::unordered_map> fbCache_; + std::mutex fbCacheMutex_; + + std::unordered_map> tnCache_; + std::mutex tnCacheMutex_; + int const tnTargetSize_; + std::chrono::seconds const tnTargetAge_; + + // Missing node handler + LedgerIndex maxSeq_{0}; + std::mutex maxSeqMutex_; + + void + acquire(uint256 const& hash, std::uint32_t seq); +}; + +} // namespace ripple + +#endif diff --git a/src/ripple/shamap/TreeNodeCache.h b/src/ripple/shamap/TreeNodeCache.h index 110f75c089e..9951db73dc2 100644 --- a/src/ripple/shamap/TreeNodeCache.h +++ b/src/ripple/shamap/TreeNodeCache.h @@ -21,12 +21,9 @@ #define RIPPLE_SHAMAP_TREENODECACHE_H_INCLUDED #include -#include namespace ripple { -class SHAMapAbstractNode; - using TreeNodeCache = TaggedCache; } // namespace ripple diff --git a/src/ripple/shamap/impl/NodeFamily.cpp b/src/ripple/shamap/impl/NodeFamily.cpp new file mode 100644 index 00000000000..f817020377c --- /dev/null +++ b/src/ripple/shamap/impl/NodeFamily.cpp @@ -0,0 +1,108 @@ +//------------------------------------------------------------------------------ +/* + This file is part of rippled: https://github.com/ripple/rippled + Copyright (c) 2020 Ripple Labs Inc. + + Permission to use, copy, modify, and/or distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. +*/ +//============================================================================== + +#include +#include +#include +#include + +namespace ripple { + +NodeFamily::NodeFamily(Application& app, CollectorManager& cm) + : app_(app) + , db_(app.getNodeStore()) + , j_(app.journal("NodeFamily")) + , fbCache_(std::make_shared( + "Node family full below cache", + stopwatch(), + cm.collector(), + fullBelowTargetSize, + fullBelowExpiration)) + , tnCache_(std::make_shared( + "Node family tree node cache", + app.config().getValueFor(SizedItem::treeCacheSize), + std::chrono::seconds( + app.config().getValueFor(SizedItem::treeCacheAge)), + stopwatch(), + j_)) +{ +} + +void +NodeFamily::sweep() +{ + fbCache_->sweep(); + tnCache_->sweep(); +} + +void +NodeFamily::reset() +{ + { + std::lock_guard lock(maxSeqMutex_); + maxSeq_ = 0; + } + + fbCache_->reset(); + tnCache_->reset(); +} + +void +NodeFamily::missingNode(std::uint32_t seq) +{ + JLOG(j_.error()) << "Missing node in " << seq; + + std::unique_lock lock(maxSeqMutex_); + if (maxSeq_ == 0) + { + maxSeq_ = seq; + + do + { + // Try to acquire the most recent missing ledger + seq = maxSeq_; + + lock.unlock(); + + // This can invoke the missing node handler + acquire(app_.getLedgerMaster().getHashBySeq(seq), seq); + + lock.lock(); + } while (maxSeq_ != seq); + } + else if (maxSeq_ < seq) + { + // We found a more recent ledger with a missing node + maxSeq_ = seq; + } +} + +void +NodeFamily::acquire(uint256 const& hash, std::uint32_t seq) +{ + if (hash.isNonZero()) + { + JLOG(j_.error()) << "Missing node in " << to_string(hash); + + app_.getInboundLedgers().acquire( + hash, seq, InboundLedger::Reason::GENERIC); 
+ } +} + +} // namespace ripple diff --git a/src/ripple/shamap/impl/SHAMap.cpp b/src/ripple/shamap/impl/SHAMap.cpp index 326ea23f1c4..18d83608c9c 100644 --- a/src/ripple/shamap/impl/SHAMap.cpp +++ b/src/ripple/shamap/impl/SHAMap.cpp @@ -164,7 +164,7 @@ SHAMap::fetchNodeFromDB(SHAMapHash const& hash) const } else if (full_) { - f_.missing_node(ledgerSeq_); + f_.missingNode(ledgerSeq_); const_cast(full_) = false; } } @@ -332,10 +332,10 @@ SHAMap::descend( assert(!parent->isEmptyBranch(branch)); SHAMapAbstractNode* child = parent->getChildPointer(branch); - auto const& childHash = parent->getChildHash(branch); if (!child) { + auto const& childHash = parent->getChildHash(branch); std::shared_ptr childNode = fetchNodeNT(childHash, filter); @@ -1115,7 +1115,7 @@ SHAMap::dump(bool hash) const std::shared_ptr SHAMap::getCache(SHAMapHash const& hash) const { - auto ret = f_.treecache().fetch(hash.as_uint256()); + auto ret = f_.getTreeNodeCache(ledgerSeq_)->fetch(hash.as_uint256()); assert(!ret || !ret->getSeq()); return ret; } @@ -1129,7 +1129,8 @@ SHAMap::canonicalize( assert(node->getSeq() == 0); assert(node->getNodeHash() == hash); - f_.treecache().canonicalize_replace_client(hash.as_uint256(), node); + f_.getTreeNodeCache(ledgerSeq_) + ->canonicalize_replace_client(hash.as_uint256(), node); } void diff --git a/src/ripple/shamap/impl/SHAMapSync.cpp b/src/ripple/shamap/impl/SHAMapSync.cpp index 5fa9594e1e3..19f3937985a 100644 --- a/src/ripple/shamap/impl/SHAMapSync.cpp +++ b/src/ripple/shamap/impl/SHAMapSync.cpp @@ -198,7 +198,9 @@ SHAMap::gmn_ProcessNodes(MissingNodes& mn, MissingNodes::StackEntry& se) fullBelow = false; } else if ( - !backed_ || !f_.fullbelow().touch_if_exists(childHash.as_uint256())) + !backed_ || + !f_.getFullBelowCache(ledgerSeq_) + ->touch_if_exists(childHash.as_uint256())) { SHAMapNodeID childID = nodeID.getChildNodeID(branch); bool pending = false; @@ -243,7 +245,10 @@ SHAMap::gmn_ProcessNodes(MissingNodes& mn, MissingNodes::StackEntry& se) { // 
No partial node encountered below this node node->setFullBelowGen(mn.generation_); if (backed_) - f_.fullbelow().insert(node->getNodeHash().as_uint256()); + { + f_.getFullBelowCache(ledgerSeq_) + ->insert(node->getNodeHash().as_uint256()); + } } node = nullptr; @@ -323,7 +328,7 @@ SHAMap::getMissingNodes(int max, SHAMapSyncFilter* filter) max, filter, f_.db().getDesiredAsyncReadCount(ledgerSeq_), - f_.fullbelow().getGeneration()); + f_.getFullBelowCache(ledgerSeq_)->getGeneration()); if (!root_->isInner() || std::static_pointer_cast(root_)->isFullBelow( @@ -599,7 +604,7 @@ SHAMap::addKnownNode( return SHAMapAddNode::duplicate(); } - std::uint32_t generation = f_.fullbelow().getGeneration(); + auto const generation = f_.getFullBelowCache(ledgerSeq_)->getGeneration(); auto newNode = SHAMapAbstractNode::makeFromWire(rawNode); SHAMapNodeID iNodeID; auto iNode = root_.get(); @@ -618,8 +623,11 @@ SHAMap::addKnownNode( } auto childHash = inner->getChildHash(branch); - if (f_.fullbelow().touch_if_exists(childHash.as_uint256())) + if (f_.getFullBelowCache(ledgerSeq_) + ->touch_if_exists(childHash.as_uint256())) + { return SHAMapAddNode::duplicate(); + } auto prevNode = inner; std::tie(iNode, iNodeID) = descend(inner, iNodeID, branch, filter); diff --git a/src/ripple/shamap/impl/ShardFamily.cpp b/src/ripple/shamap/impl/ShardFamily.cpp new file mode 100644 index 00000000000..ea80f85ba38 --- /dev/null +++ b/src/ripple/shamap/impl/ShardFamily.cpp @@ -0,0 +1,195 @@ +//------------------------------------------------------------------------------ +/* + This file is part of rippled: https://github.com/ripple/rippled + Copyright (c) 2020 Ripple Labs Inc. + + Permission to use, copy, modify, and/or distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. 
+ + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. +*/ +//============================================================================== + +#include +#include +#include +#include +#include + +namespace ripple { + +static NodeStore::Database& +getShardStore(Application& app) +{ + auto const dbPtr = app.getShardStore(); + assert(dbPtr); + return *dbPtr; +} + +ShardFamily::ShardFamily(Application& app, CollectorManager& cm) + : app_(app) + , db_(getShardStore(app)) + , cm_(cm) + , j_(app.journal("ShardFamily")) + , tnTargetSize_(app.config().getValueFor(SizedItem::treeCacheSize)) + , tnTargetAge_(app.config().getValueFor(SizedItem::treeCacheAge)) +{ +} + +std::shared_ptr +ShardFamily::getFullBelowCache(std::uint32_t ledgerSeq) +{ + auto const shardIndex{app_.getShardStore()->seqToShardIndex(ledgerSeq)}; + std::lock_guard lock(fbCacheMutex_); + if (auto const it{fbCache_.find(shardIndex)}; it != fbCache_.end()) + return it->second; + + // Create a cache for the corresponding shard + auto fbCache{std::make_shared( + "Shard family full below cache shard " + std::to_string(shardIndex), + stopwatch(), + cm_.collector(), + fullBelowTargetSize, + fullBelowExpiration)}; + return fbCache_.emplace(shardIndex, std::move(fbCache)).first->second; +} + +int +ShardFamily::getFullBelowCacheSize() +{ + size_t sz{0}; + std::lock_guard lock(fbCacheMutex_); + for (auto const& e : fbCache_) + sz += e.second->size(); + return sz; +} + +std::shared_ptr +ShardFamily::getTreeNodeCache(std::uint32_t ledgerSeq) +{ + auto const 
shardIndex{app_.getShardStore()->seqToShardIndex(ledgerSeq)}; + std::lock_guard lock(tnCacheMutex_); + if (auto const it{tnCache_.find(shardIndex)}; it != tnCache_.end()) + return it->second; + + // Create a cache for the corresponding shard + auto tnCache{std::make_shared( + "Shard family tree node cache shard " + std::to_string(shardIndex), + tnTargetSize_, + tnTargetAge_, + stopwatch(), + j_)}; + return tnCache_.emplace(shardIndex, std::move(tnCache)).first->second; +} + +std::pair +ShardFamily::getTreeNodeCacheSize() +{ + int cacheSz{0}; + int trackSz{0}; + std::lock_guard lock(tnCacheMutex_); + for (auto const& e : tnCache_) + { + cacheSz += e.second->getCacheSize(); + trackSz += e.second->getTrackSize(); + } + return {cacheSz, trackSz}; +} + +void +ShardFamily::sweep() +{ + { + std::lock_guard lock(fbCacheMutex_); + for (auto it = fbCache_.cbegin(); it != fbCache_.cend();) + { + it->second->sweep(); + + // Remove cache if empty + if (it->second->size() == 0) + it = fbCache_.erase(it); + else + ++it; + } + } + + std::lock_guard lock(tnCacheMutex_); + for (auto it = tnCache_.cbegin(); it != tnCache_.cend();) + { + it->second->sweep(); + + // Remove cache if empty + if (it->second->getTrackSize() == 0) + it = tnCache_.erase(it); + else + ++it; + } +} + +void +ShardFamily::reset() +{ + { + std::lock_guard lock(maxSeqMutex_); + maxSeq_ = 0; + } + + { + std::lock_guard lock(fbCacheMutex_); + fbCache_.clear(); + } + + std::lock_guard lock(tnCacheMutex_); + tnCache_.clear(); +} + +void +ShardFamily::missingNode(std::uint32_t seq) +{ + JLOG(j_.error()) << "Missing node in ledger sequence " << seq; + + std::unique_lock lock(maxSeqMutex_); + if (maxSeq_ == 0) + { + maxSeq_ = seq; + + do + { + // Try to acquire the most recent missing ledger + seq = maxSeq_; + + lock.unlock(); + + // This can invoke the missing node handler + acquire(app_.getLedgerMaster().getHashBySeq(seq), seq); + + lock.lock(); + } while (maxSeq_ != seq); + } + else if (maxSeq_ < seq) + { + // We 
found a more recent ledger with a missing node + maxSeq_ = seq; + } +} + +void +ShardFamily::acquire(uint256 const& hash, std::uint32_t seq) +{ + if (hash.isNonZero()) + { + JLOG(j_.error()) << "Missing node in " << to_string(hash); + + app_.getInboundLedgers().acquire( + hash, seq, InboundLedger::Reason::SHARD); + } +} + +} // namespace ripple diff --git a/src/test/app/LedgerHistory_test.cpp b/src/test/app/LedgerHistory_test.cpp index cbc9c95b325..513905a6bf7 100644 --- a/src/test/app/LedgerHistory_test.cpp +++ b/src/test/app/LedgerHistory_test.cpp @@ -57,7 +57,7 @@ class LedgerHistory_test : public beast::unit_test::suite create_genesis, env.app().config(), std::vector{}, - env.app().family()); + env.app().getNodeFamily()); } auto res = std::make_shared( *prev, prev->info().closeTime + closeOffset); diff --git a/src/test/app/RCLValidations_test.cpp b/src/test/app/RCLValidations_test.cpp index eca66a26a88..14a54a1492f 100644 --- a/src/test/app/RCLValidations_test.cpp +++ b/src/test/app/RCLValidations_test.cpp @@ -77,7 +77,10 @@ class RCLValidations_test : public beast::unit_test::suite jtx::Env env(*this); Config config; auto prev = std::make_shared( - create_genesis, config, std::vector{}, env.app().family()); + create_genesis, + config, + std::vector{}, + env.app().getNodeFamily()); history.push_back(prev); for (auto i = 0; i < (2 * maxAncestors + 1); ++i) { @@ -237,7 +240,10 @@ class RCLValidations_test : public beast::unit_test::suite auto& j = env.journal; Config config; auto prev = std::make_shared( - create_genesis, config, std::vector{}, env.app().family()); + create_genesis, + config, + std::vector{}, + env.app().getNodeFamily()); history.push_back(prev); for (auto i = 0; i < (maxAncestors + 10); ++i) { diff --git a/src/test/app/Regression_test.cpp b/src/test/app/Regression_test.cpp index e7c4dc7d5b6..6431f81dbd6 100644 --- a/src/test/app/Regression_test.cpp +++ b/src/test/app/Regression_test.cpp @@ -60,7 +60,7 @@ struct Regression_test : public 
beast::unit_test::suite create_genesis, env.app().config(), std::vector{}, - env.app().family()); + env.app().getNodeFamily()); auto expectedDrops = INITIAL_XRP; BEAST_EXPECT(closed->info().drops == expectedDrops); diff --git a/src/test/ledger/SkipList_test.cpp b/src/test/ledger/SkipList_test.cpp index 56c1efa0375..386a8027746 100644 --- a/src/test/ledger/SkipList_test.cpp +++ b/src/test/ledger/SkipList_test.cpp @@ -39,7 +39,7 @@ class SkipList_test : public beast::unit_test::suite create_genesis, config, std::vector{}, - env.app().family()); + env.app().getNodeFamily()); history.push_back(prev); for (auto i = 0; i < 1023; ++i) { diff --git a/src/test/ledger/View_test.cpp b/src/test/ledger/View_test.cpp index 3c462df280f..6822eb015c9 100644 --- a/src/test/ledger/View_test.cpp +++ b/src/test/ledger/View_test.cpp @@ -134,7 +134,10 @@ class View_test : public beast::unit_test::suite Env env(*this); Config config; std::shared_ptr const genesis = std::make_shared( - create_genesis, config, std::vector{}, env.app().family()); + create_genesis, + config, + std::vector{}, + env.app().getNodeFamily()); auto const ledger = std::make_shared( *genesis, env.app().timeKeeper().closeTime()); wipe(*ledger); @@ -388,7 +391,10 @@ class View_test : public beast::unit_test::suite Env env(*this); Config config; std::shared_ptr const genesis = std::make_shared( - create_genesis, config, std::vector{}, env.app().family()); + create_genesis, + config, + std::vector{}, + env.app().getNodeFamily()); auto const ledger = std::make_shared( *genesis, env.app().timeKeeper().closeTime()); auto setup123 = [&ledger, this]() { @@ -769,7 +775,7 @@ class View_test : public beast::unit_test::suite create_genesis, config, std::vector{}, - env.app().family()); + env.app().getNodeFamily()); auto const ledger = std::make_shared( *genesis, env.app().timeKeeper().closeTime()); wipe(*ledger); diff --git a/src/test/shamap/FetchPack_test.cpp b/src/test/shamap/FetchPack_test.cpp index eaa4dbc79c3..348e59e704a 
100644 --- a/src/test/shamap/FetchPack_test.cpp +++ b/src/test/shamap/FetchPack_test.cpp @@ -118,7 +118,7 @@ class FetchPack_test : public beast::unit_test::suite using namespace beast::severities; test::SuiteJournal journal("FetchPack_test", *this); - TestFamily f(journal); + TestNodeFamily f(journal); std::shared_ptr t1(std::make_shared
(SHAMapType::FREE, f)); pass(); diff --git a/src/test/shamap/SHAMapSync_test.cpp b/src/test/shamap/SHAMapSync_test.cpp index 7ebdc99a12a..61b4eaae150 100644 --- a/src/test/shamap/SHAMapSync_test.cpp +++ b/src/test/shamap/SHAMapSync_test.cpp @@ -91,7 +91,7 @@ class SHAMapSync_test : public beast::unit_test::suite using namespace beast::severities; test::SuiteJournal journal("SHAMapSync_test", *this); - TestFamily f(journal), f2(journal); + TestNodeFamily f(journal), f2(journal); SHAMap source(SHAMapType::FREE, f); SHAMap destination(SHAMapType::FREE, f2); diff --git a/src/test/shamap/SHAMap_test.cpp b/src/test/shamap/SHAMap_test.cpp index ae5ae575032..49d5d5638ec 100644 --- a/src/test/shamap/SHAMap_test.cpp +++ b/src/test/shamap/SHAMap_test.cpp @@ -139,7 +139,7 @@ class SHAMap_test : public beast::unit_test::suite else testcase("add/traverse unbacked"); - tests::TestFamily f(journal); + tests::TestNodeFamily f(journal); // h3 and h4 differ only in the leaf, same terminal node (level 19) uint256 h1, h2, h3, h4, h5; @@ -327,7 +327,7 @@ class SHAMap_test : public beast::unit_test::suite "292891fe4ef6cee585fdc6fda1e09eb4d386363158ec3321b8123e5a772c6c" "a8"); - tests::TestFamily tf{journal}; + tests::TestNodeFamily tf{journal}; SHAMap map{SHAMapType::FREE, tf}; if (!backed) map.setUnbacked(); diff --git a/src/test/shamap/common.h b/src/test/shamap/common.h index 45177da4b64..760f8ca4e82 100644 --- a/src/test/shamap/common.h +++ b/src/test/shamap/common.h @@ -29,22 +29,31 @@ namespace ripple { namespace tests { -class TestFamily : public Family +class TestNodeFamily : public Family { private: + std::unique_ptr db_; + + std::shared_ptr fbCache_; + std::shared_ptr tnCache_; + TestStopwatch clock_; NodeStore::DummyScheduler scheduler_; - TreeNodeCache treecache_; - FullBelowCache fullbelow_; RootStoppable parent_; - std::unique_ptr db_; - bool shardBacked_; - beast::Journal j_; + + beast::Journal const j_; public: - TestFamily(beast::Journal j) - : 
treecache_("TreeNodeCache", 65536, std::chrono::minutes{1}, clock_, j) - , fullbelow_("full_below", clock_) + TestNodeFamily(beast::Journal j) + : fbCache_(std::make_shared( + "App family full below cache", + clock_)) + , tnCache_(std::make_shared( + "App family tree node cache", + 65536, + std::chrono::minutes{1}, + clock_, + j)) , parent_("TestRootStoppable") , j_(j) { @@ -53,72 +62,57 @@ class TestFamily : public Family testSection.set("Path", "SHAMap_test"); db_ = NodeStore::Manager::instance().make_Database( "test", scheduler_, 1, parent_, testSection, j); - shardBacked_ = - dynamic_cast(db_.get()) != nullptr; - } - - beast::manual_clock - clock() - { - return clock_; - } - - beast::Journal const& - journal() override - { - return j_; } - FullBelowCache& - fullbelow() override + NodeStore::Database& + db() override { - return fullbelow_; + return *db_; } - FullBelowCache const& - fullbelow() const override + NodeStore::Database const& + db() const override { - return fullbelow_; + return *db_; } - TreeNodeCache& - treecache() override + beast::Journal const& + journal() override { - return treecache_; + return j_; } - TreeNodeCache const& - treecache() const override + std::shared_ptr getFullBelowCache(std::uint32_t) override { - return treecache_; + return fbCache_; } - NodeStore::Database& - db() override + std::shared_ptr getTreeNodeCache(std::uint32_t) override { - return *db_; + return tnCache_; } - NodeStore::Database const& - db() const override + void + sweep() override { - return *db_; + fbCache_->sweep(); + tnCache_->sweep(); } bool isShardBacked() const override { - return shardBacked_; + return true; } void - missing_node(std::uint32_t refNum) override + missingNode(std::uint32_t refNum) override { Throw("missing node"); } void - missing_node(uint256 const& refHash, std::uint32_t refNum) override + missingNode(uint256 const& refHash, std::uint32_t refNum) override { Throw("missing node"); } @@ -126,8 +120,14 @@ class TestFamily : public Family void 
reset() override { - fullbelow_.reset(); - treecache_.reset(); + fbCache_->reset(); + tnCache_->reset(); + } + + beast::manual_clock + clock() + { + return clock_; } };