diff --git a/Builds/CMake/CMakeFuncs.cmake b/Builds/CMake/CMakeFuncs.cmake index bb24bdc31f9..fb60fd9b4eb 100644 --- a/Builds/CMake/CMakeFuncs.cmake +++ b/Builds/CMake/CMakeFuncs.cmake @@ -35,10 +35,10 @@ function (print_ep_logs _target) COMMENT "${_target} BUILD OUTPUT" COMMAND ${CMAKE_COMMAND} -DIN_FILE=${STAMP_DIR}/${_target}-build-out.log - -P ${CMAKE_SOURCE_DIR}/Builds/CMake/echo_file.cmake + -P ${CMAKE_CURRENT_SOURCE_DIR}/Builds/CMake/echo_file.cmake COMMAND ${CMAKE_COMMAND} -DIN_FILE=${STAMP_DIR}/${_target}-build-err.log - -P ${CMAKE_SOURCE_DIR}/Builds/CMake/echo_file.cmake) + -P ${CMAKE_CURRENT_SOURCE_DIR}/Builds/CMake/echo_file.cmake) endfunction () #[=========================================================[ @@ -177,7 +177,7 @@ function (git_hash hash_val) endif () endif () execute_process (COMMAND ${GIT_EXECUTABLE} "log" "--pretty=${_format}" "-n1" - WORKING_DIRECTORY ${CMAKE_SOURCE_DIR} + WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR} RESULT_VARIABLE _git_exit_code OUTPUT_VARIABLE _temp_hash OUTPUT_STRIP_TRAILING_WHITESPACE @@ -194,7 +194,7 @@ function (git_branch branch_val) endif () set (_branch "") execute_process (COMMAND ${GIT_EXECUTABLE} "rev-parse" "--abbrev-ref" "HEAD" - WORKING_DIRECTORY ${CMAKE_SOURCE_DIR} + WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR} RESULT_VARIABLE _git_exit_code OUTPUT_VARIABLE _temp_branch OUTPUT_STRIP_TRAILING_WHITESPACE diff --git a/Builds/CMake/RippledCore.cmake b/Builds/CMake/RippledCore.cmake index 5445b092540..ba8a4db8d29 100644 --- a/Builds/CMake/RippledCore.cmake +++ b/Builds/CMake/RippledCore.cmake @@ -150,6 +150,7 @@ install ( src/ripple/basics/IOUAmount.h src/ripple/basics/LocalValue.h src/ripple/basics/Log.h + src/ripple/basics/MathUtilities.h src/ripple/basics/safe_cast.h src/ripple/basics/Slice.h src/ripple/basics/StringUtilities.h @@ -376,6 +377,7 @@ target_sources (rippled PRIVATE src/ripple/app/misc/CanonicalTXSet.cpp src/ripple/app/misc/FeeVoteImpl.cpp src/ripple/app/misc/HashRouter.cpp + 
src/ripple/app/misc/NegativeUNLVote.cpp src/ripple/app/misc/NetworkOPs.cpp src/ripple/app/misc/SHAMapStoreImp.cpp src/ripple/app/misc/impl/AccountTxPaging.cpp @@ -508,6 +510,7 @@ target_sources (rippled PRIVATE src/ripple/nodestore/impl/DatabaseNodeImp.cpp src/ripple/nodestore/impl/DatabaseRotatingImp.cpp src/ripple/nodestore/impl/DatabaseShardImp.cpp + src/ripple/nodestore/impl/DeterministicShard.cpp src/ripple/nodestore/impl/DecodedBlob.cpp src/ripple/nodestore/impl/DummyScheduler.cpp src/ripple/nodestore/impl/EncodedBlob.cpp @@ -636,12 +639,14 @@ target_sources (rippled PRIVATE main sources: subdir: shamap #]===============================] + src/ripple/shamap/impl/NodeFamily.cpp src/ripple/shamap/impl/SHAMap.cpp src/ripple/shamap/impl/SHAMapDelta.cpp src/ripple/shamap/impl/SHAMapItem.cpp src/ripple/shamap/impl/SHAMapNodeID.cpp src/ripple/shamap/impl/SHAMapSync.cpp src/ripple/shamap/impl/SHAMapTreeNode.cpp + src/ripple/shamap/impl/ShardFamily.cpp #[===============================[ test sources: subdir: app @@ -742,6 +747,7 @@ target_sources (rippled PRIVATE src/test/consensus/DistributedValidatorsSim_test.cpp src/test/consensus/LedgerTiming_test.cpp src/test/consensus/LedgerTrie_test.cpp + src/test/consensus/NegativeUNL_test.cpp src/test/consensus/ScaleFreeSim_test.cpp src/test/consensus/Validations_test.cpp #[===============================[ @@ -867,6 +873,7 @@ target_sources (rippled PRIVATE test sources: subdir: protocol #]===============================] + src/test/protocol/BuildInfo_test.cpp src/test/protocol/InnerObjectFormats_test.cpp src/test/protocol/Issue_test.cpp src/test/protocol/PublicKey_test.cpp diff --git a/Builds/CMake/RippledCov.cmake b/Builds/CMake/RippledCov.cmake index b9d93f41af8..e177aa52ae2 100644 --- a/Builds/CMake/RippledCov.cmake +++ b/Builds/CMake/RippledCov.cmake @@ -28,7 +28,7 @@ if (coverage) set (extract_pattern "") if (coverage_core_only) - set (extract_pattern "${CMAKE_SOURCE_DIR}/src/ripple/") + set (extract_pattern 
"${CMAKE_CURRENT_SOURCE_DIR}/src/ripple/") endif () if (LLVM_COV AND LLVM_PROFDATA) @@ -72,14 +72,14 @@ if (coverage) COMMAND ${CMAKE_COMMAND} -E echo "Generating coverage- results will be in ${CMAKE_BINARY_DIR}/coverage/index.html." # create baseline info file COMMAND ${LCOV} - --no-external -d "${CMAKE_SOURCE_DIR}" -c -d . -i -o baseline.info + --no-external -d "${CMAKE_CURRENT_SOURCE_DIR}" -c -d . -i -o baseline.info | grep -v "ignoring data for external file" # run tests COMMAND ${CMAKE_COMMAND} -E echo "Running rippled tests for coverage report." COMMAND rippled --unittest$<$:=${coverage_test}> --quiet --unittest-log # Create test coverage data file COMMAND ${LCOV} - --no-external -d "${CMAKE_SOURCE_DIR}" -c -d . -o tests.info + --no-external -d "${CMAKE_CURRENT_SOURCE_DIR}" -c -d . -o tests.info | grep -v "ignoring data for external file" # Combine baseline and test coverage data COMMAND ${LCOV} diff --git a/Builds/CMake/RippledDocs.cmake b/Builds/CMake/RippledDocs.cmake index d0440f5ff8b..a3e2ae1ba5a 100644 --- a/Builds/CMake/RippledDocs.cmake +++ b/Builds/CMake/RippledDocs.cmake @@ -9,7 +9,7 @@ if (NOT TARGET Doxygen::doxygen) endif () set (doxygen_output_directory "${CMAKE_BINARY_DIR}/docs") -set (doxygen_include_path "${CMAKE_SOURCE_DIR}/src") +set (doxygen_include_path "${CMAKE_CURRENT_SOURCE_DIR}/src") set (doxygen_index_file "${doxygen_output_directory}/html/index.html") set (doxyfile "${CMAKE_CURRENT_SOURCE_DIR}/docs/Doxyfile") diff --git a/Builds/CMake/RippledNIH.cmake b/Builds/CMake/RippledNIH.cmake index e0c161aba78..60ab3e4bf85 100644 --- a/Builds/CMake/RippledNIH.cmake +++ b/Builds/CMake/RippledNIH.cmake @@ -13,7 +13,7 @@ if (NOT DEFINED NIH_CACHE_ROOT) if (DEFINED ENV{NIH_CACHE_ROOT}) set (NIH_CACHE_ROOT $ENV{NIH_CACHE_ROOT}) else () - set (NIH_CACHE_ROOT "${CMAKE_SOURCE_DIR}/.nih_c") + set (NIH_CACHE_ROOT "${CMAKE_CURRENT_SOURCE_DIR}/.nih_c") endif () endif () set (nih_cache_path diff --git a/Builds/CMake/RippledRelease.cmake 
b/Builds/CMake/RippledRelease.cmake index eb08566b2ff..b10bf6cf023 100644 --- a/Builds/CMake/RippledRelease.cmake +++ b/Builds/CMake/RippledRelease.cmake @@ -61,7 +61,7 @@ if (is_root_project) docker run -e NIH_CACHE_ROOT=/opt/rippled_bld/pkg/.nih_c -v ${NIH_CACHE_ROOT}/pkgbuild:/opt/rippled_bld/pkg/.nih_c - -v ${CMAKE_SOURCE_DIR}:/opt/rippled_bld/pkg/rippled + -v ${CMAKE_CURRENT_SOURCE_DIR}:/opt/rippled_bld/pkg/rippled -v ${CMAKE_CURRENT_BINARY_DIR}/packages:/opt/rippled_bld/pkg/out "$<$:--volume=/etc/passwd:/etc/passwd;--volume=/etc/group:/etc/group;--user=${DOCKER_USER_ID}:${DOCKER_GROUP_ID}>" -t rippled-rpm-builder:${container_label} @@ -124,7 +124,7 @@ if (is_root_project) docker run -e NIH_CACHE_ROOT=/opt/rippled_bld/pkg/.nih_c -v ${NIH_CACHE_ROOT}/pkgbuild:/opt/rippled_bld/pkg/.nih_c - -v ${CMAKE_SOURCE_DIR}:/opt/rippled_bld/pkg/rippled + -v ${CMAKE_CURRENT_SOURCE_DIR}:/opt/rippled_bld/pkg/rippled -v ${CMAKE_CURRENT_BINARY_DIR}/packages:/opt/rippled_bld/pkg/out "$<$:--volume=/etc/passwd:/etc/passwd;--volume=/etc/group:/etc/group;--user=${DOCKER_USER_ID}:${DOCKER_GROUP_ID}>" -t rippled-dpkg-builder:${container_label} diff --git a/Builds/CMake/deps/Nudb.cmake b/Builds/CMake/deps/Nudb.cmake index b8b9a73cd9f..750b940bfd5 100644 --- a/Builds/CMake/deps/Nudb.cmake +++ b/Builds/CMake/deps/Nudb.cmake @@ -12,7 +12,7 @@ if (is_root_project) # NuDB not needed in the case of xrpl_core inclusion build FetchContent_Declare( nudb_src GIT_REPOSITORY https://github.com/CPPAlliance/NuDB.git - GIT_TAG 2.0.1 + GIT_TAG 2.0.3 ) FetchContent_GetProperties(nudb_src) if(NOT nudb_src_POPULATED) @@ -23,7 +23,7 @@ if (is_root_project) # NuDB not needed in the case of xrpl_core inclusion build ExternalProject_Add (nudb_src PREFIX ${nih_cache_path} GIT_REPOSITORY https://github.com/CPPAlliance/NuDB.git - GIT_TAG 2.0.1 + GIT_TAG 2.0.3 CONFIGURE_COMMAND "" BUILD_COMMAND "" TEST_COMMAND "" diff --git a/Builds/CMake/deps/Rocksdb.cmake b/Builds/CMake/deps/Rocksdb.cmake index 
f61cab3f740..eed6cefe162 100644 --- a/Builds/CMake/deps/Rocksdb.cmake +++ b/Builds/CMake/deps/Rocksdb.cmake @@ -64,13 +64,13 @@ if (local_rocksdb) PATCH_COMMAND # only used by windows build ${CMAKE_COMMAND} -E copy - ${CMAKE_SOURCE_DIR}/Builds/CMake/rocks_thirdparty.inc + ${CMAKE_CURRENT_SOURCE_DIR}/Builds/CMake/rocks_thirdparty.inc /thirdparty.inc COMMAND # fixup their build version file to keep the values # from changing always ${CMAKE_COMMAND} -E copy_if_different - ${CMAKE_SOURCE_DIR}/Builds/CMake/rocksdb_build_version.cc.in + ${CMAKE_CURRENT_SOURCE_DIR}/Builds/CMake/rocksdb_build_version.cc.in /util/build_version.cc.in CMAKE_ARGS -DCMAKE_CXX_COMPILER=${CMAKE_CXX_COMPILER} diff --git a/Builds/CMake/deps/Soci.cmake b/Builds/CMake/deps/Soci.cmake index 0bf022bea26..4015a3f2dea 100644 --- a/Builds/CMake/deps/Soci.cmake +++ b/Builds/CMake/deps/Soci.cmake @@ -51,7 +51,7 @@ else() # This patch process is likely fragile and should be reviewed carefully # whenever we update the GIT_TAG above. PATCH_COMMAND - ${CMAKE_COMMAND} -P ${CMAKE_SOURCE_DIR}/Builds/CMake/soci_patch.cmake + ${CMAKE_COMMAND} -P ${CMAKE_CURRENT_SOURCE_DIR}/Builds/CMake/soci_patch.cmake CMAKE_ARGS -DCMAKE_CXX_COMPILER=${CMAKE_CXX_COMPILER} -DCMAKE_C_COMPILER=${CMAKE_C_COMPILER} diff --git a/Builds/CMake/deps/Sqlite.cmake b/Builds/CMake/deps/Sqlite.cmake index be2a7904e97..73760f34389 100644 --- a/Builds/CMake/deps/Sqlite.cmake +++ b/Builds/CMake/deps/Sqlite.cmake @@ -31,7 +31,7 @@ else() # for the single amalgamation source file. 
PATCH_COMMAND ${CMAKE_COMMAND} -E copy_if_different - ${CMAKE_SOURCE_DIR}/Builds/CMake/CMake_sqlite3.txt + ${CMAKE_CURRENT_SOURCE_DIR}/Builds/CMake/CMake_sqlite3.txt /CMakeLists.txt CMAKE_ARGS -DCMAKE_CXX_COMPILER=${CMAKE_CXX_COMPILER} diff --git a/Builds/CMake/deps/gRPC.cmake b/Builds/CMake/deps/gRPC.cmake index f1d2adbae13..5195ff1979c 100644 --- a/Builds/CMake/deps/gRPC.cmake +++ b/Builds/CMake/deps/gRPC.cmake @@ -312,7 +312,7 @@ set (GRPC_GEN_DIR "${CMAKE_BINARY_DIR}/proto_gen_grpc") file (MAKE_DIRECTORY ${GRPC_GEN_DIR}) set (GRPC_PROTO_SRCS) set (GRPC_PROTO_HDRS) -set (GRPC_PROTO_ROOT "${CMAKE_SOURCE_DIR}/src/ripple/proto/org") +set (GRPC_PROTO_ROOT "${CMAKE_CURRENT_SOURCE_DIR}/src/ripple/proto/org") file(GLOB_RECURSE GRPC_DEFINITION_FILES LIST_DIRECTORIES false "${GRPC_PROTO_ROOT}/*.proto") foreach(file ${GRPC_DEFINITION_FILES}) get_filename_component(_abs_file ${file} ABSOLUTE) diff --git a/Builds/containers/gitlab-ci/docker_alpine_setup.sh b/Builds/containers/gitlab-ci/docker_alpine_setup.sh index 43eeed8a914..00cf6eb5fa5 100644 --- a/Builds/containers/gitlab-ci/docker_alpine_setup.sh +++ b/Builds/containers/gitlab-ci/docker_alpine_setup.sh @@ -5,6 +5,7 @@ set -ex echo $(nproc) docker login -u rippled \ -p ${ARTIFACTORY_DEPLOY_KEY_RIPPLED} ${ARTIFACTORY_HUB} +apk add --update py-pip apk add \ bash util-linux coreutils binutils grep \ make ninja cmake build-base gcc g++ abuild git \ diff --git a/RELEASENOTES.md b/RELEASENOTES.md index 1cc76688909..1b80640a5af 100644 --- a/RELEASENOTES.md +++ b/RELEASENOTES.md @@ -7,6 +7,11 @@ This document contains the release notes for `rippled`, the reference server imp Have new ideas? Need help with setting up your node? Come visit us [here](https://github.com/ripple/rippled/issues/new/choose) +# Change Log + +- Work on a version 2 of the XRP Network API has begun. The new API returns the code `notSynced` in place of `noClosed`, `noCurrent`, and `noNetwork`. And `invalidLgrRange` is returned in place of `lgrIdxInvalid`. 
+- The version 2 API can be specified by adding "api_version" : 2 to your json request. The default version remains 1 (if unspecified), except for the command line interface which always uses the latest version. + # Releases ## Version 1.5.0 diff --git a/cfg/rippled-example.cfg b/cfg/rippled-example.cfg index c4917d65044..b2b3c03f346 100644 --- a/cfg/rippled-example.cfg +++ b/cfg/rippled-example.cfg @@ -36,7 +36,7 @@ # For more information on where the rippled server instance searches for the # file, visit: # -# https://developers.ripple.com/commandline-usage.html#generic-options +# https://xrpl.org/commandline-usage.html#generic-options # # This file should be named rippled.cfg. This file is UTF-8 with DOS, UNIX, # or Mac style end of lines. Blank lines and lines beginning with '#' are @@ -869,18 +869,65 @@ # # These keys are possible for any type of backend: # +# earliest_seq The default is 32570 to match the XRP ledger +# network's earliest allowed sequence. Alternate +# networks may set this value. Minimum value of 1. +# If a [shard_db] section is defined, and this +# value is present in either [node_db] or [shard_db], +# it must be defined with the same value in both +# sections. +# # online_delete Minimum value of 256. Enable automatic purging # of older ledger information. Maintain at least this # number of ledger records online. Must be greater # than or equal to ledger_history. # -# advisory_delete 0 for disabled, 1 for enabled. If set, then -# require administrative RPC call "can_delete" -# to enable online deletion of ledger records. +# These keys modify the behavior of online_delete, and thus are only +# relevant if online_delete is defined and non-zero: # -# earliest_seq The default is 32570 to match the XRP ledger -# network's earliest allowed sequence. Alternate -# networks may set this value. Minimum value of 1. +# advisory_delete 0 for disabled, 1 for enabled. 
If set, the +# administrative RPC call "can_delete" is required +# to enable online deletion of ledger records. +# Online deletion does not run automatically if +# non-zero and the last deletion was on a ledger +# greater than the current "can_delete" setting. +# Default is 0. +# +# delete_batch When automatically purging, SQLite database +# records are deleted in batches. This value +# controls the maximum size of each batch. Larger +# batches keep the databases locked for more time, +# which may cause other functions to fall behind, +# and thus cause the node to lose sync. +# Default is 100. +# +# back_off_milliseconds +# Number of milliseconds to wait between +# online_delete batches to allow other functions +# to catch up. +# Default is 100. +# +# age_threshold_seconds +# The online delete process will only run if the +# latest validated ledger is younger than this +# number of seconds. +# Default is 60. +# +# recovery_wait_seconds +# The online delete process checks periodically +# that rippled is still in sync with the network, +# and that the validated ledger is less than +# 'age_threshold_seconds' old. By default, if it +# is not the online delete process aborts and +# tries again later. If 'recovery_wait_seconds' +# is set and rippled is out of sync, but likely to +# recover quickly, then online delete will wait +# this number of seconds for rippled to get back +# into sync before it aborts. +# Set this value if the node is otherwise staying +# in sync, or recovering quickly, but the online +# delete process is unable to finish. +# Default is unset. # # Notes: # The 'node_db' entry configures the primary, persistent storage. @@ -892,6 +939,12 @@ # [import_db] Settings for performing a one-time import (optional) # [database_path] Path to the book-keeping databases. # +# The server creates and maintains 4 to 5 bookkeeping SQLite databases in +# the 'database_path' location. 
If you omit this configuration setting, +# the server creates a directory called "db" located in the same place as +# your rippled.cfg file. +# Partial pathnames are relative to the location of the rippled executable. +# # [shard_db] Settings for the Shard Database (optional) # # Format (without spaces): @@ -907,12 +960,84 @@ # # max_size_gb Maximum disk space the database will utilize (in gigabytes) # +# [sqlite] Tuning settings for the SQLite databases (optional) +# +# Format (without spaces): +# One or more lines of case-insensitive key / value pairs: +# '=' +# ... +# +# Example 1: +# sync_level=low +# +# Example 2: +# journal_mode=off +# synchronous=off +# +# WARNING: These settings can have significant effects on data integrity, +# particularly in systemic failure scenarios. It is strongly recommended +# that they be left at their defaults unless the server is having +# performance issues during normal operation or during automatic purging +# (online_delete) operations. A warning will be logged on startup if +# 'ledger_history' is configured to store more than 10,000,000 ledgers and +# any of these settings are less safe than the default. This is due to the +# inordinate amount of time and bandwidth it will take to safely rebuild a +# corrupted database of that size from other peers. +# +# Optional keys: # -# There are 4 bookkeeping SQLite database that the server creates and -# maintains. If you omit this configuration setting, it will default to -# creating a directory called "db" located in the same place as your -# rippled.cfg file. Partial pathnames will be considered relative to -# the location of the rippled executable. 
+# safety_level Valid values: high, low +# The default is "high", which tunes the SQLite +# databases in the most reliable mode, and is +# equivalent to: +# journal_mode=wal +# synchronous=normal +# temp_store=file +# "low" is equivalent to: +# journal_mode=memory +# synchronous=off +# temp_store=memory +# These "low" settings trade speed and reduced I/O +# for a higher risk of data loss. See the +# individual settings below for more information. +# This setting may not be combined with any of the +# other tuning settings: "journal_mode", +# "synchronous", or "temp_store". +# +# journal_mode Valid values: delete, truncate, persist, memory, wal, off +# The default is "wal", which uses a write-ahead +# log to implement database transactions. +# Alternately, "memory" saves disk I/O, but if +# rippled crashes during a transaction, the +# database is likely to be corrupted. +# See https://www.sqlite.org/pragma.html#pragma_journal_mode +# for more details about the available options. +# This setting may not be combined with the +# "safety_level" setting. +# +# synchronous Valid values: off, normal, full, extra +# The default is "normal", which works well with +# the "wal" journal mode. Alternatively, "off" +# allows rippled to continue as soon as data is +# passed to the OS, which can significantly +# increase speed, but risks data corruption if +# the host computer crashes before writing that +# data to disk. +# See https://www.sqlite.org/pragma.html#pragma_synchronous +# for more details about the available options. +# This setting may not be combined with the +# "safety_level" setting. +# +# temp_store Valid values: default, file, memory +# The default is "file", which will use files +# for temporary database tables and indices. +# Alternatively, "memory" may save I/O, but +# rippled does not currently use many, if any, +# of these temporary objects. +# See https://www.sqlite.org/pragma.html#pragma_temp_store +# for more details about the available options. 
+# This setting may not be combined with the +# "safety_level" setting. # # # @@ -1212,24 +1337,27 @@ medium # This is primary persistent datastore for rippled. This includes transaction # metadata, account states, and ledger headers. Helpful information can be -# found here: https://ripple.com/wiki/NodeBackEnd -# delete old ledgers while maintaining at least 2000. Do not require an -# external administrative command to initiate deletion. +# found at https://xrpl.org/capacity-planning.html#node-db-type +# type=NuDB is recommended for non-validators with fast SSDs. Validators or +# slow / spinning disks should use RocksDB. Caution: Spinning disks are +# not recommended. They do not perform well enough to consistently remain +# synced to the network. +# online_delete=512 is recommended to delete old ledgers while maintaining at +# least 512. +# advisory_delete=0 allows the online delete process to run automatically +# when the node has approximately two times the "online_delete" value of +# ledgers. No external administrative command is required to initiate +# deletion. [node_db] -type=RocksDB -path=/var/lib/rippled/db/rocksdb -open_files=2000 -filter_bits=12 -cache_mb=256 -file_size_mb=8 -file_size_mult=2 -online_delete=2000 +type=NuDB +path=/var/lib/rippled/db/nudb +online_delete=512 advisory_delete=0 # This is the persistent datastore for shards. It is important for the health # of the ripple network that rippled operators shard as much as practical. -# NuDB requires SSD storage. Helpful information can be found here -# https://ripple.com/build/history-sharding +# NuDB requires SSD storage. 
Helpful information can be found at +# https://xrpl.org/history-sharding.html #[shard_db] #path=/var/lib/rippled/db/shards/nudb #max_size_gb=500 @@ -1248,7 +1376,8 @@ time.apple.com time.nist.gov pool.ntp.org -# To use the XRP test network (see https://ripple.com/build/xrp-test-net/), +# To use the XRP test network +# (see https://xrpl.org/connect-your-rippled-to-the-xrp-test-net.html), # use the following [ips] section: # [ips] # r.altnet.rippletest.net 51235 diff --git a/docs/0001-negative-unl/README.md b/docs/0001-negative-unl/README.md new file mode 100644 index 00000000000..606b30aab15 --- /dev/null +++ b/docs/0001-negative-unl/README.md @@ -0,0 +1,597 @@ +# Negative UNL Engineering Spec + +## The Problem Statement + +The moment-to-moment health of the XRP Ledger network depends on the health and +connectivity of a small number of computers (nodes). The most important nodes +are validators, specifically ones listed on the unique node list +([UNL](#Question-What-are-UNLs)). Ripple publishes a recommended UNL that most +network nodes use to determine which peers in the network are trusted. Although +most validators use the same list, they are not required to. The XRP Ledger +network progresses to the next ledger when enough validators reach agreement +(above the minimum quorum of 80%) about what transactions to include in the next +ledger. + +As an example, if there are 10 validators on the UNL, at least 8 validators have +to agree with the latest ledger for it to become validated. But what if enough +of those validators are offline to drop the network below the 80% quorum? The +XRP Ledger network favors safety/correctness over advancing the ledger. Which +means if enough validators are offline, the network will not be able to validate +ledgers. + +Unfortunately validators can go offline at any time for many different reasons. +Power outages, network connectivity issues, and hardware failures are just a few +scenarios where a validator would appear "offline". 
Given that most of these +events are temporary, it would make sense to temporarily remove that validator +from the UNL. But the UNL is updated infrequently and not every node uses the +same UNL. So instead of removing the unreliable validator from the Ripple +recommended UNL, we can create a second negative UNL which is stored directly on +the ledger (so the entire network has the same view). This will help the network +see which validators are **currently** unreliable, and adjust their quorum +calculation accordingly. + +*Improving the liveness of the network is the main motivation for the negative UNL.* + +### Targeted Faults + +In order to determine which validators are unreliable, we need to clearly define +what kind of faults to measure and analyze. We want to deal with the faults we +frequently observe in the production network. Hence we will only monitor for +validators that do not reliably respond to network messages or send out +validations disagreeing with the locally generated validations. We will not +target other byzantine faults. + +To track whether or not a validator is responding to the network, we could +monitor them with a “heartbeat” protocol. Instead of creating a new heartbeat +protocol, we can leverage some existing protocol messages to mimic the +heartbeat. We picked validation messages because validators should send one and +only one validation message per ledger. In addition, we only count the +validation messages that agree with the local node's validations. + +With the negative UNL, the network could keep making forward progress safely +even if the number of remaining validators gets to 60%. Say we have a network +with 10 validators on the UNL and everything is operating correctly. The quorum +required for this network would be 8 (80% of 10). When validators fail, the +quorum required would be as low as 6 (60% of 10), which is the absolute +***minimum quorum***. 
We need the absolute minimum quorum to be strictly greater +than 50% of the original UNL so that there cannot be two partitions of +well-behaved nodes headed in different directions. We arbitrarily choose 60% as +the minimum quorum to give a margin of safety. + +Consider these events in the absence of negative UNL: +1. 1:00pm - validator1 fails, votes vs. quorum: 9 >= 8, we have quorum +1. 3:00pm - validator2 fails, votes vs. quorum: 8 >= 8, we have quorum +1. 5:00pm - validator3 fails, votes vs. quorum: 7 < 8, we don’t have quorum + * **network cannot validate new ledgers with 3 failed validators** + +We're below 80% agreement, so new ledgers cannot be validated. This is how the +XRP Ledger operates today, but if the negative UNL was enabled, the events would +happen as follows. (Please note that the events below are from a simplified +version of our protocol.) + +1. 1:00pm - validator1 fails, votes vs. quorum: 9 >= 8, we have quorum +1. 1:40pm - network adds validator1 to negative UNL, quorum changes to ceil(9 * 0.8), or 8 +1. 3:00pm - validator2 fails, votes vs. quorum: 8 >= 8, we have quorum +1. 3:40pm - network adds validator2 to negative UNL, quorum changes to ceil(8 * 0.8), or 7 +1. 5:00pm - validator3 fails, votes vs. quorum: 7 >= 7, we have quorum +1. 5:40pm - network adds validator3 to negative UNL, quorum changes to ceil(7 * 0.8), or 6 +1. 7:00pm - validator4 fails, votes vs. quorum: 6 >= 6, we have quorum + * **network can still validate new ledgers with 4 failed validators** + +## External Interactions + +### Message Format Changes +This proposal will: +1. add a new pseudo-transaction type +1. add the negative UNL to the ledger data structure. + +Any tools or systems that rely on the format of this data will have to be +updated. + +### Amendment +This feature **will** need an amendment to activate. 
+ +## Design + +This section discusses the following topics about the Negative UNL design: + +* [Negative UNL protocol overview](#Negative-UNL-Protocol-Overview) +* [Validator reliability measurement](#Validator-Reliability-Measurement) +* [Format Changes](#Format-Changes) +* [Negative UNL maintenance](#Negative-UNL-Maintenance) +* [Quorum size calculation](#Quorum-Size-Calculation) +* [Filter validation messages](#Filter-Validation-Messages) +* [High level sequence diagram of code + changes](#High-Level-Sequence-Diagram-of-Code-Changes) + +### Negative UNL Protocol Overview + +Every ledger stores a list of zero or more unreliable validators. Updates to the +list must be approved by the validators using the consensus mechanism that +validators use to agree on the set of transactions. The list is used only when +checking if a ledger is fully validated. If a validator V is in the list, nodes +with V in their UNL adjust the quorum and V’s validation message is not counted +when verifying if a ledger is fully validated. V’s flow of messages and network +interactions, however, will remain the same. + +We define the ***effective UNL** = original UNL - negative UNL*, and the +***effective quorum*** as the quorum of the *effective UNL*. And we set +*effective quorum = Ceiling(80% * effective UNL)*. + +### Validator Reliability Measurement + +A node only measures the reliability of validators on its own UNL, and only +proposes based on local observations. There are many metrics that a node can +measure about its validators, but we have chosen ledger validation messages. +This is because every validator shall send one and only one signed validation +message per ledger. This keeps the measurement simple and removes +timing/clock-sync issues. A node will measure the percentage of agreeing +validation messages (*PAV*) received from each validator on the node's UNL. Note +that the node will only count the validation messages that agree with its own +validations. 
+ +We define the **PAV** as the **P**ercentage of **A**greed **V**alidation +messages received for the last N ledgers, where N = 256 by default. + +When the PAV drops below the ***low-water mark***, the validator is considered +unreliable, and is a candidate to be disabled by being added to the negative +UNL. A validator must have a PAV higher than the ***high-water mark*** to be +re-enabled. The validator is re-enabled by removing it from the negative UNL. In +the implementation, we plan to set the low-water mark as 50% and the high-water +mark as 80%. + +### Format Changes + +The negative UNL component in a ledger contains three fields. +* ***NegativeUNL***: The current negative UNL, a list of unreliable validators. +* ***ToDisable***: The validator to be added to the negative UNL on the next + flag ledger. +* ***ToReEnable***: The validator to be removed from the negative UNL on the + next flag ledger. + +All three fields are optional. When the *ToReEnable* field exists, the +*NegativeUNL* field cannot be empty. + +A new pseudo-transaction, ***UNLModify***, is added. It has three fields +* ***Disabling***: A flag indicating whether the modification is to disable or + to re-enable a validator. +* ***Seq***: The ledger sequence number. +* ***Validator***: The validator to be disabled or re-enabled. + +There would be at most one *disable* `UNLModify` and one *re-enable* `UNLModify` +transaction per flag ledger. The full machinery is described further on. + +### Negative UNL Maintenance + +The negative UNL can only be modified on the flag ledgers. If a validator's +reliability status changes, it takes two flag ledgers to modify the negative +UNL. Let's see an example of the algorithm: + +* Ledger seq = 100: A validator V goes offline. +* Ledger seq = 256: This is a flag ledger, and V's reliability measurement *PAV* + is lower than the low-water mark. 
Other validators add `UNLModify` + pseudo-transactions `{true, 256, V}` to the transaction set which goes through + the consensus. Then the pseudo-transaction is applied to the negative UNL + ledger component by setting `ToDisable = V`. +* Ledger seq = 257 ~ 511: The negative UNL ledger component is copied from the + parent ledger. +* Ledger seq=512: This is a flag ledger, and the negative UNL is updated + `NegativeUNL = NegativeUNL + ToDisable`. + +The negative UNL may have up to `MaxNegativeListed = floor(original UNL * 25%)` +validators. The 25% is because of 75% * 80% = 60%, where 75% = 100% - 25%, 80% +is the quorum of the effective UNL, and 60% is the absolute minimum quorum of +the original UNL. Adding more than 25% validators to the negative UNL does not +improve the liveness of the network, because adding more validators to the +negative UNL cannot lower the effective quorum. + +The following is the detailed algorithm: + +* **If** the ledger seq = x is a flag ledger + + 1. Compute `NegativeUNL = NegativeUNL + ToDisable - ToReEnable` if they + exist in the parent ledger + + 1. Try to find a candidate to disable if `sizeof NegativeUNL < MaxNegativeListed` + + 1. Find a validator V that has a *PAV* lower than the low-water + mark, but is not in `NegativeUNL`. + + 1. If two or more are found, their public keys are XORed with the hash + of the parent ledger and the one with the lowest XOR result is chosen. + + 1. If V is found, create a `UNLModify` pseudo-transaction + `TxDisableValidator = {true, x, V}` + + 1. Try to find a candidate to re-enable if `sizeof NegativeUNL > 0`: + + 1. Find a validator U that is in `NegativeUNL` and has a *PAV* higher + than the high-water mark. + + 1. If U is not found, try to find one in `NegativeUNL` but not in the + local *UNL*. + + 1. If two or more are found, their public keys are XORed with the hash + of the parent ledger and the one with the lowest XOR result is chosen. + + 1. 
If U is found, create a `UNLModify` pseudo-transaction + `TxReEnableValidator = {false, x, U}` + + 1. If any `UNLModify` pseudo-transactions are created, add them to the + transaction set. The transaction set goes through the consensus algorithm. + + 1. If they have enough support, the `UNLModify` pseudo-transactions remain in the + transaction set agreed by the validators. Then the pseudo-transactions are + applied to the ledger: + + 1. If there is a `TxDisableValidator`, set `ToDisable=TxDisableValidator.V`. + Else clear `ToDisable`. + + 1. If there is a `TxReEnableValidator`, set + `ToReEnable=TxReEnableValidator.U`. Else clear `ToReEnable`. + +* **Else** (not a flag ledger) + + 1. Copy the negative UNL ledger component from the parent ledger + +The negative UNL is stored on each ledger because we don't know when a validator +may reconnect to the network. If the negative UNL was stored only on every flag +ledger, then a new validator would have to wait until it acquires the latest +flag ledger to know the negative UNL. So any new ledgers created that are not +flag ledgers copy the negative UNL from the parent ledger. + +Note that when we have a validator to disable and a validator to re-enable at +the same flag ledger, we create two separate `UNLModify` pseudo-transactions. We +want either one or the other or both to make it into the ledger on their own +merits. + +Readers may have noticed that we defined several rules for creating the +`UNLModify` pseudo-transactions but did not describe how to enforce the rules. +The rules are actually enforced by the existing consensus algorithm. Unless +enough validators propose the same pseudo-transaction, it will not be included in +the transaction set of the ledger. + +### Quorum Size Calculation + +The effective quorum is 80% of the effective UNL. Note that because at most 25% +of the original UNL can be on the negative UNL, the quorum should not be lower +than the absolute minimum quorum (i.e. 60%) of the original UNL. 
However, +considering that different nodes may have different UNLs, to be safe we compute +`quorum = Ceiling(max(60% * original UNL, 80% * effective UNL))`. + +### Filter Validation Messages + +If a validator V is in the negative UNL, it still participates in consensus +sessions in the same way, i.e. V still follows the protocol and publishes +proposal and validation messages. The messages from V are still stored the same +way by everyone, used to calculate the new PAV for V, and could be used in +future consensus sessions if needed. However V's ledger validation message is +not counted when checking if the ledger is fully validated. + +### High Level Sequence Diagram of Code Changes + +The diagram below is the sequence of one round of consensus. Classes and +components with non-trivial changes are colored green. + +* The `ValidatorList` class is modified to compute the quorum of the effective + UNL. + +* The `Validations` class provides an interface for querying the validation + messages from trusted validators. + +* The `ConsensusAdaptor` component: + + * The `RCLConsensus::Adaptor` class is modified for creating `UNLModify` + Pseudo-Transactions. + + * The `Change` class is modified for applying `UNLModify` + Pseudo-Transactions. + + * The `Ledger` class is modified for creating and adjusting the negative UNL + ledger component. + + * The `LedgerMaster` class is modified for filtering out validation messages + from negative UNL validators when verifying if a ledger is fully + validated. + +![Sequence diagram](./negativeUNL_highLevel_sequence.png?raw=true "Negative UNL + Changes") + + +## Roads Not Taken + +### Use a Mechanism Like Fee Voting to Process UNLModify Pseudo-Transactions + +The previous version of the negative UNL specification used the same mechanism +as the [fee voting](https://xrpl.org/fee-voting.html#voting-process.) for +creating the negative UNL, and used the negative UNL as soon as the ledger was +fully validated. 
However, the timing of full validation can differ among nodes, +so different negative UNLs could be used, resulting in different effective UNLs +and different quorums for the same ledger. As a result, the network's safety is +impacted. + +This updated version does not impact safety, though it operates a bit more slowly. +The negative UNL modifications in the *UNLModify* pseudo-transaction approved by +the consensus will take effect at the next flag ledger. The extra time of the +256 ledgers should be enough for nodes to be in sync with the negative UNL +modifications. + +### Use an Expiration Approach to Re-enable Validators + +After a validator disabled by the negative UNL becomes reliable, other +validators explicitly vote for re-enabling it. An alternative approach to +re-enable a validator is the expiration approach, which was considered in the +previous version of the specification. In the expiration approach, every entry +in the negative UNL has a fixed expiration time. One flag ledger interval was +chosen as the expiration interval. Once expired, the other validators must +continue voting to keep the unreliable validator on the negative UNL. The +advantage of this approach is its simplicity. But it has a requirement. The +negative UNL protocol must be able to vote multiple unreliable validators to be +disabled at the same flag ledger. In this version of the specification, however, +only one unreliable validator can be disabled at a flag ledger. So the +expiration approach cannot be simply applied. + +### Validator Reliability Measurement and Flag Ledger Frequency + +If the ledger time is about 4.5 seconds and the low-water mark is 50%, then in +the worst case, it takes 48 minutes *((0.5 * 256 + 256 + 256) * 4.5 / 60 = 48)* +to put an offline validator on the negative UNL. We considered lowering the flag +ledger frequency so that the negative UNL can be more responsive. 
We also +considered decoupling the reliability measurement and flag ledger frequency to +be more flexible. In practice, however, their benefits are not clear. + + +## New Attack Vectors + +A group of malicious validators may try to frame a reliable validator and put it +on the negative UNL. But they cannot succeed. Because: + +1. A reliable validator sends a signed validation message every ledger. A +sufficient peer-to-peer network will propagate the validation messages to other +validators. The validators will decide if another validator is reliable or not +only by its local observation of the validation messages received. So an honest +validator’s vote on another validator’s reliability is accurate. + +1. Given the votes are accurate, and one vote per validator, an honest validator +will not create a UNLModify transaction of a reliable validator. + +1. A validator can be added to a negative UNL only through a UNLModify +transaction. + +Assuming the group of malicious validators is less than the quorum, they cannot +frame a reliable validator. + +## Summary + +The bullet points below briefly summarize the current proposal: + +* The motivation of the negative UNL is to improve the liveness of the network. + +* The targeted faults are the ones frequently observed in the production + network. + +* Validators propose negative UNL candidates based on their local measurements. + +* The absolute minimum quorum is 60% of the original UNL. + +* The format of the ledger is changed, and a new *UNLModify* pseudo-transaction + is added. Any tools or systems that rely on the format of these data will have + to be updated. + +* The negative UNL can only be modified on the flag ledgers. + +* At most one validator can be added to the negative UNL at a flag ledger. + +* At most one validator can be removed from the negative UNL at a flag ledger. + +* If a validator's reliability status changes, it takes two flag ledgers to + modify the negative UNL. 
+ +* The quorum is the larger of 80% of the effective UNL and 60% of the original + UNL. + +* If a validator is on the negative UNL, its validation messages are ignored + when the local node verifies if a ledger is fully validated. + +## FAQ + +### Question: What are UNLs? + +Quote from the [Technical FAQ](https://xrpl.org/technical-faq.html): "They are +the lists of transaction validators a given participant believes will not +conspire to defraud them." + +### Question: How does the negative UNL proposal affect network liveness? + +The network can make forward progress when more than a quorum of the trusted +validators agree with the progress. The lower the quorum size is, the easier for +the network to progress. If the quorum is too low, however, the network is not +safe because nodes may have different results. So the quorum size used in the +consensus protocol is a balance between the safety and the liveness of the +network. The negative UNL reduces the size of the effective UNL, resulting in a +lower quorum size while keeping the network safe. + +

Question: How does a validator get into the negative UNL? How is a +validator removed from the negative UNL?

+ +A validator’s reliability is measured by other validators. If a validator +becomes unreliable, at a flag ledger, other validators propose *UNLModify* +pseudo-transactions which vote the validator to add to the negative UNL during +the consensus session. If agreed, the validator is added to the negative UNL at +the next flag ledger. The mechanism of removing a validator from the negative +UNL is the same. + +### Question: Given a negative UNL, what happens if the UNL changes? + +Answer: Let’s consider the cases: + +1. A validator is added to the UNL, and it is already in the negative UNL. This +case could happen when not all the nodes have the same UNL. Note that the +negative UNL on the ledger lists unreliable nodes that are not necessarily the +validators for everyone. + + In this case, the liveness is affected negatively. Because the minimum + quorum could be larger but the usable validators are not increased. + +1. A validator is removed from the UNL, and it is in the negative UNL. + + In this case, the liveness is affected positively. Because the quorum could + be smaller but the usable validators are not reduced. + +1. A validator is added to the UNL, and it is not in the negative UNL. +1. A validator is removed from the UNL, and it is not in the negative UNL. + + Case 3 and 4 are not affected by the negative UNL protocol. + +### Question: Can we simply lower the quorum to 60% without the negative UNL? + +Answer: No, because the negative UNL approach is safer. + +First let’s compare the two approaches intuitively, (1) the *negative UNL* +approach, and (2) *lower quorum*: simply lowering the quorum from 80% to 60% +without the negative UNL. The negative UNL approach uses consensus to come up +with a list of unreliable validators, which are then removed from the effective +UNL temporarily. With this approach, the list of unreliable validators is agreed +to by a quorum of validators and will be used by every node in the network to +adjust its UNL. 
The quorum is always 80% of the effective UNL. The lower quorum +approach is a tradeoff between safety and liveness and against our principle of +preferring safety over liveness. Note that different validators don't have to +agree on which validation sources they are ignoring. + +Next we compare the two approaches quantitatively with examples, and apply +Theorem 8 of [Analysis of the XRP Ledger Consensus +Protocol](https://arxiv.org/abs/1802.07242) paper: + +*XRP LCP guarantees fork safety if **Oi,j > nj / 2 + +ni − qi + ti,j** for every pair of nodes +Pi, Pj,* + +where *Oi,j* is the overlapping requirement, nj and +ni are UNL sizes, qi is the quorum size of Pi, +*ti,j = min(ti, tj, Oi,j)*, and +ti and tj are the number of faults can be tolerated by +Pi and Pj. + +We denote *UNLi* as *Pi's UNL*, and *|UNLi|* as +the size of *Pi's UNL*. + +Assuming *|UNLi| = |UNLj|*, let's consider the following +three cases: + +1. With 80% quorum and 20% faults, *Oi,j > 100% / 2 + 100% - 80% + +20% = 90%*. I.e. fork safety requires > 90% UNL overlaps. This is one of the +results in the analysis paper. + +1. If the quorum is 60%, the relationship between the overlapping requirement +and the faults that can be tolerated is *Oi,j > 90% + +ti,j*. Under the same overlapping condition (i.e. 90%), to guarantee +the fork safety, the network cannot tolerate any faults. So under the same +overlapping condition, if the quorum is simply lowered, the network can tolerate +fewer faults. + +1. With the negative UNL approach, we want to argue that the inequation +*Oi,j > nj / 2 + ni − qi + +ti,j* is always true to guarantee fork safety, while the negative UNL +protocol runs, i.e. the effective quorum is lowered without weakening the +network's fault tolerance. To make the discussion easier, we rewrite the +inequation as *Oi,j > nj / 2 + (ni − +qi) + min(ti, tj)*, where Oi,j is +dropped from the definition of ti,j because *Oi,j > +min(ti, tj)* always holds under the parameters we will +use. 
Assuming a validator V is added to the negative UNL, now let's consider the +4 cases: + + 1. V is not on UNLi nor UNLj + + The inequation holds because none of the variables change. + + 1. V is on UNLi but not on UNLj + + The value of *(ni − qi)* is smaller. The value of + *min(ti, tj)* could be smaller too. Other + variables do not change. Overall, the left side of the inequation does + not change, but the right side is smaller. So the inequation holds. + + 1. V is not on UNLi but on UNLj + + The value of *nj / 2* is smaller. The value of + *min(ti, tj)* could be smaller too. Other + variables do not change. Overall, the left side of the inequation does + not change, but the right side is smaller. So the inequation holds. + + 1. V is on both UNLi and UNLj + + The value of *Oi,j* is reduced by 1. The values of + *nj / 2*, *(ni − qi)*, and + *min(ti, tj)* are reduced by 0.5, 0.2, and 1 + respectively. The right side is reduced by 1.7. Overall, the left side + of the inequation is reduced by 1, and the right side is reduced by 1.7. + So the inequation holds. + + The inequation holds for all the cases. So with the negative UNL approach, + the network's fork safety is preserved, while the quorum is lowered that + increases the network's liveness. + +

Question: We have observed that occasionally a validator wanders off on its +own chain. How is this case handled by the negative UNL algorithm?

+ +Answer: The case that a validator wanders off on its own chain can be measured +with the validation agreement, because the validations by this validator must +be different from other validators' validations of the same sequence numbers. +When there are enough disagreeing validations, other validators will vote this +validator onto the negative UNL. + +In general, by measuring the agreement of validations, we also measure the +"sanity". If two validators have too many disagreements, one of them could be +insane. When enough validators think a validator is insane, that validator is +put on the negative UNL. + +

Question: Why would there be at most one disable UNLModify and one +re-enable UNLModify transaction per flag ledger?

+ +Answer: It is a design choice so that the effective UNL does not change too +quickly. A typical targeted scenario is that several validators go offline slowly +during a long weekend. The current design can handle this kind of case well +without changing the effective UNL too quickly. + +## Appendix + +### Confidence Test + +We will use two test networks, a single machine test network with multiple IP +addresses and the QE test network with multiple machines. The single machine +network will be used to test all the test cases and to debug. The QE network +will be used after that. We want to see the test cases still pass with real +network delay. A test case specifies: + +1. a UNL with different numbers of validators for different test cases, +1. a network with zero or more non-validator nodes, +1. a sequence of validator reliability change events (by killing/restarting + nodes, or by running modified rippled that does not send all validation + messages), +1. the correct outcomes. + +For all the test cases, the correct outcomes are verified by examining logs. We +will grep the log to see if the correct negative UNLs are generated, and whether +or not the network is making progress when it should be. The ripdtop tool will +be helpful for monitoring validators' states and ledger progress. Some of the +timing parameters of rippled will be changed to have faster ledger time. Most if +not all test cases do not need client transactions. + +For example, the test cases for the prototype: +1. A 10-validator UNL. +1. The network does not have other nodes. +1. The validators will be started from the genesis. Once they start to produce + ledgers, we kill five validators, one every flag ledger interval. Then we + will restart them one by one. +1. A sequence of events (or the lack of events) such as a killed validator is + added to the negative UNL. 
+ +#### Roads Not Taken: Test with Extended CSF + +We considered testing with the current unit test framework, specifically the +[Consensus Simulation +Framework](https://github.com/ripple/rippled/blob/develop/src/test/csf/README.md) +(CSF). However, the CSF currently can only test the generic consensus algorithm +as in the paper: [Analysis of the XRP Ledger Consensus +Protocol](https://arxiv.org/abs/1802.07242). \ No newline at end of file diff --git a/docs/0001-negative-unl/negativeUNLSqDiagram.puml b/docs/0001-negative-unl/negativeUNLSqDiagram.puml new file mode 100644 index 00000000000..8cb491af6ab --- /dev/null +++ b/docs/0001-negative-unl/negativeUNLSqDiagram.puml @@ -0,0 +1,79 @@ +@startuml negativeUNL_highLevel_sequence + +skinparam sequenceArrowThickness 2 +skinparam roundcorner 20 +skinparam maxmessagesize 160 + +actor "Rippled Start" as RS +participant "Timer" as T +participant "NetworkOPs" as NOP +participant "ValidatorList" as VL #lightgreen +participant "Consensus" as GC +participant "ConsensusAdaptor" as CA #lightgreen +participant "Validations" as RM #lightgreen + +RS -> NOP: begin consensus +activate NOP +NOP -[#green]> VL: update negative UNL +hnote over VL#lightgreen: store a copy of\nnegative UNL +VL -> NOP +NOP -> VL: update trusted validators +activate VL +VL -> VL: re-calculate quorum +hnote over VL#lightgreen: ignore negative listed validators\nwhen calculate quorum +VL -> NOP +deactivate VL +NOP -> GC: start round +activate GC +GC -> GC: phase = OPEN +GC -> NOP +deactivate GC +deactivate NOP + +loop at regular frequency +T -> GC: timerEntry +activate GC +end + +alt phase == OPEN + alt should close ledger + GC -> GC: phase = ESTABLISH + GC -> CA: onClose + activate CA + alt sqn%256==0 + CA -[#green]> RM: getValidations + CA -[#green]> CA: create UNLModify Tx + hnote over CA#lightgreen: use validatations of the last 256 ledgers\nto figure out UNLModify Tx candidates.\nIf any, create UNLModify Tx, and add to TxSet. 
+ end + CA -> GC + GC -> CA: propose + deactivate CA + end +else phase == ESTABLISH + hnote over GC: receive peer postions + GC -> GC : update our position + GC -> CA : propose \n(if position changed) + GC -> GC : check if have consensus + alt consensus reached + GC -> GC: phase = ACCEPT + GC -> CA : onAccept + activate CA + CA -> CA : build LCL + hnote over CA #lightgreen: copy negative UNL from parent ledger + alt sqn%256==0 + CA -[#green]> CA: Adjust negative UNL + CA -[#green]> CA: apply UNLModify Tx + end + CA -> CA : validate and send validation message + activate NOP + CA -> NOP : end consensus and\nbegin next consensus round + deactivate NOP + deactivate CA + hnote over RM: receive validations + end +else phase == ACCEPTED + hnote over GC: timerEntry hash nothing to do at this phase +end +deactivate GC + +@enduml \ No newline at end of file diff --git a/docs/0001-negative-unl/negativeUNL_highLevel_sequence.png b/docs/0001-negative-unl/negativeUNL_highLevel_sequence.png new file mode 100644 index 00000000000..b962693b49c Binary files /dev/null and b/docs/0001-negative-unl/negativeUNL_highLevel_sequence.png differ diff --git a/src/ripple/app/consensus/RCLConsensus.cpp b/src/ripple/app/consensus/RCLConsensus.cpp index f05497a5332..9b5ae65cd48 100644 --- a/src/ripple/app/consensus/RCLConsensus.cpp +++ b/src/ripple/app/consensus/RCLConsensus.cpp @@ -22,12 +22,14 @@ #include #include #include +#include #include #include #include #include #include #include +#include #include #include #include @@ -87,8 +89,10 @@ RCLConsensus::Adaptor::Adaptor( , nodeID_{validatorKeys.nodeID} , valPublic_{validatorKeys.publicKey} , valSecret_{validatorKeys.secretKey} - , valCookie_{ - rand_int(1, std::numeric_limits::max())} + , valCookie_{rand_int( + 1, + std::numeric_limits::max())} + , nUnlVote_(nodeID_, j_) { assert(valCookie_ != 0); @@ -190,7 +194,7 @@ RCLConsensus::Adaptor::propose(RCLCxPeerPos::Proposal const& proposal) prop.set_currenttxhash( proposal.position().begin(), 
proposal.position().size()); prop.set_previousledger( - proposal.prevLedger().begin(), proposal.position().size()); + proposal.prevLedger().begin(), proposal.prevLedger().size()); prop.set_proposeseq(proposal.proposeSeq()); prop.set_closetime(proposal.closeTime().time_since_epoch().count()); @@ -300,7 +304,7 @@ RCLConsensus::Adaptor::onClose( auto initialLedger = app_.openLedger().current(); auto initialSet = - std::make_shared(SHAMapType::TRANSACTION, app_.family()); + std::make_shared(SHAMapType::TRANSACTION, app_.getNodeFamily()); initialSet->setUnbacked(); // Build SHAMap containing all transactions in our open ledger @@ -317,18 +321,34 @@ RCLConsensus::Adaptor::onClose( } // Add pseudo-transactions to the set - if ((app_.config().standalone() || (proposing && !wrongLCL)) && - ((prevLedger->info().seq % 256) == 0)) + if (app_.config().standalone() || (proposing && !wrongLCL)) { - // previous ledger was flag ledger, add pseudo-transactions - auto const validations = app_.getValidations().getTrustedForLedger( - prevLedger->info().parentHash); - - if (validations.size() >= app_.validators().quorum()) + if (prevLedger->isFlagLedger()) { - feeVote_->doVoting(prevLedger, validations, initialSet); - app_.getAmendmentTable().doVoting( - prevLedger, validations, initialSet); + // previous ledger was flag ledger, add fee and amendment + // pseudo-transactions + auto validations = app_.validators().negativeUNLFilter( + app_.getValidations().getTrustedForLedger( + prevLedger->info().parentHash)); + if (validations.size() >= app_.validators().quorum()) + { + feeVote_->doVoting(prevLedger, validations, initialSet); + app_.getAmendmentTable().doVoting( + prevLedger, validations, initialSet); + } + } + else if ( + prevLedger->isVotingLedger() && + prevLedger->rules().enabled(featureNegativeUNL)) + { + // previous ledger was a voting ledger, + // so the current consensus session is for a flag ledger, + // add negative UNL pseudo-transactions + nUnlVote_.doVoting( + prevLedger, 
+ app_.validators().getTrustedMasterKeys(), + app_.getValidations(), + initialSet); } } @@ -793,7 +813,7 @@ RCLConsensus::Adaptor::validate( v.setFieldU64(sfCookie, valCookie_); // Report our server version every flag ledger: - if ((ledger.seq() + 1) % 256 == 0) + if (ledger.ledger_->isVotingLedger()) v.setFieldU64( sfServerVersion, BuildInfo::getEncodedVersion()); } @@ -808,7 +828,7 @@ RCLConsensus::Adaptor::validate( // If the next ledger is a flag ledger, suggest fee changes and // new features: - if ((ledger.seq() + 1) % 256 == 0) + if (ledger.ledger_->isVotingLedger()) { // Fees: feeVote_->doValidation(ledger.ledger_->fees(), v); @@ -922,7 +942,9 @@ RCLConsensus::peerProposal( } bool -RCLConsensus::Adaptor::preStartRound(RCLCxLedger const& prevLgr) +RCLConsensus::Adaptor::preStartRound( + RCLCxLedger const& prevLgr, + hash_set const& nowTrusted) { // We have a key, we do not want out of sync validations after a restart // and are not amendment blocked. @@ -961,6 +983,11 @@ RCLConsensus::Adaptor::preStartRound(RCLCxLedger const& prevLgr) // Notify inbound ledgers that we are starting a new round inboundTransactions_.newRound(prevLgr.seq()); + // Notify NegativeUNLVote that new validators are added + if (prevLgr.ledger_->rules().enabled(featureNegativeUNL) && + !nowTrusted.empty()) + nUnlVote_.newValidators(prevLgr.seq() + 1, nowTrusted); + // propose only if we're in sync with the network (and validating) return validating_ && synced; } @@ -1009,10 +1036,15 @@ RCLConsensus::startRound( NetClock::time_point const& now, RCLCxLedger::ID const& prevLgrId, RCLCxLedger const& prevLgr, - hash_set const& nowUntrusted) + hash_set const& nowUntrusted, + hash_set const& nowTrusted) { std::lock_guard _{mutex_}; consensus_.startRound( - now, prevLgrId, prevLgr, nowUntrusted, adaptor_.preStartRound(prevLgr)); + now, + prevLgrId, + prevLgr, + nowUntrusted, + adaptor_.preStartRound(prevLgr, nowTrusted)); } } // namespace ripple diff --git 
a/src/ripple/app/consensus/RCLConsensus.h b/src/ripple/app/consensus/RCLConsensus.h index f06dc5e5a3b..f0ab98c147c 100644 --- a/src/ripple/app/consensus/RCLConsensus.h +++ b/src/ripple/app/consensus/RCLConsensus.h @@ -25,6 +25,7 @@ #include #include #include +#include #include #include #include @@ -85,6 +86,7 @@ class RCLConsensus std::atomic mode_{ConsensusMode::observing}; RCLCensorshipDetector censorshipDetector_; + NegativeUNLVote nUnlVote_; public: using Ledger_t = RCLCxLedger; @@ -131,10 +133,13 @@ class RCLConsensus /** Called before kicking off a new consensus round. @param prevLedger Ledger that will be prior ledger for next round + @param nowTrusted the new validators @return Whether we enter the round proposing */ bool - preStartRound(RCLCxLedger const& prevLedger); + preStartRound( + RCLCxLedger const& prevLedger, + hash_set const& nowTrusted); bool haveValidated() const; @@ -471,13 +476,16 @@ class RCLConsensus Json::Value getJson(bool full) const; - //! @see Consensus::startRound + /** Adjust the set of trusted validators and kick-off the next round of + consensus. For more details, @see Consensus::startRound + */ void startRound( NetClock::time_point const& now, RCLCxLedger::ID const& prevLgrId, RCLCxLedger const& prevLgr, - hash_set const& nowUntrusted); + hash_set const& nowUntrusted, + hash_set const& nowTrusted); //! 
@see Consensus::timerEntry void diff --git a/src/ripple/app/ledger/InboundLedger.h b/src/ripple/app/ledger/InboundLedger.h index 009bd627a02..6688d0dc358 100644 --- a/src/ripple/app/ledger/InboundLedger.h +++ b/src/ripple/app/ledger/InboundLedger.h @@ -142,8 +142,9 @@ class InboundLedger final : public PeerSet, void addPeers(); + void - tryDB(Family& f); + tryDB(NodeStore::Database& srcDB); void done(); @@ -174,23 +175,13 @@ class InboundLedger final : public PeerSet, bool takeHeader(std::string const& data); - bool - takeTxNode( - const std::vector& IDs, - const std::vector& data, - SHAMapAddNode&); + + void + receiveNode(protocol::TMLedgerData& packet, SHAMapAddNode&); + bool takeTxRootNode(Slice const& data, SHAMapAddNode&); - // VFALCO TODO Rename to receiveAccountStateNode - // Don't use acronyms, but if we are going to use them at least - // capitalize them correctly. - // - bool - takeAsNode( - const std::vector& IDs, - const std::vector& data, - SHAMapAddNode&); bool takeAsRootNode(Slice const& data, SHAMapAddNode&); diff --git a/src/ripple/app/ledger/Ledger.cpp b/src/ripple/app/ledger/Ledger.cpp index 3a6c43376c5..3801d994441 100644 --- a/src/ripple/app/ledger/Ledger.cpp +++ b/src/ripple/app/ledger/Ledger.cpp @@ -40,6 +40,7 @@ #include #include #include +#include #include #include #include @@ -228,14 +229,14 @@ Ledger::Ledger( !txMap_->fetchRoot(SHAMapHash{info_.txHash}, nullptr)) { loaded = false; - JLOG(j.warn()) << "Don't have TX root for ledger"; + JLOG(j.warn()) << "Don't have transaction root for ledger" << info_.seq; } if (info_.accountHash.isNonZero() && !stateMap_->fetchRoot(SHAMapHash{info_.accountHash}, nullptr)) { loaded = false; - JLOG(j.warn()) << "Don't have AS root for ledger"; + JLOG(j.warn()) << "Don't have state data root for ledger" << info_.seq; } txMap_->setImmutable(); @@ -248,7 +249,7 @@ Ledger::Ledger( { info_.hash = calculateLedgerHash(info_); if (acquire) - family.missing_node(info_.hash, info_.seq); + 
family.missingNode(info_.hash, info_.seq); } } @@ -598,6 +599,112 @@ Ledger::peek(Keylet const& k) const return sle; } +hash_set +Ledger::negativeUnl() const +{ + hash_set negUnl; + if (auto sle = read(keylet::negativeUNL()); + sle && sle->isFieldPresent(sfNegativeUNL)) + { + auto const& nUnlData = sle->getFieldArray(sfNegativeUNL); + for (auto const& n : nUnlData) + { + if (n.isFieldPresent(sfPublicKey)) + { + auto d = n.getFieldVL(sfPublicKey); + auto s = makeSlice(d); + if (!publicKeyType(s)) + { + continue; + } + negUnl.emplace(s); + } + } + } + + return negUnl; +} + +boost::optional +Ledger::negativeUnlToDisable() const +{ + if (auto sle = read(keylet::negativeUNL()); + sle && sle->isFieldPresent(sfNegativeUNLToDisable)) + { + auto d = sle->getFieldVL(sfNegativeUNLToDisable); + auto s = makeSlice(d); + if (publicKeyType(s)) + return PublicKey(s); + } + + return boost::none; +} + +boost::optional +Ledger::negativeUnlToReEnable() const +{ + if (auto sle = read(keylet::negativeUNL()); + sle && sle->isFieldPresent(sfNegativeUNLToReEnable)) + { + auto d = sle->getFieldVL(sfNegativeUNLToReEnable); + auto s = makeSlice(d); + if (publicKeyType(s)) + return PublicKey(s); + } + + return boost::none; +} + +void +Ledger::updateNegativeUNL() +{ + auto sle = peek(keylet::negativeUNL()); + if (!sle) + return; + + bool const hasToDisable = sle->isFieldPresent(sfNegativeUNLToDisable); + bool const hasToReEnable = sle->isFieldPresent(sfNegativeUNLToReEnable); + + if (!hasToDisable && !hasToReEnable) + return; + + STArray newNUnl; + if (sle->isFieldPresent(sfNegativeUNL)) + { + auto const& oldNUnl = sle->getFieldArray(sfNegativeUNL); + for (auto v : oldNUnl) + { + if (hasToReEnable && v.isFieldPresent(sfPublicKey) && + v.getFieldVL(sfPublicKey) == + sle->getFieldVL(sfNegativeUNLToReEnable)) + continue; + newNUnl.push_back(v); + } + } + + if (hasToDisable) + { + newNUnl.emplace_back(sfNegativeUNLEntry); + newNUnl.back().setFieldVL( + sfPublicKey, 
sle->getFieldVL(sfNegativeUNLToDisable)); + newNUnl.back().setFieldU32(sfFirstLedgerSequence, seq()); + } + + if (!newNUnl.empty()) + { + sle->setFieldArray(sfNegativeUNL, newNUnl); + if (hasToReEnable) + sle->makeFieldAbsent(sfNegativeUNLToReEnable); + if (hasToDisable) + sle->makeFieldAbsent(sfNegativeUNLToDisable); + rawReplace(sle); + } + else + { + rawErase(sle); + } +} + //------------------------------------------------------------------------------ bool Ledger::walkLedger(beast::Journal j) const @@ -735,6 +842,23 @@ Ledger::updateSkipList() rawReplace(sle); } +bool +Ledger::isFlagLedger() const +{ + return info_.seq % FLAG_LEDGER_INTERVAL == 0; +} +bool +Ledger::isVotingLedger() const +{ + return (info_.seq + 1) % FLAG_LEDGER_INTERVAL == 0; +} + +bool +isFlagLedger(LedgerIndex seq) +{ + return seq % FLAG_LEDGER_INTERVAL == 0; +} + static bool saveValidatedLedger( Application& app, @@ -1077,7 +1201,7 @@ loadLedgerHelper(std::string const& sqlSuffix, Application& app, bool acquire) loaded, acquire, app.config(), - app.family(), + app.getNodeFamily(), app.journal("Ledger")); if (!loaded) diff --git a/src/ripple/app/ledger/Ledger.h b/src/ripple/app/ledger/Ledger.h index baf4e2f4090..5f088651a8d 100644 --- a/src/ripple/app/ledger/Ledger.h +++ b/src/ripple/app/ledger/Ledger.h @@ -329,6 +329,46 @@ class Ledger final : public std::enable_shared_from_this, void unshare() const; + /** + * get Negative UNL validators' master public keys + * + * @return the public keys + */ + hash_set + negativeUnl() const; + + /** + * get the to be disabled validator's master public key if any + * + * @return the public key if any + */ + boost::optional + negativeUnlToDisable() const; + + /** + * get the to be re-enabled validator's master public key if any + * + * @return the public key if any + */ + boost::optional + negativeUnlToReEnable() const; + + /** + * update the Negative UNL ledger component. 
+ * @note must be called at and only at flag ledgers + * must be called before applying UNLModify Tx + */ + void + updateNegativeUNL(); + + /** Returns true if the ledger is a flag ledger */ + bool + isFlagLedger() const; + + /** Returns true if the ledger directly precedes a flag ledger */ + bool + isVotingLedger() const; + private: class sles_iter_impl; class txs_iter_impl; @@ -355,6 +395,11 @@ class Ledger final : public std::enable_shared_from_this, /** A ledger wrapped in a CachedView. */ using CachedLedger = CachedView; +std::uint32_t constexpr FLAG_LEDGER_INTERVAL = 256; +/** Returns true if the given ledgerIndex is a flag ledgerIndex */ +bool +isFlagLedger(LedgerIndex seq); + //------------------------------------------------------------------------------ // // API diff --git a/src/ripple/app/ledger/LedgerMaster.h b/src/ripple/app/ledger/LedgerMaster.h index b82fce0bd12..8c3f812541f 100644 --- a/src/ripple/app/ledger/LedgerMaster.h +++ b/src/ripple/app/ledger/LedgerMaster.h @@ -54,6 +54,10 @@ class Transaction; class LedgerMaster : public Stoppable, public AbstractFetchPackContainer { public: + // Age for last validated ledger if the process has yet to validate. + static constexpr std::chrono::seconds NO_VALIDATED_LEDGER_AGE = + std::chrono::hours{24 * 14}; + explicit LedgerMaster( Application& app, Stopwatch& stopwatch, @@ -160,7 +164,7 @@ class LedgerMaster : public Stoppable, public AbstractFetchPackContainer /** Walk to a ledger's hash using the skip list */ boost::optional - walkHashBySeq(std::uint32_t index); + walkHashBySeq(std::uint32_t index, InboundLedger::Reason reason); /** Walk the chain of ledger hashes to determine the hash of the ledger with the specified index. 
The referenceLedger is used as @@ -172,7 +176,8 @@ class LedgerMaster : public Stoppable, public AbstractFetchPackContainer boost::optional walkHashBySeq( std::uint32_t index, - std::shared_ptr const& referenceLedger); + std::shared_ptr const& referenceLedger, + InboundLedger::Reason reason); std::shared_ptr getLedgerBySeq(std::uint32_t index); @@ -183,11 +188,6 @@ class LedgerMaster : public Stoppable, public AbstractFetchPackContainer void setLedgerRangePresent(std::uint32_t minV, std::uint32_t maxV); - boost::optional - getLedgerHash( - std::uint32_t desiredSeq, - std::shared_ptr const& knownGoodLedger); - boost::optional getCloseTimeBySeq(LedgerIndex ledgerIndex); @@ -264,7 +264,7 @@ class LedgerMaster : public Stoppable, public AbstractFetchPackContainer gotFetchPack(bool progress, std::uint32_t seq); void - addFetchPack(uint256 const& hash, std::shared_ptr& data); + addFetchPack(uint256 const& hash, std::shared_ptr data); boost::optional getFetchPack(uint256 const& hash) override; @@ -407,6 +407,9 @@ class LedgerMaster : public Stoppable, public AbstractFetchPackContainer // without first wiping the database. LedgerIndex const max_ledger_difference_{1000000}; + // Time that the previous upgrade warning was issued. 
+ TimeKeeper::time_point upgradeWarningPrevTime_{}; + private: struct Stats { diff --git a/src/ripple/app/ledger/impl/BuildLedger.cpp b/src/ripple/app/ledger/impl/BuildLedger.cpp index 62aab964159..97592220eb9 100644 --- a/src/ripple/app/ledger/impl/BuildLedger.cpp +++ b/src/ripple/app/ledger/impl/BuildLedger.cpp @@ -47,6 +47,11 @@ buildLedgerImpl( { auto built = std::make_shared(*parent, closeTime); + if (built->isFlagLedger() && built->rules().enabled(featureNegativeUNL)) + { + built->updateNegativeUNL(); + } + // Set up to write SHAMap changes to our database, // perform updates, extract changes diff --git a/src/ripple/app/ledger/impl/InboundLedger.cpp b/src/ripple/app/ledger/impl/InboundLedger.cpp index 3131e3304cf..fdfc9739140 100644 --- a/src/ripple/app/ledger/impl/InboundLedger.cpp +++ b/src/ripple/app/ledger/impl/InboundLedger.cpp @@ -47,7 +47,7 @@ enum { , peerCountAdd = 2 - // how many timeouts before we giveup + // how many timeouts before we give up , ledgerTimeoutRetriesMax = 10 @@ -97,26 +97,30 @@ InboundLedger::init(ScopedLockType& collectionLock) { ScopedLockType sl(mLock); collectionLock.unlock(); - tryDB(app_.family()); + + tryDB(app_.getNodeFamily().db()); if (mFailed) return; + if (!mComplete) { auto shardStore = app_.getShardStore(); if (mReason == Reason::SHARD) { - if (!shardStore || !app_.shardFamily()) + if (!shardStore) { JLOG(m_journal.error()) << "Acquiring shard with no shard store available"; mFailed = true; return; } + mHaveHeader = false; mHaveTransactions = false; mHaveState = false; mLedger.reset(); - tryDB(*app_.shardFamily()); + + tryDB(app_.getShardFamily()->db()); if (mFailed) return; } @@ -197,11 +201,11 @@ InboundLedger::checkLocal() if (!isDone()) { if (mLedger) - tryDB(mLedger->stateMap().family()); + tryDB(mLedger->stateMap().family().db()); else if (mReason == Reason::SHARD) - tryDB(*app_.shardFamily()); + tryDB(app_.getShardFamily()->db()); else - tryDB(app_.family()); + tryDB(app_.getNodeFamily().db()); if (mFailed || 
mComplete) { done(); @@ -293,14 +297,17 @@ deserializePrefixedHeader(Slice data) // See how much of the ledger data is stored locally // Data found in a fetch pack will be stored void -InboundLedger::tryDB(Family& f) +InboundLedger::tryDB(NodeStore::Database& srcDB) { if (!mHaveHeader) { auto makeLedger = [&, this](Blob const& data) { JLOG(m_journal.trace()) << "Ledger header found in fetch pack"; mLedger = std::make_shared( - deserializePrefixedHeader(makeSlice(data)), app_.config(), f); + deserializePrefixedHeader(makeSlice(data)), + app_.config(), + mReason == Reason::SHARD ? *app_.getShardFamily() + : app_.getNodeFamily()); if (mLedger->info().hash != mHash || (mSeq != 0 && mSeq != mLedger->info().seq)) { @@ -314,25 +321,41 @@ InboundLedger::tryDB(Family& f) }; // Try to fetch the ledger header from the DB - auto node = f.db().fetch(mHash, mSeq); - if (!node) + if (auto node = srcDB.fetch(mHash, mSeq)) { + JLOG(m_journal.trace()) << "Ledger header found in local store"; + + makeLedger(node->getData()); + if (mFailed) + return; + + // Store the ledger header if the source and destination differ + auto& dstDB{mLedger->stateMap().family().db()}; + if (std::addressof(dstDB) != std::addressof(srcDB)) + { + Blob blob{node->getData()}; + dstDB.store( + hotLEDGER, std::move(blob), mHash, mLedger->info().seq); + } + } + else + { + // Try to fetch the ledger header from a fetch pack auto data = app_.getLedgerMaster().getFetchPack(mHash); if (!data) return; + JLOG(m_journal.trace()) << "Ledger header found in fetch pack"; + makeLedger(*data); - if (mLedger) - f.db().store( - hotLEDGER, std::move(*data), mHash, mLedger->info().seq); - } - else - { - JLOG(m_journal.trace()) << "Ledger header found in node store"; - makeLedger(node->getData()); + if (mFailed) + return; + + // Store the ledger header in the ledger's database + mLedger->stateMap().family().db().store( + hotLEDGER, std::move(*data), mHash, mLedger->info().seq); } - if (mFailed) - return; + if (mSeq == 0) mSeq = 
mLedger->info().seq; mLedger->stateMap().setLedgerSeq(mSeq); @@ -540,7 +563,9 @@ InboundLedger::trigger(std::shared_ptr const& peer, TriggerReason reason) if (!mHaveHeader) { - tryDB(mReason == Reason::SHARD ? *app_.shardFamily() : app_.family()); + tryDB( + mReason == Reason::SHARD ? app_.getShardFamily()->db() + : app_.getNodeFamily().db()); if (mFailed) { JLOG(m_journal.warn()) << " failed local for " << mHash; @@ -841,7 +866,8 @@ InboundLedger::takeHeader(std::string const& data) if (mComplete || mFailed || mHaveHeader) return true; - auto* f = mReason == Reason::SHARD ? app_.shardFamily() : &app_.family(); + auto* f = mReason == Reason::SHARD ? app_.getShardFamily() + : &app_.getNodeFamily(); mLedger = std::make_shared( deserializeHeader(makeSlice(data)), app_.config(), *f); if (mLedger->info().hash != mHash || @@ -876,149 +902,87 @@ InboundLedger::takeHeader(std::string const& data) return true; } -/** Process TX data received from a peer +/** Process node data received from a peer Call with a lock */ -bool -InboundLedger::takeTxNode( - const std::vector& nodeIDs, - const std::vector& data, - SHAMapAddNode& san) +void +InboundLedger::receiveNode(protocol::TMLedgerData& packet, SHAMapAddNode& san) { if (!mHaveHeader) { - JLOG(m_journal.warn()) << "TX node without header"; + JLOG(m_journal.warn()) << "Missing ledger header"; san.incInvalid(); - return false; - } - - if (mHaveTransactions || mFailed) - { - san.incDuplicate(); - return true; + return; } - - auto nodeIDit = nodeIDs.cbegin(); - auto nodeDatait = data.begin(); - TransactionStateSF filter( - mLedger->txMap().family().db(), app_.getLedgerMaster()); - - while (nodeIDit != nodeIDs.cend()) + if (packet.type() == protocol::liTX_NODE) { - if (nodeIDit->isRoot()) + if (mHaveTransactions || mFailed) { - san += mLedger->txMap().addRootNode( - SHAMapHash{mLedger->info().txHash}, - makeSlice(*nodeDatait), - snfWIRE, - &filter); - if (!san.isGood()) - return false; - } - else - { - san += 
mLedger->txMap().addKnownNode( - *nodeIDit, makeSlice(*nodeDatait), &filter); - if (!san.isGood()) - return false; + san.incDuplicate(); + return; } - - ++nodeIDit; - ++nodeDatait; } - - if (!mLedger->txMap().isSynching()) - { - mHaveTransactions = true; - - if (mHaveState) - { - mComplete = true; - done(); - } - } - - return true; -} - -/** Process AS data received from a peer - Call with a lock -*/ -bool -InboundLedger::takeAsNode( - const std::vector& nodeIDs, - const std::vector& data, - SHAMapAddNode& san) -{ - JLOG(m_journal.trace()) - << "got ASdata (" << nodeIDs.size() << ") acquiring ledger " << mHash; - if (nodeIDs.size() == 1) - { - JLOG(m_journal.trace()) << "got AS node: " << nodeIDs.front(); - } - - ScopedLockType sl(mLock); - - if (!mHaveHeader) - { - JLOG(m_journal.warn()) << "Don't have ledger header"; - san.incInvalid(); - return false; - } - - if (mHaveState || mFailed) + else if (mHaveState || mFailed) { san.incDuplicate(); - return true; + return; } - auto nodeIDit = nodeIDs.cbegin(); - auto nodeDatait = data.begin(); - AccountStateSF filter( - mLedger->stateMap().family().db(), app_.getLedgerMaster()); - - while (nodeIDit != nodeIDs.cend()) + auto [map, filter] = + [&]() -> std::pair> { + if (packet.type() == protocol::liTX_NODE) + return { + mLedger->txMap(), + std::make_unique( + mLedger->txMap().family().db(), app_.getLedgerMaster())}; + return { + mLedger->stateMap(), + std::make_unique( + mLedger->stateMap().family().db(), app_.getLedgerMaster())}; + }(); + + try { - if (nodeIDit->isRoot()) + for (auto const& node : packet.nodes()) { - san += mLedger->stateMap().addRootNode( - SHAMapHash{mLedger->info().accountHash}, - makeSlice(*nodeDatait), - snfWIRE, - &filter); - if (!san.isGood()) - { - JLOG(m_journal.warn()) << "Bad ledger header"; - return false; - } - } - else - { - san += mLedger->stateMap().addKnownNode( - *nodeIDit, makeSlice(*nodeDatait), &filter); + SHAMapNodeID const nodeID( + node.nodeid().data(), node.nodeid().size()); + 
if (nodeID.isRoot()) + san += map.addRootNode( + SHAMapHash{mLedger->info().accountHash}, + makeSlice(node.nodedata()), + filter.get()); + else + san += map.addKnownNode( + nodeID, makeSlice(node.nodedata()), filter.get()); + if (!san.isGood()) { - JLOG(m_journal.warn()) << "Unable to add AS node"; - return false; + JLOG(m_journal.warn()) << "Received bad node data"; + return; } } - - ++nodeIDit; - ++nodeDatait; + } + catch (std::exception const& e) + { + JLOG(m_journal.error()) << "Received bad node data: " << e.what(); + san.incInvalid(); + return; } - if (!mLedger->stateMap().isSynching()) + if (!map.isSynching()) { - mHaveState = true; + if (packet.type() == protocol::liTX_NODE) + mHaveTransactions = true; + else + mHaveState = true; - if (mHaveTransactions) + if (mHaveTransactions && mHaveState) { mComplete = true; done(); } } - - return true; } /** Process AS root node received from a peer @@ -1042,7 +1006,7 @@ InboundLedger::takeAsRootNode(Slice const& data, SHAMapAddNode& san) AccountStateSF filter( mLedger->stateMap().family().db(), app_.getLedgerMaster()); san += mLedger->stateMap().addRootNode( - SHAMapHash{mLedger->info().accountHash}, data, snfWIRE, &filter); + SHAMapHash{mLedger->info().accountHash}, data, &filter); return san.isGood(); } @@ -1067,7 +1031,7 @@ InboundLedger::takeTxRootNode(Slice const& data, SHAMapAddNode& san) TransactionStateSF filter( mLedger->txMap().family().db(), app_.getLedgerMaster()); san += mLedger->txMap().addRootNode( - SHAMapHash{mLedger->info().txHash}, data, snfWIRE, &filter); + SHAMapHash{mLedger->info().txHash}, data, &filter); return san.isGood(); } @@ -1156,28 +1120,38 @@ InboundLedger::processData( SHAMapAddNode san; - if (!mHaveHeader) + try { - if (takeHeader(packet.nodes(0).nodedata())) + if (!mHaveHeader) + { + if (!takeHeader(packet.nodes(0).nodedata())) + { + JLOG(m_journal.warn()) << "Got invalid header data"; + peer->charge(Resource::feeInvalidRequest); + return -1; + } + san.incUseful(); - else + } + + if 
(!mHaveState && (packet.nodes().size() > 1) && + !takeAsRootNode(makeSlice(packet.nodes(1).nodedata()), san)) { - JLOG(m_journal.warn()) << "Got invalid header data"; - peer->charge(Resource::feeInvalidRequest); - return -1; + JLOG(m_journal.warn()) << "Included AS root invalid"; } - } - if (!mHaveState && (packet.nodes().size() > 1) && - !takeAsRootNode(makeSlice(packet.nodes(1).nodedata()), san)) - { - JLOG(m_journal.warn()) << "Included AS root invalid"; + if (!mHaveTransactions && (packet.nodes().size() > 2) && + !takeTxRootNode(makeSlice(packet.nodes(2).nodedata()), san)) + { + JLOG(m_journal.warn()) << "Included TX root invalid"; + } } - - if (!mHaveTransactions && (packet.nodes().size() > 2) && - !takeTxRootNode(makeSlice(packet.nodes(2).nodedata()), san)) + catch (std::exception const& ex) { - JLOG(m_journal.warn()) << "Included TX root invalid"; + JLOG(m_journal.warn()) + << "Included AS/TX root invalid: " << ex.what(); + peer->charge(Resource::feeBadData); + return -1; } if (san.isUseful()) @@ -1197,38 +1171,26 @@ InboundLedger::processData( return -1; } - std::vector nodeIDs; - nodeIDs.reserve(packet.nodes().size()); - std::vector nodeData; - nodeData.reserve(packet.nodes().size()); - - for (int i = 0; i < packet.nodes().size(); ++i) + // Verify node IDs and data are complete + for (auto const& node : packet.nodes()) { - const protocol::TMLedgerNode& node = packet.nodes(i); - if (!node.has_nodeid() || !node.has_nodedata()) { JLOG(m_journal.warn()) << "Got bad node"; peer->charge(Resource::feeInvalidRequest); return -1; } - - nodeIDs.push_back( - SHAMapNodeID(node.nodeid().data(), node.nodeid().size())); - nodeData.push_back( - Blob(node.nodedata().begin(), node.nodedata().end())); } SHAMapAddNode san; + receiveNode(packet, san); if (packet.type() == protocol::liTX_NODE) { - takeTxNode(nodeIDs, nodeData, san); JLOG(m_journal.debug()) << "Ledger TX node stats: " << san.get(); } else { - takeAsNode(nodeIDs, nodeData, san); JLOG(m_journal.debug()) << "Ledger 
AS node stats: " << san.get(); } diff --git a/src/ripple/app/ledger/impl/InboundLedgers.cpp b/src/ripple/app/ledger/impl/InboundLedgers.cpp index 6eb80c36edf..91bb735086c 100644 --- a/src/ripple/app/ledger/impl/InboundLedgers.cpp +++ b/src/ripple/app/ledger/impl/InboundLedgers.cpp @@ -247,15 +247,8 @@ class InboundLedgersImp : public InboundLedgers, public Stoppable if (!node.has_nodeid() || !node.has_nodedata()) return; - auto id_string = node.nodeid(); - auto newNode = SHAMapAbstractNode::make( - makeSlice(node.nodedata()), - 0, - snfWIRE, - SHAMapHash{uZero}, - false, - app_.journal("SHAMapNodeID"), - SHAMapNodeID(id_string.data(), id_string.size())); + auto newNode = SHAMapAbstractNode::makeFromWire( + makeSlice(node.nodedata())); if (!newNode) return; @@ -263,10 +256,9 @@ class InboundLedgersImp : public InboundLedgers, public Stoppable s.erase(); newNode->addRaw(s, snfPREFIX); - auto blob = std::make_shared(s.begin(), s.end()); - app_.getLedgerMaster().addFetchPack( - newNode->getNodeHash().as_uint256(), blob); + newNode->getNodeHash().as_uint256(), + std::make_shared(s.begin(), s.end())); } } catch (std::exception const&) diff --git a/src/ripple/app/ledger/impl/InboundTransactions.cpp b/src/ripple/app/ledger/impl/InboundTransactions.cpp index 4487caca69a..b4c2cf734b0 100644 --- a/src/ripple/app/ledger/impl/InboundTransactions.cpp +++ b/src/ripple/app/ledger/impl/InboundTransactions.cpp @@ -75,7 +75,7 @@ class InboundTransactionsImp : public InboundTransactions, public Stoppable , m_gotSet(std::move(gotSet)) { m_zeroSet.mSet = std::make_shared( - SHAMapType::TRANSACTION, uint256(), app_.family()); + SHAMapType::TRANSACTION, uint256(), app_.getNodeFamily()); m_zeroSet.mSet->setUnbacked(); } diff --git a/src/ripple/app/ledger/impl/LedgerMaster.cpp b/src/ripple/app/ledger/impl/LedgerMaster.cpp index 9a8f7dfe382..c72f3349bc1 100644 --- a/src/ripple/app/ledger/impl/LedgerMaster.cpp +++ b/src/ripple/app/ledger/impl/LedgerMaster.cpp @@ -18,6 +18,7 @@ 
//============================================================================== #include +#include #include #include #include @@ -34,6 +35,7 @@ #include #include #include +#include #include #include #include @@ -43,6 +45,7 @@ #include #include #include +#include #include #include #include @@ -269,7 +272,7 @@ LedgerMaster::getValidatedLedgerAge() if (valClose == 0s) { JLOG(m_journal.debug()) << "No validated ledger"; - return weeks{2}; + return NO_VALIDATED_LEDGER_AGE; } std::chrono::seconds ret = app_.timeKeeper().closeTime().time_since_epoch(); @@ -312,14 +315,14 @@ LedgerMaster::setValidLedger(std::shared_ptr const& l) if (!standalone_) { - auto const vals = - app_.getValidations().getTrustedForLedger(l->info().hash); - times.reserve(vals.size()); - for (auto const& val : vals) + auto validations = app_.validators().negativeUNLFilter( + app_.getValidations().getTrustedForLedger(l->info().hash)); + times.reserve(validations.size()); + for (auto const& val : validations) times.push_back(val->getSignTime()); - if (!vals.empty()) - consensusHash = vals.front()->getConsensusHash(); + if (!validations.empty()) + consensusHash = validations.front()->getConsensusHash(); } NetClock::time_point signTime; @@ -357,7 +360,7 @@ LedgerMaster::setValidLedger(std::shared_ptr const& l) "activated: server blocked."; app_.getOPs().setAmendmentBlocked(); } - else if (!app_.getOPs().isAmendmentWarned() || ((l->seq() % 256) == 0)) + else if (!app_.getOPs().isAmendmentWarned() || l->isFlagLedger()) { // Amendments can lose majority, so re-check periodically (every // flag ledger), and clear the flag if appropriate. 
If an unknown @@ -735,7 +738,19 @@ LedgerMaster::tryFill(Job& job, std::shared_ptr ledger) void LedgerMaster::getFetchPack(LedgerIndex missing, InboundLedger::Reason reason) { - auto haveHash{getLedgerHashForHistory(missing + 1, reason)}; + LedgerIndex const ledgerIndex([&]() { + if (reason == InboundLedger::Reason::SHARD) + { + // Do not acquire a ledger sequence greater + // than the last ledger in the shard + auto const shardStore{app_.getShardStore()}; + auto const shardIndex{shardStore->seqToShardIndex(missing)}; + return std::min(missing + 1, shardStore->lastLedgerSeq(shardIndex)); + } + return missing + 1; + }()); + + auto const haveHash{getLedgerHashForHistory(ledgerIndex, reason)}; if (!haveHash || haveHash->isZero()) { if (reason == InboundLedger::Reason::SHARD) @@ -927,8 +942,9 @@ LedgerMaster::checkAccept(uint256 const& hash, std::uint32_t seq) if (seq < mValidLedgerSeq) return; - valCount = app_.getValidations().numTrustedForLedger(hash); - + auto validations = app_.validators().negativeUNLFilter( + app_.getValidations().getTrustedForLedger(hash)); + valCount = validations.size(); if (valCount >= app_.validators().quorum()) { std::lock_guard ml(m_mutex); @@ -992,8 +1008,9 @@ LedgerMaster::checkAccept(std::shared_ptr const& ledger) return; auto const minVal = getNeededValidations(); - auto const tvc = - app_.getValidations().numTrustedForLedger(ledger->info().hash); + auto validations = app_.validators().negativeUNLFilter( + app_.getValidations().getTrustedForLedger(ledger->info().hash)); + auto const tvc = validations.size(); if (tvc < minVal) // nothing we can do { JLOG(m_journal.trace()) @@ -1037,6 +1054,78 @@ LedgerMaster::checkAccept(std::shared_ptr const& ledger) app_.getFeeTrack().setRemoteFee(fee); tryAdvance(); + + if (ledger->seq() % 256 == 0) + { + // Check if the majority of validators run a higher version rippled + // software. If so print a warning. 
+ // + // Once the HardenedValidations amendment is enabled, validators include + // their rippled software version in the validation messages of every + // (flag - 1) ledger. We wait for one ledger time before checking the + // version information to accumulate more validation messages. + + auto currentTime = app_.timeKeeper().now(); + bool needPrint = false; + + // The variable upgradeWarningPrevTime_ will be set when and only when + // the warning is printed. + if (upgradeWarningPrevTime_ == TimeKeeper::time_point()) + { + // Have not printed the warning before, check if need to print. + auto const vals = app_.getValidations().getTrustedForLedger( + ledger->info().parentHash); + std::size_t higherVersionCount = 0; + std::size_t rippledCount = 0; + for (auto const& v : vals) + { + if (v->isFieldPresent(sfServerVersion)) + { + auto version = v->getFieldU64(sfServerVersion); + higherVersionCount += + BuildInfo::isNewerVersion(version) ? 1 : 0; + rippledCount += + BuildInfo::isRippledVersion(version) ? 1 : 0; + } + } + // We report only if (1) we have accumulated validation messages + // from 90% validators from the UNL, (2) 60% of validators + // running the rippled implementation have higher version numbers, + // and (3) the calculation won't cause divide-by-zero. + if (higherVersionCount > 0 && rippledCount > 0) + { + constexpr std::size_t reportingPercent = 90; + constexpr std::size_t cutoffPercent = 60; + auto const unlSize{ + app_.validators().getQuorumKeys().second.size()}; + needPrint = unlSize > 0 && + calculatePercent(vals.size(), unlSize) >= + reportingPercent && + calculatePercent(higherVersionCount, rippledCount) >= + cutoffPercent; + } + } + // To throttle the warning messages, instead of printing a warning + // every flag ledger, we print every week. 
+ else if (currentTime - upgradeWarningPrevTime_ >= weeks{1}) + { + // Printed the warning before, and assuming most validators + // do not downgrade, we keep printing the warning + // until the local server is restarted. + needPrint = true; + } + + if (needPrint) + { + upgradeWarningPrevTime_ = currentTime; + auto const upgradeMsg = + "Check for upgrade: " + "A majority of trusted validators are " + "running a newer version."; + std::cerr << upgradeMsg << std::endl; + JLOG(m_journal.error()) << upgradeMsg; + } + } } /** Report that the consensus process built a particular ledger */ @@ -1076,7 +1165,8 @@ LedgerMaster::consensusBuilt( // This ledger cannot be the new fully-validated ledger, but // maybe we saved up validations for some other ledger that can be - auto const val = app_.getValidations().currentTrusted(); + auto validations = app_.validators().negativeUNLFilter( + app_.getValidations().currentTrusted()); // Track validation counts with sequence numbers class valSeq @@ -1103,7 +1193,7 @@ LedgerMaster::consensusBuilt( // Count the number of current, trusted validations hash_map count; - for (auto const& v : val) + for (auto const& v : validations) { valSeq& vs = count[v->getLedgerHash()]; vs.mergeValidation(v->getFieldU32(sfLedgerSequence)); @@ -1175,11 +1265,11 @@ LedgerMaster::getLedgerHashForHistory( { ret = hashOfSeq(*l, index, m_journal); if (!ret) - ret = walkHashBySeq(index, l); + ret = walkHashBySeq(index, l, reason); } if (!ret) - ret = walkHashBySeq(index); + ret = walkHashBySeq(index, reason); return ret; } @@ -1303,41 +1393,6 @@ LedgerMaster::tryAdvance() } } -// Return the hash of the valid ledger with a particular sequence, given a -// subsequent ledger known valid. 
-boost::optional -LedgerMaster::getLedgerHash( - std::uint32_t desiredSeq, - std::shared_ptr const& knownGoodLedger) -{ - assert(desiredSeq < knownGoodLedger->info().seq); - - auto hash = hashOfSeq(*knownGoodLedger, desiredSeq, m_journal); - - // Not directly in the given ledger - if (!hash) - { - std::uint32_t seq = (desiredSeq + 255) % 256; - assert(seq < desiredSeq); - - hash = hashOfSeq(*knownGoodLedger, seq, m_journal); - if (hash) - { - if (auto l = getLedgerByHash(*hash)) - { - hash = hashOfSeq(*l, desiredSeq, m_journal); - assert(hash); - } - } - else - { - assert(false); - } - } - - return hash; -} - void LedgerMaster::updatePaths(Job& job) { @@ -1547,12 +1602,12 @@ LedgerMaster::getHashBySeq(std::uint32_t index) } boost::optional -LedgerMaster::walkHashBySeq(std::uint32_t index) +LedgerMaster::walkHashBySeq(std::uint32_t index, InboundLedger::Reason reason) { boost::optional ledgerHash; if (auto referenceLedger = mValidLedger.get()) - ledgerHash = walkHashBySeq(index, referenceLedger); + ledgerHash = walkHashBySeq(index, referenceLedger, reason); return ledgerHash; } @@ -1560,7 +1615,8 @@ LedgerMaster::walkHashBySeq(std::uint32_t index) boost::optional LedgerMaster::walkHashBySeq( std::uint32_t index, - std::shared_ptr const& referenceLedger) + std::shared_ptr const& referenceLedger, + InboundLedger::Reason reason) { if (!referenceLedger || (referenceLedger->info().seq < index)) { @@ -1599,7 +1655,7 @@ LedgerMaster::walkHashBySeq( if (!ledger) { if (auto const l = app_.getInboundLedgers().acquire( - *refHash, refIndex, InboundLedger::Reason::GENERIC)) + *refHash, refIndex, reason)) { ledgerHash = hashOfSeq(*l, index, m_journal); assert(ledgerHash); @@ -1948,7 +2004,7 @@ LedgerMaster::doAdvance(std::unique_lock& sl) } void -LedgerMaster::addFetchPack(uint256 const& hash, std::shared_ptr& data) +LedgerMaster::addFetchPack(uint256 const& hash, std::shared_ptr data) { fetch_packs_.canonicalize_replace_client(hash, data); } diff --git 
a/src/ripple/app/ledger/impl/TransactionAcquire.cpp b/src/ripple/app/ledger/impl/TransactionAcquire.cpp index 1608a38f14f..fa71ca4daba 100644 --- a/src/ripple/app/ledger/impl/TransactionAcquire.cpp +++ b/src/ripple/app/ledger/impl/TransactionAcquire.cpp @@ -43,8 +43,8 @@ TransactionAcquire::TransactionAcquire(Application& app, uint256 const& hash) : PeerSet(app, hash, TX_ACQUIRE_TIMEOUT, app.journal("TransactionAcquire")) , mHaveRoot(false) { - mMap = - std::make_shared(SHAMapType::TRANSACTION, hash, app_.family()); + mMap = std::make_shared( + SHAMapType::TRANSACTION, hash, app_.getNodeFamily()); mMap->setUnbacked(); } @@ -212,7 +212,6 @@ TransactionAcquire::takeNodes( else if (!mMap->addRootNode( SHAMapHash{mHash}, makeSlice(*nodeDatait), - snfWIRE, nullptr) .isGood()) { diff --git a/src/ripple/app/main/Application.cpp b/src/ripple/app/main/Application.cpp index 3b37c736963..8c1a47c6f58 100644 --- a/src/ripple/app/main/Application.cpp +++ b/src/ripple/app/main/Application.cpp @@ -29,10 +29,10 @@ #include #include #include +#include #include #include #include -#include #include #include #include @@ -64,8 +64,9 @@ #include #include #include +#include +#include -#include #include #include #include @@ -77,177 +78,6 @@ namespace ripple { -// 204/256 about 80% -static int const MAJORITY_FRACTION(204); - -//------------------------------------------------------------------------------ - -namespace detail { - -class AppFamily : public Family -{ -private: - Application& app_; - TreeNodeCache treecache_; - FullBelowCache fullbelow_; - NodeStore::Database& db_; - bool const shardBacked_; - beast::Journal const j_; - - // missing node handler - LedgerIndex maxSeq = 0; - std::mutex maxSeqLock; - - void - acquire(uint256 const& hash, std::uint32_t seq) - { - if (hash.isNonZero()) - { - auto j = app_.journal("Ledger"); - - JLOG(j.error()) << "Missing node in " << to_string(hash); - - app_.getInboundLedgers().acquire( - hash, - seq, - shardBacked_ ? 
InboundLedger::Reason::SHARD - : InboundLedger::Reason::GENERIC); - } - } - -public: - AppFamily(AppFamily const&) = delete; - AppFamily& - operator=(AppFamily const&) = delete; - - AppFamily( - Application& app, - NodeStore::Database& db, - CollectorManager& collectorManager) - : app_(app) - , treecache_( - "TreeNodeCache", - 65536, - std::chrono::minutes{1}, - stopwatch(), - app.journal("TaggedCache")) - , fullbelow_( - "full_below", - stopwatch(), - collectorManager.collector(), - fullBelowTargetSize, - fullBelowExpiration) - , db_(db) - , shardBacked_(dynamic_cast(&db) != nullptr) - , j_(app.journal("SHAMap")) - { - } - - beast::Journal const& - journal() override - { - return j_; - } - - FullBelowCache& - fullbelow() override - { - return fullbelow_; - } - - FullBelowCache const& - fullbelow() const override - { - return fullbelow_; - } - - TreeNodeCache& - treecache() override - { - return treecache_; - } - - TreeNodeCache const& - treecache() const override - { - return treecache_; - } - - NodeStore::Database& - db() override - { - return db_; - } - - NodeStore::Database const& - db() const override - { - return db_; - } - - bool - isShardBacked() const override - { - return shardBacked_; - } - - void - missing_node(std::uint32_t seq) override - { - auto j = app_.journal("Ledger"); - - JLOG(j.error()) << "Missing node in " << seq; - - // prevent recursive invocation - std::unique_lock lock(maxSeqLock); - - if (maxSeq == 0) - { - maxSeq = seq; - - do - { - // Try to acquire the most recent missing ledger - seq = maxSeq; - - lock.unlock(); - - // This can invoke the missing node handler - acquire(app_.getLedgerMaster().getHashBySeq(seq), seq); - - lock.lock(); - } while (maxSeq != seq); - } - else if (maxSeq < seq) - { - // We found a more recent ledger with a - // missing node - maxSeq = seq; - } - } - - void - missing_node(uint256 const& hash, std::uint32_t seq) override - { - acquire(hash, seq); - } - - void - reset() override - { - { - std::lock_guard 
lock(maxSeqLock); - maxSeq = 0; - } - fullbelow_.reset(); - treecache_.reset(); - } -}; - -} // namespace detail - -//------------------------------------------------------------------------------ - // VFALCO TODO Move the function definitions into the class declaration class ApplicationImp : public Application, public RootStoppable, public BasicApp { @@ -346,9 +176,9 @@ class ApplicationImp : public Application, public RootStoppable, public BasicApp // These are Stoppable-related std::unique_ptr m_jobQueue; std::unique_ptr m_nodeStore; - detail::AppFamily family_; + NodeFamily nodeFamily_; std::unique_ptr shardStore_; - std::unique_ptr shardFamily_; + std::unique_ptr shardFamily_; std::unique_ptr shardArchiveHandler_; // VFALCO TODO Make OrderBookDB abstract OrderBookDB m_orderBookDB; @@ -479,7 +309,7 @@ class ApplicationImp : public Application, public RootStoppable, public BasicApp , m_nodeStore(m_shaMapStore->makeNodeStore("NodeStore.main", 4)) - , family_(*this, *m_nodeStore, *m_collectorManager) + , nodeFamily_(*this, *m_collectorManager) // The shard store is optional and make_ShardStore can return null. , shardStore_(make_ShardStore( @@ -674,13 +504,15 @@ class ApplicationImp : public Application, public RootStoppable, public BasicApp } Family& - family() override + getNodeFamily() override { - return family_; + return nodeFamily_; } + // The shard store is an optional feature. If the sever is configured for + // shards, this function will return a valid pointer, otherwise a nullptr. Family* - shardFamily() override + getShardFamily() override { return shardFamily_.get(); } @@ -782,6 +614,8 @@ class ApplicationImp : public Application, public RootStoppable, public BasicApp return *m_nodeStore; } + // The shard store is an optional feature. If the sever is configured for + // shards, this function will return a valid pointer, otherwise a nullptr. 
NodeStore::DatabaseShard* getShardStore() override { @@ -1022,7 +856,7 @@ class ApplicationImp : public Application, public RootStoppable, public BasicApp try { - auto const setup = setup_DatabaseCon(*config_); + auto setup = setup_DatabaseCon(*config_, m_journal); // transaction database mTxnDB = std::make_unique( @@ -1072,6 +906,7 @@ class ApplicationImp : public Application, public RootStoppable, public BasicApp mLedgerDB->setupCheckpointing(m_jobQueue.get(), logs()); // wallet database + setup.useGlobalPragma = false; mWalletDB = std::make_unique( setup, WalletDBName, @@ -1129,11 +964,6 @@ class ApplicationImp : public Application, public RootStoppable, public BasicApp config_->getValueFor(SizedItem::ledgerSize), seconds{config_->getValueFor(SizedItem::ledgerAge)}); - family().treecache().setTargetSize( - config_->getValueFor(SizedItem::treeCacheSize)); - family().treecache().setTargetAge( - seconds{config_->getValueFor(SizedItem::treeCacheAge)}); - return true; } @@ -1363,7 +1193,7 @@ class ApplicationImp : public Application, public RootStoppable, public BasicApp JLOG(m_journal.fatal()) << "Free SQLite space for transaction db is less than " "512MB. To fix this, rippled must be executed with the " - "vacuum parameter before restarting. " + "\"--vacuum\" parameter before restarting. " "Note that this activity can take multiple days, " "depending on database size."; signalStop(); @@ -1374,9 +1204,9 @@ class ApplicationImp : public Application, public RootStoppable, public BasicApp // VFALCO TODO fix the dependency inversion using an observer, // have listeners register for "onSweep ()" notification. 
- family().fullbelow().sweep(); + nodeFamily_.sweep(); if (shardFamily_) - shardFamily_->fullbelow().sweep(); + shardFamily_->sweep(); getMasterTransaction().sweep(); getNodeStore().sweep(); if (shardStore_) @@ -1386,9 +1216,6 @@ class ApplicationImp : public Application, public RootStoppable, public BasicApp getValidations().expire(); getInboundLedgers().sweep(); m_acceptedLedgerCache.sweep(); - family().treecache().sweep(); - if (shardFamily_) - shardFamily_->treecache().sweep(); cachedSLEs_.expire(); // Set timer to do another sweep later. @@ -1493,14 +1320,8 @@ ApplicationImp::setup() if (shardStore_) { - shardFamily_ = std::make_unique( - *this, *shardStore_, *m_collectorManager); - - using namespace std::chrono; - shardFamily_->treecache().setTargetSize( - config_->getValueFor(SizedItem::treeCacheSize)); - shardFamily_->treecache().setTargetAge( - seconds{config_->getValueFor(SizedItem::treeCacheAge)}); + shardFamily_ = + std::make_unique(*this, *m_collectorManager); if (!shardStore_->init()) return false; @@ -1533,8 +1354,7 @@ ApplicationImp::setup() Section enabledAmendments = config_->section(SECTION_AMENDMENTS); m_amendmentTable = make_AmendmentTable( - weeks{2}, - MAJORITY_FRACTION, + config().AMENDMENT_MAJORITY_TIME, supportedAmendments, enabledAmendments, config_->section(SECTION_VETO_AMENDMENTS), @@ -1759,9 +1579,11 @@ ApplicationImp::setup() getOPs(), getLedgerMaster(), c, - Role::ADMIN}, - jvCommand, - RPC::ApiMaximumSupportedVersion}; + Role::ADMIN, + {}, + {}, + RPC::ApiMaximumSupportedVersion}, + jvCommand}; Json::Value jvResult; RPC::doCommand(context, jvResult); @@ -1907,7 +1729,7 @@ ApplicationImp::startGenesisLedger() : std::vector{}; std::shared_ptr const genesis = std::make_shared( - create_genesis, *config_, initialAmendments, family()); + create_genesis, *config_, initialAmendments, nodeFamily_); m_ledgerMaster->storeLedger(genesis); auto const next = @@ -2038,7 +1860,7 @@ ApplicationImp::loadLedgerFromFile(std::string const& name) } auto 
loadLedger = - std::make_shared(seq, closeTime, *config_, family()); + std::make_shared(seq, closeTime, *config_, nodeFamily_); loadLedger->setTotalDrops(totalDrops); for (Json::UInt index = 0; index < ledger.get().size(); ++index) diff --git a/src/ripple/app/main/Application.h b/src/ripple/app/main/Application.h index 71746cdbd8a..f2c33aa2adc 100644 --- a/src/ripple/app/main/Application.h +++ b/src/ripple/app/main/Application.h @@ -147,9 +147,9 @@ class Application : public beast::PropertyStream::Source virtual CollectorManager& getCollectorManager() = 0; virtual Family& - family() = 0; + getNodeFamily() = 0; virtual Family* - shardFamily() = 0; + getShardFamily() = 0; virtual TimeKeeper& timeKeeper() = 0; virtual JobQueue& diff --git a/src/ripple/app/main/DBInit.h b/src/ripple/app/main/DBInit.h index 2aa183d4e70..0a561be8834 100644 --- a/src/ripple/app/main/DBInit.h +++ b/src/ripple/app/main/DBInit.h @@ -26,13 +26,23 @@ namespace ripple { //////////////////////////////////////////////////////////////////////////////// +// These pragmas are built at startup and applied to all database +// connections, unless otherwise noted. +inline constexpr char const* CommonDBPragmaJournal{"PRAGMA journal_mode=%s;"}; +inline constexpr char const* CommonDBPragmaSync{"PRAGMA synchronous=%s;"}; +inline constexpr char const* CommonDBPragmaTemp{"PRAGMA temp_store=%s;"}; +// A warning will be logged if any lower-safety sqlite tuning settings +// are used and at least this much ledger history is configured. This +// includes full history nodes. This is because such a large amount of +// data will be more difficult to recover if a rare failure occurs, +// which are more likely with some of the other available tuning settings. 
+inline constexpr std::uint32_t SQLITE_TUNING_CUTOFF = 10'000'000; + // Ledger database holds ledgers and ledger confirmations inline constexpr auto LgrDBName{"ledger.db"}; -inline constexpr std::array LgrDBPragma{ - {"PRAGMA synchronous=NORMAL;", - "PRAGMA journal_mode=WAL;", - "PRAGMA journal_size_limit=1582080;"}}; +inline constexpr std::array LgrDBPragma{ + {"PRAGMA journal_size_limit=1582080;"}}; inline constexpr std::array LgrDBInit{ {"BEGIN TRANSACTION;", @@ -61,22 +71,13 @@ inline constexpr std::array LgrDBInit{ // Transaction database holds transactions and public keys inline constexpr auto TxDBName{"transaction.db"}; -inline constexpr -#if (ULONG_MAX > UINT_MAX) && !defined(NO_SQLITE_MMAP) - std::array - TxDBPragma +inline constexpr std::array TxDBPragma { - { -#else - std::array TxDBPragma {{ -#endif - "PRAGMA page_size=4096;", "PRAGMA synchronous=NORMAL;", - "PRAGMA journal_mode=WAL;", "PRAGMA journal_size_limit=1582080;", - "PRAGMA max_page_count=2147483646;", + "PRAGMA page_size=4096;", "PRAGMA journal_size_limit=1582080;", + "PRAGMA max_page_count=2147483646;", #if (ULONG_MAX > UINT_MAX) && !defined(NO_SQLITE_MMAP) - "PRAGMA mmap_size=17179869184;" + "PRAGMA mmap_size=17179869184;" #endif - } }; inline constexpr std::array TxDBInit{ @@ -115,10 +116,8 @@ inline constexpr std::array TxDBInit{ // Temporary database used with an incomplete shard that is being acquired inline constexpr auto AcquireShardDBName{"acquire.db"}; -inline constexpr std::array AcquireShardDBPragma{ - {"PRAGMA synchronous=NORMAL;", - "PRAGMA journal_mode=WAL;", - "PRAGMA journal_size_limit=1582080;"}}; +inline constexpr std::array AcquireShardDBPragma{ + {"PRAGMA journal_size_limit=1582080;"}}; inline constexpr std::array AcquireShardDBInit{ {"CREATE TABLE IF NOT EXISTS Shard ( \ @@ -130,6 +129,7 @@ inline constexpr std::array AcquireShardDBInit{ //////////////////////////////////////////////////////////////////////////////// // Pragma for Ledger and Transaction databases with 
complete shards +// These override the CommonDBPragma values defined above. inline constexpr std::array CompleteShardDBPragma{ {"PRAGMA synchronous=OFF;", "PRAGMA journal_mode=OFF;"}}; @@ -172,6 +172,7 @@ inline constexpr std::array WalletDBInit{ static constexpr auto stateDBName{"state.db"}; +// These override the CommonDBPragma values defined above. static constexpr std::array DownloaderDBPragma{ {"PRAGMA synchronous=FULL;", "PRAGMA journal_mode=DELETE;"}}; diff --git a/src/ripple/app/main/GRPCServer.cpp b/src/ripple/app/main/GRPCServer.cpp index acf33784f38..007e5f0c499 100644 --- a/src/ripple/app/main/GRPCServer.cpp +++ b/src/ripple/app/main/GRPCServer.cpp @@ -142,7 +142,8 @@ GRPCServerImpl::CallData::process( usage, role, coro, - InfoSub::pointer()}, + InfoSub::pointer(), + apiVersion}, request_}; // Make sure we can currently handle the rpc diff --git a/src/ripple/app/main/GRPCServer.h b/src/ripple/app/main/GRPCServer.h index 5175e2e256d..bb06784c24f 100644 --- a/src/ripple/app/main/GRPCServer.h +++ b/src/ripple/app/main/GRPCServer.h @@ -105,6 +105,8 @@ class GRPCServerImpl final template using Handler = std::function( RPC::GRPCContext&)>; + // This implementation is currently limited to v1 of the API + static unsigned constexpr apiVersion = 1; public: explicit GRPCServerImpl(Application& app); diff --git a/src/ripple/app/main/Main.cpp b/src/ripple/app/main/Main.cpp index ccc3f2c773e..e8ed917587f 100644 --- a/src/ripple/app/main/Main.cpp +++ b/src/ripple/app/main/Main.cpp @@ -354,10 +354,7 @@ run(int argc, char** argv) "nodetoshard", "Import node store into shards")( "replay", "Replay a ledger close.")( "start", "Start from a fresh Ledger.")( - "vacuum", - po::value(), - "VACUUM the transaction db. 
Mandatory string argument specifies " - "temporary directory path.")( + "vacuum", "VACUUM the transaction db.")( "valid", "Consider the initial ledger a valid network ledger.")( "validateShards", shardsText.c_str()); @@ -520,24 +517,22 @@ run(int argc, char** argv) } using namespace boost::filesystem; - DatabaseCon::Setup dbSetup = setup_DatabaseCon(*config); + DatabaseCon::Setup const dbSetup = setup_DatabaseCon(*config); path dbPath = dbSetup.dataDir / TxDBName; - path tmpPath = vm["vacuum"].as(); try { uintmax_t const dbSize = file_size(dbPath); assert(dbSize != static_cast(-1)); - if (space(tmpPath).available < dbSize) + if (auto available = space(dbPath.parent_path()).available; + available < dbSize) { - std::cerr << "A valid directory for vacuuming must be " - "specified on a filesystem with at least " - "as much free space as the size of " + std::cerr << "The database filesystem must have at least as " + "much free space as the size of " << dbPath.string() << ", which is " << dbSize - << " bytes. The filesystem for " << tmpPath.string() - << " only has " << space(tmpPath).available - << " bytes.\n"; + << " bytes. Only " << available + << " bytes are available.\n"; return -1; } @@ -546,16 +541,19 @@ run(int argc, char** argv) auto& session = txnDB->getSession(); std::uint32_t pageSize; + // Only the most trivial databases will fit in memory on typical + // (recommended) software. Force temp files to be written to disk + // regardless of the config settings. + session << boost::format(CommonDBPragmaTemp) % "file"; session << "PRAGMA page_size;", soci::into(pageSize); std::cout << "VACUUM beginning. 
page_size: " << pageSize << std::endl; - session << "PRAGMA journal_mode=OFF;"; - session << "PRAGMA temp_store_directory=\"" << tmpPath.string() - << "\";"; session << "VACUUM;"; - session << "PRAGMA journal_mode=WAL;"; + assert(dbSetup.globalPragma); + for (auto const& p : *dbSetup.globalPragma) + session << p; session << "PRAGMA page_size;", soci::into(pageSize); std::cout << "VACUUM finished. page_size: " << pageSize diff --git a/src/ripple/app/misc/AmendmentTable.h b/src/ripple/app/misc/AmendmentTable.h index 0ac55858074..bcd21f763b5 100644 --- a/src/ripple/app/misc/AmendmentTable.h +++ b/src/ripple/app/misc/AmendmentTable.h @@ -99,6 +99,7 @@ class AmendmentTable // inject pseudo-transactions virtual std::map doVoting( + Rules const& rules, NetClock::time_point closeTime, std::set const& enabledAmendments, majorityAmendments_t const& majorityAmendments, @@ -130,6 +131,7 @@ class AmendmentTable { // Ask implementation what to do auto actions = doVoting( + lastClosedLedger->rules(), lastClosedLedger->parentCloseTime(), getEnabledAmendments(*lastClosedLedger), getMajorityAmendments(*lastClosedLedger), @@ -164,7 +166,6 @@ class AmendmentTable std::unique_ptr make_AmendmentTable( std::chrono::seconds majorityTime, - int majorityFraction, Section const& supported, Section const& enabled, Section const& vetoed, diff --git a/src/ripple/app/misc/FeeVoteImpl.cpp b/src/ripple/app/misc/FeeVoteImpl.cpp index 873c488754c..e2dc2e40712 100644 --- a/src/ripple/app/misc/FeeVoteImpl.cpp +++ b/src/ripple/app/misc/FeeVoteImpl.cpp @@ -17,6 +17,7 @@ */ //============================================================================== +#include #include #include #include @@ -150,7 +151,7 @@ FeeVoteImpl::doVoting( std::shared_ptr const& initialPosition) { // LCL must be flag ledger - assert((lastClosedLedger->info().seq % 256) == 0); + assert(isFlagLedger(lastClosedLedger->seq())); detail::VotableValue baseFeeVote( lastClosedLedger->fees().base, target_.reference_fee); diff --git 
a/src/ripple/app/misc/NegativeUNLVote.cpp b/src/ripple/app/misc/NegativeUNLVote.cpp new file mode 100644 index 00000000000..f4f09b3d132 --- /dev/null +++ b/src/ripple/app/misc/NegativeUNLVote.cpp @@ -0,0 +1,350 @@ +//------------------------------------------------------------------------------ +/* + This file is part of rippled: https://github.com/ripple/rippled + Copyright (c) 2020 Ripple Labs Inc. + + Permission to use, copy, modify, and/or distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. +*/ +//============================================================================== + +#include +#include +#include + +namespace ripple { + +NegativeUNLVote::NegativeUNLVote(NodeID const& myId, beast::Journal j) + : myId_(myId), j_(j) +{ +} + +void +NegativeUNLVote::doVoting( + std::shared_ptr const& prevLedger, + hash_set const& unlKeys, + RCLValidations& validations, + std::shared_ptr const& initialSet) +{ + // Voting steps: + // -- build a reliability score table of validators + // -- process the table and find all candidates to disable or to re-enable + // -- pick one to disable and one to re-enable if any + // -- if found candidates, add ttUNL_MODIFY Tx + + // Build NodeID set for internal use. + // Build NodeID to PublicKey map for lookup before creating ttUNL_MODIFY Tx. 
+ hash_set unlNodeIDs; + hash_map nidToKeyMap; + for (auto const& k : unlKeys) + { + auto nid = calcNodeID(k); + nidToKeyMap.emplace(nid, k); + unlNodeIDs.emplace(nid); + } + + // Build a reliability score table of validators + if (std::optional> scoreTable = + buildScoreTable(prevLedger, unlNodeIDs, validations)) + { + // build next negUnl + auto negUnlKeys = prevLedger->negativeUnl(); + auto negUnlToDisable = prevLedger->negativeUnlToDisable(); + auto negUnlToReEnable = prevLedger->negativeUnlToReEnable(); + if (negUnlToDisable) + negUnlKeys.insert(*negUnlToDisable); + if (negUnlToReEnable) + negUnlKeys.erase(*negUnlToReEnable); + + hash_set negUnlNodeIDs; + for (auto const& k : negUnlKeys) + { + auto nid = calcNodeID(k); + negUnlNodeIDs.emplace(nid); + if (!nidToKeyMap.count(nid)) + { + nidToKeyMap.emplace(nid, k); + } + } + + auto const seq = prevLedger->info().seq + 1; + purgeNewValidators(seq); + + // Process the table and find all candidates to disable or to re-enable + auto const candidates = + findAllCandidates(unlNodeIDs, negUnlNodeIDs, *scoreTable); + + // Pick one to disable and one to re-enable if any, add ttUNL_MODIFY Tx + if (!candidates.toDisableCandidates.empty()) + { + auto n = + choose(prevLedger->info().hash, candidates.toDisableCandidates); + assert(nidToKeyMap.count(n)); + addTx(seq, nidToKeyMap[n], ToDisable, initialSet); + } + + if (!candidates.toReEnableCandidates.empty()) + { + auto n = choose( + prevLedger->info().hash, candidates.toReEnableCandidates); + assert(nidToKeyMap.count(n)); + addTx(seq, nidToKeyMap[n], ToReEnable, initialSet); + } + } +} + +void +NegativeUNLVote::addTx( + LedgerIndex seq, + PublicKey const& vp, + NegativeUNLModify modify, + std::shared_ptr const& initialSet) +{ + STTx negUnlTx(ttUNL_MODIFY, [&](auto& obj) { + obj.setFieldU8(sfUNLModifyDisabling, modify == ToDisable ? 
1 : 0); + obj.setFieldU32(sfLedgerSequence, seq); + obj.setFieldVL(sfUNLModifyValidator, vp.slice()); + }); + + uint256 txID = negUnlTx.getTransactionID(); + Serializer s; + negUnlTx.add(s); + if (!initialSet->addGiveItem( + std::make_shared(txID, s.peekData()), true, false)) + { + JLOG(j_.warn()) << "N-UNL: ledger seq=" << seq + << ", add ttUNL_MODIFY tx failed"; + } + else + { + JLOG(j_.debug()) << "N-UNL: ledger seq=" << seq + << ", add a ttUNL_MODIFY Tx with txID: " << txID + << ", the validator to " + << (modify == ToDisable ? "disable: " : "re-enable: ") + << vp; + } +} + +NodeID +NegativeUNLVote::choose( + uint256 const& randomPadData, + std::vector const& candidates) +{ + assert(!candidates.empty()); + static_assert(NodeID::bytes <= uint256::bytes); + NodeID randomPad = NodeID::fromVoid(randomPadData.data()); + NodeID txNodeID = candidates[0]; + for (int j = 1; j < candidates.size(); ++j) + { + if ((candidates[j] ^ randomPad) < (txNodeID ^ randomPad)) + { + txNodeID = candidates[j]; + } + } + return txNodeID; +} + +std::optional> +NegativeUNLVote::buildScoreTable( + std::shared_ptr const& prevLedger, + hash_set const& unl, + RCLValidations& validations) +{ + // Find agreed validation messages received for + // the last FLAG_LEDGER_INTERVAL (i.e. 256) ledgers, + // for every validator, and fill the score table. + + // Ask the validation container to keep enough validation message history + // for next time. + auto const seq = prevLedger->info().seq + 1; + validations.setSeqToKeep(seq - 1); + + // Find FLAG_LEDGER_INTERVAL (i.e. 
256) previous ledger hashes + auto const hashIndex = prevLedger->read(keylet::skip()); + if (!hashIndex || !hashIndex->isFieldPresent(sfHashes)) + { + JLOG(j_.debug()) << "N-UNL: ledger " << seq << " no history."; + return {}; + } + auto const ledgerAncestors = hashIndex->getFieldV256(sfHashes).value(); + auto const numAncestors = ledgerAncestors.size(); + if (numAncestors < FLAG_LEDGER_INTERVAL) + { + JLOG(j_.debug()) << "N-UNL: ledger " << seq + << " not enough history. Can trace back only " + << numAncestors << " ledgers."; + return {}; + } + + // have enough ledger ancestors, build the score table + hash_map scoreTable; + for (auto const& k : unl) + { + scoreTable[k] = 0; + } + + // Query the validation container for every ledger hash and fill + // the score table. + for (int i = 0; i < FLAG_LEDGER_INTERVAL; ++i) + { + for (auto const& v : validations.getTrustedForLedger( + ledgerAncestors[numAncestors - 1 - i])) + { + if (scoreTable.count(v->getNodeID())) + ++scoreTable[v->getNodeID()]; + } + } + + // Return false if the validation message history or local node's + // participation in the history is not good. + auto const myValidationCount = [&]() -> std::uint32_t { + if (auto const it = scoreTable.find(myId_); it != scoreTable.end()) + return it->second; + return 0; + }(); + if (myValidationCount < negativeUnlMinLocalValsToVote) + { + JLOG(j_.debug()) << "N-UNL: ledger " << seq + << ". Local node only issued " << myValidationCount + << " validations in last " << FLAG_LEDGER_INTERVAL + << " ledgers." + << " The reliability measurement could be wrong."; + return {}; + } + else if ( + myValidationCount > negativeUnlMinLocalValsToVote && + myValidationCount <= FLAG_LEDGER_INTERVAL) + { + return scoreTable; + } + else + { + // cannot happen because validations.getTrustedForLedger does not + // return multiple validations of the same ledger from a validator. + JLOG(j_.error()) << "N-UNL: ledger " << seq << ". 
Local node issued " + << myValidationCount << " validations in last " + << FLAG_LEDGER_INTERVAL << " ledgers. Too many!"; + return {}; + } +} + +NegativeUNLVote::Candidates const +NegativeUNLVote::findAllCandidates( + hash_set const& unl, + hash_set const& negUnl, + hash_map const& scoreTable) +{ + // Compute if need to find more validators to disable + auto const canAdd = [&]() -> bool { + auto const maxNegativeListed = static_cast( + std::ceil(unl.size() * negativeUnlMaxListed)); + std::size_t negativeListed = 0; + for (auto const& n : unl) + { + if (negUnl.count(n)) + ++negativeListed; + } + bool const result = negativeListed < maxNegativeListed; + JLOG(j_.trace()) << "N-UNL: nodeId " << myId_ << " lowWaterMark " + << negativeUnlLowWaterMark << " highWaterMark " + << negativeUnlHighWaterMark << " canAdd " << result + << " negativeListed " << negativeListed + << " maxNegativeListed " << maxNegativeListed; + return result; + }(); + + Candidates candidates; + for (auto const& [nodeId, score] : scoreTable) + { + JLOG(j_.trace()) << "N-UNL: node " << nodeId << " score " << score; + + // Find toDisable Candidates: check if + // (1) canAdd, + // (2) has less than negativeUnlLowWaterMark validations, + // (3) is not in negUnl, and + // (4) is not a new validator. + if (canAdd && score < negativeUnlLowWaterMark && + !negUnl.count(nodeId) && !newValidators_.count(nodeId)) + { + JLOG(j_.trace()) << "N-UNL: toDisable candidate " << nodeId; + candidates.toDisableCandidates.push_back(nodeId); + } + + // Find toReEnable Candidates: check if + // (1) has more than negativeUnlHighWaterMark validations, + // (2) is in negUnl + if (score > negativeUnlHighWaterMark && negUnl.count(nodeId)) + { + JLOG(j_.trace()) << "N-UNL: toReEnable candidate " << nodeId; + candidates.toReEnableCandidates.push_back(nodeId); + } + } + + // If a negative UNL validator is removed from nodes' UNLs, it is no longer + // a validator. It should be removed from the negative UNL too. 
+ // Note that even if it is still offline and in minority nodes' UNLs, it + // will not be re-added to the negative UNL. Because the UNLModify Tx will + // not be included in the agreed TxSet of a ledger. + // + // Find this kind of toReEnable Candidate if did not find any toReEnable + // candidate yet: check if + // (1) is in negUnl + // (2) is not in unl. + if (candidates.toReEnableCandidates.empty()) + { + for (auto const& n : negUnl) + { + if (!unl.count(n)) + { + candidates.toReEnableCandidates.push_back(n); + } + } + } + return candidates; +} + +void +NegativeUNLVote::newValidators( + LedgerIndex seq, + hash_set const& nowTrusted) +{ + std::lock_guard lock(mutex_); + for (auto const& n : nowTrusted) + { + if (newValidators_.find(n) == newValidators_.end()) + { + JLOG(j_.trace()) << "N-UNL: add a new validator " << n + << " at ledger seq=" << seq; + newValidators_[n] = seq; + } + } +} + +void +NegativeUNLVote::purgeNewValidators(LedgerIndex seq) +{ + std::lock_guard lock(mutex_); + auto i = newValidators_.begin(); + while (i != newValidators_.end()) + { + if (seq - i->second > newValidatorDisableSkip) + { + i = newValidators_.erase(i); + } + else + { + ++i; + } + } +} + +} // namespace ripple diff --git a/src/ripple/app/misc/NegativeUNLVote.h b/src/ripple/app/misc/NegativeUNLVote.h new file mode 100644 index 00000000000..da7bc5392b2 --- /dev/null +++ b/src/ripple/app/misc/NegativeUNLVote.h @@ -0,0 +1,217 @@ +//------------------------------------------------------------------------------ +/* + This file is part of rippled: https://github.com/ripple/rippled + Copyright (c) 2020 Ripple Labs Inc. + + Permission to use, copy, modify, and/or distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. 
+ + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. +*/ +//============================================================================== + +#ifndef RIPPLE_APP_MISC_NEGATIVEUNLVOTE_H_INCLUDED +#define RIPPLE_APP_MISC_NEGATIVEUNLVOTE_H_INCLUDED + +#include +#include +#include +#include +#include + +#include + +namespace ripple { + +template +class Validations; +class RCLValidationsAdaptor; +using RCLValidations = Validations; +class SHAMap; +namespace test { +class NegativeUNLVoteInternal_test; +class NegativeUNLVoteScoreTable_test; +} // namespace test + +/** + * Manager to create NegativeUNL votes. + */ +class NegativeUNLVote final +{ +public: + /** + * A validator is considered unreliable if its validations is less than + * negativeUnlLowWaterMark in the last flag ledger period. + * An unreliable validator is a candidate to be disabled by the NegativeUNL + * protocol. + */ + static constexpr size_t negativeUnlLowWaterMark = + FLAG_LEDGER_INTERVAL * 50 / 100; + /** + * An unreliable validator must have more than negativeUnlHighWaterMark + * validations in the last flag ledger period to be re-enabled. + */ + static constexpr size_t negativeUnlHighWaterMark = + FLAG_LEDGER_INTERVAL * 80 / 100; + /** + * The minimum number of validations of the local node for it to + * participate in the voting. + */ + static constexpr size_t negativeUnlMinLocalValsToVote = + FLAG_LEDGER_INTERVAL * 90 / 100; + /** + * We don't want to disable new validators immediately after adding them. 
+ * So we skip voting for disabling them for 2 flag ledgers. + */ + static constexpr size_t newValidatorDisableSkip = FLAG_LEDGER_INTERVAL * 2; + /** + * We only want to put 25% of the UNL on the NegativeUNL. + */ + static constexpr float negativeUnlMaxListed = 0.25; + + /** + * A flag indicating whether a UNLModify Tx is to disable or to re-enable + * a validator. + */ + enum NegativeUNLModify { + ToDisable, // UNLModify Tx is to disable a validator + ToReEnable // UNLModify Tx is to re-enable a validator + }; + + /** + * Constructor + * + * @param myId the NodeID of the local node + * @param j log + */ + NegativeUNLVote(NodeID const& myId, beast::Journal j); + ~NegativeUNLVote() = default; + + /** + * Cast our local vote on the NegativeUNL candidates. + * + * @param prevLedger the parent ledger + * @param unlKeys the trusted master keys of validators in the UNL + * @param validations the validation message container + * @note validations is an in/out parameter. It contains validation messages + * that will be deleted when no longer needed by other consensus logic. This + * function asks it to keep the validation messages long enough for this + * function to use. + * @param initialSet the transactions set for adding ttUNL_MODIFY Tx if any + */ + void + doVoting( + std::shared_ptr const& prevLedger, + hash_set const& unlKeys, + RCLValidations& validations, + std::shared_ptr const& initialSet); + + /** + * Notify NegativeUNLVote that new validators are added. + * So that they don't get voted to the NegativeUNL immediately. 
+ * + * @param seq the current LedgerIndex when adding the new validators + * @param nowTrusted the new validators + */ + void + newValidators(LedgerIndex seq, hash_set const& nowTrusted); + +private: + NodeID const myId_; + beast::Journal j_; + mutable std::mutex mutex_; + hash_map newValidators_; + + /** + * UNLModify Tx candidates + */ + struct Candidates + { + std::vector toDisableCandidates; + std::vector toReEnableCandidates; + }; + + /** + * Add a ttUNL_MODIFY Tx to the transaction set. + * + * @param seq the LedgerIndex when adding the Tx + * @param vp the master public key of the validator + * @param modify disabling or re-enabling the validator + * @param initialSet the transaction set + */ + void + addTx( + LedgerIndex seq, + PublicKey const& vp, + NegativeUNLModify modify, + std::shared_ptr const& initialSet); + + /** + * Pick one candidate from a vector of candidates. + * + * @param randomPadData the data used for picking a candidate. + * @note Nodes must use the same randomPadData for picking the same + * candidate. The hash of the parent ledger is used. + * @param candidates the vector of candidates + * @return the picked candidate + */ + NodeID + choose(uint256 const& randomPadData, std::vector const& candidates); + + /** + * Build a reliability measurement score table of validators' validation + * messages in the last flag ledger period. + * + * @param prevLedger the parent ledger + * @param unl the trusted master keys + * @param validations the validation container + * @note validations is an in/out parameter. It contains validation messages + * that will be deleted when no longer needed by other consensus logic. This + * function asks it to keep the validation messages long enough for this + * function to use. 
+ * @return the built scoreTable or empty optional if table could not be + * built + */ + std::optional> + buildScoreTable( + std::shared_ptr const& prevLedger, + hash_set const& unl, + RCLValidations& validations); + + /** + * Process the score table and find all disabling and re-enabling + * candidates. + * + * @param unl the trusted master keys + * @param negUnl the NegativeUNL + * @param scoreTable the score table + * @return the candidates to disable and the candidates to re-enable + */ + Candidates const + findAllCandidates( + hash_set const& unl, + hash_set const& negUnl, + hash_map const& scoreTable); + + /** + * Purge validators that are not new anymore. + * + * @param seq the current LedgerIndex + */ + void + purgeNewValidators(LedgerIndex seq); + + friend class test::NegativeUNLVoteInternal_test; + friend class test::NegativeUNLVoteScoreTable_test; +}; + +} // namespace ripple + +#endif diff --git a/src/ripple/app/misc/NetworkOPs.cpp b/src/ripple/app/misc/NetworkOPs.cpp index 66d01b791e0..6a19cc66bbb 100644 --- a/src/ripple/app/misc/NetworkOPs.cpp +++ b/src/ripple/app/misc/NetworkOPs.cpp @@ -56,6 +56,7 @@ #include #include #include +#include #include #include #include @@ -1749,6 +1750,8 @@ NetworkOPsImp::beginConsensus(uint256 const& networkClosed) closingInfo.parentHash == m_ledgerMaster.getClosedLedger()->info().hash); + if (prevLedger->rules().enabled(featureNegativeUNL)) + app_.validators().setNegativeUnl(prevLedger->negativeUnl()); TrustChanges const changes = app_.validators().updateTrusted( app_.getValidations().getCurrentNodeIDs()); @@ -1759,7 +1762,8 @@ NetworkOPsImp::beginConsensus(uint256 const& networkClosed) app_.timeKeeper().closeTime(), networkClosed, prevLedger, - changes.removed); + changes.removed, + changes.added); const ConsensusPhase currPhase = mConsensus.phase(); if (mLastConsensusPhase != currPhase) @@ -2757,16 +2761,24 @@ NetworkOPsImp::getServerInfo(bool human, bool admin, bool counters) if (std::abs(closeOffset.count()) >= 60) 
l[jss::close_time_offset] = closeOffset.count(); - auto lCloseTime = lpClosed->info().closeTime; - auto closeTime = app_.timeKeeper().closeTime(); - if (lCloseTime <= closeTime) + constexpr std::chrono::seconds highAgeThreshold{1000000}; + if (m_ledgerMaster.haveValidated()) { - using namespace std::chrono_literals; - auto age = closeTime - lCloseTime; - if (age < 1000000s) - l[jss::age] = Json::UInt(age.count()); - else - l[jss::age] = 0; + auto const age = m_ledgerMaster.getValidatedLedgerAge(); + l[jss::age] = + Json::UInt(age < highAgeThreshold ? age.count() : 0); + } + else + { + auto lCloseTime = lpClosed->info().closeTime; + auto closeTime = app_.timeKeeper().closeTime(); + if (lCloseTime <= closeTime) + { + using namespace std::chrono_literals; + auto age = closeTime - lCloseTime; + l[jss::age] = + Json::UInt(age < highAgeThreshold ? age.count() : 0); + } } } diff --git a/src/ripple/app/misc/SHAMapStoreImp.cpp b/src/ripple/app/misc/SHAMapStoreImp.cpp index 94deae5d276..da1c29452bb 100644 --- a/src/ripple/app/misc/SHAMapStoreImp.cpp +++ b/src/ripple/app/misc/SHAMapStoreImp.cpp @@ -180,13 +180,24 @@ SHAMapStoreImp::SHAMapStoreImp( section.set("filter_bits", "10"); } - get_if_exists(section, "delete_batch", deleteBatch_); - get_if_exists(section, "backOff", backOff_); - get_if_exists(section, "age_threshold", ageThreshold_); get_if_exists(section, "online_delete", deleteInterval_); if (deleteInterval_) { + // Configuration that affects the behavior of online delete + get_if_exists(section, "delete_batch", deleteBatch_); + std::uint32_t temp; + if (get_if_exists(section, "back_off_milliseconds", temp) || + // Included for backward compatibility with an undocumented setting + get_if_exists(section, "backOff", temp)) + { + backOff_ = std::chrono::milliseconds{temp}; + } + if (get_if_exists(section, "age_threshold_seconds", temp)) + ageThreshold_ = std::chrono::seconds{temp}; + if (get_if_exists(section, "recovery_wait_seconds", temp)) + 
recoveryWaitTime_.emplace(std::chrono::seconds{temp}); + get_if_exists(section, "advisory_delete", advisoryDelete_); auto const minInterval = config.standalone() @@ -308,8 +319,8 @@ SHAMapStoreImp::run() LedgerIndex lastRotated = state_db_.getState().lastRotated; netOPs_ = &app_.getOPs(); ledgerMaster_ = &app_.getLedgerMaster(); - fullBelowCache_ = &app_.family().fullbelow(); - treeNodeCache_ = &app_.family().treecache(); + fullBelowCache_ = &(*app_.getNodeFamily().getFullBelowCache(0)); + treeNodeCache_ = &(*app_.getNodeFamily().getTreeNodeCache(0)); transactionDb_ = &app_.getTxnDB(); ledgerDb_ = &app_.getLedgerDB(); @@ -348,23 +359,14 @@ SHAMapStoreImp::run() // will delete up to (not including) lastRotated if (validatedSeq >= lastRotated + deleteInterval_ && - canDelete_ >= lastRotated - 1) + canDelete_ >= lastRotated - 1 && !health()) { JLOG(journal_.warn()) << "rotating validatedSeq " << validatedSeq << " lastRotated " << lastRotated << " deleteInterval " << deleteInterval_ - << " canDelete_ " << canDelete_; - - switch (health()) - { - case Health::stopping: - stopped(); - return; - case Health::unhealthy: - continue; - case Health::ok: - default:; - } + << " canDelete_ " << canDelete_ << " state " + << app_.getOPs().strOperatingMode(false) << " age " + << ledgerMaster_->getValidatedLedgerAge().count() << 's'; clearPrior(lastRotated); switch (health()) @@ -378,14 +380,13 @@ SHAMapStoreImp::run() default:; } + JLOG(journal_.debug()) << "copying ledger " << validatedSeq; std::uint64_t nodeCount = 0; validatedLedger->stateMap().snapShot(false)->visitNodes(std::bind( &SHAMapStoreImp::copyNode, this, std::ref(nodeCount), std::placeholders::_1)); - JLOG(journal_.debug()) << "copied ledger " << validatedSeq - << " nodecount " << nodeCount; switch (health()) { case Health::stopping: @@ -396,9 +397,12 @@ SHAMapStoreImp::run() case Health::ok: default:; } + // Only log if we completed without a "health" abort + JLOG(journal_.debug()) << "copied ledger " << validatedSeq 
+ << " nodecount " << nodeCount; + JLOG(journal_.debug()) << "freshening caches"; freshenCaches(); - JLOG(journal_.debug()) << validatedSeq << " freshened caches"; switch (health()) { case Health::stopping: @@ -409,7 +413,10 @@ SHAMapStoreImp::run() case Health::ok: default:; } + // Only log if we completed without a "health" abort + JLOG(journal_.debug()) << validatedSeq << " freshened caches"; + JLOG(journal_.trace()) << "Making a new backend"; auto newBackend = makeBackendRotating(); JLOG(journal_.debug()) << validatedSeq << " new backend " << newBackend->getName(); @@ -559,26 +566,38 @@ SHAMapStoreImp::makeBackendRotating(std::string path) return backend; } -bool +void SHAMapStoreImp::clearSql( DatabaseCon& database, LedgerIndex lastRotated, std::string const& minQuery, std::string const& deleteQuery) { + assert(deleteInterval_); LedgerIndex min = std::numeric_limits::max(); { - auto db = database.checkoutDb(); boost::optional m; - *db << minQuery, soci::into(m); + JLOG(journal_.trace()) + << "Begin: Look up lowest value of: " << minQuery; + { + auto db = database.checkoutDb(); + *db << minQuery, soci::into(m); + } + JLOG(journal_.trace()) << "End: Look up lowest value of: " << minQuery; if (!m) - return false; + return; min = *m; } if (min > lastRotated || health() != Health::ok) - return false; + return; + if (min == lastRotated) + { + // Micro-optimization mainly to clarify logs + JLOG(journal_.trace()) << "Nothing to delete from " << deleteQuery; + return; + } boost::format formattedDeleteQuery(deleteQuery); @@ -587,17 +606,24 @@ SHAMapStoreImp::clearSql( while (min < lastRotated) { min = std::min(lastRotated, min + deleteBatch_); + JLOG(journal_.trace()) << "Begin: Delete up to " << deleteBatch_ + << " rows with LedgerSeq < " << min + << " using query: " << deleteQuery; { auto db = database.checkoutDb(); *db << boost::str(formattedDeleteQuery % min); } + JLOG(journal_.trace()) + << "End: Delete up to " << deleteBatch_ << " rows with LedgerSeq < " + << min 
<< " using query: " << deleteQuery; if (health()) - return true; + return; if (min < lastRotated) - std::this_thread::sleep_for(std::chrono::milliseconds(backOff_)); + std::this_thread::sleep_for(backOff_); + if (health()) + return; } JLOG(journal_.debug()) << "finished: " << deleteQuery; - return true; } void @@ -621,13 +647,14 @@ SHAMapStoreImp::freshenCaches() void SHAMapStoreImp::clearPrior(LedgerIndex lastRotated) { - if (health()) - return; - // Do not allow ledgers to be acquired from the network // that are about to be deleted. minimumOnline_ = lastRotated + 1; + JLOG(journal_.trace()) << "Begin: Clear internal ledgers up to " + << lastRotated; ledgerMaster_->clearPriorLedgers(lastRotated); + JLOG(journal_.trace()) << "End: Clear internal ledgers up to " + << lastRotated; if (health()) return; @@ -666,16 +693,32 @@ SHAMapStoreImp::health() } if (!netOPs_) return Health::ok; + assert(deleteInterval_); - constexpr static std::chrono::seconds age_threshold(60); - auto age = ledgerMaster_->getValidatedLedgerAge(); - OperatingMode mode = netOPs_->getOperatingMode(); - if (mode != OperatingMode::FULL || age > age_threshold) + if (healthy_) { - JLOG(journal_.warn()) << "Not deleting. state: " - << app_.getOPs().strOperatingMode(mode, false) - << ". age " << age.count() << 's'; - healthy_ = false; + auto age = ledgerMaster_->getValidatedLedgerAge(); + OperatingMode mode = netOPs_->getOperatingMode(); + if (recoveryWaitTime_ && mode == OperatingMode::SYNCING && + age < ageThreshold_) + { + JLOG(journal_.warn()) + << "Waiting " << recoveryWaitTime_->count() + << "s for node to get back into sync with network. state: " + << app_.getOPs().strOperatingMode(mode, false) << ". age " + << age.count() << 's'; + std::this_thread::sleep_for(*recoveryWaitTime_); + + age = ledgerMaster_->getValidatedLedgerAge(); + mode = netOPs_->getOperatingMode(); + } + if (mode != OperatingMode::FULL || age > ageThreshold_) + { + JLOG(journal_.warn()) << "Not deleting. 
state: " + << app_.getOPs().strOperatingMode(mode, false) + << ". age " << age.count() << 's'; + healthy_ = false; + } } if (healthy_) diff --git a/src/ripple/app/misc/SHAMapStoreImp.h b/src/ripple/app/misc/SHAMapStoreImp.h index 2fabf1a6996..6145cb48dfd 100644 --- a/src/ripple/app/misc/SHAMapStoreImp.h +++ b/src/ripple/app/misc/SHAMapStoreImp.h @@ -25,6 +25,7 @@ #include #include #include +#include #include #include @@ -106,8 +107,14 @@ class SHAMapStoreImp : public SHAMapStore std::uint32_t deleteInterval_ = 0; bool advisoryDelete_ = false; std::uint32_t deleteBatch_ = 100; - std::uint32_t backOff_ = 100; - std::int32_t ageThreshold_ = 60; + std::chrono::milliseconds backOff_{100}; + std::chrono::seconds ageThreshold_{60}; + /// If set, and the node is out of sync during an + /// online_delete health check, sleep the thread + /// for this time and check again so the node can + /// recover. + /// See also: "recovery_wait_seconds" in rippled-example.cfg + boost::optional recoveryWaitTime_; // these do not exist upon SHAMapStore creation, but do exist // as of onPrepare() or before @@ -212,13 +219,11 @@ class SHAMapStoreImp : public SHAMapStore return false; } - /** delete from sqlite table in batches to not lock the db excessively - * pause briefly to extend access time to other users - * call with mutex object unlocked - * @return true if any deletable rows were found (though not - * necessarily deleted. + /** delete from sqlite table in batches to not lock the db excessively. + * Pause briefly to extend access time to other users. + * Call with mutex object unlocked. */ - bool + void clearSql( DatabaseCon& database, LedgerIndex lastRotated, @@ -236,6 +241,9 @@ class SHAMapStoreImp : public SHAMapStore // Assume that, once unhealthy, a necessary step has been // aborted, so the online-delete process needs to restart // at next ledger. 
+ // If recoveryWaitTime_ is set, this may sleep to give rippled + // time to recover, so never call it from any thread other than + // the main "run()". Health health(); // diff --git a/src/ripple/app/misc/ValidatorList.h b/src/ripple/app/misc/ValidatorList.h index c47119eb753..cf0ca00fe9d 100644 --- a/src/ripple/app/misc/ValidatorList.h +++ b/src/ripple/app/misc/ValidatorList.h @@ -38,6 +38,7 @@ namespace ripple { // predeclaration class Overlay; class HashRouter; +class STValidation; enum class ListDisposition { /// List is valid @@ -159,6 +160,9 @@ class ValidatorList PublicKey localPubKey_; + // The master public keys of the current negative UNL + hash_set negativeUnl_; + // Currently supported version of publisher list format static constexpr std::uint32_t requiredListVersion = 1; static const std::string filePrefix_; @@ -505,6 +509,37 @@ class ValidatorList return {quorum_, trustedSigningKeys_}; } + /** + * get the trusted master public keys + * @return the public keys + */ + hash_set + getTrustedMasterKeys() const; + + /** + * get the master public keys of Negative UNL validators + * @return the master public keys + */ + hash_set + getNegativeUnl() const; + + /** + * set the Negative UNL with validators' master public keys + * @param negUnl the public keys + */ + void + setNegativeUnl(hash_set const& negUnl); + + /** + * Remove validations that are from validators on the negative UNL. 
+ * + * @param validations the validations to filter + * @return a filtered copy of the validations + */ + std::vector> + negativeUNLFilter( + std::vector>&& validations) const; + private: /** Get the filename used for caching UNLs */ @@ -547,12 +582,19 @@ class ValidatorList /** Return quorum for trusted validator set - @param trusted Number of trusted validator keys + @param unlSize Number of trusted validator keys - @param seen Number of trusted validators that have signed - recently received validations */ + @param effectiveUnlSize Number of trusted validator keys that are not in + the NegativeUNL + + @param seenSize Number of trusted validators that have signed + recently received validations + */ std::size_t - calculateQuorum(std::size_t trusted, std::size_t seen); + calculateQuorum( + std::size_t unlSize, + std::size_t effectiveUnlSize, + std::size_t seenSize); }; } // namespace ripple diff --git a/src/ripple/app/misc/impl/AmendmentTable.cpp b/src/ripple/app/misc/impl/AmendmentTable.cpp index 4d499ea9172..2f29a2f5788 100644 --- a/src/ripple/app/misc/impl/AmendmentTable.cpp +++ b/src/ripple/app/misc/impl/AmendmentTable.cpp @@ -21,6 +21,7 @@ #include #include #include +#include #include #include #include @@ -92,28 +93,74 @@ struct AmendmentState }; /** The status of all amendments requested in a given window. 
*/ -struct AmendmentSet +class AmendmentSet { private: // How many yes votes each amendment received hash_map votes_; - -public: + Rules const& rules_; // number of trusted validations - int mTrustedValidations = 0; - + int trustedValidations_ = 0; // number of votes needed - int mThreshold = 0; + int threshold_ = 0; - AmendmentSet() = default; +public: + AmendmentSet( + Rules const& rules, + std::vector> const& valSet) + : rules_(rules) + { + // process validations for ledger before flag ledger + for (auto const& val : valSet) + { + if (val->isTrusted()) + { + if (val->isFieldPresent(sfAmendments)) + { + auto const choices = val->getFieldV256(sfAmendments); + std::for_each( + choices.begin(), + choices.end(), + [&](auto const& amendment) { ++votes_[amendment]; }); + } + + ++trustedValidations_; + } + } - void - tally(std::set const& amendments) + threshold_ = !rules_.enabled(fixAmendmentMajorityCalc) + ? std::max( + 1L, + static_cast( + (trustedValidations_ * + preFixAmendmentMajorityCalcThreshold.num) / + preFixAmendmentMajorityCalcThreshold.den)) + : std::max( + 1L, + static_cast( + (trustedValidations_ * + postFixAmendmentMajorityCalcThreshold.num) / + postFixAmendmentMajorityCalcThreshold.den)); + } + + bool + passes(uint256 const& amendment) const { - ++mTrustedValidations; + auto const& it = votes_.find(amendment); - for (auto const& amendment : amendments) - ++votes_[amendment]; + if (it == votes_.end()) + return false; + + // Before this fix, it was possible for an amendment to activate with a + // percentage slightly less than 80% because we compared for "greater + // than or equal to" instead of strictly "greater than". + // One validator is an exception, otherwise it is not possible + // to gain majority. 
+ if (!rules_.enabled(fixAmendmentMajorityCalc) || + trustedValidations_ == 1) + return it->second >= threshold_; + + return it->second > threshold_; } int @@ -126,6 +173,18 @@ struct AmendmentSet return it->second; } + + int + trustedValidations() const + { + return trustedValidations_; + } + + int + threshold() const + { + return threshold_; + } }; //------------------------------------------------------------------------------ @@ -138,7 +197,7 @@ struct AmendmentSet */ class AmendmentTableImpl final : public AmendmentTable { -protected: +private: mutable std::mutex mutex_; hash_map amendmentMap_; @@ -147,10 +206,6 @@ class AmendmentTableImpl final : public AmendmentTable // Time that an amendment must hold a majority for std::chrono::seconds const majorityTime_; - // The amount of support that an amendment must receive - // 0 = 0% and 256 = 100% - int const majorityFraction_; - // The results of the last voting round - may be empty if // we haven't participated in one yet. std::unique_ptr lastVote_; @@ -187,7 +242,6 @@ class AmendmentTableImpl final : public AmendmentTable public: AmendmentTableImpl( std::chrono::seconds majorityTime, - int majorityFraction, Section const& supported, Section const& enabled, Section const& vetoed, @@ -237,6 +291,7 @@ class AmendmentTableImpl final : public AmendmentTable std::map doVoting( + Rules const& rules, NetClock::time_point closeTime, std::set const& enabledAmendments, majorityAmendments_t const& majorityAmendments, @@ -247,19 +302,15 @@ class AmendmentTableImpl final : public AmendmentTable AmendmentTableImpl::AmendmentTableImpl( std::chrono::seconds majorityTime, - int majorityFraction, Section const& supported, Section const& enabled, Section const& vetoed, beast::Journal journal) : lastUpdateSeq_(0) , majorityTime_(majorityTime) - , majorityFraction_(majorityFraction) , unsupportedEnabled_(false) , j_(journal) { - assert(majorityFraction_ != 0); - std::lock_guard sl(mutex_); for (auto const& a : 
parseSection(supported)) @@ -461,6 +512,7 @@ AmendmentTableImpl::getDesired() const std::map AmendmentTableImpl::doVoting( + Rules const& rules, NetClock::time_point closeTime, std::set const& enabledAmendments, majorityAmendments_t const& majorityAmendments, @@ -470,31 +522,11 @@ AmendmentTableImpl::doVoting( << ": " << enabledAmendments.size() << ", " << majorityAmendments.size() << ", " << valSet.size(); - auto vote = std::make_unique(); - - // process validations for ledger before flag ledger - for (auto const& val : valSet) - { - if (val->isTrusted()) - { - std::set ballot; + auto vote = std::make_unique(rules, valSet); - if (val->isFieldPresent(sfAmendments)) - { - auto const choices = val->getFieldV256(sfAmendments); - ballot.insert(choices.begin(), choices.end()); - } - - vote->tally(ballot); - } - } - - vote->mThreshold = - std::max(1, (vote->mTrustedValidations * majorityFraction_) / 256); - - JLOG(j_.debug()) << "Received " << vote->mTrustedValidations + JLOG(j_.debug()) << "Received " << vote->trustedValidations() << " trusted validations, threshold is: " - << vote->mThreshold; + << vote->threshold(); // Map of amendments to the action to be taken for each one. 
The action is // the value of the flags in the pseudo-transaction @@ -507,8 +539,7 @@ AmendmentTableImpl::doVoting( { NetClock::time_point majorityTime = {}; - bool const hasValMajority = - (vote->votes(entry.first) >= vote->mThreshold); + bool const hasValMajority = vote->passes(entry.first); { auto const it = majorityAmendments.find(entry.first); @@ -614,18 +645,15 @@ AmendmentTableImpl::injectJson( if (!fs.enabled && lastVote_) { - auto const votesTotal = lastVote_->mTrustedValidations; - auto const votesNeeded = lastVote_->mThreshold; + auto const votesTotal = lastVote_->trustedValidations(); + auto const votesNeeded = lastVote_->threshold(); auto const votesFor = lastVote_->votes(id); v[jss::count] = votesFor; v[jss::validations] = votesTotal; if (votesNeeded) - { - v[jss::vote] = votesFor * 256 / votesNeeded; v[jss::threshold] = votesNeeded; - } } } @@ -666,14 +694,13 @@ AmendmentTableImpl::getJson(uint256 const& amendmentID) const std::unique_ptr make_AmendmentTable( std::chrono::seconds majorityTime, - int majorityFraction, Section const& supported, Section const& enabled, Section const& vetoed, beast::Journal journal) { return std::make_unique( - majorityTime, majorityFraction, supported, enabled, vetoed, journal); + majorityTime, supported, enabled, vetoed, journal); } } // namespace ripple diff --git a/src/ripple/app/misc/impl/ValidatorList.cpp b/src/ripple/app/misc/impl/ValidatorList.cpp index 710ad34928f..ebb34a7e523 100644 --- a/src/ripple/app/misc/impl/ValidatorList.cpp +++ b/src/ripple/app/misc/impl/ValidatorList.cpp @@ -25,6 +25,7 @@ #include #include #include +#include #include #include #include @@ -746,6 +747,16 @@ ValidatorList::getJson() const } }); + // Negative UNL + if (!negativeUnl_.empty()) + { + Json::Value& jNegativeUNL = (res[jss::NegativeUNL] = Json::arrayValue); + for (auto const& k : negativeUnl_) + { + jNegativeUNL.append(toBase58(TokenType::NodePublic, k)); + } + } + return res; } @@ -818,7 +829,10 @@ 
ValidatorList::getAvailable(boost::beast::string_view const& pubKey) } std::size_t -ValidatorList::calculateQuorum(std::size_t trusted, std::size_t seen) +ValidatorList::calculateQuorum( + std::size_t unlSize, + std::size_t effectiveUnlSize, + std::size_t seenSize) { // Do not use achievable quorum until lists from all configured // publishers are available @@ -858,11 +872,16 @@ ValidatorList::calculateQuorum(std::size_t trusted, std::size_t seen) // Oi,j > nj/2 + ni − qi + ti,j // ni - pi > (ni - pi + pj)/2 + ni − .8*ni + .2*ni // pi + pj < .2*ni - auto quorum = static_cast(std::ceil(trusted * 0.8f)); + // + // Note that the negative UNL protocol introduced the AbsoluteMinimumQuorum + // which is 60% of the original UNL size. The effective quorum should + // not be lower than it. + auto quorum = static_cast(std::max( + std::ceil(effectiveUnlSize * 0.8f), std::ceil(unlSize * 0.6f))); // Use lower quorum specified via command line if the normal quorum appears // unreachable based on the number of recently received validations. 
- if (minimumQuorum_ && *minimumQuorum_ < quorum && seen < quorum) + if (minimumQuorum_ && *minimumQuorum_ < quorum && seenSize < quorum) { quorum = *minimumQuorum_; @@ -922,7 +941,28 @@ ValidatorList::updateTrusted(hash_set const& seenValidators) << trustedMasterKeys_.size() << " of " << keyListings_.size() << " listed validators eligible for inclusion in the trusted set"; - quorum_ = calculateQuorum(trustedMasterKeys_.size(), seenValidators.size()); + auto unlSize = trustedMasterKeys_.size(); + auto effectiveUnlSize = unlSize; + auto seenSize = seenValidators.size(); + if (!negativeUnl_.empty()) + { + for (auto const& k : trustedMasterKeys_) + { + if (negativeUnl_.count(k)) + --effectiveUnlSize; + } + hash_set negUnlNodeIDs; + for (auto const& k : negativeUnl_) + { + negUnlNodeIDs.emplace(calcNodeID(k)); + } + for (auto const& nid : seenValidators) + { + if (negUnlNodeIDs.count(nid)) + --seenSize; + } + } + quorum_ = calculateQuorum(unlSize, effectiveUnlSize, seenSize); JLOG(j_.debug()) << "Using quorum of " << quorum_ << " for new set of " << trustedMasterKeys_.size() << " trusted validators (" @@ -939,4 +979,57 @@ ValidatorList::updateTrusted(hash_set const& seenValidators) return trustChanges; } +hash_set +ValidatorList::getTrustedMasterKeys() const +{ + std::shared_lock lock{mutex_}; + return trustedMasterKeys_; +} + +hash_set +ValidatorList::getNegativeUnl() const +{ + std::shared_lock lock{mutex_}; + return negativeUnl_; +} + +void +ValidatorList::setNegativeUnl(hash_set const& negUnl) +{ + std::lock_guard lock{mutex_}; + negativeUnl_ = negUnl; +} + +std::vector> +ValidatorList::negativeUNLFilter( + std::vector>&& validations) const +{ + // Remove validations that are from validators on the negative UNL. 
+ auto ret = std::move(validations); + + std::shared_lock lock{mutex_}; + if (!negativeUnl_.empty()) + { + ret.erase( + std::remove_if( + ret.begin(), + ret.end(), + [&](auto const& v) -> bool { + if (auto const masterKey = + getTrustedKey(v->getSignerPublic()); + masterKey) + { + return negativeUnl_.count(*masterKey); + } + else + { + return false; + } + }), + ret.end()); + } + + return ret; +} + } // namespace ripple diff --git a/src/ripple/app/tx/impl/Change.cpp b/src/ripple/app/tx/impl/Change.cpp index a75962b0013..9563790d86e 100644 --- a/src/ripple/app/tx/impl/Change.cpp +++ b/src/ripple/app/tx/impl/Change.cpp @@ -17,11 +17,13 @@ */ //============================================================================== +#include #include #include #include #include #include +#include #include #include @@ -62,6 +64,13 @@ Change::preflight(PreflightContext const& ctx) return temBAD_SEQUENCE; } + if (ctx.tx.getTxnType() == ttUNL_MODIFY && + !ctx.rules.enabled(featureNegativeUNL)) + { + JLOG(ctx.j.warn()) << "Change: NegativeUNL not enabled"; + return temDISABLED; + } + return tesSUCCESS; } @@ -76,20 +85,32 @@ Change::preclaim(PreclaimContext const& ctx) return temINVALID; } - if (ctx.tx.getTxnType() != ttAMENDMENT && ctx.tx.getTxnType() != ttFEE) - return temUNKNOWN; - - return tesSUCCESS; + switch (ctx.tx.getTxnType()) + { + case ttAMENDMENT: + case ttFEE: + case ttUNL_MODIFY: + return tesSUCCESS; + default: + return temUNKNOWN; + } } TER Change::doApply() { - if (ctx_.tx.getTxnType() == ttAMENDMENT) - return applyAmendment(); - - assert(ctx_.tx.getTxnType() == ttFEE); - return applyFee(); + switch (ctx_.tx.getTxnType()) + { + case ttAMENDMENT: + return applyAmendment(); + case ttFEE: + return applyFee(); + case ttUNL_MODIFY: + return applyUNLModify(); + default: + assert(0); + return tefFAILURE; + } } void @@ -221,4 +242,130 @@ Change::applyFee() return tesSUCCESS; } +TER +Change::applyUNLModify() +{ + if (!isFlagLedger(view().seq())) + { + JLOG(j_.warn()) << "N-UNL: 
applyUNLModify, not a flag ledger, seq=" + << view().seq(); + return tefFAILURE; + } + + if (!ctx_.tx.isFieldPresent(sfUNLModifyDisabling) || + ctx_.tx.getFieldU8(sfUNLModifyDisabling) > 1 || + !ctx_.tx.isFieldPresent(sfLedgerSequence) || + !ctx_.tx.isFieldPresent(sfUNLModifyValidator)) + { + JLOG(j_.warn()) << "N-UNL: applyUNLModify, wrong Tx format."; + return tefFAILURE; + } + + bool const disabling = ctx_.tx.getFieldU8(sfUNLModifyDisabling); + auto const seq = ctx_.tx.getFieldU32(sfLedgerSequence); + if (seq != view().seq()) + { + JLOG(j_.warn()) << "N-UNL: applyUNLModify, wrong ledger seq=" << seq; + return tefFAILURE; + } + + Blob const validator = ctx_.tx.getFieldVL(sfUNLModifyValidator); + if (!publicKeyType(makeSlice(validator))) + { + JLOG(j_.warn()) << "N-UNL: applyUNLModify, bad validator key"; + return tefFAILURE; + } + + JLOG(j_.info()) << "N-UNL: applyUNLModify, " + << (disabling ? "ToDisable" : "ToReEnable") + << " seq=" << seq + << " validator data:" << strHex(validator); + + auto const k = keylet::negativeUNL(); + SLE::pointer negUnlObject = view().peek(k); + if (!negUnlObject) + { + negUnlObject = std::make_shared(k); + view().insert(negUnlObject); + } + + bool const found = [&] { + if (negUnlObject->isFieldPresent(sfNegativeUNL)) + { + auto const& negUnl = negUnlObject->getFieldArray(sfNegativeUNL); + for (auto const& v : negUnl) + { + if (v.isFieldPresent(sfPublicKey) && + v.getFieldVL(sfPublicKey) == validator) + return true; + } + } + return false; + }(); + + if (disabling) + { + // cannot have more than one toDisable + if (negUnlObject->isFieldPresent(sfNegativeUNLToDisable)) + { + JLOG(j_.warn()) << "N-UNL: applyUNLModify, already has ToDisable"; + return tefFAILURE; + } + + // cannot be the same as toReEnable + if (negUnlObject->isFieldPresent(sfNegativeUNLToReEnable)) + { + if (negUnlObject->getFieldVL(sfNegativeUNLToReEnable) == validator) + { + JLOG(j_.warn()) + << "N-UNL: applyUNLModify, ToDisable is same as ToReEnable"; + return 
tefFAILURE; + } + } + + // cannot be in negative UNL already + if (found) + { + JLOG(j_.warn()) + << "N-UNL: applyUNLModify, ToDisable already in negative UNL"; + return tefFAILURE; + } + + negUnlObject->setFieldVL(sfNegativeUNLToDisable, validator); + } + else + { + // cannot have more than one toReEnable + if (negUnlObject->isFieldPresent(sfNegativeUNLToReEnable)) + { + JLOG(j_.warn()) << "N-UNL: applyUNLModify, already has ToReEnable"; + return tefFAILURE; + } + + // cannot be the same as toDisable + if (negUnlObject->isFieldPresent(sfNegativeUNLToDisable)) + { + if (negUnlObject->getFieldVL(sfNegativeUNLToDisable) == validator) + { + JLOG(j_.warn()) + << "N-UNL: applyUNLModify, ToReEnable is same as ToDisable"; + return tefFAILURE; + } + } + + // must be in negative UNL + if (!found) + { + JLOG(j_.warn()) + << "N-UNL: applyUNLModify, ToReEnable is not in negative UNL"; + return tefFAILURE; + } + + negUnlObject->setFieldVL(sfNegativeUNLToReEnable, validator); + } + + view().update(negUnlObject); + return tesSUCCESS; +} + } // namespace ripple diff --git a/src/ripple/app/tx/impl/Change.h b/src/ripple/app/tx/impl/Change.h index 54e0aa4f927..9f79278411f 100644 --- a/src/ripple/app/tx/impl/Change.h +++ b/src/ripple/app/tx/impl/Change.h @@ -59,6 +59,9 @@ class Change : public Transactor TER applyFee(); + + TER + applyUNLModify(); }; } // namespace ripple diff --git a/src/ripple/app/tx/impl/Escrow.cpp b/src/ripple/app/tx/impl/Escrow.cpp index 30a74c138c6..07d72f7875a 100644 --- a/src/ripple/app/tx/impl/Escrow.cpp +++ b/src/ripple/app/tx/impl/Escrow.cpp @@ -147,10 +147,7 @@ EscrowCreate::preflight(PreflightContext const& ctx) return temMALFORMED; } - // Conditions other than PrefixSha256 require the - // "CryptoConditionsSuite" amendment: - if (condition->type != Type::preimageSha256 && - !ctx.rules.enabled(featureCryptoConditionsSuite)) + if (condition->type != Type::preimageSha256) return temDISABLED; } diff --git a/src/ripple/app/tx/impl/InvariantCheck.cpp 
b/src/ripple/app/tx/impl/InvariantCheck.cpp index 34e9d90ec7c..73b20a0f1dd 100644 --- a/src/ripple/app/tx/impl/InvariantCheck.cpp +++ b/src/ripple/app/tx/impl/InvariantCheck.cpp @@ -365,6 +365,7 @@ LedgerEntryTypesMatch::visitEntry( case ltPAYCHAN: case ltCHECK: case ltDEPOSIT_PREAUTH: + case ltNEGATIVE_UNL: break; default: invalidTypeAdded_ = true; diff --git a/src/ripple/app/tx/impl/applySteps.cpp b/src/ripple/app/tx/impl/applySteps.cpp index fddf9221659..8ebfd6d3c7f 100644 --- a/src/ripple/app/tx/impl/applySteps.cpp +++ b/src/ripple/app/tx/impl/applySteps.cpp @@ -86,6 +86,7 @@ invoke_preflight(PreflightContext const& ctx) return DeleteAccount ::preflight(ctx); case ttAMENDMENT: case ttFEE: + case ttUNL_MODIFY: return Change ::preflight(ctx); default: assert(false); @@ -173,6 +174,7 @@ invoke_preclaim(PreclaimContext const& ctx) return invoke_preclaim(ctx); case ttAMENDMENT: case ttFEE: + case ttUNL_MODIFY: return invoke_preclaim(ctx); default: assert(false); @@ -227,6 +229,7 @@ invoke_calculateBaseFee(ReadView const& view, STTx const& tx) return DeleteAccount::calculateBaseFee(view, tx); case ttAMENDMENT: case ttFEE: + case ttUNL_MODIFY: return Change::calculateBaseFee(view, tx); default: assert(false); @@ -294,6 +297,7 @@ invoke_calculateConsequences(STTx const& tx) return invoke_calculateConsequences(tx); case ttAMENDMENT: case ttFEE: + case ttUNL_MODIFY: [[fallthrough]]; default: assert(false); @@ -390,7 +394,8 @@ invoke_apply(ApplyContext& ctx) return p(); } case ttAMENDMENT: - case ttFEE: { + case ttFEE: + case ttUNL_MODIFY: { Change p(ctx); return p(); } diff --git a/src/ripple/basics/MathUtilities.h b/src/ripple/basics/MathUtilities.h new file mode 100644 index 00000000000..f9dbcbbbc30 --- /dev/null +++ b/src/ripple/basics/MathUtilities.h @@ -0,0 +1,68 @@ +//------------------------------------------------------------------------------ +/* + This file is part of rippled: https://github.com/ripple/rippled + Copyright (c) 2020 Ripple Labs Inc. 
+ + Permission to use, copy, modify, and/or distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. +*/ +//============================================================================== + +#ifndef RIPPLE_BASICS_MATHUTILITIES_H_INCLUDED +#define RIPPLE_BASICS_MATHUTILITIES_H_INCLUDED + +#include +#include +#include + +namespace ripple { + +/** Calculate one number divided by another number in percentage. + * The result is rounded up to the next integer, and capped in the range [0,100] + * E.g. calculatePercent(1, 100) = 1 because 1/100 = 0.010000 + * calculatePercent(1, 99) = 2 because 1/99 = 0.010101 + * calculatePercent(0, 100) = 0 + * calculatePercent(100, 100) = 100 + * calculatePercent(200, 100) = 100 because the result is capped to 100 + * + * @param count dividend + * @param total divisor + * @return the percentage, in [0, 100] + * + * @note total cannot be zero. 
+ * */ +constexpr std::size_t +calculatePercent(std::size_t count, std::size_t total) +{ + assert(total != 0); + return ((std::min(count, total) * 100) + total - 1) / total; +} + +// unit tests +static_assert(calculatePercent(1, 2) == 50); +static_assert(calculatePercent(0, 100) == 0); +static_assert(calculatePercent(100, 100) == 100); +static_assert(calculatePercent(200, 100) == 100); +static_assert(calculatePercent(1, 100) == 1); +static_assert(calculatePercent(1, 99) == 2); +static_assert(calculatePercent(6, 14) == 43); +static_assert(calculatePercent(29, 33) == 88); +static_assert(calculatePercent(1, 64) == 2); +static_assert(calculatePercent(0, 100'000'000) == 0); +static_assert(calculatePercent(1, 100'000'000) == 1); +static_assert(calculatePercent(50'000'000, 100'000'000) == 50); +static_assert(calculatePercent(50'000'001, 100'000'000) == 51); +static_assert(calculatePercent(99'999'999, 100'000'000) == 100); + +} // namespace ripple + +#endif diff --git a/src/ripple/basics/Slice.h b/src/ripple/basics/Slice.h index 126e8ab1ce0..67c954bb723 100644 --- a/src/ripple/basics/Slice.h +++ b/src/ripple/basics/Slice.h @@ -27,6 +27,7 @@ #include #include #include +#include #include #include #include @@ -63,7 +64,7 @@ class Slice } /** Return `true` if the byte range is empty. */ - bool + [[nodiscard]] bool empty() const noexcept { return size_ == 0; @@ -73,12 +74,20 @@ class Slice This may be zero for an empty range. */ + /** @{ */ std::size_t size() const noexcept { return size_; } + std::size_t + length() const noexcept + { + return size_; + } + /** @} */ + /** Return a pointer to beginning of the storage. @note The return type is guaranteed to be a pointer to a single byte, to facilitate pointer arithmetic. @@ -117,6 +126,21 @@ class Slice } /** @} */ + /** Shrinks the slice by moving its start forward by n characters. */ + void + remove_prefix(std::size_t n) + { + data_ += n; + size_ -= n; + } + + /** Shrinks the slice by moving its end backward by n characters. 
*/ + void + remove_suffix(std::size_t n) + { + size_ -= n; + } + const_iterator begin() const noexcept { @@ -140,6 +164,28 @@ class Slice { return data_ + size_; } + + /** Return a "sub slice" of given length starting at the given position + + Note that the subslice encompasses the range [pos, pos + rcount), + where rcount is the smaller of count and size() - pos. + + @param pos position of the first character + @count requested length + + @returns The requested subslice, if the request is valid. + @throws std::out_of_range if pos > size() + */ + Slice + substr( + std::size_t pos, + std::size_t count = std::numeric_limits::max()) const + { + if (pos > size()) + throw std::out_of_range("Requested sub-slice is out of bounds"); + + return {data_ + pos, std::min(count, size() - pos)}; + } }; //------------------------------------------------------------------------------ diff --git a/src/ripple/consensus/Validations.h b/src/ripple/consensus/Validations.h index 8626d680616..d4df679bf92 100644 --- a/src/ripple/consensus/Validations.h +++ b/src/ripple/consensus/Validations.h @@ -322,6 +322,9 @@ class Validations beast::uhash<>> bySequence_; + // Sequence of the earliest validation to keep from expire + boost::optional toKeep_; + // Represents the ancestry of validated ledgers LedgerTrie trie_; @@ -686,15 +689,47 @@ class Validations return ValStatus::current; } + /** + * Set the smallest sequence number of validations to keep from expire + * @param s the sequence number + */ + void + setSeqToKeep(Seq const& s) + { + std::lock_guard lock{mutex_}; + toKeep_ = s; + } + /** Expire old validation sets Remove validation sets that were accessed more than - validationSET_EXPIRES ago. + validationSET_EXPIRES ago and were not asked to keep. 
*/ void expire() { std::lock_guard lock{mutex_}; + if (toKeep_) + { + for (auto i = byLedger_.begin(); i != byLedger_.end(); ++i) + { + auto const& validationMap = i->second; + if (!validationMap.empty() && + validationMap.begin()->second.seq() >= toKeep_) + { + byLedger_.touch(i); + } + } + + for (auto i = bySequence_.begin(); i != bySequence_.end(); ++i) + { + if (i->first >= toKeep_) + { + bySequence_.touch(i); + } + } + } + beast::expire(byLedger_, parms_.validationSET_EXPIRES); beast::expire(bySequence_, parms_.validationSET_EXPIRES); } diff --git a/src/ripple/core/Config.h b/src/ripple/core/Config.h index 7943906fdae..7eec3bc0764 100644 --- a/src/ripple/core/Config.h +++ b/src/ripple/core/Config.h @@ -31,6 +31,7 @@ #include #include #include +#include #include #include #include @@ -172,6 +173,9 @@ class Config : public BasicConfig // Compression bool COMPRESSION = false; + // Amendment majority time + std::chrono::seconds AMENDMENT_MAJORITY_TIME = defaultAmendmentMajorityTime; + // Thread pool configuration std::size_t WORKERS = 0; diff --git a/src/ripple/core/ConfigSections.h b/src/ripple/core/ConfigSections.h index c9b61c2cb2b..3aae9774d10 100644 --- a/src/ripple/core/ConfigSections.h +++ b/src/ripple/core/ConfigSections.h @@ -60,6 +60,7 @@ struct ConfigSection #define SECTION_INSIGHT "insight" #define SECTION_IPS "ips" #define SECTION_IPS_FIXED "ips_fixed" +#define SECTION_AMENDMENT_MAJORITY_TIME "amendment_majority_time" #define SECTION_NETWORK_QUORUM "network_quorum" #define SECTION_NODE_SEED "node_seed" #define SECTION_NODE_SIZE "node_size" diff --git a/src/ripple/core/DatabaseCon.h b/src/ripple/core/DatabaseCon.h index d79ecef2071..5cdabb08f08 100644 --- a/src/ripple/core/DatabaseCon.h +++ b/src/ripple/core/DatabaseCon.h @@ -24,6 +24,7 @@ #include #include #include +#include #include #include @@ -89,6 +90,19 @@ class DatabaseCon Config::StartUpType startUp = Config::NORMAL; bool standAlone = false; boost::filesystem::path dataDir; + // Indicates 
whether or not to return the `globalPragma` + // from commonPragma() + bool useGlobalPragma = false; + + std::vector const* + commonPragma() const + { + assert(!useGlobalPragma || globalPragma); + return useGlobalPragma && globalPragma ? globalPragma.get() + : nullptr; + } + + static std::unique_ptr const> globalPragma; }; template @@ -97,16 +111,17 @@ class DatabaseCon std::string const& DBName, std::array const& pragma, std::array const& initSQL) - { // Use temporary files or regular DB files? - auto const useTempFiles = setup.standAlone && - setup.startUp != Config::LOAD && - setup.startUp != Config::LOAD_FILE && - setup.startUp != Config::REPLAY; - boost::filesystem::path pPath = - useTempFiles ? "" : (setup.dataDir / DBName); - - init(pPath, pragma, initSQL); + : DatabaseCon( + setup.standAlone && setup.startUp != Config::LOAD && + setup.startUp != Config::LOAD_FILE && + setup.startUp != Config::REPLAY + ? "" + : (setup.dataDir / DBName), + setup.commonPragma(), + pragma, + initSQL) + { } template @@ -115,8 +130,8 @@ class DatabaseCon std::string const& DBName, std::array const& pragma, std::array const& initSQL) + : DatabaseCon(dataDir / DBName, nullptr, pragma, initSQL) { - init((dataDir / DBName), pragma, initSQL); } soci::session& @@ -136,14 +151,22 @@ class DatabaseCon private: template - void - init( + DatabaseCon( boost::filesystem::path const& pPath, + std::vector const* commonPragma, std::array const& pragma, std::array const& initSQL) { open(session_, "sqlite", pPath.string()); + if (commonPragma) + { + for (auto const& p : *commonPragma) + { + soci::statement st = session_.prepare << p; + st.execute(true); + } + } for (auto const& p : pragma) { soci::statement st = session_.prepare << p; @@ -163,7 +186,9 @@ class DatabaseCon }; DatabaseCon::Setup -setup_DatabaseCon(Config const& c); +setup_DatabaseCon( + Config const& c, + boost::optional j = boost::none); } // namespace ripple diff --git a/src/ripple/core/impl/Config.cpp 
b/src/ripple/core/impl/Config.cpp index f12ba7dbcee..a7aeca8617e 100644 --- a/src/ripple/core/impl/Config.cpp +++ b/src/ripple/core/impl/Config.cpp @@ -442,7 +442,8 @@ Config::loadFromString(std::string const& fileContents) if (getSingleSection(secConfig, SECTION_LEDGER_HISTORY, strTemp, j_)) { if (boost::iequals(strTemp, "full")) - LEDGER_HISTORY = 1000000000u; + LEDGER_HISTORY = + std::numeric_limits::max(); else if (boost::iequals(strTemp, "none")) LEDGER_HISTORY = 0; else @@ -454,7 +455,7 @@ Config::loadFromString(std::string const& fileContents) if (boost::iequals(strTemp, "none")) FETCH_DEPTH = 0; else if (boost::iequals(strTemp, "full")) - FETCH_DEPTH = 1000000000u; + FETCH_DEPTH = std::numeric_limits::max(); else FETCH_DEPTH = beast::lexicalCastThrow(strTemp); @@ -480,6 +481,37 @@ Config::loadFromString(std::string const& fileContents) if (getSingleSection(secConfig, SECTION_COMPRESSION, strTemp, j_)) COMPRESSION = beast::lexicalCastThrow(strTemp); + if (getSingleSection( + secConfig, SECTION_AMENDMENT_MAJORITY_TIME, strTemp, j_)) + { + using namespace std::chrono; + boost::regex const re( + "^\\s*(\\d+)\\s*(minutes|hours|days|weeks)\\s*(\\s+.*)?$"); + boost::smatch match; + if (!boost::regex_match(strTemp, match, re)) + Throw( + "Invalid " SECTION_AMENDMENT_MAJORITY_TIME + ", must be: [0-9]+ [minutes|hours|days|weeks]"); + + std::uint32_t duration = + beast::lexicalCastThrow(match[1].str()); + + if (boost::iequals(match[2], "minutes")) + AMENDMENT_MAJORITY_TIME = minutes(duration); + else if (boost::iequals(match[2], "hours")) + AMENDMENT_MAJORITY_TIME = hours(duration); + else if (boost::iequals(match[2], "days")) + AMENDMENT_MAJORITY_TIME = days(duration); + else if (boost::iequals(match[2], "weeks")) + AMENDMENT_MAJORITY_TIME = weeks(duration); + + if (AMENDMENT_MAJORITY_TIME < minutes(15)) + Throw( + "Invalid " SECTION_AMENDMENT_MAJORITY_TIME + ", the minimum amount of time an amendment must hold a " + "majority is 15 minutes"); + } + // Do not load 
trusted validator configuration for standalone mode if (!RUN_STANDALONE) { diff --git a/src/ripple/core/impl/DatabaseCon.cpp b/src/ripple/core/impl/DatabaseCon.cpp index 3a4489b2f94..89c4ee1f291 100644 --- a/src/ripple/core/impl/DatabaseCon.cpp +++ b/src/ripple/core/impl/DatabaseCon.cpp @@ -21,12 +21,14 @@ #include #include #include +#include +#include #include namespace ripple { DatabaseCon::Setup -setup_DatabaseCon(Config const& c) +setup_DatabaseCon(Config const& c, boost::optional j) { DatabaseCon::Setup setup; @@ -38,9 +40,134 @@ setup_DatabaseCon(Config const& c) Throw("database_path must be set."); } + if (!setup.globalPragma) + { + setup.globalPragma = [&c, &j]() { + auto const& sqlite = c.section("sqlite"); + auto result = std::make_unique>(); + result->reserve(3); + + // defaults + std::string safety_level; + std::string journal_mode = "wal"; + std::string synchronous = "normal"; + std::string temp_store = "file"; + bool showRiskWarning = false; + + if (set(safety_level, "safety_level", sqlite)) + { + if (boost::iequals(safety_level, "low")) + { + // low safety defaults + journal_mode = "memory"; + synchronous = "off"; + temp_store = "memory"; + showRiskWarning = true; + } + else if (!boost::iequals(safety_level, "high")) + { + Throw( + "Invalid safety_level value: " + safety_level); + } + } + + { + // #journal_mode Valid values : delete, truncate, persist, + // memory, wal, off + if (set(journal_mode, "journal_mode", sqlite) && + !safety_level.empty()) + { + Throw( + "Configuration file may not define both " + "\"safety_level\" and \"journal_mode\""); + } + bool higherRisk = boost::iequals(journal_mode, "memory") || + boost::iequals(journal_mode, "off"); + showRiskWarning = showRiskWarning || higherRisk; + if (higherRisk || boost::iequals(journal_mode, "delete") || + boost::iequals(journal_mode, "truncate") || + boost::iequals(journal_mode, "persist") || + boost::iequals(journal_mode, "wal")) + { + result->emplace_back(boost::str( + 
boost::format(CommonDBPragmaJournal) % journal_mode)); + } + else + { + Throw( + "Invalid journal_mode value: " + journal_mode); + } + } + + { + //#synchronous Valid values : off, normal, full, extra + if (set(synchronous, "synchronous", sqlite) && + !safety_level.empty()) + { + Throw( + "Configuration file may not define both " + "\"safety_level\" and \"synchronous\""); + } + bool higherRisk = boost::iequals(synchronous, "off"); + showRiskWarning = showRiskWarning || higherRisk; + if (higherRisk || boost::iequals(synchronous, "normal") || + boost::iequals(synchronous, "full") || + boost::iequals(synchronous, "extra")) + { + result->emplace_back(boost::str( + boost::format(CommonDBPragmaSync) % synchronous)); + } + else + { + Throw( + "Invalid synchronous value: " + synchronous); + } + } + + { + // #temp_store Valid values : default, file, memory + if (set(temp_store, "temp_store", sqlite) && + !safety_level.empty()) + { + Throw( + "Configuration file may not define both " + "\"safety_level\" and \"temp_store\""); + } + bool higherRisk = boost::iequals(temp_store, "memory"); + showRiskWarning = showRiskWarning || higherRisk; + if (higherRisk || boost::iequals(temp_store, "default") || + boost::iequals(temp_store, "file")) + { + result->emplace_back(boost::str( + boost::format(CommonDBPragmaTemp) % temp_store)); + } + else + { + Throw( + "Invalid temp_store value: " + temp_store); + } + } + + if (showRiskWarning && j && c.LEDGER_HISTORY > SQLITE_TUNING_CUTOFF) + { + JLOG(j->warn()) + << "reducing the data integrity guarantees from the " + "default [sqlite] behavior is not recommended for " + "nodes storing large amounts of history, because of the " + "difficulty inherent in rebuilding corrupted data."; + } + assert(result->size() == 3); + return result; + }(); + } + setup.useGlobalPragma = true; + return setup; } +std::unique_ptr const> + DatabaseCon::Setup::globalPragma; + void DatabaseCon::setupCheckpointing(JobQueue* q, Logs& l) { diff --git 
a/src/ripple/net/impl/DatabaseBody.ipp b/src/ripple/net/impl/DatabaseBody.ipp index d6bae7b47f7..5a1bd7e6185 100644 --- a/src/ripple/net/impl/DatabaseBody.ipp +++ b/src/ripple/net/impl/DatabaseBody.ipp @@ -50,7 +50,9 @@ DatabaseBody::value_type::open( auto setup = setup_DatabaseCon(config); setup.dataDir = path.parent_path(); + setup.useGlobalPragma = false; + // Downloader ignores the "CommonPragma" conn_ = std::make_unique( setup, "Download", DownloaderDBPragma, DatabaseBodyDBInit); diff --git a/src/ripple/net/impl/RPCCall.cpp b/src/ripple/net/impl/RPCCall.cpp index a565d4bc321..3b956f8cba3 100644 --- a/src/ripple/net/impl/RPCCall.cpp +++ b/src/ripple/net/impl/RPCCall.cpp @@ -314,7 +314,10 @@ class RPCParser if (uLedgerMax != -1 && uLedgerMax < uLedgerMin) { - return rpcError(rpcLGR_IDXS_INVALID); + // The command line always follows ApiMaximumSupportedVersion + if (RPC::ApiMaximumSupportedVersion == 1) + return rpcError(rpcLGR_IDXS_INVALID); + return rpcError(rpcNOT_SYNCED); } jvRequest[jss::ledger_index_min] = jvParams[1u].asInt(); @@ -384,7 +387,10 @@ class RPCParser if (uLedgerMax != -1 && uLedgerMax < uLedgerMin) { - return rpcError(rpcLGR_IDXS_INVALID); + // The command line always follows ApiMaximumSupportedVersion + if (RPC::ApiMaximumSupportedVersion == 1) + return rpcError(rpcLGR_IDXS_INVALID); + return rpcError(rpcNOT_SYNCED); } jvRequest[jss::ledger_index_min] = jvParams[1u].asInt(); diff --git a/src/ripple/nodestore/Backend.h b/src/ripple/nodestore/Backend.h index d3abc33142f..4c3ae2cdbd2 100644 --- a/src/ripple/nodestore/Backend.h +++ b/src/ripple/nodestore/Backend.h @@ -58,6 +58,25 @@ class Backend virtual void open(bool createIfMissing = true) = 0; + /** Open the backend. + @param createIfMissing Create the database files if necessary. + @param appType Deterministic appType used to create a backend. + @param uid Deterministic uid used to create a backend. + @param salt Deterministic salt used to create a backend. 
+ This allows the caller to catch exceptions. + */ + virtual void + open( + bool createIfMissing, + boost::optional appType, + boost::optional uid, + boost::optional salt) + { + Throw(std::string( + "Deterministic appType/uid/salt not supported by backend " + + getName())); + } + /** Close the backend. This allows the caller to catch exceptions. */ diff --git a/src/ripple/nodestore/DeterministicShard.md b/src/ripple/nodestore/DeterministicShard.md new file mode 100644 index 00000000000..10fd6465327 --- /dev/null +++ b/src/ripple/nodestore/DeterministicShard.md @@ -0,0 +1,103 @@ +# Deterministic Database Shards + +This doc describes the standard way to assemble the database shard. A shard assembled using this approach becomes deterministic i.e. if two independent sides assemble the shard consists of the same ledgers, accounts and transactions, then they will obtain the same shard files `nudb.dat` and `nudb.key`. The approach deals with the `NuDB` database format only, refer to `https://github.com/vinniefalco/NuDB`. + + +## Headers + +Due to NuDB database definition, the following headers are used for database files: + +nudb.key: +``` +char[8] Type The characters "nudb.key" +uint16 Version Holds the version number +uint64 UID Unique ID generated on creation +uint64 Appnum Application defined constant +uint16 KeySize Key size in bytes +uint64 Salt A random seed +uint64 Pepper The salt hashed +uint16 BlockSize Size of a file block in bytes +uint16 LoadFactor Target fraction in 65536ths +uint8[56] Reserved Zeroes +uint8[] Reserved Zero-pad to block size +``` + +nudb.dat: +``` +char[8] Type The characters "nudb.dat" +uint16 Version Holds the version number +uint64 UID Unique ID generated on creation +uint64 Appnum Application defined constant +uint16 KeySize Key size in bytes +uint8[64] (reserved) Zeroes +``` +there all fields are saved using network byte order (most significant byte first). 
+ +To make the shard deterministic the following parameters are used as values of header field both for `nudb.key` and `nudb.dat` files. +``` +Version 2 +UID digest(0) +Appnum digest(2) | 0x5348524400000000 /* 'SHRD' */ +KeySize 32 +Salt digest(1) +Pepper XXH64(Salt) +BlockSize 0x1000 (4096 bytes) +LoadFactor 0.5 (numeric 0x8000) +``` +Note: XXH64() is well-known hash algorithm. + +The `digest(i)` mentioned above defined as the follows: + +First, RIPEMD160 hash `H` calculated of the following structure +``` +uint256 lastHash Hash of last ledger in shard +uint32 index Index of the shard +uint32 firstSeq Sequence number of first ledger in the shard +uint32 lastSeq Sequence number of last ledger in the shard +uint32 version Version of shard, 2 at the present +``` +there all 32-bit integers are hashed in network byte order. + +Then, `digest(i)` is defined as the following portion of the above hash `H`: +``` +digest(0) = H[0] << 56 | H[2] << 48 | ... | H[14] << 0, +digest(1) = H[1] << 56 | H[3] << 48 | ... | H[15] << 0, +digest(2) = H[19] << 24 | H[18] << 16 | ... | H[16] << 0, +``` +where `H[i]` denotes `i`-th byte of hash `H`. + + +## Contents + +After deterministic shard is created using the above mentioned headers, it filled with objects. First, all objects of the shard are collected and sorted in according to their hashes. Here the objects are: ledgers, SHAmap tree nodes including accounts and transactions, and final key object with hash 0. Objects are sorted by increasing of their hashes, precisely, by increasing of hex representations of hashes in lexicographic order. 
+ +For example, the following is an example of sorted hashes in their hex representation: +``` +0000000000000000000000000000000000000000000000000000000000000000 +154F29A919B30F50443A241C466691B046677C923EE7905AB97A4DBE8A5C2423 +2231553FC01D37A66C61BBEEACBB8C460994493E5659D118E19A8DDBB1444273 +272DCBFD8E4D5D786CF11A5444B30FB35435933B5DE6C660AA46E68CF0F5C447 +3C062FD9F0BCDCA31ACEBCD8E530D0BDAD1F1D1257B89C435616506A3EE6CB9E +58A0E5AE427CDDC1C7C06448E8C3E4BF718DE036D827881624B20465C3E1334F +... +``` + +Finally, objects added to the shard one by one in the sorted order from low to high hashes. + + +## Tests + +To perform test to deterministic shards implementation one can enter the following command: +``` +rippled --unittest ripple.NodeStore.DatabaseShard +``` + +The following is the right output of deterministic shards test: +``` +ripple.NodeStore.DatabaseShard DatabaseShard deterministic_shard with backend nudb +Iteration 0: RIPEMD160[nudb.key] = 4CFA8985836B549EC99D2E9705707F488DC91E4E +Iteration 0: RIPEMD160[nudb.dat] = 8CC61F503C36339803F8C2FC652C1102DDB889F1 +Iteration 1: RIPEMD160[nudb.key] = 4CFA8985836B549EC99D2E9705707F488DC91E4E +Iteration 1: RIPEMD160[nudb.dat] = 8CC61F503C36339803F8C2FC652C1102DDB889F1 +``` + diff --git a/src/ripple/nodestore/backend/NuDBFactory.cpp b/src/ripple/nodestore/backend/NuDBFactory.cpp index 8147f218cc0..04dfa208551 100644 --- a/src/ripple/nodestore/backend/NuDBFactory.cpp +++ b/src/ripple/nodestore/backend/NuDBFactory.cpp @@ -38,7 +38,10 @@ namespace NodeStore { class NuDBBackend : public Backend { public: - static constexpr std::size_t currentType = 1; + static constexpr std::uint64_t currentType = 1; + static constexpr std::uint64_t deterministicType = 0x5348524400000000ull; + /* "SHRD" in ASCII */ + static constexpr std::uint64_t deterministicMask = 0xFFFFFFFF00000000ull; beast::Journal const j_; size_t const keyBytes_; @@ -93,7 +96,11 @@ class NuDBBackend : public Backend } void - open(bool createIfMissing) override + open( + 
bool createIfMissing, + boost::optional appType, + boost::optional uid, + boost::optional salt) override { using namespace boost::filesystem; if (db_.is_open()) @@ -114,8 +121,9 @@ class NuDBBackend : public Backend dp, kp, lp, - currentType, - nudb::make_salt(), + appType.value_or(currentType), + uid.value_or(nudb::make_uid()), + salt.value_or(nudb::make_salt()), keyBytes_, nudb::block_size(kp), 0.50, @@ -128,10 +136,27 @@ class NuDBBackend : public Backend db_.open(dp, kp, lp, ec); if (ec) Throw(ec); - if (db_.appnum() != currentType) + + /** Old value currentType is accepted for appnum in traditional + * databases, new value is used for deterministic shard databases. + * New 64-bit value is constructed from fixed and random parts. + * Fixed part is bounded by bitmask deterministicMask, + * and the value of fixed part is deterministicType. + * Random part depends on the contents of the shard and may be any. + * The contents of appnum field should match either old or new rule. + */ + if (db_.appnum() != appType.value_or(currentType) && + (appType || + (db_.appnum() & deterministicMask) != deterministicType)) Throw("nodestore: unknown appnum"); } + void + open(bool createIfMissing) override + { + open(createIfMissing, boost::none, boost::none, boost::none); + } + void close() override { diff --git a/src/ripple/nodestore/impl/DatabaseShardImp.cpp b/src/ripple/nodestore/impl/DatabaseShardImp.cpp index d9a771d66d0..7b6478c53d1 100644 --- a/src/ripple/nodestore/impl/DatabaseShardImp.cpp +++ b/src/ripple/nodestore/impl/DatabaseShardImp.cpp @@ -355,8 +355,8 @@ DatabaseShardImp::importShard( return false; } - auto expectedHash = - app_.getLedgerMaster().walkHashBySeq(lastLedgerSeq(shardIndex)); + auto expectedHash = app_.getLedgerMaster().walkHashBySeq( + lastLedgerSeq(shardIndex), InboundLedger::Reason::GENERIC); if (!expectedHash) { @@ -475,7 +475,7 @@ DatabaseShardImp::fetchLedger(uint256 const& hash, std::uint32_t seq) auto ledger{std::make_shared( 
deserializePrefixedHeader(makeSlice(nObj->getData())), app_.config(), - *app_.shardFamily())}; + *app_.getShardFamily())}; if (ledger->info().seq != seq) { @@ -600,7 +600,7 @@ DatabaseShardImp::validate() shard->finalize(true, boost::none); } - app_.shardFamily()->reset(); + app_.getShardFamily()->reset(); } void @@ -742,7 +742,6 @@ DatabaseShardImp::import(Database& source) } // Create the new shard - app_.shardFamily()->reset(); auto shard{std::make_unique(app_, *this, shardIndex, j_)}; if (!shard->open(scheduler_, *ctx_)) continue; diff --git a/src/ripple/nodestore/impl/DeterministicShard.cpp b/src/ripple/nodestore/impl/DeterministicShard.cpp new file mode 100644 index 00000000000..151752cfcac --- /dev/null +++ b/src/ripple/nodestore/impl/DeterministicShard.cpp @@ -0,0 +1,206 @@ +//------------------------------------------------------------------------------ +/* + This file is part of rippled: https://github.com/ripple/rippled + Copyright (c) 2020 Ripple Labs Inc. + + Permission to use, copy, modify, and/or distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+*/ +//============================================================================== + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace ripple { +namespace NodeStore { + +DeterministicShard::DeterministicShard( + Application& app, + DatabaseShard const& db, + std::uint32_t index, + uint256 const& lastHash, + beast::Journal j) + : inited_(false) + , nodeset_([](nodeptr l, nodeptr r) { return l->getHash() < r->getHash(); }) + , app_(app) + , db_(db) + , index_(index) + , hash_(hash(lastHash)) + , tempdir_(db.getRootDir() / (std::to_string(index_) + ".tmp")) + , finaldir_(db.getRootDir() / std::to_string(index_)) + , ctx_(std::make_unique()) + , j_(j) +{ +} + +DeterministicShard::~DeterministicShard() +{ + close(true); +} + +uint160 +DeterministicShard::hash(uint256 const& lastHash) const +{ + using beast::hash_append; + ripemd160_hasher h; + + hash_append(h, lastHash); + hash_append(h, index_); + hash_append(h, db_.firstLedgerSeq(index_)); + hash_append(h, db_.lastLedgerSeq(index_)); + hash_append(h, Shard::version); + + auto const result = static_cast(h); + return uint160::fromVoid(result.data()); +} + +std::uint64_t +DeterministicShard::digest(int n) const +{ + auto const data = hash_.data(); + + if (n == 2) + { // Extract 32 bits: + return (static_cast(data[19]) << 24) + + (static_cast(data[18]) << 16) + + (static_cast(data[17]) << 8) + + (static_cast(data[16])); + } + + std::uint64_t ret = 0; + + if (n == 0 || n == 1) + { // Extract 64 bits + for (int i = n; i < 16; i += 2) + ret = (ret << 8) + data[i]; + } + + return ret; +} + +bool +DeterministicShard::init() +{ + if (index_ < db_.earliestShardIndex()) + { + JLOG(j_.error()) << "shard " << index_ << " is illegal"; + return false; + } + + Config const& config{app_.config()}; + + Section section{config.section(ConfigSection::shardDatabase())}; + std::string const type{get(section, "type", "nudb")}; + + if (type != "nudb") + { + 
JLOG(j_.error()) << "shard " << index_ << " backend type " << type + << " not supported"; + return false; + } + + auto factory{Manager::instance().find(type)}; + if (!factory) + { + JLOG(j_.error()) << "shard " << index_ + << " failed to create factory for backend type " + << type; + return false; + } + + ctx_->start(); + + section.set("path", tempdir_.string()); + backend_ = factory->createInstance( + NodeObject::keyBytes, section, scheduler_, *ctx_, j_); + + if (!backend_) + { + JLOG(j_.error()) << "shard " << index_ + << " failed to create backend type " << type; + return false; + } + + // Open or create the NuDB key/value store + bool preexist = exists(tempdir_); + if (preexist) + { + remove_all(tempdir_); + preexist = false; + } + + backend_->open( + !preexist, + digest(2) | 0x5348524400000000ll, /* appType */ + digest(0), /* uid */ + digest(1) /* salt */ + ); + + inited_ = true; + + return true; +} + +void +DeterministicShard::close(bool cancel) +{ + if (!inited_) + return; + + backend_->close(); + if (cancel) + { + remove_all(tempdir_); + } + else + { + flush(); + remove_all(finaldir_); + rename(tempdir_, finaldir_); + } + inited_ = false; +} + +void +DeterministicShard::store(nodeptr nObj) +{ + if (!inited_) + return; + + nodeset_.insert(nObj); +} + +void +DeterministicShard::flush() +{ + if (!inited_) + return; + + for (auto nObj : nodeset_) + { + backend_->store(nObj); + } + + nodeset_.clear(); +} + +} // namespace NodeStore +} // namespace ripple diff --git a/src/ripple/nodestore/impl/DeterministicShard.h b/src/ripple/nodestore/impl/DeterministicShard.h new file mode 100644 index 00000000000..91bdf1e867a --- /dev/null +++ b/src/ripple/nodestore/impl/DeterministicShard.h @@ -0,0 +1,144 @@ +//------------------------------------------------------------------------------ +/* + This file is part of rippled: https://github.com/ripple/rippled + Copyright (c) 2020 Ripple Labs Inc. 
+ + Permission to use, copy, modify, and/or distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. +*/ +//============================================================================== + +#ifndef RIPPLE_NODESTORE_DETERMINISTICSHARD_H_INCLUDED +#define RIPPLE_NODESTORE_DETERMINISTICSHARD_H_INCLUDED + +#include +#include +#include +#include + +namespace ripple { +namespace NodeStore { + +/** DeterministicShard class. + * + * 1. The init() method creates temporary folder tempdir_, + * and the deterministic shard is initialized in that folder. + * 2. The store() method adds object to memory pool. + * 3. The flush() method stores all objects from memory pool to the shard + * located in tempdir_ in sorted order. + * 4. The close(true) method finalizes the shard and moves it from tempdir_ + * temporary folder to filandir_ permanent folder, + * deleting old (non-deterministic) shard located in finaldir_. 
+ */ +class DeterministicShard +{ +public: + using nodeptr = std::shared_ptr; + + DeterministicShard(DeterministicShard const&) = delete; + DeterministicShard& + operator=(DeterministicShard const&) = delete; + + /** Creates the object for shard database + * + * @param app Application object + * @param db Shard Database which deterministic shard belongs to + * @param index Index of the shard + * @param lastHash Hash of last ledger in the shard + * @param j Journal to logging + */ + DeterministicShard( + Application& app, + DatabaseShard const& db, + std::uint32_t index, + uint256 const& lastHash, + beast::Journal j); + + ~DeterministicShard(); + + /** Initializes the deterministic shard. + * + * @return true is success, false if errored + */ + bool + init(); + + /** Finalizes and closes the shard. + * + * @param cancel True if reject the shard and delete all files, + * false if finalize the shard and store them + */ + void + close(bool cancel = false); + + /** Store the object into memory pool + * + * @param nobj Object to store. 
+ */ + void + store(nodeptr nobj); + + /** Flush all objects from memory pool to shard + */ + void + flush(); + +private: + // Compute hash of shard parameters: lastHash, firstSeq, lastSeq, index + uint160 + hash(const uint256& lastHash) const; + + // Get n-th 64-bit portion of shard parameters' hash + std::uint64_t + digest(int n) const; + + // True if the database is initialized + bool inited_; + + // Sorted set of stored and not flushed objects + std::set> nodeset_; + + // Application reference + Application& app_; + + // Shard database + DatabaseShard const& db_; + + // Shard Index + std::uint32_t const index_; + + // Hash used for digests + uint160 const hash_; + + // Path to temporary database files + boost::filesystem::path const tempdir_; + + // Path to final database files + boost::filesystem::path const finaldir_; + + // Dummy scheduler for deterministic write + DummyScheduler scheduler_; + + // NuDB context + std::unique_ptr ctx_; + + // NuDB key/value store for node objects + std::shared_ptr backend_; + + // Journal + beast::Journal const j_; +}; + +} // namespace NodeStore +} // namespace ripple + +#endif diff --git a/src/ripple/nodestore/impl/Shard.cpp b/src/ripple/nodestore/impl/Shard.cpp index 1701206fe4d..1bf7877c752 100644 --- a/src/ripple/nodestore/impl/Shard.cpp +++ b/src/ripple/nodestore/impl/Shard.cpp @@ -40,6 +40,7 @@ Shard::Shard( std::uint32_t index, beast::Journal j) : app_(app) + , db_(db) , index_(index) , firstSeq_(db.firstLedgerSeq(index)) , lastSeq_(std::max(firstSeq_, db.lastLedgerSeq(index))) @@ -124,6 +125,7 @@ Shard::open(Scheduler& scheduler, nudb::context& ctx) setup.startUp = config.START_UP; setup.standAlone = config.standalone(); setup.dataDir = dir_; + setup.useGlobalPragma = true; acquireInfo_->SQLiteDB = std::make_unique( setup, @@ -182,11 +184,8 @@ Shard::open(Scheduler& scheduler, nudb::context& ctx) } if (boost::icl::length(storedSeqs) == maxLedgers_) - { - // All ledgers have been acquired, shard is complete - acquireInfo_.reset(); + // 
All ledgers have been acquired, shard backend is complete backendComplete_ = true; - } } } else @@ -239,7 +238,7 @@ Shard::prepare() if (backendComplete_) { JLOG(j_.warn()) << "shard " << index_ - << " prepare called when shard is complete"; + << " prepare called when shard backend is complete"; return {}; } @@ -290,7 +289,6 @@ Shard::store(std::shared_ptr const& ledger) if (!initSQLite(lock)) return false; - acquireInfo_.reset(); backendComplete_ = true; setBackendCache(lock); } @@ -396,7 +394,8 @@ Shard::isLegacy() const bool Shard::finalize( bool const writeSQLite, - boost::optional const& expectedHash) + boost::optional const& expectedHash, + const bool writeDeterministicShard) { assert(backend_); @@ -418,7 +417,7 @@ Shard::finalize( { std::unique_lock lock(mutex_); if (!backendComplete_) - return fail("incomplete"); + return fail("backend incomplete"); /* TODO MP @@ -510,6 +509,17 @@ Shard::finalize( std::shared_ptr next; auto const lastLedgerHash{hash}; + std::shared_ptr dsh; + if (writeDeterministicShard) + { + dsh = std::make_shared( + app_, db_, index_, lastLedgerHash, j_); + if (!dsh->init()) + { + return fail("can't create deterministic shard"); + } + } + // Start with the last ledger in the shard and walk backwards from // child to parent until we reach the first ledger seq = lastSeq_; @@ -525,7 +535,7 @@ Shard::finalize( ledger = std::make_shared( deserializePrefixedHeader(makeSlice(nObj->getData())), app_.config(), - *app_.shardFamily()); + *app_.getShardFamily()); if (ledger->info().seq != seq) return fail("invalid ledger sequence"); if (ledger->info().hash != hash) @@ -546,8 +556,11 @@ Shard::finalize( return fail("missing root TXN node"); } - if (!valLedger(ledger, next)) - return fail("failed to validate ledger"); + if (dsh) + dsh->store(nObj); + + if (!verifyLedger(ledger, next, dsh)) + return fail("verification check failed"); if (writeSQLite) { @@ -557,8 +570,8 @@ Shard::finalize( } hash = ledger->info().parentHash; + next = std::move(ledger); 
--seq; - next = ledger; } JLOG(j_.debug()) << "shard " << index_ << " is valid"; @@ -608,6 +621,12 @@ Shard::finalize( { backend_->store(nObj); + if (dsh) + { + dsh->store(nObj); + dsh->flush(); + } + std::lock_guard lock(mutex_); final_ = true; @@ -627,6 +646,23 @@ Shard::finalize( std::string("exception ") + e.what() + " in function " + __func__); } + if (dsh) + { + /* Close non-deterministic shard database. */ + backend_->close(); + /* Replace non-deterministic shard by deterministic one. */ + dsh->close(); + /* Re-open deterministic shard database. */ + backend_->open(false); + /** The finalize() function verifies the shard and, if third parameter + * is true, then replaces the shard by deterministic copy of the shard. + * After deterministic shard is created it verifies again, + * the finalize() function called here to verify deterministic shard, + * third parameter is false. + */ + return finalize(false, expectedHash, false); + } + return true; } @@ -668,10 +704,14 @@ bool Shard::initSQLite(std::lock_guard const&) { Config const& config{app_.config()}; - DatabaseCon::Setup setup; - setup.startUp = config.START_UP; - setup.standAlone = config.standalone(); - setup.dataDir = dir_; + DatabaseCon::Setup const setup = [&]() { + DatabaseCon::Setup result; + result.startUp = config.START_UP; + result.standAlone = config.standalone(); + result.dataDir = dir_; + result.useGlobalPragma = !backendComplete_; + return result; + }(); try { @@ -917,9 +957,10 @@ Shard::setFileStats(std::lock_guard const&) } bool -Shard::valLedger( +Shard::verifyLedger( std::shared_ptr const& ledger, - std::shared_ptr const& next) const + std::shared_ptr const& next, + std::shared_ptr dsh) const { auto fail = [j = j_, index = index_, &ledger](std::string const& msg) { JLOG(j.fatal()) << "shard " << index << ". 
" << msg @@ -938,11 +979,14 @@ Shard::valLedger( return fail("Invalid ledger account hash"); bool error{false}; - auto visit = [this, &error](SHAMapAbstractNode& node) { + auto visit = [this, &error, dsh](SHAMapAbstractNode& node) { if (stop_) return false; - if (!valFetch(node.getNodeHash().as_uint256())) + auto nObj = valFetch(node.getNodeHash().as_uint256()); + if (!nObj) error = true; + else if (dsh) + dsh->store(nObj); return !error; }; diff --git a/src/ripple/nodestore/impl/Shard.h b/src/ripple/nodestore/impl/Shard.h index d43fe22a892..0685942636d 100644 --- a/src/ripple/nodestore/impl/Shard.h +++ b/src/ripple/nodestore/impl/Shard.h @@ -26,6 +26,7 @@ #include #include #include +#include #include #include @@ -128,11 +129,14 @@ class Shard final verified backend data. @param referenceHash If present, this hash must match the hash of the last ledger in the shard. + @param writeDeterministicShard If true, shard will be rewritten + deterministically. */ bool finalize( bool const writeSQLite, - boost::optional const& referenceHash); + boost::optional const& referenceHash, + const bool writeDeterministicShard = true); void stop() @@ -170,6 +174,8 @@ class Shard final Application& app_; mutable std::recursive_mutex mutex_; + DatabaseShard const& db_; + // Shard Index std::uint32_t const index_; @@ -208,7 +214,7 @@ class Shard final std::unique_ptr txSQLiteDB_; // Tracking information used only when acquiring a shard from the network. - // If the shard is complete, this member will be null. + // If the shard is final, this member will be null. 
std::unique_ptr acquireInfo_; beast::Journal const j_; @@ -252,10 +258,12 @@ class Shard final setFileStats(std::lock_guard const& lock); // Validate this ledger by walking its SHAMaps and verifying Merkle trees + // If dsh != NULL then save all walking SHAMaps to deterministic shard dsh bool - valLedger( + verifyLedger( std::shared_ptr const& ledger, - std::shared_ptr const& next) const; + std::shared_ptr const& next, + std::shared_ptr dsh = {}) const; // Fetches from backend and log errors based on status codes std::shared_ptr diff --git a/src/ripple/nodestore/impl/varint.h b/src/ripple/nodestore/impl/varint.h index e74f53afd5c..75d8e58a751 100644 --- a/src/ripple/nodestore/impl/varint.h +++ b/src/ripple/nodestore/impl/varint.h @@ -55,6 +55,8 @@ template std::size_t read_varint(void const* buf, std::size_t buflen, std::size_t& t) { + if (buflen == 0) + return 0; t = 0; std::uint8_t const* p = reinterpret_cast(buf); std::size_t n = 0; diff --git a/src/ripple/overlay/Compression.h b/src/ripple/overlay/Compression.h index 5d45dbda888..6bb94792b43 100644 --- a/src/ripple/overlay/Compression.h +++ b/src/ripple/overlay/Compression.h @@ -31,7 +31,9 @@ namespace compression { std::size_t constexpr headerBytes = 6; std::size_t constexpr headerBytesCompressed = 10; -enum class Algorithm : std::uint8_t { None = 0x00, LZ4 = 0x01 }; +// All values other than 'none' must have the high bit. The low order four bits +// must be 0. 
+enum class Algorithm : std::uint8_t { None = 0x00, LZ4 = 0x90 }; enum class Compressed : std::uint8_t { On, Off }; diff --git a/src/ripple/overlay/Message.h b/src/ripple/overlay/Message.h index af7a168ad0b..e2c081d123f 100644 --- a/src/ripple/overlay/Message.h +++ b/src/ripple/overlay/Message.h @@ -84,7 +84,7 @@ class Message : public std::enable_shared_from_this * @param in Pointer to the payload * @param payloadBytes Size of the payload excluding the header size * @param type Protocol message type - * @param comprAlgorithm Compression algorithm used in compression, + * @param compression Compression algorithm used in compression, * currently LZ4 only. If None then the message is uncompressed. * @param uncompressedBytes Size of the uncompressed message */ @@ -93,7 +93,7 @@ class Message : public std::enable_shared_from_this std::uint8_t* in, std::uint32_t payloadBytes, int type, - Algorithm comprAlgorithm, + Algorithm compression, std::uint32_t uncompressedBytes); /** Try to compress the payload. diff --git a/src/ripple/overlay/Peer.h b/src/ripple/overlay/Peer.h index d16433c1d0f..1f5aad376d2 100644 --- a/src/ripple/overlay/Peer.h +++ b/src/ripple/overlay/Peer.h @@ -118,6 +118,9 @@ class Peer cycleStatus() = 0; virtual bool hasRange(std::uint32_t uMin, std::uint32_t uMax) = 0; + + virtual bool + compressionEnabled() const = 0; }; } // namespace ripple diff --git a/src/ripple/overlay/impl/Message.cpp b/src/ripple/overlay/impl/Message.cpp index e2c1f2fb548..29440c44e48 100644 --- a/src/ripple/overlay/impl/Message.cpp +++ b/src/ripple/overlay/impl/Message.cpp @@ -109,22 +109,46 @@ Message::compress() } /** Set payload header - * Uncompressed message header - * 47-42 Set to 0 - * 41-16 Payload size - * 15-0 Message Type - * Compressed message header - * 79 Set to 0, indicates the message is compressed - * 78-76 Compression algorithm, value 1-7. 
Set to 1 to indicate LZ4 - * compression 75-74 Set to 0 73-48 Payload size 47-32 Message Type - * 31-0 Uncompressed message size - */ + + The header is a variable-sized structure that contains information about + the type of the message and the length and encoding of the payload. + + The first bit determines whether a message is compressed or uncompressed; + for compressed messages, the next three bits identify the compression + algorithm. + + All multi-byte values are represented in big endian. + + For uncompressed messages (6 bytes), numbering bits from left to right: + + - The first 6 bits are set to 0. + - The next 26 bits represent the payload size. + - The remaining 16 bits represent the message type. + + For compressed messages (10 bytes), numbering bits from left to right: + + - The first 32 bits, together, represent the compression algorithm + and payload size: + - The first bit is set to 1 to indicate the message is compressed. + - The next 3 bits indicate the compression algorithm. + - The next 2 bits are reserved at this time and set to 0. + - The remaining 26 bits represent the payload size. + - The next 16 bits represent the message type. + - The remaining 32 bits are the uncompressed message size. + + The maximum size of a message at this time is 64 MB. Messages larger than + this will be dropped and the recipient may, at its option, sever the link. + + @note While nominally a part of the wire protocol, the framing is subject + to change; future versions of the code may negotiate the use of + substantially different framing. 
+*/ void Message::setHeader( std::uint8_t* in, std::uint32_t payloadBytes, int type, - Algorithm comprAlgorithm, + Algorithm compression, std::uint32_t uncompressedBytes) { auto h = in; @@ -142,10 +166,10 @@ Message::setHeader( *in++ = static_cast((type >> 8) & 0xFF); *in++ = static_cast(type & 0xFF); - if (comprAlgorithm != Algorithm::None) + if (compression != Algorithm::None) { pack(in, uncompressedBytes); - *h |= 0x80 | (static_cast(comprAlgorithm) << 4); + *h |= static_cast(compression); } } diff --git a/src/ripple/overlay/impl/PeerImp.cpp b/src/ripple/overlay/impl/PeerImp.cpp index cfcc8ef0c5e..b3c00894d8f 100644 --- a/src/ripple/overlay/impl/PeerImp.cpp +++ b/src/ripple/overlay/impl/PeerImp.cpp @@ -1184,7 +1184,15 @@ PeerImp::onMessage(std::shared_ptr const& m) reply.set_lastlink(true); if (m->peerchain_size() > 0) + { + for (int i = 0; i < m->peerchain_size(); ++i) + { + if (!publicKeyType(makeSlice(m->peerchain(i).nodepubkey()))) + return badData("Invalid peer chain public key"); + } + *reply.mutable_peerchain() = m->peerchain(); + } send(std::make_shared(reply, protocol::mtPEER_SHARD_INFO)); @@ -2209,7 +2217,15 @@ PeerImp::onMessage(std::shared_ptr const& m) reply.set_type(packet.type()); if (packet.has_ledgerhash()) + { + if (!stringIsUint256Sized(packet.ledgerhash())) + { + fee_ = Resource::feeInvalidRequest; + return; + } + reply.set_ledgerhash(packet.ledgerhash()); + } // This is a very minimal implementation for (int i = 0; i < packet.objects_size(); ++i) @@ -2290,10 +2306,10 @@ PeerImp::onMessage(std::shared_ptr const& m) { uint256 const hash{obj.hash()}; - std::shared_ptr data(std::make_shared( - obj.data().begin(), obj.data().end())); - - app_.getLedgerMaster().addFetchPack(hash, data); + app_.getLedgerMaster().addFetchPack( + hash, + std::make_shared( + obj.data().begin(), obj.data().end())); } } } @@ -2587,16 +2603,15 @@ PeerImp::getLedger(std::shared_ptr const& m) { JLOG(p_journal_.debug()) << "GetLedger: Routing Tx set request"; - auto const v 
= getPeerWithTree(overlay_, txHash, this); - if (!v) + if (auto const v = getPeerWithTree(overlay_, txHash, this)) { - JLOG(p_journal_.info()) << "GetLedger: Route TX set failed"; + packet.set_requestcookie(id()); + v->send(std::make_shared( + packet, protocol::mtGET_LEDGER)); return; } - packet.set_requestcookie(id()); - v->send( - std::make_shared(packet, protocol::mtGET_LEDGER)); + JLOG(p_journal_.info()) << "GetLedger: Route TX set failed"; return; } diff --git a/src/ripple/overlay/impl/PeerImp.h b/src/ripple/overlay/impl/PeerImp.h index e005fcf846b..4b279dea658 100644 --- a/src/ripple/overlay/impl/PeerImp.h +++ b/src/ripple/overlay/impl/PeerImp.h @@ -428,6 +428,12 @@ class PeerImp : public Peer, boost::optional> getPeerShardInfo() const; + bool + compressionEnabled() const override + { + return compressionEnabled_ == Compressed::On; + } + private: void close(); diff --git a/src/ripple/overlay/impl/ProtocolMessage.h b/src/ripple/overlay/impl/ProtocolMessage.h index 0f97cdd11b9..b929f91ba40 100644 --- a/src/ripple/overlay/impl/ProtocolMessage.h +++ b/src/ripple/overlay/impl/ProtocolMessage.h @@ -120,51 +120,94 @@ buffersBegin(BufferSequence const& bufs) bufs); } +/** Parse a message header + * @return a seated optional if the message header was successfully + * parsed. 
An unseated optional otherwise, in which case + * @param ec contains more information: + * - set to `errc::success` if not enough bytes were present + * - set to `errc::no_message` if a valid header was not present + */ template boost::optional -parseMessageHeader(BufferSequence const& bufs, std::size_t size) +parseMessageHeader( + boost::system::error_code& ec, + BufferSequence const& bufs, + std::size_t size) { using namespace ripple::compression; - auto iter = buffersBegin(bufs); MessageHeader hdr; - auto const compressed = (*iter & 0x80) == 0x80; + auto iter = buffersBegin(bufs); // Check valid header - if ((*iter & 0xFC) == 0 || compressed) + if (*iter & 0x80) { - hdr.header_size = compressed ? headerBytesCompressed : headerBytes; + hdr.header_size = headerBytesCompressed; + // not enough bytes to parse the header if (size < hdr.header_size) - return {}; + { + ec = make_error_code(boost::system::errc::success); + return boost::none; + } + + if (*iter & 0x0C) + { + ec = make_error_code(boost::system::errc::protocol_error); + return boost::none; + } + + hdr.algorithm = static_cast(*iter); - if (compressed) + if (hdr.algorithm != compression::Algorithm::LZ4) { - uint8_t algorithm = (*iter & 0x70) >> 4; - if (algorithm != - static_cast(compression::Algorithm::LZ4)) - return {}; - hdr.algorithm = compression::Algorithm::LZ4; + ec = make_error_code(boost::system::errc::protocol_error); + return boost::none; } for (int i = 0; i != 4; ++i) hdr.payload_wire_size = (hdr.payload_wire_size << 8) + *iter++; - // clear the compression bits - hdr.payload_wire_size &= 0x03FFFFFF; + + // clear the top four bits (the compression bits). 
+ hdr.payload_wire_size &= 0x0FFFFFFF; hdr.total_wire_size = hdr.header_size + hdr.payload_wire_size; for (int i = 0; i != 2; ++i) hdr.message_type = (hdr.message_type << 8) + *iter++; - if (compressed) - for (int i = 0; i != 4; ++i) - hdr.uncompressed_size = (hdr.uncompressed_size << 8) + *iter++; + for (int i = 0; i != 4; ++i) + hdr.uncompressed_size = (hdr.uncompressed_size << 8) + *iter++; + + return hdr; + } + + if ((*iter & 0xFC) == 0) + { + hdr.header_size = headerBytes; + + if (size < hdr.header_size) + { + ec = make_error_code(boost::system::errc::success); + return boost::none; + } + + hdr.algorithm = Algorithm::None; + + for (int i = 0; i != 4; ++i) + hdr.payload_wire_size = (hdr.payload_wire_size << 8) + *iter++; + + hdr.uncompressed_size = hdr.payload_wire_size; + hdr.total_wire_size = hdr.header_size + hdr.payload_wire_size; + + for (int i = 0; i != 2; ++i) + hdr.message_type = (hdr.message_type << 8) + *iter++; return hdr; } - return {}; + ec = make_error_code(boost::system::errc::no_message); + return boost::none; } template < @@ -186,7 +229,7 @@ invoke(MessageHeader const& header, Buffers const& buffers, Handler& handler) std::vector payload; payload.resize(header.uncompressed_size); - auto payloadSize = ripple::compression::decompress( + auto const payloadSize = ripple::compression::decompress( stream, header.payload_wire_size, payload.data(), @@ -226,10 +269,13 @@ invokeProtocolMessage(Buffers const& buffers, Handler& handler) if (size == 0) return result; - auto header = detail::parseMessageHeader(buffers, size); + auto header = detail::parseMessageHeader(result.second, buffers, size); // If we can't parse the header then it may be that we don't have enough - // bytes yet, or because the message was cut off. + // bytes yet, or because the message was cut off (if error_code is success). 
+ // Otherwise we failed to match the header's marker (error_code is set to + // no_message) or the compression algorithm is invalid (error_code is + // protocol_error) and signal an error. if (!header) return result; @@ -237,12 +283,21 @@ invokeProtocolMessage(Buffers const& buffers, Handler& handler) // whose size exceeds this may result in the connection being dropped. A // larger message size may be supported in the future or negotiated as // part of a protocol upgrade. - if (header->payload_wire_size > megabytes(64)) + if (header->payload_wire_size > megabytes(64) || + header->uncompressed_size > megabytes(64)) { result.second = make_error_code(boost::system::errc::message_size); return result; } + // We requested uncompressed messages from the peer but received compressed. + if (!handler.compressionEnabled() && + header->algorithm != compression::Algorithm::None) + { + result.second = make_error_code(boost::system::errc::protocol_error); + return result; + } + // We don't have the whole message yet. This isn't an error but we have // nothing to do. if (header->total_wire_size > size) diff --git a/src/ripple/proto/org/xrpl/rpc/v1/common.proto b/src/ripple/proto/org/xrpl/rpc/v1/common.proto index 3cc3c73ae41..cc0a0aa14b5 100644 --- a/src/ripple/proto/org/xrpl/rpc/v1/common.proto +++ b/src/ripple/proto/org/xrpl/rpc/v1/common.proto @@ -7,7 +7,7 @@ option java_multiple_files = true; import "org/xrpl/rpc/v1/amount.proto"; import "org/xrpl/rpc/v1/account.proto"; -// These fields are used in many different messsage types. They can be present +// These fields are used in many different message types. They can be present // in one or more transactions, as well as metadata of one or more transactions. 
// Each is defined as its own message type with a single field "value", to // ensure the field is the correct type everywhere it's used @@ -70,6 +70,11 @@ message HighQualityOut uint32 value = 1; } +message FirstLedgerSequence +{ + uint32 value = 1; +} + message LastLedgerSequence { uint32 value = 1; @@ -351,6 +356,15 @@ message TransactionSignature bytes value = 1; } +message NegativeUnlToDisable +{ + bytes value = 1; +} + +message NegativeUnlToReEnable +{ + bytes value = 1; +} // *** Messages wrapping a Currency value *** @@ -474,3 +488,12 @@ message SignerEntry SignerWeight signer_weight = 2; } + +// Next field: 3 +message NegativeUnlEntry +{ + PublicKey public_key = 1; + + FirstLedgerSequence ledger_sequence = 2; +} + diff --git a/src/ripple/proto/org/xrpl/rpc/v1/get_account_info.proto b/src/ripple/proto/org/xrpl/rpc/v1/get_account_info.proto index b533ebe6a69..9a2a877cd6f 100644 --- a/src/ripple/proto/org/xrpl/rpc/v1/get_account_info.proto +++ b/src/ripple/proto/org/xrpl/rpc/v1/get_account_info.proto @@ -19,6 +19,11 @@ message GetAccountInfoRequest bool strict = 2; + // Which ledger to use to retrieve data. + // If this field is not set, the server will use the open ledger. + // The open ledger includes data that is not validated or final. + // To retrieve the most up to date and validated data, use + // SHORTCUT_VALIDATED LedgerSpecifier ledger = 3; bool queue = 4; diff --git a/src/ripple/proto/org/xrpl/rpc/v1/get_account_transaction_history.proto b/src/ripple/proto/org/xrpl/rpc/v1/get_account_transaction_history.proto index 5b9e677c4a4..c4889a6bdbe 100644 --- a/src/ripple/proto/org/xrpl/rpc/v1/get_account_transaction_history.proto +++ b/src/ripple/proto/org/xrpl/rpc/v1/get_account_transaction_history.proto @@ -16,6 +16,13 @@ message GetAccountTransactionHistoryRequest // What ledger to include results from. Specifying a not yet validated // ledger results in an error. 
Not specifying a ledger uses the entire // range of validated ledgers available to the server. + // Note, this parameter acts as a filter, and can only reduce the number of + // results. Specifying a single ledger will return only transactions from + // that ledger. This includes specifying a ledger with a Shortcut. For + // example, specifying SHORTCUT_VALIDATED will result in only transactions + // that were part of the most recently validated ledger being returned. + // Specifying a range of ledgers results in only transactions that were + // included in a ledger within the specified range being returned. oneof ledger { LedgerSpecifier ledger_specifier = 2; diff --git a/src/ripple/proto/org/xrpl/rpc/v1/get_transaction.proto b/src/ripple/proto/org/xrpl/rpc/v1/get_transaction.proto index f872619ad7d..e0c21c598a2 100644 --- a/src/ripple/proto/org/xrpl/rpc/v1/get_transaction.proto +++ b/src/ripple/proto/org/xrpl/rpc/v1/get_transaction.proto @@ -23,7 +23,11 @@ message GetTransactionRequest { // if true, return data in binary format. defaults to false bool binary = 2; - // search only specified range. optional + // If the transaction was not found, server will report whether the entire + // specified range was searched. The value is contained in the error message. + // The error message is of the form: + // "txn not found. searched_all = [true,false]" + // If the transaction was found, this parameter is ignored. 
LedgerRange ledger_range = 3; } diff --git a/src/ripple/proto/org/xrpl/rpc/v1/ledger_objects.proto b/src/ripple/proto/org/xrpl/rpc/v1/ledger_objects.proto index 9bcaa2672d8..2ad820dd2f9 100644 --- a/src/ripple/proto/org/xrpl/rpc/v1/ledger_objects.proto +++ b/src/ripple/proto/org/xrpl/rpc/v1/ledger_objects.proto @@ -6,7 +6,7 @@ option java_multiple_files = true; import "org/xrpl/rpc/v1/common.proto"; -// Next field: 13 +// Next field: 14 message LedgerObject { oneof object @@ -23,10 +23,11 @@ message LedgerObject PayChannel pay_channel = 10; RippleState ripple_state = 11; SignerList signer_list = 12; + NegativeUnl negative_unl = 13; } } -// Next field: 13 +// Next field: 14 enum LedgerEntryType { LEDGER_ENTRY_TYPE_UNSPECIFIED = 0; @@ -42,6 +43,7 @@ enum LedgerEntryType LEDGER_ENTRY_TYPE_PAY_CHANNEL = 10; LEDGER_ENTRY_TYPE_RIPPLE_STATE = 11; LEDGER_ENTRY_TYPE_SIGNER_LIST = 12; + LEDGER_ENTRY_TYPE_NEGATIVE_UNL = 13; } // Next field: 15 @@ -329,3 +331,13 @@ message SignerList SignerQuorum signer_quorum = 7; } + +// Next field: 4 +message NegativeUnl +{ + repeated NegativeUnlEntry negative_unl_entries = 1; + + NegativeUnlToDisable validator_to_disable = 2; + + NegativeUnlToReEnable validator_to_re_enable = 3; +} \ No newline at end of file diff --git a/src/ripple/proto/org/xrpl/rpc/v1/xrp_ledger.proto b/src/ripple/proto/org/xrpl/rpc/v1/xrp_ledger.proto index 444f97d12be..7cb52605016 100644 --- a/src/ripple/proto/org/xrpl/rpc/v1/xrp_ledger.proto +++ b/src/ripple/proto/org/xrpl/rpc/v1/xrp_ledger.proto @@ -12,6 +12,7 @@ import "org/xrpl/rpc/v1/get_account_transaction_history.proto"; // RPCs available to interact with the XRP Ledger. +// The gRPC API mimics the JSON API. Refer to xrpl.org for documentation service XRPLedgerAPIService { // Get account info for an account on the XRP Ledger. 
@@ -26,5 +27,6 @@ service XRPLedgerAPIService { // Get the status of a transaction rpc GetTransaction(GetTransactionRequest) returns (GetTransactionResponse); + // Get all validated transactions associated with a given account rpc GetAccountTransactionHistory(GetAccountTransactionHistoryRequest) returns (GetAccountTransactionHistoryResponse); } diff --git a/src/ripple/protocol/BuildInfo.h b/src/ripple/protocol/BuildInfo.h index 84a83313452..cfe35d3383b 100644 --- a/src/ripple/protocol/BuildInfo.h +++ b/src/ripple/protocol/BuildInfo.h @@ -43,7 +43,7 @@ getVersionString(); std::string const& getFullVersionString(); -/** Returns the server version packed in a 64-bit integer. +/** Encode an arbitrary server software version in a 64-bit integer. The general format is: @@ -64,10 +64,38 @@ getFullVersionString(); 10 if an RC 01 if a beta N: 6-bit rc/beta number (1-63) + + @param the version string + @return the encoded version in a 64-bit integer */ std::uint64_t +encodeSoftwareVersion(char const* const versionStr); + +/** Returns this server's version packed in a 64-bit integer. */ +std::uint64_t getEncodedVersion(); +/** Check if the encoded software version is a rippled software version. + + @param version another node's encoded software version + @return true if the version is a rippled software version, false otherwise +*/ +bool +isRippledVersion(std::uint64_t version); + +/** Check if the version is newer than the local node's rippled software + version. + + @param version another node's encoded software version + @return true if the version is newer than the local node's rippled software + version, false otherwise. + + @note This function only understands version numbers that are generated by + rippled. Please see the encodeSoftwareVersion() function for detail. 
+*/ +bool +isNewerVersion(std::uint64_t version); + } // namespace BuildInfo } // namespace ripple diff --git a/src/ripple/protocol/ErrorCodes.h b/src/ripple/protocol/ErrorCodes.h index a3fb9e590bb..68c6c4395f5 100644 --- a/src/ripple/protocol/ErrorCodes.h +++ b/src/ripple/protocol/ErrorCodes.h @@ -64,9 +64,9 @@ enum error_code_i { rpcNO_CLOSED = 15, rpcNO_CURRENT = 16, rpcNO_NETWORK = 17, + rpcNOT_SYNCED = 18, // Ledger state - // unused 18, rpcACT_NOT_FOUND = 19, // unused 20, rpcLGR_NOT_FOUND = 21, diff --git a/src/ripple/protocol/Feature.h b/src/ripple/protocol/Feature.h index e47778a03a4..bec4ad9bdd4 100644 --- a/src/ripple/protocol/Feature.h +++ b/src/ripple/protocol/Feature.h @@ -83,7 +83,7 @@ class FeatureCollections "TickSize", "fix1368", "Escrow", - "CryptoConditionsSuite", + // "CryptoConditionsSuite", "fix1373", "EnforceInvariants", "SortedDirectories", @@ -111,8 +111,9 @@ class FeatureCollections "RequireFullyCanonicalSig", "fix1781", // XRPEndpointSteps should be included in the circular // payment check - "HardenedValidations"}; - + "HardenedValidations", + "fixAmendmentMajorityCalc", // Fix Amendment majority calculation + "NegativeUNL"}; std::vector features; boost::container::flat_map featureToIndex; boost::container::flat_map nameToFeature; @@ -347,7 +348,6 @@ extern uint256 const featureOwnerPaysFee; extern uint256 const featureFlow; extern uint256 const featureCompareTakerFlowCross; extern uint256 const featureFlowCross; -extern uint256 const featureCryptoConditionsSuite; extern uint256 const fix1513; extern uint256 const featureDepositAuth; extern uint256 const featureChecks; @@ -367,6 +367,8 @@ extern uint256 const fixQualityUpperBound; extern uint256 const featureRequireFullyCanonicalSig; extern uint256 const fix1781; extern uint256 const featureHardenedValidations; +extern uint256 const fixAmendmentMajorityCalc; +extern uint256 const featureNegativeUNL; } // namespace ripple diff --git a/src/ripple/protocol/Indexes.h 
b/src/ripple/protocol/Indexes.h index 95c52805a6d..1cbb8fd56c0 100644 --- a/src/ripple/protocol/Indexes.h +++ b/src/ripple/protocol/Indexes.h @@ -85,6 +85,10 @@ skip(LedgerIndex ledger) noexcept; Keylet const& fees() noexcept; +/** The (fixed) index of the object containing the ledger negativeUnl. */ +Keylet const& +negativeUNL() noexcept; + /** The beginning of an order book */ struct book_t { diff --git a/src/ripple/protocol/LedgerFormats.h b/src/ripple/protocol/LedgerFormats.h index bee6a467ad6..18b0170081e 100644 --- a/src/ripple/protocol/LedgerFormats.h +++ b/src/ripple/protocol/LedgerFormats.h @@ -86,6 +86,8 @@ enum LedgerEntryType { ltDEPOSIT_PREAUTH = 'p', + ltNEGATIVE_UNL = 'N', + // No longer used or supported. Left here to prevent accidental // reassignment of the ledger type. ltNICKNAME [[deprecated]] = 'n', diff --git a/src/ripple/protocol/SField.h b/src/ripple/protocol/SField.h index abf7037e7de..e0f78d5ec50 100644 --- a/src/ripple/protocol/SField.h +++ b/src/ripple/protocol/SField.h @@ -340,6 +340,7 @@ extern SF_U8 const sfCloseResolution; extern SF_U8 const sfMethod; extern SF_U8 const sfTransactionResult; extern SF_U8 const sfTickSize; +extern SF_U8 const sfUNLModifyDisabling; // 16-bit integers extern SF_U16 const sfLedgerEntryType; @@ -375,7 +376,7 @@ extern SF_U32 const sfStampEscrow; extern SF_U32 const sfBondAmount; extern SF_U32 const sfLoadFee; extern SF_U32 const sfOfferSequence; -extern SF_U32 const sfFirstLedgerSequence; // Deprecated: do not use +extern SF_U32 const sfFirstLedgerSequence; extern SF_U32 const sfLastLedgerSequence; extern SF_U32 const sfTransactionIndex; extern SF_U32 const sfOperationLimit; @@ -471,6 +472,9 @@ extern SF_Blob const sfMemoFormat; extern SF_Blob const sfFulfillment; extern SF_Blob const sfCondition; extern SF_Blob const sfMasterSignature; +extern SF_Blob const sfUNLModifyValidator; +extern SF_Blob const sfNegativeUNLToDisable; +extern SF_Blob const sfNegativeUNLToReEnable; // account extern SF_Account const 
sfAccount; @@ -504,6 +508,7 @@ extern SField const sfMemo; extern SField const sfSignerEntry; extern SField const sfSigner; extern SField const sfMajority; +extern SField const sfNegativeUNLEntry; // array of objects // ARRAY/1 is reserved for end of array @@ -516,7 +521,7 @@ extern SField const sfSufficient; extern SField const sfAffectedNodes; extern SField const sfMemos; extern SField const sfMajorities; - +extern SField const sfNegativeUNL; //------------------------------------------------------------------------------ } // namespace ripple diff --git a/src/ripple/protocol/SystemParameters.h b/src/ripple/protocol/SystemParameters.h index a74155a6a32..2a59de656d6 100644 --- a/src/ripple/protocol/SystemParameters.h +++ b/src/ripple/protocol/SystemParameters.h @@ -21,6 +21,7 @@ #define RIPPLE_PROTOCOL_SYSTEMPARAMETERS_H_INCLUDED #include +#include #include #include @@ -59,6 +60,18 @@ systemCurrencyCode() /** The XRP ledger network's earliest allowed sequence */ static std::uint32_t constexpr XRP_LEDGER_EARLIEST_SEQ{32570}; +/** The minimum amount of support an amendment should have. + + @note This value is used by legacy code and will become obsolete + once the fixAmendmentMajorityCalc amendment activates. +*/ +constexpr std::ratio<204, 256> preFixAmendmentMajorityCalcThreshold; + +constexpr std::ratio<80, 100> postFixAmendmentMajorityCalcThreshold; + +/** The minimum amount of time an amendment must hold a majority */ +constexpr std::chrono::seconds const defaultAmendmentMajorityTime = weeks{2}; + } // namespace ripple /** Default peer port (IANA registered) */ diff --git a/src/ripple/protocol/TER.h b/src/ripple/protocol/TER.h index dcb31bfd8aa..a30bd794a20 100644 --- a/src/ripple/protocol/TER.h +++ b/src/ripple/protocol/TER.h @@ -185,7 +185,7 @@ enum TERcodes : TERUnderlyingType { // - Hold // - Makes hole in sequence which jams transactions. terRETRY = -99, - terFUNDS_SPENT, // This is a free transaction, so don't burden network. 
+ terFUNDS_SPENT, // DEPRECATED. terINSUF_FEE_B, // Can't pay fee, therefore don't burden network. terNO_ACCOUNT, // Can't pay fee, therefore don't burden network. terNO_AUTH, // Not authorized to hold IOUs. @@ -193,7 +193,7 @@ enum TERcodes : TERUnderlyingType { terOWNERS, // Can't succeed with non-zero owner count. terPRE_SEQ, // Can't pay fee, no point in forwarding, so don't // burden network. - terLAST, // Process after all other transactions + terLAST, // DEPRECATED. terNO_RIPPLE, // Rippling not allowed terQUEUED // Transaction is being held in TxQ until fee drops }; @@ -238,7 +238,7 @@ enum TECcodes : TERUnderlyingType { // DO NOT CHANGE THESE NUMBERS: They appear in ledger meta data. tecCLAIM = 100, tecPATH_PARTIAL = 101, - tecUNFUNDED_ADD = 102, + tecUNFUNDED_ADD = 102, // Unused legacy code tecUNFUNDED_OFFER = 103, tecUNFUNDED_PAYMENT = 104, tecFAILED_PROCESSING = 105, @@ -250,7 +250,7 @@ enum TECcodes : TERUnderlyingType { tecNO_LINE_INSUF_RESERVE = 126, tecNO_LINE_REDUNDANT = 127, tecPATH_DRY = 128, - tecUNFUNDED = 129, // Deprecated, old ambiguous unfunded. + tecUNFUNDED = 129, tecNO_ALTERNATIVE_KEY = 130, tecNO_REGULAR_KEY = 131, tecOWNERS = 132, diff --git a/src/ripple/protocol/TxFormats.h b/src/ripple/protocol/TxFormats.h index ad4e5174261..5ff3cf3ee6f 100644 --- a/src/ripple/protocol/TxFormats.h +++ b/src/ripple/protocol/TxFormats.h @@ -58,6 +58,7 @@ enum TxType { ttAMENDMENT = 100, ttFEE = 101, + ttUNL_MODIFY = 102, }; /** Manages the list of known transaction formats. 
diff --git a/src/ripple/protocol/impl/BuildInfo.cpp b/src/ripple/protocol/impl/BuildInfo.cpp index 9cf30d2945c..d94d8311965 100644 --- a/src/ripple/protocol/impl/BuildInfo.cpp +++ b/src/ripple/protocol/impl/BuildInfo.cpp @@ -33,7 +33,7 @@ namespace BuildInfo { // and follow the format described at http://semver.org/ //------------------------------------------------------------------------------ // clang-format off -char const* const versionString = "1.6.0-b7" +char const* const versionString = "1.6.0-b8" // clang-format on #if defined(DEBUG) || defined(SANITIZER) @@ -77,77 +77,98 @@ getFullVersionString() return value; } +static constexpr std::uint64_t implementationVersionIdentifier = + 0x183B'0000'0000'0000LLU; +static constexpr std::uint64_t implementationVersionIdentifierMask = + 0xFFFF'0000'0000'0000LLU; + std::uint64_t -getEncodedVersion() +encodeSoftwareVersion(char const* const versionStr) { - static std::uint64_t const cookie = []() { - std::uint64_t c = 0x183B000000000000; + std::uint64_t c = implementationVersionIdentifier; - beast::SemanticVersion v; + beast::SemanticVersion v; - if (v.parse(versionString)) - { - if (v.majorVersion >= 0 && v.majorVersion <= 255) - c |= static_cast(v.majorVersion) << 40; + if (v.parse(std::string(versionStr))) + { + if (v.majorVersion >= 0 && v.majorVersion <= 255) + c |= static_cast(v.majorVersion) << 40; + + if (v.minorVersion >= 0 && v.minorVersion <= 255) + c |= static_cast(v.minorVersion) << 32; - if (v.minorVersion >= 0 && v.minorVersion <= 255) - c |= static_cast(v.minorVersion) << 32; + if (v.patchVersion >= 0 && v.patchVersion <= 255) + c |= static_cast(v.patchVersion) << 24; - if (v.patchVersion >= 0 && v.patchVersion <= 255) - c |= static_cast(v.patchVersion) << 24; + if (!v.isPreRelease()) + c |= static_cast(0xC00000); - if (!v.isPreRelease()) - c |= static_cast(0xC00000); + if (v.isPreRelease()) + { + std::uint8_t x = 0; - if (v.isPreRelease()) + for (auto id : v.preReleaseIdentifiers) { - std::uint8_t x = 
0; + auto parsePreRelease = [](std::string_view identifier, + std::string_view prefix, + std::uint8_t key, + std::uint8_t lok, + std::uint8_t hik) -> std::uint8_t { + std::uint8_t ret = 0; + + if (prefix != identifier.substr(0, prefix.length())) + return 0; + + if (!beast::lexicalCastChecked( + ret, + std::string(identifier.substr(prefix.length())))) + return 0; + + if (std::clamp(ret, lok, hik) != ret) + return 0; + + return ret + key; + }; + + x = parsePreRelease(id, "rc", 0x80, 0, 63); + + if (x == 0) + x = parsePreRelease(id, "b", 0x40, 0, 63); - for (auto id : v.preReleaseIdentifiers) + if (x & 0xC0) { - auto parsePreRelease = - [](std::string_view identifier, - std::string_view prefix, - std::uint8_t key, - std::uint8_t lok, - std::uint8_t hik) -> std::uint8_t { - std::uint8_t ret = 0; - - if (prefix != identifier.substr(0, prefix.length())) - return 0; - - if (!beast::lexicalCastChecked( - ret, - std::string( - identifier.substr(prefix.length())))) - return 0; - - if (std::clamp(ret, lok, hik) != ret) - return 0; - - return ret + key; - }; - - x = parsePreRelease(id, "rc", 0x80, 0, 63); - - if (x == 0) - x = parsePreRelease(id, "b", 0x40, 0, 63); - - if (x & 0xC0) - { - c |= static_cast(x) << 16; - break; - } + c |= static_cast(x) << 16; + break; } } } + } - return c; - }(); + return c; +} +std::uint64_t +getEncodedVersion() +{ + static std::uint64_t const cookie = {encodeSoftwareVersion(versionString)}; return cookie; } +bool +isRippledVersion(std::uint64_t version) +{ + return (version & implementationVersionIdentifierMask) == + implementationVersionIdentifier; +} + +bool +isNewerVersion(std::uint64_t version) +{ + if (isRippledVersion(version)) + return version > getEncodedVersion(); + return false; +} + } // namespace BuildInfo } // namespace ripple diff --git a/src/ripple/protocol/impl/ErrorCodes.cpp b/src/ripple/protocol/impl/ErrorCodes.cpp index 9110daf40f7..3df10624655 100644 --- a/src/ripple/protocol/impl/ErrorCodes.cpp +++ 
b/src/ripple/protocol/impl/ErrorCodes.cpp @@ -90,8 +90,9 @@ constexpr static ErrorInfo unorderedErrorInfos[]{ {rpcNOT_SUPPORTED, "notSupported", "Operation not supported."}, {rpcNO_CLOSED, "noClosed", "Closed ledger is unavailable."}, {rpcNO_CURRENT, "noCurrent", "Current ledger is unavailable."}, + {rpcNOT_SYNCED, "notSynced", "Not synced to the network."}, {rpcNO_EVENTS, "noEvents", "Current transport does not support events."}, - {rpcNO_NETWORK, "noNetwork", "Not synced to Ripple network."}, + {rpcNO_NETWORK, "noNetwork", "Not synced to the network."}, {rpcNO_PERMISSION, "noPermission", "You don't have permission for this command."}, diff --git a/src/ripple/protocol/impl/Feature.cpp b/src/ripple/protocol/impl/Feature.cpp index b8cac9b8d68..6a0c34c6ce5 100644 --- a/src/ripple/protocol/impl/Feature.cpp +++ b/src/ripple/protocol/impl/Feature.cpp @@ -102,7 +102,7 @@ detail::supportedAmendments() "TickSize", "fix1368", "Escrow", - "CryptoConditionsSuite", + // "CryptoConditionsSuite", DO NOT REUSE "fix1373", "EnforceInvariants", "FlowCross", @@ -130,7 +130,10 @@ detail::supportedAmendments() "fixQualityUpperBound", "RequireFullyCanonicalSig", "fix1781", - "HardenedValidations"}; + "HardenedValidations", + "fixAmendmentMajorityCalc", + //"NegativeUNL" // Commented out to prevent automatic enablement + }; return supported; } @@ -162,7 +165,6 @@ uint256 const featureFlow = *getRegisteredFeature("Flow"), featureCompareTakerFlowCross = *getRegisteredFeature("CompareTakerFlowCross"), featureFlowCross = *getRegisteredFeature("FlowCross"), - featureCryptoConditionsSuite = *getRegisteredFeature("CryptoConditionsSuite"), fix1513 = *getRegisteredFeature("fix1513"), featureDepositAuth = *getRegisteredFeature("DepositAuth"), featureChecks = *getRegisteredFeature("Checks"), @@ -181,7 +183,9 @@ uint256 const fixQualityUpperBound = *getRegisteredFeature("fixQualityUpperBound"), featureRequireFullyCanonicalSig = *getRegisteredFeature("RequireFullyCanonicalSig"), fix1781 = 
*getRegisteredFeature("fix1781"), - featureHardenedValidations = *getRegisteredFeature("HardenedValidations"); + featureHardenedValidations = *getRegisteredFeature("HardenedValidations"), + fixAmendmentMajorityCalc = *getRegisteredFeature("fixAmendmentMajorityCalc"), + featureNegativeUNL = *getRegisteredFeature("NegativeUNL"); // The following amendments have been active for at least two years. Their // pre-amendment code has been removed and the identifiers are deprecated. diff --git a/src/ripple/protocol/impl/Indexes.cpp b/src/ripple/protocol/impl/Indexes.cpp index 6df08f90d22..acb3ef14e64 100644 --- a/src/ripple/protocol/impl/Indexes.cpp +++ b/src/ripple/protocol/impl/Indexes.cpp @@ -58,6 +58,7 @@ enum class LedgerNameSpace : std::uint16_t { XRP_PAYMENT_CHANNEL = 'x', CHECK = 'C', DEPOSIT_PREAUTH = 'p', + NEGATIVE_UNL = 'N', // No longer used or supported. Left here to reserve the space // to avoid accidental reuse. @@ -162,6 +163,14 @@ fees() noexcept return ret; } +Keylet const& +negativeUNL() noexcept +{ + static Keylet const ret{ + ltNEGATIVE_UNL, indexHash(LedgerNameSpace::NEGATIVE_UNL)}; + return ret; +} + Keylet book_t::operator()(Book const& b) const { diff --git a/src/ripple/protocol/impl/LedgerFormats.cpp b/src/ripple/protocol/impl/LedgerFormats.cpp index 199c1265e1b..0c0abdbb6cd 100644 --- a/src/ripple/protocol/impl/LedgerFormats.cpp +++ b/src/ripple/protocol/impl/LedgerFormats.cpp @@ -224,6 +224,15 @@ LedgerFormats::LedgerFormats() {sfPreviousTxnLgrSeq, soeREQUIRED}, }, commonFields); + + add(jss::NegativeUNL, + ltNEGATIVE_UNL, + { + {sfNegativeUNL, soeOPTIONAL}, + {sfNegativeUNLToDisable, soeOPTIONAL}, + {sfNegativeUNLToReEnable, soeOPTIONAL}, + }, + commonFields); } LedgerFormats const& diff --git a/src/ripple/protocol/impl/SField.cpp b/src/ripple/protocol/impl/SField.cpp index 4784d2de99c..558635bee5d 100644 --- a/src/ripple/protocol/impl/SField.cpp +++ b/src/ripple/protocol/impl/SField.cpp @@ -56,6 +56,7 @@ SF_U8 const sfTransactionResult(access, 
STI_UINT8, 3, "TransactionResult"); // 8-bit integers (uncommon) SF_U8 const sfTickSize(access, STI_UINT8, 16, "TickSize"); +SF_U8 const sfUNLModifyDisabling(access, STI_UINT8, 17, "UNLModifyDisabling"); // 16-bit integers SF_U16 const sfLedgerEntryType( @@ -101,11 +102,8 @@ SF_U32 const sfStampEscrow(access, STI_UINT32, 22, "StampEscrow"); SF_U32 const sfBondAmount(access, STI_UINT32, 23, "BondAmount"); SF_U32 const sfLoadFee(access, STI_UINT32, 24, "LoadFee"); SF_U32 const sfOfferSequence(access, STI_UINT32, 25, "OfferSequence"); -SF_U32 const sfFirstLedgerSequence( - access, - STI_UINT32, - 26, - "FirstLedgerSequence"); // Deprecated: do not use +SF_U32 const + sfFirstLedgerSequence(access, STI_UINT32, 26, "FirstLedgerSequence"); SF_U32 const sfLastLedgerSequence(access, STI_UINT32, 27, "LastLedgerSequence"); SF_U32 const sfTransactionIndex(access, STI_UINT32, 28, "TransactionIndex"); SF_U32 const sfOperationLimit(access, STI_UINT32, 29, "OperationLimit"); @@ -225,6 +223,10 @@ SF_Blob const sfMasterSignature( "MasterSignature", SField::sMD_Default, SField::notSigning); +SF_Blob const sfUNLModifyValidator(access, STI_VL, 19, "UNLModifyValidator"); +SF_Blob const sfNegativeUNLToDisable(access, STI_VL, 20, "ValidatorToDisable"); +SF_Blob const + sfNegativeUNLToReEnable(access, STI_VL, 21, "ValidatorToReEnable"); // account SF_Account const sfAccount(access, STI_ACCOUNT, 1, "Account"); @@ -263,6 +265,7 @@ SField const sfSignerEntry(access, STI_OBJECT, 11, "SignerEntry"); SField const sfSigner(access, STI_OBJECT, 16, "Signer"); // 17 has not been used yet... 
SField const sfMajority(access, STI_OBJECT, 18, "Majority"); +SField const sfNegativeUNLEntry(access, STI_OBJECT, 19, "DisabledValidator"); // array of objects // ARRAY/1 is reserved for end of array @@ -284,6 +287,7 @@ SField const sfMemos(access, STI_ARRAY, 9, "Memos"); // array of objects (uncommon) SField const sfMajorities(access, STI_ARRAY, 16, "Majorities"); +SField const sfNegativeUNL(access, STI_ARRAY, 17, "NegativeUNL"); SField::SField( private_access_tag_t, diff --git a/src/ripple/protocol/impl/STTx.cpp b/src/ripple/protocol/impl/STTx.cpp index 1da713e0f35..d5f468b66a9 100644 --- a/src/ripple/protocol/impl/STTx.cpp +++ b/src/ripple/protocol/impl/STTx.cpp @@ -527,7 +527,7 @@ isPseudoTx(STObject const& tx) if (!t) return false; auto tt = safe_cast(*t); - return tt == ttAMENDMENT || tt == ttFEE; + return tt == ttAMENDMENT || tt == ttFEE || tt == ttUNL_MODIFY; } } // namespace ripple diff --git a/src/ripple/protocol/impl/TER.cpp b/src/ripple/protocol/impl/TER.cpp index 7f0e795cf3f..f496711dd92 100644 --- a/src/ripple/protocol/impl/TER.cpp +++ b/src/ripple/protocol/impl/TER.cpp @@ -31,230 +31,144 @@ static std::unordered_map< std::pair> const& transResults() { - static std::unordered_map< - TERUnderlyingType, - std::pair> const results{ - {tecCLAIM, {"tecCLAIM", "Fee claimed. Sequence used. No action."}}, - {tecDIR_FULL, {"tecDIR_FULL", "Can not add entry to full directory."}}, - {tecFAILED_PROCESSING, - {"tecFAILED_PROCESSING", "Failed to correctly process transaction."}}, - {tecINSUF_RESERVE_LINE, - {"tecINSUF_RESERVE_LINE", "Insufficient reserve to add trust line."}}, - {tecINSUF_RESERVE_OFFER, - {"tecINSUF_RESERVE_OFFER", "Insufficient reserve to create offer."}}, - {tecNO_DST, - {"tecNO_DST", "Destination does not exist. Send XRP to create it."}}, - {tecNO_DST_INSUF_XRP, - {"tecNO_DST_INSUF_XRP", - "Destination does not exist. Too little XRP sent to create it."}}, - {tecNO_LINE_INSUF_RESERVE, - {"tecNO_LINE_INSUF_RESERVE", - "No such line. 
Too little reserve to create it."}}, - {tecNO_LINE_REDUNDANT, - {"tecNO_LINE_REDUNDANT", "Can't set non-existent line to default."}}, - {tecPATH_DRY, {"tecPATH_DRY", "Path could not send partial amount."}}, - {tecPATH_PARTIAL, - {"tecPATH_PARTIAL", "Path could not send full amount."}}, - {tecNO_ALTERNATIVE_KEY, - {"tecNO_ALTERNATIVE_KEY", - "The operation would remove the ability to sign transactions with " - "the account."}}, - {tecNO_REGULAR_KEY, {"tecNO_REGULAR_KEY", "Regular key is not set."}}, - {tecOVERSIZE, {"tecOVERSIZE", "Object exceeded serialization limits."}}, - {tecUNFUNDED, - {"tecUNFUNDED", "One of _ADD, _OFFER, or _SEND. Deprecated."}}, - {tecUNFUNDED_ADD, - {"tecUNFUNDED_ADD", "Insufficient XRP balance for WalletAdd."}}, - {tecUNFUNDED_OFFER, - {"tecUNFUNDED_OFFER", "Insufficient balance to fund created offer."}}, - {tecUNFUNDED_PAYMENT, - {"tecUNFUNDED_PAYMENT", "Insufficient XRP balance to send."}}, - {tecOWNERS, {"tecOWNERS", "Non-zero owner count."}}, - {tecNO_ISSUER, {"tecNO_ISSUER", "Issuer account does not exist."}}, - {tecNO_AUTH, {"tecNO_AUTH", "Not authorized to hold asset."}}, - {tecNO_LINE, {"tecNO_LINE", "No such line."}}, - {tecINSUFF_FEE, {"tecINSUFF_FEE", "Insufficient balance to pay fee."}}, - {tecFROZEN, {"tecFROZEN", "Asset is frozen."}}, - {tecNO_TARGET, {"tecNO_TARGET", "Target account does not exist."}}, - {tecNO_PERMISSION, - {"tecNO_PERMISSION", "No permission to perform requested operation."}}, - {tecNO_ENTRY, {"tecNO_ENTRY", "No matching entry found."}}, - {tecINSUFFICIENT_RESERVE, - {"tecINSUFFICIENT_RESERVE", - "Insufficient reserve to complete requested operation."}}, - {tecNEED_MASTER_KEY, - {"tecNEED_MASTER_KEY", - "The operation requires the use of the Master Key."}}, - {tecDST_TAG_NEEDED, - {"tecDST_TAG_NEEDED", "A destination tag is required."}}, - {tecINTERNAL, - {"tecINTERNAL", "An internal error has occurred during processing."}}, - {tecCRYPTOCONDITION_ERROR, - {"tecCRYPTOCONDITION_ERROR", - "Malformed, invalid, 
or mismatched conditional or fulfillment."}}, - {tecINVARIANT_FAILED, - {"tecINVARIANT_FAILED", - "One or more invariants for the transaction were not satisfied."}}, - {tecEXPIRED, {"tecEXPIRED", "Expiration time is passed."}}, - {tecDUPLICATE, {"tecDUPLICATE", "Ledger object already exists."}}, - {tecKILLED, {"tecKILLED", "FillOrKill offer killed."}}, - {tecHAS_OBLIGATIONS, - {"tecHAS_OBLIGATIONS", - "The account cannot be deleted since it has obligations."}}, - {tecTOO_SOON, - {"tecTOO_SOON", - "It is too early to attempt the requested operation. Please wait."}}, - - {tefALREADY, - {"tefALREADY", "The exact transaction was already in this ledger."}}, - {tefBAD_ADD_AUTH, - {"tefBAD_ADD_AUTH", "Not authorized to add account."}}, - {tefBAD_AUTH, - {"tefBAD_AUTH", "Transaction's public key is not authorized."}}, - {tefBAD_LEDGER, {"tefBAD_LEDGER", "Ledger in unexpected state."}}, - {tefBAD_QUORUM, - {"tefBAD_QUORUM", "Signatures provided do not meet the quorum."}}, - {tefBAD_SIGNATURE, - {"tefBAD_SIGNATURE", "A signature is provided for a non-signer."}}, - {tefCREATED, {"tefCREATED", "Can't add an already created account."}}, - {tefEXCEPTION, {"tefEXCEPTION", "Unexpected program state."}}, - {tefFAILURE, {"tefFAILURE", "Failed to apply."}}, - {tefINTERNAL, {"tefINTERNAL", "Internal error."}}, - {tefMASTER_DISABLED, {"tefMASTER_DISABLED", "Master key is disabled."}}, - {tefMAX_LEDGER, {"tefMAX_LEDGER", "Ledger sequence too high."}}, - {tefNO_AUTH_REQUIRED, {"tefNO_AUTH_REQUIRED", "Auth is not required."}}, - {tefNOT_MULTI_SIGNING, - {"tefNOT_MULTI_SIGNING", - "Account has no appropriate list of multi-signers."}}, - {tefPAST_SEQ, - {"tefPAST_SEQ", "This sequence number has already passed."}}, - {tefWRONG_PRIOR, - {"tefWRONG_PRIOR", "This previous transaction does not match."}}, - {tefBAD_AUTH_MASTER, - {"tefBAD_AUTH_MASTER", - "Auth for unclaimed account needs correct master key."}}, - {tefINVARIANT_FAILED, - {"tefINVARIANT_FAILED", - "Fee claim violated invariants for 
the transaction."}}, - {tefTOO_BIG, {"tefTOO_BIG", "Transaction affects too many items."}}, - - {telLOCAL_ERROR, {"telLOCAL_ERROR", "Local failure."}}, - {telBAD_DOMAIN, {"telBAD_DOMAIN", "Domain too long."}}, - {telBAD_PATH_COUNT, - {"telBAD_PATH_COUNT", "Malformed: Too many paths."}}, - {telBAD_PUBLIC_KEY, {"telBAD_PUBLIC_KEY", "Public key too long."}}, - {telFAILED_PROCESSING, - {"telFAILED_PROCESSING", "Failed to correctly process transaction."}}, - {telINSUF_FEE_P, {"telINSUF_FEE_P", "Fee insufficient."}}, - {telNO_DST_PARTIAL, - {"telNO_DST_PARTIAL", - "Partial payment to create account not allowed."}}, - {telCAN_NOT_QUEUE, {"telCAN_NOT_QUEUE", "Can not queue at this time."}}, - {telCAN_NOT_QUEUE_BALANCE, - {"telCAN_NOT_QUEUE_BALANCE", - "Can not queue at this time: insufficient balance to pay all queued " - "fees."}}, - {telCAN_NOT_QUEUE_BLOCKS, - {"telCAN_NOT_QUEUE_BLOCKS", - "Can not queue at this time: would block later queued " - "transaction(s)."}}, - {telCAN_NOT_QUEUE_BLOCKED, - {"telCAN_NOT_QUEUE_BLOCKED", - "Can not queue at this time: blocking transaction in queue."}}, - {telCAN_NOT_QUEUE_FEE, - {"telCAN_NOT_QUEUE_FEE", - "Can not queue at this time: fee insufficient to replace queued " - "transaction."}}, - {telCAN_NOT_QUEUE_FULL, - {"telCAN_NOT_QUEUE_FULL", - "Can not queue at this time: queue is full."}}, - - {temMALFORMED, {"temMALFORMED", "Malformed transaction."}}, - {temBAD_AMOUNT, {"temBAD_AMOUNT", "Can only send positive amounts."}}, - {temBAD_CURRENCY, {"temBAD_CURRENCY", "Malformed: Bad currency."}}, - {temBAD_EXPIRATION, - {"temBAD_EXPIRATION", "Malformed: Bad expiration."}}, - {temBAD_FEE, {"temBAD_FEE", "Invalid fee, negative or not XRP."}}, - {temBAD_ISSUER, {"temBAD_ISSUER", "Malformed: Bad issuer."}}, - {temBAD_LIMIT, {"temBAD_LIMIT", "Limits must be non-negative."}}, - {temBAD_OFFER, {"temBAD_OFFER", "Malformed: Bad offer."}}, - {temBAD_PATH, {"temBAD_PATH", "Malformed: Bad path."}}, - {temBAD_PATH_LOOP, {"temBAD_PATH_LOOP", 
"Malformed: Loop in path."}}, - {temBAD_QUORUM, {"temBAD_QUORUM", "Malformed: Quorum is unreachable."}}, - {temBAD_REGKEY, - {"temBAD_REGKEY", - "Malformed: Regular key cannot be same as master key."}}, - {temBAD_SEND_XRP_LIMIT, - {"temBAD_SEND_XRP_LIMIT", - "Malformed: Limit quality is not allowed for XRP to XRP."}}, - {temBAD_SEND_XRP_MAX, - {"temBAD_SEND_XRP_MAX", - "Malformed: Send max is not allowed for XRP to XRP."}}, - {temBAD_SEND_XRP_NO_DIRECT, - {"temBAD_SEND_XRP_NO_DIRECT", - "Malformed: No Ripple direct is not allowed for XRP to XRP."}}, - {temBAD_SEND_XRP_PARTIAL, - {"temBAD_SEND_XRP_PARTIAL", - "Malformed: Partial payment is not allowed for XRP to XRP."}}, - {temBAD_SEND_XRP_PATHS, - {"temBAD_SEND_XRP_PATHS", - "Malformed: Paths are not allowed for XRP to XRP."}}, - {temBAD_SEQUENCE, - {"temBAD_SEQUENCE", "Malformed: Sequence is not in the past."}}, - {temBAD_SIGNATURE, {"temBAD_SIGNATURE", "Malformed: Bad signature."}}, - {temBAD_SIGNER, - {"temBAD_SIGNER", - "Malformed: No signer may duplicate account or other signers."}}, - {temBAD_SRC_ACCOUNT, - {"temBAD_SRC_ACCOUNT", "Malformed: Bad source account."}}, - {temBAD_TRANSFER_RATE, - {"temBAD_TRANSFER_RATE", - "Malformed: Transfer rate must be >= 1.0 and <= 2.0"}}, - {temBAD_WEIGHT, - {"temBAD_WEIGHT", "Malformed: Weight must be a positive value."}}, - {temDST_IS_SRC, {"temDST_IS_SRC", "Destination may not be source."}}, - {temDST_NEEDED, {"temDST_NEEDED", "Destination not specified."}}, - {temINVALID, {"temINVALID", "The transaction is ill-formed."}}, - {temINVALID_FLAG, - {"temINVALID_FLAG", "The transaction has an invalid flag."}}, - {temREDUNDANT, {"temREDUNDANT", "Sends same currency to self."}}, - {temRIPPLE_EMPTY, {"temRIPPLE_EMPTY", "PathSet with no paths."}}, - {temUNCERTAIN, - {"temUNCERTAIN", "In process of determining result. 
Never returned."}}, - {temUNKNOWN, - {"temUNKNOWN", - "The transaction requires logic that is not implemented yet."}}, - {temDISABLED, - {"temDISABLED", - "The transaction requires logic that is currently disabled."}}, - {temBAD_TICK_SIZE, - {"temBAD_TICK_SIZE", "Malformed: Tick size out of range."}}, - {temINVALID_ACCOUNT_ID, - {"temINVALID_ACCOUNT_ID", - "Malformed: A field contains an invalid account ID."}}, - {temCANNOT_PREAUTH_SELF, - {"temCANNOT_PREAUTH_SELF", - "Malformed: An account may not preauthorize itself."}}, + // clang-format off + + // Macros are generally ugly, but they can help make code readable to + // humans without affecting the compiler. +#define MAKE_ERROR(code, desc) { code, { #code, desc } } + + static + std::unordered_map< + TERUnderlyingType, + std::pair> const results + { + MAKE_ERROR(tecCLAIM, "Fee claimed. Sequence used. No action."), + MAKE_ERROR(tecDIR_FULL, "Can not add entry to full directory."), + MAKE_ERROR(tecFAILED_PROCESSING, "Failed to correctly process transaction."), + MAKE_ERROR(tecINSUF_RESERVE_LINE, "Insufficient reserve to add trust line."), + MAKE_ERROR(tecINSUF_RESERVE_OFFER, "Insufficient reserve to create offer."), + MAKE_ERROR(tecNO_DST, "Destination does not exist. Send XRP to create it."), + MAKE_ERROR(tecNO_DST_INSUF_XRP, "Destination does not exist. Too little XRP sent to create it."), + MAKE_ERROR(tecNO_LINE_INSUF_RESERVE, "No such line. 
Too little reserve to create it."), + MAKE_ERROR(tecNO_LINE_REDUNDANT, "Can't set non-existent line to default."), + MAKE_ERROR(tecPATH_DRY, "Path could not send partial amount."), + MAKE_ERROR(tecPATH_PARTIAL, "Path could not send full amount."), + MAKE_ERROR(tecNO_ALTERNATIVE_KEY, "The operation would remove the ability to sign transactions with the account."), + MAKE_ERROR(tecNO_REGULAR_KEY, "Regular key is not set."), + MAKE_ERROR(tecOVERSIZE, "Object exceeded serialization limits."), + MAKE_ERROR(tecUNFUNDED, "Not enough XRP to satisfy the reserve requirement."), + MAKE_ERROR(tecUNFUNDED_ADD, "DEPRECATED."), + MAKE_ERROR(tecUNFUNDED_OFFER, "Insufficient balance to fund created offer."), + MAKE_ERROR(tecUNFUNDED_PAYMENT, "Insufficient XRP balance to send."), + MAKE_ERROR(tecOWNERS, "Non-zero owner count."), + MAKE_ERROR(tecNO_ISSUER, "Issuer account does not exist."), + MAKE_ERROR(tecNO_AUTH, "Not authorized to hold asset."), + MAKE_ERROR(tecNO_LINE, "No such line."), + MAKE_ERROR(tecINSUFF_FEE, "Insufficient balance to pay fee."), + MAKE_ERROR(tecFROZEN, "Asset is frozen."), + MAKE_ERROR(tecNO_TARGET, "Target account does not exist."), + MAKE_ERROR(tecNO_PERMISSION, "No permission to perform requested operation."), + MAKE_ERROR(tecNO_ENTRY, "No matching entry found."), + MAKE_ERROR(tecINSUFFICIENT_RESERVE, "Insufficient reserve to complete requested operation."), + MAKE_ERROR(tecNEED_MASTER_KEY, "The operation requires the use of the Master Key."), + MAKE_ERROR(tecDST_TAG_NEEDED, "A destination tag is required."), + MAKE_ERROR(tecINTERNAL, "An internal error has occurred during processing."), + MAKE_ERROR(tecCRYPTOCONDITION_ERROR, "Malformed, invalid, or mismatched conditional or fulfillment."), + MAKE_ERROR(tecINVARIANT_FAILED, "One or more invariants for the transaction were not satisfied."), + MAKE_ERROR(tecEXPIRED, "Expiration time is passed."), + MAKE_ERROR(tecDUPLICATE, "Ledger object already exists."), + MAKE_ERROR(tecKILLED, "FillOrKill offer 
killed."), + MAKE_ERROR(tecHAS_OBLIGATIONS, "The account cannot be deleted since it has obligations."), + MAKE_ERROR(tecTOO_SOON, "It is too early to attempt the requested operation. Please wait."), + + MAKE_ERROR(tefALREADY, "The exact transaction was already in this ledger."), + MAKE_ERROR(tefBAD_ADD_AUTH, "Not authorized to add account."), + MAKE_ERROR(tefBAD_AUTH, "Transaction's public key is not authorized."), + MAKE_ERROR(tefBAD_LEDGER, "Ledger in unexpected state."), + MAKE_ERROR(tefBAD_QUORUM, "Signatures provided do not meet the quorum."), + MAKE_ERROR(tefBAD_SIGNATURE, "A signature is provided for a non-signer."), + MAKE_ERROR(tefCREATED, "Can't add an already created account."), + MAKE_ERROR(tefEXCEPTION, "Unexpected program state."), + MAKE_ERROR(tefFAILURE, "Failed to apply."), + MAKE_ERROR(tefINTERNAL, "Internal error."), + MAKE_ERROR(tefMASTER_DISABLED, "Master key is disabled."), + MAKE_ERROR(tefMAX_LEDGER, "Ledger sequence too high."), + MAKE_ERROR(tefNO_AUTH_REQUIRED, "Auth is not required."), + MAKE_ERROR(tefNOT_MULTI_SIGNING, "Account has no appropriate list of multi-signers."), + MAKE_ERROR(tefPAST_SEQ, "This sequence number has already passed."), + MAKE_ERROR(tefWRONG_PRIOR, "This previous transaction does not match."), + MAKE_ERROR(tefBAD_AUTH_MASTER, "Auth for unclaimed account needs correct master key."), + MAKE_ERROR(tefINVARIANT_FAILED, "Fee claim violated invariants for the transaction."), + MAKE_ERROR(tefTOO_BIG, "Transaction affects too many items."), + + MAKE_ERROR(telLOCAL_ERROR, "Local failure."), + MAKE_ERROR(telBAD_DOMAIN, "Domain too long."), + MAKE_ERROR(telBAD_PATH_COUNT, "Malformed: Too many paths."), + MAKE_ERROR(telBAD_PUBLIC_KEY, "Public key too long."), + MAKE_ERROR(telFAILED_PROCESSING, "Failed to correctly process transaction."), + MAKE_ERROR(telINSUF_FEE_P, "Fee insufficient."), + MAKE_ERROR(telNO_DST_PARTIAL, "Partial payment to create account not allowed."), + MAKE_ERROR(telCAN_NOT_QUEUE, "Can not queue at this 
time."), + MAKE_ERROR(telCAN_NOT_QUEUE_BALANCE, "Can not queue at this time: insufficient balance to pay all queued fees."), + MAKE_ERROR(telCAN_NOT_QUEUE_BLOCKS, "Can not queue at this time: would block later queued transaction(s)."), + MAKE_ERROR(telCAN_NOT_QUEUE_BLOCKED, "Can not queue at this time: blocking transaction in queue."), + MAKE_ERROR(telCAN_NOT_QUEUE_FEE, "Can not queue at this time: fee insufficient to replace queued transaction."), + MAKE_ERROR(telCAN_NOT_QUEUE_FULL, "Can not queue at this time: queue is full."), + + MAKE_ERROR(temMALFORMED, "Malformed transaction."), + MAKE_ERROR(temBAD_AMOUNT, "Can only send positive amounts."), + MAKE_ERROR(temBAD_CURRENCY, "Malformed: Bad currency."), + MAKE_ERROR(temBAD_EXPIRATION, "Malformed: Bad expiration."), + MAKE_ERROR(temBAD_FEE, "Invalid fee, negative or not XRP."), + MAKE_ERROR(temBAD_ISSUER, "Malformed: Bad issuer."), + MAKE_ERROR(temBAD_LIMIT, "Limits must be non-negative."), + MAKE_ERROR(temBAD_OFFER, "Malformed: Bad offer."), + MAKE_ERROR(temBAD_PATH, "Malformed: Bad path."), + MAKE_ERROR(temBAD_PATH_LOOP, "Malformed: Loop in path."), + MAKE_ERROR(temBAD_QUORUM, "Malformed: Quorum is unreachable."), + MAKE_ERROR(temBAD_REGKEY, "Malformed: Regular key cannot be same as master key."), + MAKE_ERROR(temBAD_SEND_XRP_LIMIT, "Malformed: Limit quality is not allowed for XRP to XRP."), + MAKE_ERROR(temBAD_SEND_XRP_MAX, "Malformed: Send max is not allowed for XRP to XRP."), + MAKE_ERROR(temBAD_SEND_XRP_NO_DIRECT, "Malformed: No Ripple direct is not allowed for XRP to XRP."), + MAKE_ERROR(temBAD_SEND_XRP_PARTIAL, "Malformed: Partial payment is not allowed for XRP to XRP."), + MAKE_ERROR(temBAD_SEND_XRP_PATHS, "Malformed: Paths are not allowed for XRP to XRP."), + MAKE_ERROR(temBAD_SEQUENCE, "Malformed: Sequence is not in the past."), + MAKE_ERROR(temBAD_SIGNATURE, "Malformed: Bad signature."), + MAKE_ERROR(temBAD_SIGNER, "Malformed: No signer may duplicate account or other signers."), + 
MAKE_ERROR(temBAD_SRC_ACCOUNT, "Malformed: Bad source account."), + MAKE_ERROR(temBAD_TRANSFER_RATE, "Malformed: Transfer rate must be >= 1.0 and <= 2.0"), + MAKE_ERROR(temBAD_WEIGHT, "Malformed: Weight must be a positive value."), + MAKE_ERROR(temDST_IS_SRC, "Destination may not be source."), + MAKE_ERROR(temDST_NEEDED, "Destination not specified."), + MAKE_ERROR(temINVALID, "The transaction is ill-formed."), + MAKE_ERROR(temINVALID_FLAG, "The transaction has an invalid flag."), + MAKE_ERROR(temREDUNDANT, "Sends same currency to self."), + MAKE_ERROR(temRIPPLE_EMPTY, "PathSet with no paths."), + MAKE_ERROR(temUNCERTAIN, "In process of determining result. Never returned."), + MAKE_ERROR(temUNKNOWN, "The transaction requires logic that is not implemented yet."), + MAKE_ERROR(temDISABLED, "The transaction requires logic that is currently disabled."), + MAKE_ERROR(temBAD_TICK_SIZE, "Malformed: Tick size out of range."), + MAKE_ERROR(temINVALID_ACCOUNT_ID, "Malformed: A field contains an invalid account ID."), + MAKE_ERROR(temCANNOT_PREAUTH_SELF, "Malformed: An account may not preauthorize itself."), + + MAKE_ERROR(terRETRY, "Retry transaction."), + MAKE_ERROR(terFUNDS_SPENT, "DEPRECATED."), + MAKE_ERROR(terINSUF_FEE_B, "Account balance can't pay fee."), + MAKE_ERROR(terLAST, "DEPRECATED."), + MAKE_ERROR(terNO_RIPPLE, "Path does not permit rippling."), + MAKE_ERROR(terNO_ACCOUNT, "The source account does not exist."), + MAKE_ERROR(terNO_AUTH, "Not authorized to hold IOUs."), + MAKE_ERROR(terNO_LINE, "No such line."), + MAKE_ERROR(terPRE_SEQ, "Missing/inapplicable prior transaction."), + MAKE_ERROR(terOWNERS, "Non-zero owner count."), + MAKE_ERROR(terQUEUED, "Held until escalated fee drops."), + + MAKE_ERROR(tesSUCCESS, "The transaction was applied. 
Only final in a validated ledger."), + }; + // clang-format on - {terRETRY, {"terRETRY", "Retry transaction."}}, - {terFUNDS_SPENT, - {"terFUNDS_SPENT", - "Can't set password, password set funds already spent."}}, - {terINSUF_FEE_B, {"terINSUF_FEE_B", "Account balance can't pay fee."}}, - {terLAST, {"terLAST", "Process last."}}, - {terNO_RIPPLE, {"terNO_RIPPLE", "Path does not permit rippling."}}, - {terNO_ACCOUNT, - {"terNO_ACCOUNT", "The source account does not exist."}}, - {terNO_AUTH, {"terNO_AUTH", "Not authorized to hold IOUs."}}, - {terNO_LINE, {"terNO_LINE", "No such line."}}, - {terPRE_SEQ, {"terPRE_SEQ", "Missing/inapplicable prior transaction."}}, - {terOWNERS, {"terOWNERS", "Non-zero owner count."}}, - {terQUEUED, {"terQUEUED", "Held until escalated fee drops."}}, +#undef MAKE_ERROR - {tesSUCCESS, - {"tesSUCCESS", - "The transaction was applied. Only final in a validated ledger."}}, - }; return results; } diff --git a/src/ripple/protocol/impl/TxFormats.cpp b/src/ripple/protocol/impl/TxFormats.cpp index bea1dc60181..24a8ef197bf 100644 --- a/src/ripple/protocol/impl/TxFormats.cpp +++ b/src/ripple/protocol/impl/TxFormats.cpp @@ -152,6 +152,15 @@ TxFormats::TxFormats() }, commonFields); + add(jss::UNLModify, + ttUNL_MODIFY, + { + {sfUNLModifyDisabling, soeREQUIRED}, + {sfLedgerSequence, soeREQUIRED}, + {sfUNLModifyValidator, soeREQUIRED}, + }, + commonFields); + add(jss::TicketCreate, ttTICKET_CREATE, { diff --git a/src/ripple/protocol/jss.h b/src/ripple/protocol/jss.h index 1544a5b6a76..1df4bf7fcfa 100644 --- a/src/ripple/protocol/jss.h +++ b/src/ripple/protocol/jss.h @@ -82,6 +82,7 @@ JSS(PaymentChannelFund); // transaction type. JSS(RippleState); // ledger type. JSS(SLE_hit_rate); // out: GetCounts. JSS(SetFee); // transaction type. +JSS(UNLModify); // transaction type. JSS(SettleDelay); // in: TransactionSign JSS(SendMax); // in: TransactionSign JSS(Sequence); // in/out: TransactionSign; field. 
@@ -576,8 +577,8 @@ JSS(vote); // in: Feature JSS(warning); // rpc: JSS(warnings); // out: server_info, server_state JSS(workers); -JSS(write_load); // out: GetCounts - +JSS(write_load); // out: GetCounts +JSS(NegativeUNL); // out: ValidatorList; ledger type #undef JSS } // namespace jss diff --git a/src/ripple/rpc/Context.h b/src/ripple/rpc/Context.h index 1188006362e..7a22ed9fe0c 100644 --- a/src/ripple/rpc/Context.h +++ b/src/ripple/rpc/Context.h @@ -47,6 +47,7 @@ struct Context Role role; std::shared_ptr coro{}; InfoSub::pointer infoSub{}; + unsigned int apiVersion; }; struct JsonContext : public Context @@ -62,7 +63,6 @@ struct JsonContext : public Context Json::Value params; - unsigned int apiVersion; Headers headers{}; }; diff --git a/src/ripple/rpc/handlers/AccountTx.cpp b/src/ripple/rpc/handlers/AccountTx.cpp index f4e3a6b9d3f..ffcbe145fbb 100644 --- a/src/ripple/rpc/handlers/AccountTx.cpp +++ b/src/ripple/rpc/handlers/AccountTx.cpp @@ -214,7 +214,9 @@ getLedgerRange( if (!bValidated) { // Don't have a validated ledger range. 
- return rpcLGR_IDXS_INVALID; + if (context.apiVersion == 1) + return rpcLGR_IDXS_INVALID; + return rpcNOT_SYNCED; } std::uint32_t uLedgerMin = uValidatedMin; @@ -236,7 +238,11 @@ getLedgerRange( uLedgerMax = ls.max; } if (uLedgerMax < uLedgerMin) - return rpcLGR_IDXS_INVALID; + { + if (context.apiVersion == 1) + return rpcLGR_IDXS_INVALID; + return rpcINVALID_LGR_RANGE; + } } else { @@ -330,6 +336,10 @@ populateProtoResponse( { status = {grpc::StatusCode::NOT_FOUND, error.message()}; } + else if (error.toErrorCode() == rpcNOT_SYNCED) + { + status = {grpc::StatusCode::FAILED_PRECONDITION, error.message()}; + } else { status = {grpc::StatusCode::INVALID_ARGUMENT, error.message()}; diff --git a/src/ripple/rpc/handlers/AccountTxOld.cpp b/src/ripple/rpc/handlers/AccountTxOld.cpp index 472f999b621..5950f474d36 100644 --- a/src/ripple/rpc/handlers/AccountTxOld.cpp +++ b/src/ripple/rpc/handlers/AccountTxOld.cpp @@ -105,7 +105,9 @@ doAccountTxOld(RPC::JsonContext& context) if (!bValidated && (iLedgerMin == -1 || iLedgerMax == -1)) { // Don't have a validated ledger range. - return rpcError(rpcLGR_IDXS_INVALID); + if (context.apiVersion == 1) + return rpcError(rpcLGR_IDXS_INVALID); + return rpcError(rpcNOT_SYNCED); } uLedgerMin = iLedgerMin == -1 ? uValidatedMin : iLedgerMin; @@ -113,7 +115,9 @@ doAccountTxOld(RPC::JsonContext& context) if (uLedgerMax < uLedgerMin) { - return rpcError(rpcLGR_IDXS_INVALID); + if (context.apiVersion == 1) + return rpcError(rpcLGR_IDXS_INVALID); + return rpcError(rpcNOT_SYNCED); } } else diff --git a/src/ripple/rpc/handlers/CrawlShards.cpp b/src/ripple/rpc/handlers/CrawlShards.cpp index bc235a3a05a..b28aa0e9596 100644 --- a/src/ripple/rpc/handlers/CrawlShards.cpp +++ b/src/ripple/rpc/handlers/CrawlShards.cpp @@ -32,7 +32,7 @@ namespace ripple { { // Determines if the result includes node public key. // optional, default is false - pubkey: + public_key: // The maximum number of peer hops to attempt. 
// optional, default is zero, maximum is 3 diff --git a/src/ripple/rpc/handlers/GetCounts.cpp b/src/ripple/rpc/handlers/GetCounts.cpp index 45e21b7d64c..314ad190ae3 100644 --- a/src/ripple/rpc/handlers/GetCounts.cpp +++ b/src/ripple/rpc/handlers/GetCounts.cpp @@ -32,6 +32,7 @@ #include #include #include +#include namespace ripple { @@ -103,9 +104,11 @@ getCountsJson(Application& app, int minObjectCount) ret[jss::AL_hit_rate] = app.getAcceptedLedgerCache().getHitRate(); ret[jss::fullbelow_size] = - static_cast(app.family().fullbelow().size()); - ret[jss::treenode_cache_size] = app.family().treecache().getCacheSize(); - ret[jss::treenode_track_size] = app.family().treecache().getTrackSize(); + static_cast(app.getNodeFamily().getFullBelowCache(0)->size()); + ret[jss::treenode_cache_size] = + app.getNodeFamily().getTreeNodeCache(0)->getCacheSize(); + ret[jss::treenode_track_size] = + app.getNodeFamily().getTreeNodeCache(0)->getTrackSize(); std::string uptime; auto s = UptimeClock::now(); @@ -125,13 +128,13 @@ getCountsJson(Application& app, int minObjectCount) if (auto shardStore = app.getShardStore()) { + auto shardFamily{dynamic_cast(app.getShardFamily())}; + auto const [cacheSz, trackSz] = shardFamily->getTreeNodeCacheSize(); Json::Value& jv = (ret[jss::shards] = Json::objectValue); - jv[jss::fullbelow_size] = - static_cast(app.shardFamily()->fullbelow().size()); - jv[jss::treenode_cache_size] = - app.shardFamily()->treecache().getCacheSize(); - jv[jss::treenode_track_size] = - app.shardFamily()->treecache().getTrackSize(); + + jv[jss::fullbelow_size] = shardFamily->getFullBelowCacheSize(); + jv[jss::treenode_cache_size] = cacheSz; + jv[jss::treenode_track_size] = trackSz; ret[jss::write_load] = shardStore->getWriteLoad(); ret[jss::node_hit_rate] = shardStore->getCacheHitRate(); jv[jss::node_writes] = shardStore->getStoreCount(); diff --git a/src/ripple/rpc/handlers/LedgerRequest.cpp b/src/ripple/rpc/handlers/LedgerRequest.cpp index f7b12e95eb9..5d2d11b00aa 100644 
--- a/src/ripple/rpc/handlers/LedgerRequest.cpp +++ b/src/ripple/rpc/handlers/LedgerRequest.cpp @@ -67,7 +67,11 @@ doLedgerRequest(RPC::JsonContext& context) // We need a validated ledger to get the hash from the sequence if (ledgerMaster.getValidatedLedgerAge() > RPC::Tuning::maxValidatedLedgerAge) - return rpcError(rpcNO_CURRENT); + { + if (context.apiVersion == 1) + return rpcError(rpcNO_CURRENT); + return rpcError(rpcNOT_SYNCED); + } ledgerIndex = jsonIndex.asInt(); auto ledger = ledgerMaster.getValidatedLedger(); diff --git a/src/ripple/rpc/handlers/RipplePathFind.cpp b/src/ripple/rpc/handlers/RipplePathFind.cpp index 4b03bb93f53..5e23a47bd52 100644 --- a/src/ripple/rpc/handlers/RipplePathFind.cpp +++ b/src/ripple/rpc/handlers/RipplePathFind.cpp @@ -49,7 +49,9 @@ doRipplePathFind(RPC::JsonContext& context) if (context.app.getLedgerMaster().getValidatedLedgerAge() > RPC::Tuning::maxValidatedLedgerAge) { - return rpcError(rpcNO_NETWORK); + if (context.apiVersion == 1) + return rpcError(rpcNO_NETWORK); + return rpcError(rpcNOT_SYNCED); } PathRequest::pointer request; diff --git a/src/ripple/rpc/impl/GRPCHelpers.cpp b/src/ripple/rpc/impl/GRPCHelpers.cpp index 0c068b08d45..7b68f9fe331 100644 --- a/src/ripple/rpc/impl/GRPCHelpers.cpp +++ b/src/ripple/rpc/impl/GRPCHelpers.cpp @@ -485,6 +485,36 @@ populateFlags(T& to, STObject const& from) [&to]() { return to.mutable_flags(); }, from, sfFlags); } +template +void +populateFirstLedgerSequence(T& to, STObject const& from) +{ + populateProtoPrimitive( + [&to]() { return to.mutable_ledger_sequence(); }, + from, + sfFirstLedgerSequence); +} + +template +void +populateNegativeUNLToDisable(T& to, STObject const& from) +{ + populateProtoPrimitive( + [&to]() { return to.mutable_validator_to_disable(); }, + from, + sfNegativeUNLToDisable); +} + +template +void +populateNegativeUNLToReEnable(T& to, STObject const& from) +{ + populateProtoPrimitive( + [&to]() { return to.mutable_validator_to_re_enable(); }, + from, + 
sfNegativeUNLToReEnable); +} + template void populateLastLedgerSequence(T& to, STObject const& from) @@ -846,6 +876,21 @@ populateSignerEntries(T& to, STObject const& from) sfSignerEntry); } +template +void +populateNegativeUNLEntries(T& to, STObject const& from) +{ + populateProtoArray( + [&to]() { return to.add_negative_unl_entries(); }, + [](auto& innerObj, auto& innerProto) { + populatePublicKey(innerProto, innerObj); + populateFirstLedgerSequence(innerProto, innerObj); + }, + from, + sfNegativeUNL, + sfNegativeUNLEntry); +} + template void populateMemos(T& to, STObject const& from) @@ -1417,6 +1462,16 @@ convert(org::xrpl::rpc::v1::SignerList& to, STObject const& from) populateSignerListID(to, from); } +void +convert(org::xrpl::rpc::v1::NegativeUnl& to, STObject const& from) +{ + populateNegativeUNLEntries(to, from); + + populateNegativeUNLToDisable(to, from); + + populateNegativeUNLToReEnable(to, from); +} + void setLedgerEntryType( org::xrpl::rpc::v1::AffectedNode& proto, @@ -1472,6 +1527,10 @@ setLedgerEntryType( proto.set_ledger_entry_type( org::xrpl::rpc::v1::LEDGER_ENTRY_TYPE_DEPOSIT_PREAUTH); break; + case ltNEGATIVE_UNL: + proto.set_ledger_entry_type( + org::xrpl::rpc::v1::LEDGER_ENTRY_TYPE_NEGATIVE_UNL); + break; } } @@ -1517,6 +1576,9 @@ convert(T& to, STObject& from, std::uint16_t type) case ltDEPOSIT_PREAUTH: RPC::convert(*to.mutable_deposit_preauth(), from); break; + case ltNEGATIVE_UNL: + RPC::convert(*to.mutable_negative_unl(), from); + break; } } diff --git a/src/ripple/rpc/impl/GRPCHelpers.h b/src/ripple/rpc/impl/GRPCHelpers.h index bc856431da0..e9eae0107c3 100644 --- a/src/ripple/rpc/impl/GRPCHelpers.h +++ b/src/ripple/rpc/impl/GRPCHelpers.h @@ -58,6 +58,9 @@ convert(org::xrpl::rpc::v1::AccountRoot& to, STObject const& from); void convert(org::xrpl::rpc::v1::SignerList& to, STObject const& from); +void +convert(org::xrpl::rpc::v1::NegativeUnl& to, STObject const& from); + template void convert(T& to, STAmount const& from) diff --git 
a/src/ripple/rpc/impl/Handler.h b/src/ripple/rpc/impl/Handler.h index 09acd6ba165..6b6fa71e7cb 100644 --- a/src/ripple/rpc/impl/Handler.h +++ b/src/ripple/rpc/impl/Handler.h @@ -83,7 +83,9 @@ conditionMet(Condition condition_required, T& context) JLOG(context.j.info()) << "Insufficient network mode for RPC: " << context.netOps.strOperatingMode(); - return rpcNO_NETWORK; + if (context.apiVersion == 1) + return rpcNO_NETWORK; + return rpcNOT_SYNCED; } if (context.app.getOPs().isAmendmentBlocked() && @@ -99,7 +101,9 @@ conditionMet(Condition condition_required, T& context) if (context.ledgerMaster.getValidatedLedgerAge() > Tuning::maxValidatedLedgerAge) { - return rpcNO_CURRENT; + if (context.apiVersion == 1) + return rpcNO_CURRENT; + return rpcNOT_SYNCED; } auto const cID = context.ledgerMaster.getCurrentLedgerIndex(); @@ -110,14 +114,18 @@ conditionMet(Condition condition_required, T& context) JLOG(context.j.debug()) << "Current ledger ID(" << cID << ") is less than validated ledger ID(" << vID << ")"; - return rpcNO_CURRENT; + if (context.apiVersion == 1) + return rpcNO_CURRENT; + return rpcNOT_SYNCED; } } if ((condition_required & NEEDS_CLOSED_LEDGER) && !context.ledgerMaster.getClosedLedger()) { - return rpcNO_CLOSED; + if (context.apiVersion == 1) + return rpcNO_CLOSED; + return rpcNOT_SYNCED; } return rpcSUCCESS; diff --git a/src/ripple/rpc/impl/RPCHandler.cpp b/src/ripple/rpc/impl/RPCHandler.cpp index 9d4885b1ebf..cd15e91e0a7 100644 --- a/src/ripple/rpc/impl/RPCHandler.cpp +++ b/src/ripple/rpc/impl/RPCHandler.cpp @@ -65,9 +65,16 @@ namespace { Failure: { "result" : { + // api_version == 1 "error" : "noNetwork", - "error_code" : 16, - "error_message" : "Not synced to Ripple network.", + "error_code" : 17, + "error_message" : "Not synced to the network.", + + // api_version == 2 + "error" : "notSynced", + "error_code" : 18, + "error_message" : "Not synced to the network.", + "request" : { "command" : "ledger", "ledger_index" : 10300865 @@ -95,9 +102,16 @@ 
namespace { Failure: { + // api_version == 1 "error" : "noNetwork", - "error_code" : 16, - "error_message" : "Not synced to Ripple network.", + "error_code" : 17, + "error_message" : "Not synced to the network.", + + // api_version == 2 + "error" : "notSynced", + "error_code" : 18, + "error_message" : "Not synced to the network.", + "request" : { "command" : "ledger", "ledger_index" : 10300865 diff --git a/src/ripple/rpc/impl/RPCHelpers.cpp b/src/ripple/rpc/impl/RPCHelpers.cpp index 1104caa59f2..dbc86774d19 100644 --- a/src/ripple/rpc/impl/RPCHelpers.cpp +++ b/src/ripple/rpc/impl/RPCHelpers.cpp @@ -347,7 +347,9 @@ getLedger(T& ledger, uint32_t ledgerIndex, Context& context) isValidatedOld(context.ledgerMaster, context.app.config().standalone())) { ledger.reset(); - return {rpcNO_NETWORK, "InsufficientNetworkMode"}; + if (context.apiVersion == 1) + return {rpcNO_NETWORK, "InsufficientNetworkMode"}; + return {rpcNOT_SYNCED, "notSynced"}; } return Status::OK; @@ -358,13 +360,21 @@ Status getLedger(T& ledger, LedgerShortcut shortcut, Context& context) { if (isValidatedOld(context.ledgerMaster, context.app.config().standalone())) - return {rpcNO_NETWORK, "InsufficientNetworkMode"}; + { + if (context.apiVersion == 1) + return {rpcNO_NETWORK, "InsufficientNetworkMode"}; + return {rpcNOT_SYNCED, "notSynced"}; + } if (shortcut == LedgerShortcut::VALIDATED) { ledger = context.ledgerMaster.getValidatedLedger(); if (ledger == nullptr) - return {rpcNO_NETWORK, "InsufficientNetworkMode"}; + { + if (context.apiVersion == 1) + return {rpcNO_NETWORK, "InsufficientNetworkMode"}; + return {rpcNOT_SYNCED, "notSynced"}; + } assert(!ledger->open()); } @@ -386,7 +396,11 @@ getLedger(T& ledger, LedgerShortcut shortcut, Context& context) } if (ledger == nullptr) - return {rpcNO_NETWORK, "InsufficientNetworkMode"}; + { + if (context.apiVersion == 1) + return {rpcNO_NETWORK, "InsufficientNetworkMode"}; + return {rpcNOT_SYNCED, "notSynced"}; + } static auto const minSequenceGap = 10; @@ 
-394,7 +408,9 @@ getLedger(T& ledger, LedgerShortcut shortcut, Context& context) context.ledgerMaster.getValidLedgerIndex()) { ledger.reset(); - return {rpcNO_NETWORK, "InsufficientNetworkMode"}; + if (context.apiVersion == 1) + return {rpcNO_NETWORK, "InsufficientNetworkMode"}; + return {rpcNOT_SYNCED, "notSynced"}; } } return Status::OK; @@ -428,7 +444,8 @@ isValidated( // Use the skip list in the last validated ledger to see if ledger // comes before the last validated ledger (and thus has been // validated). - auto hash = ledgerMaster.walkHashBySeq(seq); + auto hash = + ledgerMaster.walkHashBySeq(seq, InboundLedger::Reason::GENERIC); if (!hash || ledger.info().hash != *hash) { diff --git a/src/ripple/rpc/impl/ServerHandlerImp.cpp b/src/ripple/rpc/impl/ServerHandlerImp.cpp index 3f404321bd0..0fb0018f390 100644 --- a/src/ripple/rpc/impl/ServerHandlerImp.cpp +++ b/src/ripple/rpc/impl/ServerHandlerImp.cpp @@ -443,9 +443,9 @@ ServerHandlerImp::processSession( is->getConsumer(), role, coro, - is}, + is, + apiVersion}, jv, - apiVersion, {is->user(), is->forwarded_for()}}; RPC::doCommand(context, jr[jss::result]); @@ -829,9 +829,9 @@ ServerHandlerImp::processRequest( usage, role, coro, - InfoSub::pointer()}, + InfoSub::pointer(), + apiVersion}, params, - apiVersion, {user, forwardedFor}}; Json::Value result; RPC::doCommand(context, result); diff --git a/src/ripple/rpc/impl/ShardArchiveHandler.cpp b/src/ripple/rpc/impl/ShardArchiveHandler.cpp index dac964a3022..39cfdd90b51 100644 --- a/src/ripple/rpc/impl/ShardArchiveHandler.cpp +++ b/src/ripple/rpc/impl/ShardArchiveHandler.cpp @@ -328,7 +328,8 @@ ShardArchiveHandler::next(std::lock_guard const& l) if (auto const seq = app_.getShardStore()->lastLedgerSeq(shardIndex); (shouldHaveHash = app_.getLedgerMaster().getValidLedgerIndex() > seq)) { - expectedHash = app_.getLedgerMaster().walkHashBySeq(seq); + expectedHash = app_.getLedgerMaster().walkHashBySeq( + seq, InboundLedger::Reason::GENERIC); } if (!expectedHash) diff 
--git a/src/ripple/rpc/impl/TransactionSign.cpp b/src/ripple/rpc/impl/TransactionSign.cpp index 0ebeef36326..0cceca14223 100644 --- a/src/ripple/rpc/impl/TransactionSign.cpp +++ b/src/ripple/rpc/impl/TransactionSign.cpp @@ -270,7 +270,8 @@ checkTxJsonFields( bool const verify, std::chrono::seconds validatedLedgerAge, Config const& config, - LoadFeeTrack const& feeTrack) + LoadFeeTrack const& feeTrack, + unsigned apiVersion) { std::pair ret; @@ -308,7 +309,10 @@ checkTxJsonFields( if (verify && !config.standalone() && (validatedLedgerAge > Tuning::maxValidatedLedgerAge)) { - ret.first = rpcError(rpcNO_CURRENT); + if (apiVersion == 1) + ret.first = rpcError(rpcNO_CURRENT); + else + ret.first = rpcError(rpcNOT_SYNCED); return ret; } @@ -384,7 +388,8 @@ transactionPreProcessImpl( verify, validatedLedgerAge, app.config(), - app.getFeeTrack()); + app.getFeeTrack(), + getAPIVersionNumber(params)); if (RPC::contains_error(txJsonResult)) return std::move(txJsonResult); @@ -1068,7 +1073,8 @@ transactionSubmitMultiSigned( true, validatedLedgerAge, app.config(), - app.getFeeTrack()); + app.getFeeTrack(), + getAPIVersionNumber(jvRequest)); if (RPC::contains_error(txJsonResult)) return std::move(txJsonResult); diff --git a/src/ripple/shamap/Family.h b/src/ripple/shamap/Family.h index 897ea6a9938..72c9a6cb07a 100644 --- a/src/ripple/shamap/Family.h +++ b/src/ripple/shamap/Family.h @@ -32,22 +32,17 @@ namespace ripple { class Family { public: - virtual ~Family() = default; - - virtual beast::Journal const& - journal() = 0; + Family(Family const&) = delete; + Family(Family&&) = delete; - virtual FullBelowCache& - fullbelow() = 0; + Family& + operator=(Family const&) = delete; - virtual FullBelowCache const& - fullbelow() const = 0; + Family& + operator=(Family&&) = delete; - virtual TreeNodeCache& - treecache() = 0; - - virtual TreeNodeCache const& - treecache() const = 0; + explicit Family() = default; + virtual ~Family() = default; virtual NodeStore::Database& db() = 0; @@ -55,14 
+50,36 @@ class Family virtual NodeStore::Database const& db() const = 0; + virtual beast::Journal const& + journal() = 0; + + /** Return a pointer to the Family Full Below Cache + + @param ledgerSeq ledger sequence determines a corresponding shard cache + @note ledgerSeq is used by ShardFamily and ignored by NodeFamily + */ + virtual std::shared_ptr + getFullBelowCache(std::uint32_t ledgerSeq) = 0; + + /** Return a pointer to the Family Tree Node Cache + + @param ledgerSeq ledger sequence determines a corresponding shard cache + @note ledgerSeq is used by ShardFamily and ignored by NodeFamily + */ + virtual std::shared_ptr + getTreeNodeCache(std::uint32_t ledgerSeq) = 0; + + virtual void + sweep() = 0; + virtual bool isShardBacked() const = 0; virtual void - missing_node(std::uint32_t refNum) = 0; + missingNode(std::uint32_t refNum) = 0; virtual void - missing_node(uint256 const& refHash, std::uint32_t refNum) = 0; + missingNode(uint256 const& refHash, std::uint32_t refNum) = 0; virtual void reset() = 0; diff --git a/src/ripple/shamap/NodeFamily.h b/src/ripple/shamap/NodeFamily.h new file mode 100644 index 00000000000..2d8236705b5 --- /dev/null +++ b/src/ripple/shamap/NodeFamily.h @@ -0,0 +1,112 @@ +//------------------------------------------------------------------------------ +/* + This file is part of rippled: https://github.com/ripple/rippled + Copyright (c) 2020 Ripple Labs Inc. + + Permission to use, copy, modify, and/or distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. +*/ +//============================================================================== + +#ifndef RIPPLE_SHAMAP_NODEFAMILY_H_INCLUDED +#define RIPPLE_SHAMAP_NODEFAMILY_H_INCLUDED + +#include +#include + +namespace ripple { + +class Application; + +class NodeFamily : public Family +{ +public: + NodeFamily() = delete; + NodeFamily(NodeFamily const&) = delete; + NodeFamily(NodeFamily&&) = delete; + + NodeFamily& + operator=(NodeFamily const&) = delete; + + NodeFamily& + operator=(NodeFamily&&) = delete; + + NodeFamily(Application& app, CollectorManager& cm); + + NodeStore::Database& + db() override + { + return db_; + } + + NodeStore::Database const& + db() const override + { + return db_; + } + + beast::Journal const& + journal() override + { + return j_; + } + + bool + isShardBacked() const override + { + return false; + } + + std::shared_ptr getFullBelowCache(std::uint32_t) override + { + return fbCache_; + } + + std::shared_ptr getTreeNodeCache(std::uint32_t) override + { + return tnCache_; + } + + void + sweep() override; + + void + reset() override; + + void + missingNode(std::uint32_t seq) override; + + void + missingNode(uint256 const& hash, std::uint32_t seq) override + { + acquire(hash, seq); + } + +private: + Application& app_; + NodeStore::Database& db_; + beast::Journal const j_; + + std::shared_ptr fbCache_; + std::shared_ptr tnCache_; + + // Missing node handler + LedgerIndex maxSeq_{0}; + std::mutex maxSeqMutex_; + + void + acquire(uint256 const& hash, std::uint32_t seq); +}; + +} // namespace ripple + +#endif diff --git a/src/ripple/shamap/SHAMap.h b/src/ripple/shamap/SHAMap.h index 67b3c3c5605..86adbd05470 100644 --- 
a/src/ripple/shamap/SHAMap.h +++ b/src/ripple/shamap/SHAMap.h @@ -246,7 +246,6 @@ class SHAMap addRootNode( SHAMapHash const& hash, Slice const& rootNode, - SHANodeFormat format, SHAMapSyncFilter* filter); SHAMapAddNode addKnownNode( diff --git a/src/ripple/shamap/SHAMapTreeNode.h b/src/ripple/shamap/SHAMapTreeNode.h index 93cb91fe032..f453d7cd86f 100644 --- a/src/ripple/shamap/SHAMapTreeNode.h +++ b/src/ripple/shamap/SHAMapTreeNode.h @@ -176,14 +176,32 @@ class SHAMapAbstractNode invariants(bool is_root = false) const = 0; static std::shared_ptr - make( - Slice const& rawNode, + makeFromPrefix(Slice rawNode, SHAMapHash const& hash); + + static std::shared_ptr + makeFromWire(Slice rawNode); + +private: + static std::shared_ptr + makeTransaction( + Slice data, + std::uint32_t seq, + SHAMapHash const& hash, + bool hashValid); + + static std::shared_ptr + makeAccountState( + Slice data, + std::uint32_t seq, + SHAMapHash const& hash, + bool hashValid); + + static std::shared_ptr + makeTransactionWithMeta( + Slice data, std::uint32_t seq, - SHANodeFormat format, SHAMapHash const& hash, - bool hashValid, - beast::Journal j, - SHAMapNodeID const& id = SHAMapNodeID{}); + bool hashValid); }; class SHAMapInnerNode : public SHAMapAbstractNode @@ -239,15 +257,15 @@ class SHAMapInnerNode : public SHAMapAbstractNode void invariants(bool is_root = false) const override; - friend std::shared_ptr - SHAMapAbstractNode::make( - Slice const& rawNode, + static std::shared_ptr + makeFullInner( + Slice data, std::uint32_t seq, - SHANodeFormat format, SHAMapHash const& hash, - bool hashValid, - beast::Journal j, - SHAMapNodeID const& id); + bool hashValid); + + static std::shared_ptr + makeCompressedInner(Slice data, std::uint32_t seq); }; // SHAMapTreeNode represents a leaf, and may eventually be renamed to reflect @@ -282,10 +300,6 @@ class SHAMapTreeNode : public SHAMapAbstractNode invariants(bool is_root = false) const override; public: // public only to SHAMap - // inner node 
functions - bool - isInnerNode() const; - // item node function bool hasItem() const; @@ -399,12 +413,6 @@ SHAMapInnerNode::setFullBelowGen(std::uint32_t gen) // SHAMapTreeNode -inline bool -SHAMapTreeNode::isInnerNode() const -{ - return !mItem; -} - inline bool SHAMapTreeNode::hasItem() const { diff --git a/src/ripple/shamap/ShardFamily.h b/src/ripple/shamap/ShardFamily.h new file mode 100644 index 00000000000..550efeb5b81 --- /dev/null +++ b/src/ripple/shamap/ShardFamily.h @@ -0,0 +1,124 @@ +//------------------------------------------------------------------------------ +/* + This file is part of rippled: https://github.com/ripple/rippled + Copyright (c) 2020 Ripple Labs Inc. + + Permission to use, copy, modify, and/or distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+*/ +//============================================================================== + +#ifndef RIPPLE_SHAMAP_SHARDFAMILY_H_INCLUDED +#define RIPPLE_SHAMAP_SHARDFAMILY_H_INCLUDED + +#include +#include + +namespace ripple { + +class Application; + +class ShardFamily : public Family +{ +public: + ShardFamily() = delete; + ShardFamily(ShardFamily const&) = delete; + ShardFamily(ShardFamily&&) = delete; + + ShardFamily& + operator=(ShardFamily const&) = delete; + + ShardFamily& + operator=(ShardFamily&&) = delete; + + ShardFamily(Application& app, CollectorManager& cm); + + NodeStore::Database& + db() override + { + return db_; + } + + NodeStore::Database const& + db() const override + { + return db_; + } + + beast::Journal const& + journal() override + { + return j_; + } + + bool + isShardBacked() const override + { + return true; + } + + std::shared_ptr + getFullBelowCache(std::uint32_t ledgerSeq) override; + + /** Return the number of entries in the cache */ + int + getFullBelowCacheSize(); + + std::shared_ptr + getTreeNodeCache(std::uint32_t ledgerSeq) override; + + /** Return a pair where the first item is the number of items cached + and the second item is the number of entries in the cache + */ + std::pair + getTreeNodeCacheSize(); + + void + sweep() override; + + void + reset() override; + + void + missingNode(std::uint32_t seq) override; + + void + missingNode(uint256 const& hash, std::uint32_t seq) override + { + acquire(hash, seq); + } + +private: + Application& app_; + NodeStore::Database& db_; + CollectorManager& cm_; + beast::Journal const j_; + + std::unordered_map> fbCache_; + std::mutex fbCacheMutex_; + + std::unordered_map> tnCache_; + std::mutex tnCacheMutex_; + int const tnTargetSize_; + std::chrono::seconds const tnTargetAge_; + + // Missing node handler + LedgerIndex maxSeq_{0}; + std::mutex maxSeqMutex_; + + void + acquire(uint256 const& hash, std::uint32_t seq); +}; + +} // namespace ripple + +#endif diff --git 
a/src/ripple/shamap/TreeNodeCache.h b/src/ripple/shamap/TreeNodeCache.h index 110f75c089e..9951db73dc2 100644 --- a/src/ripple/shamap/TreeNodeCache.h +++ b/src/ripple/shamap/TreeNodeCache.h @@ -21,12 +21,9 @@ #define RIPPLE_SHAMAP_TREENODECACHE_H_INCLUDED #include -#include namespace ripple { -class SHAMapAbstractNode; - using TreeNodeCache = TaggedCache; } // namespace ripple diff --git a/src/ripple/shamap/impl/NodeFamily.cpp b/src/ripple/shamap/impl/NodeFamily.cpp new file mode 100644 index 00000000000..f817020377c --- /dev/null +++ b/src/ripple/shamap/impl/NodeFamily.cpp @@ -0,0 +1,108 @@ +//------------------------------------------------------------------------------ +/* + This file is part of rippled: https://github.com/ripple/rippled + Copyright (c) 2020 Ripple Labs Inc. + + Permission to use, copy, modify, and/or distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+*/ +//============================================================================== + +#include +#include +#include +#include + +namespace ripple { + +NodeFamily::NodeFamily(Application& app, CollectorManager& cm) + : app_(app) + , db_(app.getNodeStore()) + , j_(app.journal("NodeFamily")) + , fbCache_(std::make_shared( + "Node family full below cache", + stopwatch(), + cm.collector(), + fullBelowTargetSize, + fullBelowExpiration)) + , tnCache_(std::make_shared( + "Node family tree node cache", + app.config().getValueFor(SizedItem::treeCacheSize), + std::chrono::seconds( + app.config().getValueFor(SizedItem::treeCacheAge)), + stopwatch(), + j_)) +{ +} + +void +NodeFamily::sweep() +{ + fbCache_->sweep(); + tnCache_->sweep(); +} + +void +NodeFamily::reset() +{ + { + std::lock_guard lock(maxSeqMutex_); + maxSeq_ = 0; + } + + fbCache_->reset(); + tnCache_->reset(); +} + +void +NodeFamily::missingNode(std::uint32_t seq) +{ + JLOG(j_.error()) << "Missing node in " << seq; + + std::unique_lock lock(maxSeqMutex_); + if (maxSeq_ == 0) + { + maxSeq_ = seq; + + do + { + // Try to acquire the most recent missing ledger + seq = maxSeq_; + + lock.unlock(); + + // This can invoke the missing node handler + acquire(app_.getLedgerMaster().getHashBySeq(seq), seq); + + lock.lock(); + } while (maxSeq_ != seq); + } + else if (maxSeq_ < seq) + { + // We found a more recent ledger with a missing node + maxSeq_ = seq; + } +} + +void +NodeFamily::acquire(uint256 const& hash, std::uint32_t seq) +{ + if (hash.isNonZero()) + { + JLOG(j_.error()) << "Missing node in " << to_string(hash); + + app_.getInboundLedgers().acquire( + hash, seq, InboundLedger::Reason::GENERIC); + } +} + +} // namespace ripple diff --git a/src/ripple/shamap/impl/SHAMap.cpp b/src/ripple/shamap/impl/SHAMap.cpp index b1d390b1ccc..18d83608c9c 100644 --- a/src/ripple/shamap/impl/SHAMap.cpp +++ b/src/ripple/shamap/impl/SHAMap.cpp @@ -151,13 +151,8 @@ SHAMap::fetchNodeFromDB(SHAMapHash const& hash) const { try { - node = 
SHAMapAbstractNode::make( - makeSlice(obj->getData()), - 0, - snfPREFIX, - hash, - true, - f_.journal()); + node = SHAMapAbstractNode::makeFromPrefix( + makeSlice(obj->getData()), hash); if (node) canonicalize(hash, node); } @@ -169,7 +164,7 @@ SHAMap::fetchNodeFromDB(SHAMapHash const& hash) const } else if (full_) { - f_.missing_node(ledgerSeq_); + f_.missingNode(ledgerSeq_); const_cast(full_) = false; } } @@ -181,20 +176,32 @@ SHAMap::fetchNodeFromDB(SHAMapHash const& hash) const std::shared_ptr SHAMap::checkFilter(SHAMapHash const& hash, SHAMapSyncFilter* filter) const { - std::shared_ptr node; if (auto nodeData = filter->getNode(hash)) { - node = SHAMapAbstractNode::make( - makeSlice(*nodeData), 0, snfPREFIX, hash, true, f_.journal()); - if (node) + try { - filter->gotNode( - true, hash, ledgerSeq_, std::move(*nodeData), node->getType()); - if (backed_) - canonicalize(hash, node); + auto node = + SHAMapAbstractNode::makeFromPrefix(makeSlice(*nodeData), hash); + if (node) + { + filter->gotNode( + true, + hash, + ledgerSeq_, + std::move(*nodeData), + node->getType()); + if (backed_) + canonicalize(hash, node); + } + return node; + } + catch (std::exception const& x) + { + JLOG(f_.journal().warn()) + << "Invalid node/data, hash=" << hash << ": " << x.what(); } } - return node; + return {}; } // Get a node without throwing @@ -325,10 +332,10 @@ SHAMap::descend( assert(!parent->isEmptyBranch(branch)); SHAMapAbstractNode* child = parent->getChildPointer(branch); - auto const& childHash = parent->getChildHash(branch); if (!child) { + auto const& childHash = parent->getChildHash(branch); std::shared_ptr childNode = fetchNodeNT(childHash, filter); @@ -374,13 +381,8 @@ SHAMap::descendAsync( if (!obj) return nullptr; - ptr = SHAMapAbstractNode::make( - makeSlice(obj->getData()), - 0, - snfPREFIX, - hash, - true, - f_.journal()); + ptr = SHAMapAbstractNode::makeFromPrefix( + makeSlice(obj->getData()), hash); if (ptr && backed_) canonicalize(hash, ptr); } @@ -1113,7 +1115,7 
@@ SHAMap::dump(bool hash) const std::shared_ptr SHAMap::getCache(SHAMapHash const& hash) const { - auto ret = f_.treecache().fetch(hash.as_uint256()); + auto ret = f_.getTreeNodeCache(ledgerSeq_)->fetch(hash.as_uint256()); assert(!ret || !ret->getSeq()); return ret; } @@ -1127,7 +1129,8 @@ SHAMap::canonicalize( assert(node->getSeq() == 0); assert(node->getNodeHash() == hash); - f_.treecache().canonicalize_replace_client(hash.as_uint256(), node); + f_.getTreeNodeCache(ledgerSeq_) + ->canonicalize_replace_client(hash.as_uint256(), node); } void diff --git a/src/ripple/shamap/impl/SHAMapSync.cpp b/src/ripple/shamap/impl/SHAMapSync.cpp index b62ca050b63..19f3937985a 100644 --- a/src/ripple/shamap/impl/SHAMapSync.cpp +++ b/src/ripple/shamap/impl/SHAMapSync.cpp @@ -198,7 +198,9 @@ SHAMap::gmn_ProcessNodes(MissingNodes& mn, MissingNodes::StackEntry& se) fullBelow = false; } else if ( - !backed_ || !f_.fullbelow().touch_if_exists(childHash.as_uint256())) + !backed_ || + !f_.getFullBelowCache(ledgerSeq_) + ->touch_if_exists(childHash.as_uint256())) { SHAMapNodeID childID = nodeID.getChildNodeID(branch); bool pending = false; @@ -243,7 +245,10 @@ SHAMap::gmn_ProcessNodes(MissingNodes& mn, MissingNodes::StackEntry& se) { // No partial node encountered below this node node->setFullBelowGen(mn.generation_); if (backed_) - f_.fullbelow().insert(node->getNodeHash().as_uint256()); + { + f_.getFullBelowCache(ledgerSeq_) + ->insert(node->getNodeHash().as_uint256()); + } } node = nullptr; @@ -323,7 +328,7 @@ SHAMap::getMissingNodes(int max, SHAMapSyncFilter* filter) max, filter, f_.db().getDesiredAsyncReadCount(ledgerSeq_), - f_.fullbelow().getGeneration()); + f_.getFullBelowCache(ledgerSeq_)->getGeneration()); if (!root_->isInner() || std::static_pointer_cast(root_)->isFullBelow( @@ -546,7 +551,6 @@ SHAMapAddNode SHAMap::addRootNode( SHAMapHash const& hash, Slice const& rootNode, - SHANodeFormat format, SHAMapSyncFilter* filter) { // we already have a root_ node @@ -558,8 +562,7 @@ 
SHAMap::addRootNode( } assert(seq_ >= 1); - auto node = SHAMapAbstractNode::make( - rootNode, 0, format, SHAMapHash{}, false, f_.journal()); + auto node = SHAMapAbstractNode::makeFromWire(rootNode); if (!node || !node->isValid() || node->getNodeHash() != hash) return SHAMapAddNode::invalid(); @@ -601,9 +604,8 @@ SHAMap::addKnownNode( return SHAMapAddNode::duplicate(); } - std::uint32_t generation = f_.fullbelow().getGeneration(); - auto newNode = SHAMapAbstractNode::make( - rawNode, 0, snfWIRE, SHAMapHash{}, false, f_.journal(), node); + auto const generation = f_.getFullBelowCache(ledgerSeq_)->getGeneration(); + auto newNode = SHAMapAbstractNode::makeFromWire(rawNode); SHAMapNodeID iNodeID; auto iNode = root_.get(); @@ -621,8 +623,11 @@ SHAMap::addKnownNode( } auto childHash = inner->getChildHash(branch); - if (f_.fullbelow().touch_if_exists(childHash.as_uint256())) + if (f_.getFullBelowCache(ledgerSeq_) + ->touch_if_exists(childHash.as_uint256())) + { return SHAMapAddNode::duplicate(); + } auto prevNode = inner; std::tie(iNode, iNodeID) = descend(inner, iNodeID, branch, filter); diff --git a/src/ripple/shamap/impl/SHAMapTreeNode.cpp b/src/ripple/shamap/impl/SHAMapTreeNode.cpp index cbf5a3745e9..147787ae929 100644 --- a/src/ripple/shamap/impl/SHAMapTreeNode.cpp +++ b/src/ripple/shamap/impl/SHAMapTreeNode.cpp @@ -76,220 +76,219 @@ SHAMapTreeNode::SHAMapTreeNode( } std::shared_ptr -SHAMapAbstractNode::make( - Slice const& rawNode, +SHAMapAbstractNode::makeTransaction( + Slice data, std::uint32_t seq, - SHANodeFormat format, SHAMapHash const& hash, - bool hashValid, - beast::Journal j, - SHAMapNodeID const& id) + bool hashValid) { - if (format == snfWIRE) - { - if (rawNode.empty()) - return {}; + // FIXME: using a Serializer results in a copy; avoid it? 
+ Serializer s(data.begin(), data.size()); - Serializer s(rawNode.data(), rawNode.size() - 1); - int type = rawNode[rawNode.size() - 1]; - int len = s.getLength(); + auto item = std::make_shared( + sha512Half(HashPrefix::transactionID, data), s); - if ((type < 0) || (type > 6)) - return {}; - if (type == 0) - { - // transaction - auto item = std::make_shared( - sha512Half( - HashPrefix::transactionID, Slice(s.data(), s.size())), - s.peekData()); - if (hashValid) - return std::make_shared( - std::move(item), tnTRANSACTION_NM, seq, hash); - return std::make_shared( - std::move(item), tnTRANSACTION_NM, seq); - } - else if (type == 1) - { - // account state - if (len < (256 / 8)) - Throw("short AS node"); - - uint256 u; - s.getBitString(u, len - (256 / 8)); - s.chop(256 / 8); - - if (u.isZero()) - Throw("invalid AS node"); - - auto item = std::make_shared(u, s.peekData()); - if (hashValid) - return std::make_shared( - std::move(item), tnACCOUNT_STATE, seq, hash); - return std::make_shared( - std::move(item), tnACCOUNT_STATE, seq); - } - else if (type == 2) - { - // full inner - if (len != 512) - Throw("invalid FI node"); + if (hashValid) + return std::make_shared( + std::move(item), tnTRANSACTION_NM, seq, hash); - auto ret = std::make_shared(seq); - for (int i = 0; i < 16; ++i) - { - s.getBitString(ret->mHashes[i].as_uint256(), i * 32); + return std::make_shared( + std::move(item), tnTRANSACTION_NM, seq); +} - if (ret->mHashes[i].isNonZero()) - ret->mIsBranch |= (1 << i); - } - if (hashValid) - ret->mHash = hash; - else - ret->updateHash(); - return ret; - } - else if (type == 3) - { - auto ret = std::make_shared(seq); - // compressed inner - for (int i = 0; i < (len / 33); ++i) - { - int pos; - if (!s.get8(pos, 32 + (i * 33))) - Throw("short CI node"); - if ((pos < 0) || (pos >= 16)) - Throw("invalid CI node"); - s.getBitString(ret->mHashes[pos].as_uint256(), i * 33); - if (ret->mHashes[pos].isNonZero()) - ret->mIsBranch |= (1 << pos); - } - if (hashValid) - 
ret->mHash = hash; - else - ret->updateHash(); - return ret; - } - else if (type == 4) - { - // transaction with metadata - if (len < (256 / 8)) - Throw("short TM node"); - - uint256 u; - s.getBitString(u, len - (256 / 8)); - s.chop(256 / 8); - - if (u.isZero()) - Throw("invalid TM node"); - - auto item = std::make_shared(u, s.peekData()); - if (hashValid) - return std::make_shared( - std::move(item), tnTRANSACTION_MD, seq, hash); - return std::make_shared( - std::move(item), tnTRANSACTION_MD, seq); - } - } +std::shared_ptr +SHAMapAbstractNode::makeTransactionWithMeta( + Slice data, + std::uint32_t seq, + SHAMapHash const& hash, + bool hashValid) +{ + Serializer s(data.data(), data.size()); + + uint256 tag; + + if (s.size() < tag.bytes) + Throw("Short TXN+MD node"); + + // FIXME: improve this interface so that the above check isn't needed + if (!s.getBitString(tag, s.size() - tag.bytes)) + Throw( + "Short TXN+MD node (" + std::to_string(s.size()) + ")"); + + s.chop(tag.bytes); - else if (format == snfPREFIX) + auto item = std::make_shared(tag, s.peekData()); + + if (hashValid) + return std::make_shared( + std::move(item), tnTRANSACTION_MD, seq, hash); + + return std::make_shared( + std::move(item), tnTRANSACTION_MD, seq); +} + +std::shared_ptr +SHAMapAbstractNode::makeAccountState( + Slice data, + std::uint32_t seq, + SHAMapHash const& hash, + bool hashValid) +{ + Serializer s(data.data(), data.size()); + + uint256 tag; + + if (s.size() < tag.bytes) + Throw("short AS node"); + + // FIXME: improve this interface so that the above check isn't needed + if (!s.getBitString(tag, s.size() - tag.bytes)) + Throw( + "Short AS node (" + std::to_string(s.size()) + ")"); + + s.chop(tag.bytes); + + if (tag.isZero()) + Throw("Invalid AS node"); + + auto item = std::make_shared(tag, s.peekData()); + + if (hashValid) + return std::make_shared( + std::move(item), tnACCOUNT_STATE, seq, hash); + + return std::make_shared( + std::move(item), tnACCOUNT_STATE, seq); +} + 
+std::shared_ptr +SHAMapInnerNode::makeFullInner( + Slice data, + std::uint32_t seq, + SHAMapHash const& hash, + bool hashValid) +{ + if (data.size() != 512) + Throw("Invalid FI node"); + + auto ret = std::make_shared(seq); + + Serializer s(data.data(), data.size()); + + for (int i = 0; i < 16; ++i) { - if (rawNode.size() < 4) - { - JLOG(j.info()) << "size < 4"; - Throw("invalid P node"); - } + s.getBitString(ret->mHashes[i].as_uint256(), i * 32); - std::uint32_t prefix = rawNode[0]; - prefix <<= 8; - prefix |= rawNode[1]; - prefix <<= 8; - prefix |= rawNode[2]; - prefix <<= 8; - prefix |= rawNode[3]; - Serializer s(rawNode.data() + 4, rawNode.size() - 4); + if (ret->mHashes[i].isNonZero()) + ret->mIsBranch |= (1 << i); + } - if (safe_cast(prefix) == HashPrefix::transactionID) - { - auto item = std::make_shared( - sha512Half(rawNode), s.peekData()); - if (hashValid) - return std::make_shared( - std::move(item), tnTRANSACTION_NM, seq, hash); - return std::make_shared( - std::move(item), tnTRANSACTION_NM, seq); - } - else if (safe_cast(prefix) == HashPrefix::leafNode) - { - if (s.getLength() < 32) - Throw("short PLN node"); + if (hashValid) + ret->mHash = hash; + else + ret->updateHash(); + return ret; +} - uint256 u; - s.getBitString(u, s.getLength() - 32); - s.chop(32); +std::shared_ptr +SHAMapInnerNode::makeCompressedInner(Slice data, std::uint32_t seq) +{ + Serializer s(data.data(), data.size()); - if (u.isZero()) - { - JLOG(j.info()) << "invalid PLN node"; - Throw("invalid PLN node"); - } + int len = s.getLength(); - auto item = std::make_shared(u, s.peekData()); - if (hashValid) - return std::make_shared( - std::move(item), tnACCOUNT_STATE, seq, hash); - return std::make_shared( - std::move(item), tnACCOUNT_STATE, seq); - } - else if (safe_cast(prefix) == HashPrefix::innerNode) - { - auto len = s.getLength(); + auto ret = std::make_shared(seq); - if (len != 512) - Throw("invalid PIN node"); + for (int i = 0; i < (len / 33); ++i) + { + int pos; - auto ret = 
std::make_shared(seq); + if (!s.get8(pos, 32 + (i * 33))) + Throw("short CI node"); - for (int i = 0; i < 16; ++i) - { - s.getBitString(ret->mHashes[i].as_uint256(), i * 32); + if ((pos < 0) || (pos >= 16)) + Throw("invalid CI node"); - if (ret->mHashes[i].isNonZero()) - ret->mIsBranch |= (1 << i); - } + s.getBitString(ret->mHashes[pos].as_uint256(), i * 33); - if (hashValid) - ret->mHash = hash; - else - ret->updateHash(); - return ret; - } - else if (safe_cast(prefix) == HashPrefix::txNode) - { - // transaction with metadata - if (s.getLength() < 32) - Throw("short TXN node"); - - uint256 txID; - s.getBitString(txID, s.getLength() - 32); - s.chop(32); - auto item = std::make_shared(txID, s.peekData()); - if (hashValid) - return std::make_shared( - std::move(item), tnTRANSACTION_MD, seq, hash); - return std::make_shared( - std::move(item), tnTRANSACTION_MD, seq); - } - else - { - JLOG(j.info()) << "Unknown node prefix " << std::hex << prefix - << std::dec; - Throw("invalid node prefix"); - } + if (ret->mHashes[pos].isNonZero()) + ret->mIsBranch |= (1 << pos); } - assert(false); - Throw("Unknown format"); - return {}; // Silence compiler warning. 
+ + ret->updateHash(); + + return ret; +} + +std::shared_ptr +SHAMapAbstractNode::makeFromWire(Slice rawNode) +{ + if (rawNode.empty()) + return {}; + + auto const type = rawNode[rawNode.size() - 1]; + + rawNode.remove_suffix(1); + + bool const hashValid = false; + SHAMapHash const hash; + + std::uint32_t const seq = 0; + + if (type == 0) + return makeTransaction(rawNode, seq, hash, hashValid); + + if (type == 1) + return makeAccountState(rawNode, seq, hash, hashValid); + + if (type == 2) + return SHAMapInnerNode::makeFullInner(rawNode, seq, hash, hashValid); + + if (type == 3) + return SHAMapInnerNode::makeCompressedInner(rawNode, seq); + + if (type == 4) + return makeTransactionWithMeta(rawNode, seq, hash, hashValid); + + Throw( + "wire: Unknown type (" + std::to_string(type) + ")"); +} + +std::shared_ptr +SHAMapAbstractNode::makeFromPrefix(Slice rawNode, SHAMapHash const& hash) +{ + if (rawNode.size() < 4) + Throw("prefix: short node"); + + // FIXME: Use SerialIter::get32? + // Extract the prefix + auto const type = safe_cast( + (safe_cast(rawNode[0]) << 24) + + (safe_cast(rawNode[1]) << 16) + + (safe_cast(rawNode[2]) << 8) + + (safe_cast(rawNode[3]))); + + rawNode.remove_prefix(4); + + bool const hashValid = true; + std::uint32_t const seq = 0; + + if (type == HashPrefix::transactionID) + return makeTransaction(rawNode, seq, hash, hashValid); + + if (type == HashPrefix::leafNode) + return makeAccountState(rawNode, seq, hash, hashValid); + + if (type == HashPrefix::innerNode) + return SHAMapInnerNode::makeFullInner(rawNode, seq, hash, hashValid); + + if (type == HashPrefix::txNode) + return makeTransactionWithMeta(rawNode, seq, hash, hashValid); + + Throw( + "prefix: unknown type (" + + std::to_string(safe_cast>(type)) + + ")"); } bool diff --git a/src/ripple/shamap/impl/ShardFamily.cpp b/src/ripple/shamap/impl/ShardFamily.cpp new file mode 100644 index 00000000000..ea80f85ba38 --- /dev/null +++ b/src/ripple/shamap/impl/ShardFamily.cpp @@ -0,0 +1,195 @@ 
+//------------------------------------------------------------------------------ +/* + This file is part of rippled: https://github.com/ripple/rippled + Copyright (c) 2020 Ripple Labs Inc. + + Permission to use, copy, modify, and/or distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. +*/ +//============================================================================== + +#include +#include +#include +#include +#include + +namespace ripple { + +static NodeStore::Database& +getShardStore(Application& app) +{ + auto const dbPtr = app.getShardStore(); + assert(dbPtr); + return *dbPtr; +} + +ShardFamily::ShardFamily(Application& app, CollectorManager& cm) + : app_(app) + , db_(getShardStore(app)) + , cm_(cm) + , j_(app.journal("ShardFamily")) + , tnTargetSize_(app.config().getValueFor(SizedItem::treeCacheSize)) + , tnTargetAge_(app.config().getValueFor(SizedItem::treeCacheAge)) +{ +} + +std::shared_ptr +ShardFamily::getFullBelowCache(std::uint32_t ledgerSeq) +{ + auto const shardIndex{app_.getShardStore()->seqToShardIndex(ledgerSeq)}; + std::lock_guard lock(fbCacheMutex_); + if (auto const it{fbCache_.find(shardIndex)}; it != fbCache_.end()) + return it->second; + + // Create a cache for the corresponding shard + auto fbCache{std::make_shared( + "Shard family full below cache shard " + std::to_string(shardIndex), + stopwatch(), + cm_.collector(), + 
fullBelowTargetSize, + fullBelowExpiration)}; + return fbCache_.emplace(shardIndex, std::move(fbCache)).first->second; +} + +int +ShardFamily::getFullBelowCacheSize() +{ + size_t sz{0}; + std::lock_guard lock(fbCacheMutex_); + for (auto const& e : fbCache_) + sz += e.second->size(); + return sz; +} + +std::shared_ptr +ShardFamily::getTreeNodeCache(std::uint32_t ledgerSeq) +{ + auto const shardIndex{app_.getShardStore()->seqToShardIndex(ledgerSeq)}; + std::lock_guard lock(tnCacheMutex_); + if (auto const it{tnCache_.find(shardIndex)}; it != tnCache_.end()) + return it->second; + + // Create a cache for the corresponding shard + auto tnCache{std::make_shared( + "Shard family tree node cache shard " + std::to_string(shardIndex), + tnTargetSize_, + tnTargetAge_, + stopwatch(), + j_)}; + return tnCache_.emplace(shardIndex, std::move(tnCache)).first->second; +} + +std::pair +ShardFamily::getTreeNodeCacheSize() +{ + int cacheSz{0}; + int trackSz{0}; + std::lock_guard lock(tnCacheMutex_); + for (auto const& e : tnCache_) + { + cacheSz += e.second->getCacheSize(); + trackSz += e.second->getTrackSize(); + } + return {cacheSz, trackSz}; +} + +void +ShardFamily::sweep() +{ + { + std::lock_guard lock(fbCacheMutex_); + for (auto it = fbCache_.cbegin(); it != fbCache_.cend();) + { + it->second->sweep(); + + // Remove cache if empty + if (it->second->size() == 0) + it = fbCache_.erase(it); + else + ++it; + } + } + + std::lock_guard lock(tnCacheMutex_); + for (auto it = tnCache_.cbegin(); it != tnCache_.cend();) + { + it->second->sweep(); + + // Remove cache if empty + if (it->second->getTrackSize() == 0) + it = tnCache_.erase(it); + else + ++it; + } +} + +void +ShardFamily::reset() +{ + { + std::lock_guard lock(maxSeqMutex_); + maxSeq_ = 0; + } + + { + std::lock_guard lock(fbCacheMutex_); + fbCache_.clear(); + } + + std::lock_guard lock(tnCacheMutex_); + tnCache_.clear(); +} + +void +ShardFamily::missingNode(std::uint32_t seq) +{ + JLOG(j_.error()) << "Missing node in ledger 
sequence " << seq; + + std::unique_lock lock(maxSeqMutex_); + if (maxSeq_ == 0) + { + maxSeq_ = seq; + + do + { + // Try to acquire the most recent missing ledger + seq = maxSeq_; + + lock.unlock(); + + // This can invoke the missing node handler + acquire(app_.getLedgerMaster().getHashBySeq(seq), seq); + + lock.lock(); + } while (maxSeq_ != seq); + } + else if (maxSeq_ < seq) + { + // We found a more recent ledger with a missing node + maxSeq_ = seq; + } +} + +void +ShardFamily::acquire(uint256 const& hash, std::uint32_t seq) +{ + if (hash.isNonZero()) + { + JLOG(j_.error()) << "Missing node in " << to_string(hash); + + app_.getInboundLedgers().acquire( + hash, seq, InboundLedger::Reason::SHARD); + } +} + +} // namespace ripple diff --git a/src/test/app/AmendmentTable_test.cpp b/src/test/app/AmendmentTable_test.cpp index ec9293281b0..3b4ec47d143 100644 --- a/src/test/app/AmendmentTable_test.cpp +++ b/src/test/app/AmendmentTable_test.cpp @@ -38,9 +38,6 @@ namespace ripple { class AmendmentTable_test final : public beast::unit_test::suite { private: - // 204/256 about 80% (we round down because the implementation rounds up) - static int const majorityFraction{204}; - static uint256 amendmentId(std::string in) { @@ -100,12 +97,7 @@ class AmendmentTable_test final : public beast::unit_test::suite Section const vetoed) { return make_AmendmentTable( - majorityTime, - majorityFraction, - supported, - enabled, - vetoed, - journal); + majorityTime, supported, enabled, vetoed, journal); } std::unique_ptr @@ -373,6 +365,7 @@ class AmendmentTable_test final : public beast::unit_test::suite // Execute a pretend consensus round for a flag ledger void doRound( + uint256 const& feat, AmendmentTable& table, weeks week, std::vector> const& validators, @@ -399,25 +392,25 @@ class AmendmentTable_test final : public beast::unit_test::suite validations.reserve(validators.size()); int i = 0; - for (auto const& val : validators) + for (auto const& [pub, sec] : validators) { ++i; 
std::vector field; - for (auto const& amendment : votes) + for (auto const& [hash, nVotes] : votes) { - if ((256 * i) < (validators.size() * amendment.second)) + if (feat == fixAmendmentMajorityCalc ? nVotes >= i : nVotes > i) { // We vote yes on this amendment - field.push_back(amendment.first); + field.push_back(hash); } } auto v = std::make_shared( ripple::NetClock::time_point{}, - val.first, - val.second, - calcNodeID(val.first), + pub, + sec, + calcNodeID(pub), [&field](STValidation& v) { if (!field.empty()) v.setFieldV256( @@ -430,14 +423,13 @@ class AmendmentTable_test final : public beast::unit_test::suite ourVotes = table.doValidation(enabled); - auto actions = - table.doVoting(roundTime, enabled, majority, validations); - for (auto const& action : actions) + auto actions = table.doVoting( + Rules({feat}), roundTime, enabled, majority, validations); + for (auto const& [hash, action] : actions) { // This code assumes other validators do as we do - auto const& hash = action.first; - switch (action.second) + switch (action) { case 0: // amendment goes from majority to enabled @@ -471,7 +463,7 @@ class AmendmentTable_test final : public beast::unit_test::suite // No vote on unknown amendment void - testNoOnUnknown() + testNoOnUnknown(uint256 const& feat) { testcase("Vote NO on unknown"); @@ -487,15 +479,29 @@ class AmendmentTable_test final : public beast::unit_test::suite majorityAmendments_t majority; doRound( - *table, weeks{1}, validators, votes, ourVotes, enabled, majority); + feat, + *table, + weeks{1}, + validators, + votes, + ourVotes, + enabled, + majority); BEAST_EXPECT(ourVotes.empty()); BEAST_EXPECT(enabled.empty()); BEAST_EXPECT(majority.empty()); - votes.emplace_back(testAmendment, 256); + votes.emplace_back(testAmendment, validators.size()); doRound( - *table, weeks{2}, validators, votes, ourVotes, enabled, majority); + feat, + *table, + weeks{2}, + validators, + votes, + ourVotes, + enabled, + majority); BEAST_EXPECT(ourVotes.empty()); 
BEAST_EXPECT(enabled.empty()); @@ -504,14 +510,21 @@ class AmendmentTable_test final : public beast::unit_test::suite // Note that the simulation code assumes others behave as we do, // so the amendment won't get enabled doRound( - *table, weeks{5}, validators, votes, ourVotes, enabled, majority); + feat, + *table, + weeks{5}, + validators, + votes, + ourVotes, + enabled, + majority); BEAST_EXPECT(ourVotes.empty()); BEAST_EXPECT(enabled.empty()); } // No vote on vetoed amendment void - testNoOnVetoed() + testNoOnVetoed(uint256 const& feat) { testcase("Vote NO on vetoed"); @@ -528,29 +541,50 @@ class AmendmentTable_test final : public beast::unit_test::suite majorityAmendments_t majority; doRound( - *table, weeks{1}, validators, votes, ourVotes, enabled, majority); + feat, + *table, + weeks{1}, + validators, + votes, + ourVotes, + enabled, + majority); BEAST_EXPECT(ourVotes.empty()); BEAST_EXPECT(enabled.empty()); BEAST_EXPECT(majority.empty()); - votes.emplace_back(testAmendment, 256); + votes.emplace_back(testAmendment, validators.size()); doRound( - *table, weeks{2}, validators, votes, ourVotes, enabled, majority); + feat, + *table, + weeks{2}, + validators, + votes, + ourVotes, + enabled, + majority); BEAST_EXPECT(ourVotes.empty()); BEAST_EXPECT(enabled.empty()); majority[testAmendment] = weekTime(weeks{1}); doRound( - *table, weeks{5}, validators, votes, ourVotes, enabled, majority); + feat, + *table, + weeks{5}, + validators, + votes, + ourVotes, + enabled, + majority); BEAST_EXPECT(ourVotes.empty()); BEAST_EXPECT(enabled.empty()); } // Vote on and enable known, not-enabled amendment void - testVoteEnable() + testVoteEnable(uint256 const& feat) { testcase("voteEnable"); @@ -565,7 +599,14 @@ class AmendmentTable_test final : public beast::unit_test::suite // Week 1: We should vote for all known amendments not enabled doRound( - *table, weeks{1}, validators, votes, ourVotes, enabled, majority); + feat, + *table, + weeks{1}, + validators, + votes, + ourVotes, + 
enabled, + majority); BEAST_EXPECT(ourVotes.size() == supported_.size()); BEAST_EXPECT(enabled.empty()); for (auto const& i : supported_) @@ -573,11 +614,18 @@ class AmendmentTable_test final : public beast::unit_test::suite // Now, everyone votes for this feature for (auto const& i : supported_) - votes.emplace_back(amendmentId(i), 256); + votes.emplace_back(amendmentId(i), validators.size()); // Week 2: We should recognize a majority doRound( - *table, weeks{2}, validators, votes, ourVotes, enabled, majority); + feat, + *table, + weeks{2}, + validators, + votes, + ourVotes, + enabled, + majority); BEAST_EXPECT(ourVotes.size() == supported_.size()); BEAST_EXPECT(enabled.empty()); @@ -586,12 +634,26 @@ class AmendmentTable_test final : public beast::unit_test::suite // Week 5: We should enable the amendment doRound( - *table, weeks{5}, validators, votes, ourVotes, enabled, majority); + feat, + *table, + weeks{5}, + validators, + votes, + ourVotes, + enabled, + majority); BEAST_EXPECT(enabled.size() == supported_.size()); // Week 6: We should remove it from our votes and from having a majority doRound( - *table, weeks{6}, validators, votes, ourVotes, enabled, majority); + feat, + *table, + weeks{6}, + validators, + votes, + ourVotes, + enabled, + majority); BEAST_EXPECT(enabled.size() == supported_.size()); BEAST_EXPECT(ourVotes.empty()); for (auto const& i : supported_) @@ -600,7 +662,7 @@ class AmendmentTable_test final : public beast::unit_test::suite // Detect majority at 80%, enable later void - testDetectMajority() + testDetectMajority(uint256 const& feat) { testcase("detectMajority"); @@ -619,9 +681,10 @@ class AmendmentTable_test final : public beast::unit_test::suite std::vector ourVotes; if ((i > 0) && (i < 17)) - votes.emplace_back(testAmendment, i * 16); + votes.emplace_back(testAmendment, i); doRound( + feat, *table, weeks{i}, validators, @@ -630,7 +693,7 @@ class AmendmentTable_test final : public beast::unit_test::suite enabled, majority); - if (i < 
13) + if (i < 13) // 13 => 13/16 = 0.8125 => > 80% { // We are voting yes, not enabled, no majority BEAST_EXPECT(!ourVotes.empty()); @@ -663,7 +726,7 @@ class AmendmentTable_test final : public beast::unit_test::suite // Detect loss of majority void - testLostMajority() + testLostMajority(uint256 const& feat) { testcase("lostMajority"); @@ -681,9 +744,10 @@ class AmendmentTable_test final : public beast::unit_test::suite std::vector> votes; std::vector ourVotes; - votes.emplace_back(testAmendment, 250); + votes.emplace_back(testAmendment, validators.size()); doRound( + feat, *table, weeks{1}, validators, @@ -696,15 +760,16 @@ class AmendmentTable_test final : public beast::unit_test::suite BEAST_EXPECT(!majority.empty()); } - for (int i = 1; i < 16; ++i) + for (int i = 1; i < 8; ++i) { std::vector> votes; std::vector ourVotes; // Gradually reduce support - votes.emplace_back(testAmendment, 256 - i * 8); + votes.emplace_back(testAmendment, validators.size() - i); doRound( + feat, *table, weeks{i + 1}, validators, @@ -713,8 +778,8 @@ class AmendmentTable_test final : public beast::unit_test::suite enabled, majority); - if (i < 8) - { + if (i < 4) // 16 - 3 = 13 => 13/16 = 0.8125 => > 80% + { // 16 - 4 = 12 => 12/16 = 0.75 => < 80% // We are voting yes, not enabled, majority BEAST_EXPECT(!ourVotes.empty()); BEAST_EXPECT(enabled.empty()); @@ -775,6 +840,16 @@ class AmendmentTable_test final : public beast::unit_test::suite BEAST_EXPECT(table->needValidatedLedger(257)); } + void + testFeature(uint256 const& feat) + { + testNoOnUnknown(feat); + testNoOnVetoed(feat); + testVoteEnable(feat); + testDetectMajority(feat); + testLostMajority(feat); + } + void run() override { @@ -782,12 +857,9 @@ class AmendmentTable_test final : public beast::unit_test::suite testGet(); testBadConfig(); testEnableVeto(); - testNoOnUnknown(); - testNoOnVetoed(); - testVoteEnable(); - testDetectMajority(); - testLostMajority(); testHasUnsupported(); + testFeature({}); + 
testFeature(fixAmendmentMajorityCalc); } }; diff --git a/src/test/app/LedgerHistory_test.cpp b/src/test/app/LedgerHistory_test.cpp index ac2dcda61b2..513905a6bf7 100644 --- a/src/test/app/LedgerHistory_test.cpp +++ b/src/test/app/LedgerHistory_test.cpp @@ -27,6 +27,7 @@ #include #include #include +#include namespace ripple { namespace test { @@ -34,56 +35,6 @@ namespace test { class LedgerHistory_test : public beast::unit_test::suite { public: - /** Log manager that searches for a specific message substring - */ - class CheckMessageLogs : public Logs - { - std::string msg_; - bool& found_; - - class CheckMessageSink : public beast::Journal::Sink - { - CheckMessageLogs& owner_; - - public: - CheckMessageSink( - beast::severities::Severity threshold, - CheckMessageLogs& owner) - : beast::Journal::Sink(threshold, false), owner_(owner) - { - } - - void - write(beast::severities::Severity level, std::string const& text) - override - { - if (text.find(owner_.msg_) != std::string::npos) - owner_.found_ = true; - } - }; - - public: - /** Constructor - - @param msg The message string to search for - @param found The variable to set to true if the message is found - */ - CheckMessageLogs(std::string msg, bool& found) - : Logs{beast::severities::kDebug} - , msg_{std::move(msg)} - , found_{found} - { - } - - std::unique_ptr - makeSink( - std::string const& partition, - beast::severities::Severity threshold) override - { - return std::make_unique(threshold, *this); - } - }; - /** Generate a new ledger by hand, applying a specific close time offset and optionally inserting a transaction. 
@@ -106,7 +57,7 @@ class LedgerHistory_test : public beast::unit_test::suite create_genesis, env.app().config(), std::vector{}, - env.app().family()); + env.app().getNodeFamily()); } auto res = std::make_shared( *prev, prev->info().closeTime + closeOffset); @@ -149,7 +100,7 @@ class LedgerHistory_test : public beast::unit_test::suite Env env{ *this, envconfig(), - std::make_unique("MISMATCH ", found)}; + std::make_unique("MISMATCH ", &found)}; LedgerHistory lh{beast::insight::NullCollector::New(), env.app()}; auto const genesis = makeLedger({}, env, lh, 0s); uint256 const dummyTxHash{1}; @@ -166,7 +117,7 @@ class LedgerHistory_test : public beast::unit_test::suite *this, envconfig(), std::make_unique( - "MISMATCH on close time", found)}; + "MISMATCH on close time", &found)}; LedgerHistory lh{beast::insight::NullCollector::New(), env.app()}; auto const genesis = makeLedger({}, env, lh, 0s); auto const ledgerA = makeLedger(genesis, env, lh, 4s); @@ -186,7 +137,7 @@ class LedgerHistory_test : public beast::unit_test::suite *this, envconfig(), std::make_unique( - "MISMATCH on prior ledger", found)}; + "MISMATCH on prior ledger", &found)}; LedgerHistory lh{beast::insight::NullCollector::New(), env.app()}; auto const genesis = makeLedger({}, env, lh, 0s); auto const ledgerA = makeLedger(genesis, env, lh, 4s); @@ -212,7 +163,7 @@ class LedgerHistory_test : public beast::unit_test::suite Env env{ *this, envconfig(), - std::make_unique(msg, found)}; + std::make_unique(msg, &found)}; LedgerHistory lh{beast::insight::NullCollector::New(), env.app()}; Account alice{"A1"}; diff --git a/src/test/app/Manifest_test.cpp b/src/test/app/Manifest_test.cpp index 063b4383281..18460ce3689 100644 --- a/src/test/app/Manifest_test.cpp +++ b/src/test/app/Manifest_test.cpp @@ -256,6 +256,7 @@ class Manifest_test : public beast::unit_test::suite { DatabaseCon::Setup setup; setup.dataDir = getDatabasePath(); + BEAST_EXPECT(!setup.useGlobalPragma); DatabaseCon dbCon( setup, dbName.data(), diff 
--git a/src/test/app/Path_test.cpp b/src/test/app/Path_test.cpp index 7b273666ac5..17e15c95043 100644 --- a/src/test/app/Path_test.cpp +++ b/src/test/app/Path_test.cpp @@ -223,9 +223,11 @@ class Path_test : public beast::unit_test::suite app.getOPs(), app.getLedgerMaster(), c, - Role::USER}, + Role::USER, + {}, + {}, + RPC::APIVersionIfUnspecified}, {}, - RPC::APIVersionIfUnspecified, {}}; Json::Value params = Json::objectValue; @@ -329,9 +331,11 @@ class Path_test : public beast::unit_test::suite app.getOPs(), app.getLedgerMaster(), c, - Role::USER}, + Role::USER, + {}, + {}, + RPC::APIVersionIfUnspecified}, {}, - RPC::APIVersionIfUnspecified, {}}; Json::Value result; gate g; diff --git a/src/test/app/RCLValidations_test.cpp b/src/test/app/RCLValidations_test.cpp index eca66a26a88..14a54a1492f 100644 --- a/src/test/app/RCLValidations_test.cpp +++ b/src/test/app/RCLValidations_test.cpp @@ -77,7 +77,10 @@ class RCLValidations_test : public beast::unit_test::suite jtx::Env env(*this); Config config; auto prev = std::make_shared( - create_genesis, config, std::vector{}, env.app().family()); + create_genesis, + config, + std::vector{}, + env.app().getNodeFamily()); history.push_back(prev); for (auto i = 0; i < (2 * maxAncestors + 1); ++i) { @@ -237,7 +240,10 @@ class RCLValidations_test : public beast::unit_test::suite auto& j = env.journal; Config config; auto prev = std::make_shared( - create_genesis, config, std::vector{}, env.app().family()); + create_genesis, + config, + std::vector{}, + env.app().getNodeFamily()); history.push_back(prev); for (auto i = 0; i < (maxAncestors + 10); ++i) { diff --git a/src/test/app/Regression_test.cpp b/src/test/app/Regression_test.cpp index e7c4dc7d5b6..6431f81dbd6 100644 --- a/src/test/app/Regression_test.cpp +++ b/src/test/app/Regression_test.cpp @@ -60,7 +60,7 @@ struct Regression_test : public beast::unit_test::suite create_genesis, env.app().config(), std::vector{}, - env.app().family()); + env.app().getNodeFamily()); auto 
expectedDrops = INITIAL_XRP; BEAST_EXPECT(closed->info().drops == expectedDrops); diff --git a/src/test/app/ValidatorList_test.cpp b/src/test/app/ValidatorList_test.cpp index 5bb6fb0ee2a..cb069c2c798 100644 --- a/src/test/app/ValidatorList_test.cpp +++ b/src/test/app/ValidatorList_test.cpp @@ -1267,6 +1267,185 @@ class ValidatorList_test : public beast::unit_test::suite } } + void + testNegativeUNL() + { + testcase("NegativeUNL"); + jtx::Env env(*this); + PublicKey emptyLocalKey; + ManifestCache manifests; + + auto createValidatorList = + [&](std::uint32_t vlSize, + boost::optional minimumQuorum = {}) + -> std::shared_ptr { + auto trustedKeys = std::make_shared( + manifests, + manifests, + env.timeKeeper(), + env.app().config().legacy("database_path"), + env.journal, + minimumQuorum); + + std::vector cfgPublishers; + std::vector cfgKeys; + hash_set activeValidators; + cfgKeys.reserve(vlSize); + while (cfgKeys.size() < cfgKeys.capacity()) + { + auto const valKey = randomNode(); + cfgKeys.push_back(toBase58(TokenType::NodePublic, valKey)); + activeValidators.emplace(calcNodeID(valKey)); + } + if (trustedKeys->load(emptyLocalKey, cfgKeys, cfgPublishers)) + { + trustedKeys->updateTrusted(activeValidators); + if (trustedKeys->quorum() == std::ceil(cfgKeys.size() * 0.8f)) + return trustedKeys; + } + return nullptr; + }; + + /* + * Test NegativeUNL + * == Combinations == + * -- UNL size: 34, 35, 57 + * -- nUNL size: 0%, 20%, 30%, 50% + * + * == with UNL size 60 + * -- set == get, + * -- check quorum, with nUNL size: 0, 12, 30, 18 + * -- nUNL overlap: |nUNL - UNL| = 5, with nUNL size: 18 + * -- with command line minimumQuorum = 50%, + * seen_reliable affected by nUNL + */ + + { + hash_set activeValidators; + //== Combinations == + std::array unlSizes = {34, 35, 39, 60}; + std::array nUnlPercent = {0, 20, 30, 50}; + for (auto us : unlSizes) + { + for (auto np : nUnlPercent) + { + auto validators = createValidatorList(us); + BEAST_EXPECT(validators); + if (validators) + { + 
std::uint32_t nUnlSize = us * np / 100; + auto unl = validators->getTrustedMasterKeys(); + hash_set nUnl; + auto it = unl.begin(); + for (std::uint32_t i = 0; i < nUnlSize; ++i) + { + nUnl.insert(*it); + ++it; + } + validators->setNegativeUnl(nUnl); + validators->updateTrusted(activeValidators); + BEAST_EXPECT( + validators->quorum() == + static_cast(std::ceil( + std::max((us - nUnlSize) * 0.8f, us * 0.6f)))); + } + } + } + } + + { + //== with UNL size 60 + auto validators = createValidatorList(60); + BEAST_EXPECT(validators); + if (validators) + { + hash_set activeValidators; + auto unl = validators->getTrustedMasterKeys(); + BEAST_EXPECT(unl.size() == 60); + { + //-- set == get, + //-- check quorum, with nUNL size: 0, 30, 18, 12 + auto nUnlChange = [&](std::uint32_t nUnlSize, + std::uint32_t quorum) -> bool { + hash_set nUnl; + auto it = unl.begin(); + for (std::uint32_t i = 0; i < nUnlSize; ++i) + { + nUnl.insert(*it); + ++it; + } + validators->setNegativeUnl(nUnl); + auto nUnl_temp = validators->getNegativeUnl(); + if (nUnl_temp.size() == nUnl.size()) + { + for (auto& n : nUnl_temp) + { + if (nUnl.find(n) == nUnl.end()) + return false; + } + validators->updateTrusted(activeValidators); + return validators->quorum() == quorum; + } + return false; + }; + BEAST_EXPECT(nUnlChange(0, 48)); + BEAST_EXPECT(nUnlChange(30, 36)); + BEAST_EXPECT(nUnlChange(18, 36)); + BEAST_EXPECT(nUnlChange(12, 39)); + } + + { + // nUNL overlap: |nUNL - UNL| = 5, with nUNL size: 18 + auto nUnl = validators->getNegativeUnl(); + BEAST_EXPECT(nUnl.size() == 12); + std::size_t ss = 33; + std::vector data(ss, 0); + data[0] = 0xED; + for (int i = 0; i < 6; ++i) + { + Slice s(data.data(), ss); + data[1]++; + nUnl.emplace(s); + } + validators->setNegativeUnl(nUnl); + validators->updateTrusted(activeValidators); + BEAST_EXPECT(validators->quorum() == 39); + } + } + } + + { + //== with UNL size 60 + //-- with command line minimumQuorum = 50%, + // seen_reliable affected by nUNL + auto validators = 
createValidatorList(60, 30); + BEAST_EXPECT(validators); + if (validators) + { + hash_set activeValidators; + hash_set unl = validators->getTrustedMasterKeys(); + auto it = unl.begin(); + for (std::uint32_t i = 0; i < 50; ++i) + { + activeValidators.insert(calcNodeID(*it)); + ++it; + } + validators->updateTrusted(activeValidators); + BEAST_EXPECT(validators->quorum() == 48); + hash_set nUnl; + it = unl.begin(); + for (std::uint32_t i = 0; i < 20; ++i) + { + nUnl.insert(*it); + ++it; + } + validators->setNegativeUnl(nUnl); + validators->updateTrusted(activeValidators); + BEAST_EXPECT(validators->quorum() == 30); + } + } + } + public: void run() override @@ -1276,6 +1455,7 @@ class ValidatorList_test : public beast::unit_test::suite testApplyList(); testUpdateTrusted(); testExpires(); + testNegativeUNL(); } }; diff --git a/src/test/consensus/NegativeUNL_test.cpp b/src/test/consensus/NegativeUNL_test.cpp new file mode 100644 index 00000000000..547cd17aa7e --- /dev/null +++ b/src/test/consensus/NegativeUNL_test.cpp @@ -0,0 +1,2108 @@ +//----------------------------------------------------------------------------- +/* + This file is part of rippled: https://github.com/ripple/rippled + Copyright (c) 2020 Ripple Labs Inc. + + Permission to use, copy, modify, and/or distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+*/ +//============================================================================== + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace ripple { +namespace test { + +/* + * This file implements the following negative UNL related tests: + * -- test filling and applying ttUNL_MODIFY Tx and ledger update + * -- test ttUNL_MODIFY Tx failure without featureNegativeUNL amendment + * -- test the NegativeUNLVote class. The test cases are split to multiple + * test classes to allow parallel execution. + * -- test the negativeUNLFilter function + * + * Other negative UNL related tests such as ValidatorList and RPC related ones + * are put in their existing unit test files. + */ + +/** + * Test the size of the negative UNL in a ledger, + * also test if the ledger has ToDisalbe and/or ToReEnable + * + * @param l the ledger + * @param size the expected negative UNL size + * @param hasToDisable if expect ToDisable in ledger + * @param hasToReEnable if expect ToDisable in ledger + * @return true if meet all three expectation + */ +bool +negUnlSizeTest( + std::shared_ptr const& l, + size_t size, + bool hasToDisable, + bool hasToReEnable); + +/** + * Try to apply a ttUNL_MODIFY Tx, and test the apply result + * + * @param env the test environment + * @param view the OpenView of the ledger + * @param tx the ttUNL_MODIFY Tx + * @param pass if the Tx should be applied successfully + * @return true if meet the expectation of apply result + */ +bool +applyAndTestResult(jtx::Env& env, OpenView& view, STTx const& tx, bool pass); + +/** + * Verify the content of negative UNL entries (public key and ledger sequence) + * of a ledger + * + * @param l the ledger + * @param nUnlLedgerSeq the expected PublicKeys and ledger Sequences + * @note nUnlLedgerSeq is copied so that it can be modified. 
+ * @return true if meet the expectation + */ +bool +VerifyPubKeyAndSeq( + std::shared_ptr const& l, + hash_map nUnlLedgerSeq); + +/** + * Count the number of Tx in a TxSet + * + * @param txSet the TxSet + * @return the number of Tx + */ +std::size_t +countTx(std::shared_ptr const& txSet); + +/** + * Create fake public keys + * + * @param n the number of public keys + * @return a vector of public keys created + */ +std::vector +createPublicKeys(std::size_t n); + +/** + * Create ttUNL_MODIFY Tx + * + * @param disabling disabling or re-enabling a validator + * @param seq current ledger seq + * @param txKey the public key of the validator + * @return the ttUNL_MODIFY Tx + */ +STTx +createTx(bool disabling, LedgerIndex seq, PublicKey const& txKey); + +class NegativeUNL_test : public beast::unit_test::suite +{ + /** + * Test filling and applying ttUNL_MODIFY Tx, as well as ledger update: + * + * We will build a long history of ledgers, and try to apply different + * ttUNL_MODIFY Txes. We will check if the apply results meet expectations + * and if the ledgers are updated correctly. 
+ */ + void + testNegativeUNL() + { + /* + * test cases: + * + * (1) the ledger after genesis + * -- cannot apply Disable Tx + * -- cannot apply ReEnable Tx + * -- nUNL empty + * -- no ToDisable + * -- no ToReEnable + * + * (2) a flag ledger + * -- apply an Disable Tx + * -- cannot apply the second Disable Tx + * -- cannot apply a ReEnable Tx + * -- nUNL empty + * -- has ToDisable with right nodeId + * -- no ToReEnable + * ++ extra test: first Disable Tx in ledger TxSet + * + * (3) ledgers before the next flag ledger + * -- nUNL empty + * -- has ToDisable with right nodeId + * -- no ToReEnable + * + * (4) next flag ledger + * -- nUNL size == 1, with right nodeId + * -- no ToDisable + * -- no ToReEnable + * -- cannot apply an Disable Tx with nodeId already in nUNL + * -- apply an Disable Tx with different nodeId + * -- cannot apply a ReEnable Tx with the same NodeId as Add + * -- cannot apply a ReEnable Tx with a NodeId not in nUNL + * -- apply a ReEnable Tx with a nodeId already in nUNL + * -- has ToDisable with right nodeId + * -- has ToReEnable with right nodeId + * -- nUNL size still 1, right nodeId + * + * (5) ledgers before the next flag ledger + * -- nUNL size == 1, right nodeId + * -- has ToDisable with right nodeId + * -- has ToReEnable with right nodeId + * + * (6) next flag ledger + * -- nUNL size == 1, different nodeId + * -- no ToDisable + * -- no ToReEnable + * -- apply an Disable Tx with different nodeId + * -- nUNL size still 1, right nodeId + * -- has ToDisable with right nodeId + * -- no ToReEnable + * + * (7) ledgers before the next flag ledger + * -- nUNL size still 1, right nodeId + * -- has ToDisable with right nodeId + * -- no ToReEnable + * + * (8) next flag ledger + * -- nUNL size == 2 + * -- apply a ReEnable Tx + * -- cannot apply second ReEnable Tx, even with right nodeId + * -- cannot apply an Disable Tx with the same NodeId as Remove + * -- nUNL size == 2 + * -- no ToDisable + * -- has ToReEnable with right nodeId + * + * (9) ledgers 
before the next flag ledger + * -- nUNL size == 2 + * -- no ToDisable + * -- has ToReEnable with right nodeId + * + * (10) next flag ledger + * -- nUNL size == 1 + * -- apply a ReEnable Tx + * -- nUNL size == 1 + * -- no ToDisable + * -- has ToReEnable with right nodeId + * + * (11) ledgers before the next flag ledger + * -- nUNL size == 1 + * -- no ToDisable + * -- has ToReEnable with right nodeId + * + * (12) next flag ledger + * -- nUNL size == 0 + * -- no ToDisable + * -- no ToReEnable + * + * (13) ledgers before the next flag ledger + * -- nUNL size == 0 + * -- no ToDisable + * -- no ToReEnable + * + * (14) next flag ledger + * -- nUNL size == 0 + * -- no ToDisable + * -- no ToReEnable + */ + + testcase("Create UNLModify Tx and apply to ledgers"); + + jtx::Env env(*this, jtx::supported_amendments() | featureNegativeUNL); + std::vector publicKeys = createPublicKeys(3); + // genesis ledger + auto l = std::make_shared( + create_genesis, + env.app().config(), + std::vector{}, + env.app().getNodeFamily()); + BEAST_EXPECT(l->rules().enabled(featureNegativeUNL)); + + // Record the public keys and ledger sequences of expected negative UNL + // validators when we build the ledger history + hash_map nUnlLedgerSeq; + + { + //(1) the ledger after genesis, not a flag ledger + l = std::make_shared( + *l, env.app().timeKeeper().closeTime()); + + auto txDisable_0 = createTx(true, l->seq(), publicKeys[0]); + auto txReEnable_1 = createTx(false, l->seq(), publicKeys[1]); + + OpenView accum(&*l); + BEAST_EXPECT(applyAndTestResult(env, accum, txDisable_0, false)); + BEAST_EXPECT(applyAndTestResult(env, accum, txReEnable_1, false)); + accum.apply(*l); + BEAST_EXPECT(negUnlSizeTest(l, 0, false, false)); + } + + { + //(2) a flag ledger + // generate more ledgers + for (auto i = 0; i < 256 - 2; ++i) + { + l = std::make_shared( + *l, env.app().timeKeeper().closeTime()); + } + BEAST_EXPECT(l->isFlagLedger()); + l->updateNegativeUNL(); + + auto txDisable_0 = createTx(true, l->seq(), 
publicKeys[0]); + auto txDisable_1 = createTx(true, l->seq(), publicKeys[1]); + auto txReEnable_2 = createTx(false, l->seq(), publicKeys[2]); + + // can apply 1 and only 1 ToDisable Tx, + // cannot apply ToReEnable Tx, since negative UNL is empty + OpenView accum(&*l); + BEAST_EXPECT(applyAndTestResult(env, accum, txDisable_0, true)); + BEAST_EXPECT(applyAndTestResult(env, accum, txDisable_1, false)); + BEAST_EXPECT(applyAndTestResult(env, accum, txReEnable_2, false)); + accum.apply(*l); + auto good_size = negUnlSizeTest(l, 0, true, false); + BEAST_EXPECT(good_size); + if (good_size) + { + BEAST_EXPECT(l->negativeUnlToDisable() == publicKeys[0]); + //++ first ToDisable Tx in ledger's TxSet + uint256 txID = txDisable_0.getTransactionID(); + BEAST_EXPECT(l->txExists(txID)); + } + } + + { + //(3) ledgers before the next flag ledger + for (auto i = 0; i < 256; ++i) + { + auto good_size = negUnlSizeTest(l, 0, true, false); + BEAST_EXPECT(good_size); + if (good_size) + BEAST_EXPECT(l->negativeUnlToDisable() == publicKeys[0]); + l = std::make_shared( + *l, env.app().timeKeeper().closeTime()); + } + BEAST_EXPECT(l->isFlagLedger()); + l->updateNegativeUNL(); + + //(4) next flag ledger + // test if the ledger updated correctly + auto good_size = negUnlSizeTest(l, 1, false, false); + BEAST_EXPECT(good_size); + if (good_size) + { + BEAST_EXPECT(*(l->negativeUnl().begin()) == publicKeys[0]); + nUnlLedgerSeq.emplace(publicKeys[0], l->seq()); + } + + auto txDisable_0 = createTx(true, l->seq(), publicKeys[0]); + auto txDisable_1 = createTx(true, l->seq(), publicKeys[1]); + auto txReEnable_0 = createTx(false, l->seq(), publicKeys[0]); + auto txReEnable_1 = createTx(false, l->seq(), publicKeys[1]); + auto txReEnable_2 = createTx(false, l->seq(), publicKeys[2]); + + OpenView accum(&*l); + BEAST_EXPECT(applyAndTestResult(env, accum, txDisable_0, false)); + BEAST_EXPECT(applyAndTestResult(env, accum, txDisable_1, true)); + BEAST_EXPECT(applyAndTestResult(env, accum, txReEnable_1, 
false)); + BEAST_EXPECT(applyAndTestResult(env, accum, txReEnable_2, false)); + BEAST_EXPECT(applyAndTestResult(env, accum, txReEnable_0, true)); + accum.apply(*l); + good_size = negUnlSizeTest(l, 1, true, true); + BEAST_EXPECT(good_size); + if (good_size) + { + BEAST_EXPECT(l->negativeUnl().count(publicKeys[0])); + BEAST_EXPECT(l->negativeUnlToDisable() == publicKeys[1]); + BEAST_EXPECT(l->negativeUnlToReEnable() == publicKeys[0]); + // test sfFirstLedgerSequence + BEAST_EXPECT(VerifyPubKeyAndSeq(l, nUnlLedgerSeq)); + } + } + + { + //(5) ledgers before the next flag ledger + for (auto i = 0; i < 256; ++i) + { + auto good_size = negUnlSizeTest(l, 1, true, true); + BEAST_EXPECT(good_size); + if (good_size) + { + BEAST_EXPECT(l->negativeUnl().count(publicKeys[0])); + BEAST_EXPECT(l->negativeUnlToDisable() == publicKeys[1]); + BEAST_EXPECT(l->negativeUnlToReEnable() == publicKeys[0]); + } + l = std::make_shared( + *l, env.app().timeKeeper().closeTime()); + } + BEAST_EXPECT(l->isFlagLedger()); + l->updateNegativeUNL(); + + //(6) next flag ledger + // test if the ledger updated correctly + auto good_size = negUnlSizeTest(l, 1, false, false); + BEAST_EXPECT(good_size); + if (good_size) + { + BEAST_EXPECT(l->negativeUnl().count(publicKeys[1])); + } + + auto txDisable_0 = createTx(true, l->seq(), publicKeys[0]); + + OpenView accum(&*l); + BEAST_EXPECT(applyAndTestResult(env, accum, txDisable_0, true)); + accum.apply(*l); + good_size = negUnlSizeTest(l, 1, true, false); + BEAST_EXPECT(good_size); + if (good_size) + { + BEAST_EXPECT(l->negativeUnl().count(publicKeys[1])); + BEAST_EXPECT(l->negativeUnlToDisable() == publicKeys[0]); + nUnlLedgerSeq.emplace(publicKeys[1], l->seq()); + nUnlLedgerSeq.erase(publicKeys[0]); + BEAST_EXPECT(VerifyPubKeyAndSeq(l, nUnlLedgerSeq)); + } + } + + { + //(7) ledgers before the next flag ledger + for (auto i = 0; i < 256; ++i) + { + auto good_size = negUnlSizeTest(l, 1, true, false); + BEAST_EXPECT(good_size); + if (good_size) + { + 
BEAST_EXPECT(l->negativeUnl().count(publicKeys[1])); + BEAST_EXPECT(l->negativeUnlToDisable() == publicKeys[0]); + } + l = std::make_shared( + *l, env.app().timeKeeper().closeTime()); + } + BEAST_EXPECT(l->isFlagLedger()); + l->updateNegativeUNL(); + + //(8) next flag ledger + // test if the ledger updated correctly + auto good_size = negUnlSizeTest(l, 2, false, false); + BEAST_EXPECT(good_size); + if (good_size) + { + BEAST_EXPECT(l->negativeUnl().count(publicKeys[0])); + BEAST_EXPECT(l->negativeUnl().count(publicKeys[1])); + nUnlLedgerSeq.emplace(publicKeys[0], l->seq()); + BEAST_EXPECT(VerifyPubKeyAndSeq(l, nUnlLedgerSeq)); + } + + auto txDisable_0 = createTx(true, l->seq(), publicKeys[0]); + auto txReEnable_0 = createTx(false, l->seq(), publicKeys[0]); + auto txReEnable_1 = createTx(false, l->seq(), publicKeys[1]); + + OpenView accum(&*l); + BEAST_EXPECT(applyAndTestResult(env, accum, txReEnable_0, true)); + BEAST_EXPECT(applyAndTestResult(env, accum, txReEnable_1, false)); + BEAST_EXPECT(applyAndTestResult(env, accum, txDisable_0, false)); + accum.apply(*l); + good_size = negUnlSizeTest(l, 2, false, true); + BEAST_EXPECT(good_size); + if (good_size) + { + BEAST_EXPECT(l->negativeUnl().count(publicKeys[0])); + BEAST_EXPECT(l->negativeUnl().count(publicKeys[1])); + BEAST_EXPECT(l->negativeUnlToReEnable() == publicKeys[0]); + BEAST_EXPECT(VerifyPubKeyAndSeq(l, nUnlLedgerSeq)); + } + } + + { + //(9) ledgers before the next flag ledger + for (auto i = 0; i < 256; ++i) + { + auto good_size = negUnlSizeTest(l, 2, false, true); + BEAST_EXPECT(good_size); + if (good_size) + { + BEAST_EXPECT(l->negativeUnl().count(publicKeys[0])); + BEAST_EXPECT(l->negativeUnl().count(publicKeys[1])); + BEAST_EXPECT(l->negativeUnlToReEnable() == publicKeys[0]); + } + l = std::make_shared( + *l, env.app().timeKeeper().closeTime()); + } + BEAST_EXPECT(l->isFlagLedger()); + l->updateNegativeUNL(); + + //(10) next flag ledger + // test if the ledger updated correctly + auto good_size = 
negUnlSizeTest(l, 1, false, false); + BEAST_EXPECT(good_size); + if (good_size) + { + BEAST_EXPECT(l->negativeUnl().count(publicKeys[1])); + nUnlLedgerSeq.erase(publicKeys[0]); + BEAST_EXPECT(VerifyPubKeyAndSeq(l, nUnlLedgerSeq)); + } + + auto txReEnable_1 = createTx(false, l->seq(), publicKeys[1]); + + OpenView accum(&*l); + BEAST_EXPECT(applyAndTestResult(env, accum, txReEnable_1, true)); + accum.apply(*l); + good_size = negUnlSizeTest(l, 1, false, true); + BEAST_EXPECT(good_size); + if (good_size) + { + BEAST_EXPECT(l->negativeUnl().count(publicKeys[1])); + BEAST_EXPECT(l->negativeUnlToReEnable() == publicKeys[1]); + BEAST_EXPECT(VerifyPubKeyAndSeq(l, nUnlLedgerSeq)); + } + } + + { + //(11) ledgers before the next flag ledger + for (auto i = 0; i < 256; ++i) + { + auto good_size = negUnlSizeTest(l, 1, false, true); + BEAST_EXPECT(good_size); + if (good_size) + { + BEAST_EXPECT(l->negativeUnl().count(publicKeys[1])); + BEAST_EXPECT(l->negativeUnlToReEnable() == publicKeys[1]); + } + l = std::make_shared( + *l, env.app().timeKeeper().closeTime()); + } + BEAST_EXPECT(l->isFlagLedger()); + l->updateNegativeUNL(); + + //(12) next flag ledger + BEAST_EXPECT(negUnlSizeTest(l, 0, false, false)); + } + + { + //(13) ledgers before the next flag ledger + for (auto i = 0; i < 256; ++i) + { + BEAST_EXPECT(negUnlSizeTest(l, 0, false, false)); + l = std::make_shared( + *l, env.app().timeKeeper().closeTime()); + } + BEAST_EXPECT(l->isFlagLedger()); + l->updateNegativeUNL(); + + //(14) next flag ledger + BEAST_EXPECT(negUnlSizeTest(l, 0, false, false)); + } + } + + void + run() override + { + testNegativeUNL(); + } +}; + +class NegativeUNLNoAmendment_test : public beast::unit_test::suite +{ + void + testNegativeUNLNoAmendment() + { + testcase("No negative UNL amendment"); + + jtx::Env env(*this, jtx::supported_amendments() - featureNegativeUNL); + std::vector publicKeys = createPublicKeys(1); + // genesis ledger + auto l = std::make_shared( + create_genesis, + 
env.app().config(), + std::vector{}, + env.app().getNodeFamily()); + BEAST_EXPECT(!l->rules().enabled(featureNegativeUNL)); + + // generate more ledgers + for (auto i = 0; i < 256 - 1; ++i) + { + l = std::make_shared( + *l, env.app().timeKeeper().closeTime()); + } + BEAST_EXPECT(l->seq() == 256); + auto txDisable_0 = createTx(true, l->seq(), publicKeys[0]); + OpenView accum(&*l); + BEAST_EXPECT(applyAndTestResult(env, accum, txDisable_0, false)); + accum.apply(*l); + BEAST_EXPECT(negUnlSizeTest(l, 0, false, false)); + } + + void + run() override + { + testNegativeUNLNoAmendment(); + } +}; + +/** + * Utility class for creating validators and ledger history + */ +struct NetworkHistory +{ + using LedgerHistory = std::vector>; + /** + * + * Only reasonable parameters can be honored, + * e.g cannot hasToReEnable when nUNLSize == 0 + */ + struct Parameter + { + std::uint32_t numNodes; // number of validators + std::uint32_t negUNLSize; // size of negative UNL in the last ledger + bool hasToDisable; // if has ToDisable in the last ledger + bool hasToReEnable; // if has ToReEnable in the last ledger + /** + * if not specified, the number of ledgers in the history is calculated + * from negUNLSize, hasToDisable, and hasToReEnable + */ + std::optional numLedgers; + }; + + NetworkHistory(beast::unit_test::suite& suite, Parameter const& p) + : env(suite, jtx::supported_amendments() | featureNegativeUNL) + , param(p) + , validations(env.app().getValidations()) + { + createNodes(); + if (!param.numLedgers) + param.numLedgers = 256 * (param.negUNLSize + 1); + goodHistory = createLedgerHistory(); + } + + void + createNodes() + { + assert(param.numNodes <= 256); + UNLKeys = createPublicKeys(param.numNodes); + for (int i = 0; i < param.numNodes; ++i) + { + UNLKeySet.insert(UNLKeys[i]); + UNLNodeIDs.push_back(calcNodeID(UNLKeys[i])); + UNLNodeIDSet.insert(UNLNodeIDs.back()); + } + } + + /** + * create ledger history and apply needed ttUNL_MODIFY tx at flag ledgers + * @return + */ + 
bool + createLedgerHistory() + { + static uint256 fake_amemdment; // So we have different genesis ledgers + auto l = std::make_shared( + create_genesis, + env.app().config(), + std::vector{fake_amemdment++}, + env.app().getNodeFamily()); + history.push_back(l); + + // When putting validators into the negative UNL, we start with + // validator 0, then validator 1 ... + int nidx = 0; + while (l->seq() <= param.numLedgers) + { + l = std::make_shared( + *l, env.app().timeKeeper().closeTime()); + history.push_back(l); + + if (l->isFlagLedger()) + { + l->updateNegativeUNL(); + OpenView accum(&*l); + if (l->negativeUnl().size() < param.negUNLSize) + { + auto tx = createTx(true, l->seq(), UNLKeys[nidx]); + if (!applyAndTestResult(env, accum, tx, true)) + break; + ++nidx; + } + else if (l->negativeUnl().size() == param.negUNLSize) + { + if (param.hasToDisable) + { + auto tx = createTx(true, l->seq(), UNLKeys[nidx]); + if (!applyAndTestResult(env, accum, tx, true)) + break; + ++nidx; + } + if (param.hasToReEnable) + { + auto tx = createTx(false, l->seq(), UNLKeys[0]); + if (!applyAndTestResult(env, accum, tx, true)) + break; + } + } + accum.apply(*l); + } + l->updateSkipList(); + } + return negUnlSizeTest( + l, param.negUNLSize, param.hasToDisable, param.hasToReEnable); + } + + /** + * Create a validation + * @param ledger the ledger the validation validates + * @param v the validator + * @return the validation + */ + std::shared_ptr + createSTVal(std::shared_ptr const& ledger, NodeID const& v) + { + static auto keyPair = randomKeyPair(KeyType::secp256k1); + return std::make_shared( + env.app().timeKeeper().now(), + keyPair.first, + keyPair.second, + v, + [&](STValidation& v) { + v.setFieldH256(sfLedgerHash, ledger->info().hash); + v.setFieldU32(sfLedgerSequence, ledger->seq()); + v.setFlag(vfFullValidation); + }); + }; + + /** + * Walk the ledger history and create validation messages for the ledgers + * + * @tparam NeedValidation a function to decided if a validation is 
needed + * @param needVal if a validation is needed for this particular combination + * of ledger and validator + */ + template + void + walkHistoryAndAddValidations(NeedValidation&& needVal) + { + std::uint32_t curr = 0; + std::size_t need = 256 + 1; + // only last 256 + 1 ledgers need validations + if (history.size() > need) + curr = history.size() - need; + for (; curr != history.size(); ++curr) + { + for (std::size_t i = 0; i < param.numNodes; ++i) + { + if (needVal(history[curr], i)) + { + RCLValidation v(createSTVal(history[curr], UNLNodeIDs[i])); + v.setTrusted(); + validations.add(UNLNodeIDs[i], v); + } + } + } + } + + std::shared_ptr + lastLedger() const + { + return history.back(); + } + + jtx::Env env; + Parameter param; + RCLValidations& validations; + std::vector UNLKeys; + hash_set UNLKeySet; + std::vector UNLNodeIDs; + hash_set UNLNodeIDSet; + LedgerHistory history; + bool goodHistory; +}; + +auto defaultPreVote = [](NegativeUNLVote& vote) {}; +/** + * Create a NegativeUNLVote object. It then creates ttUNL_MODIFY Tx as its vote + * on negative UNL changes. 
+ * + * @tparam PreVote a function to be called before vote + * @param history the ledger history + * @param myId the voting validator + * @param expect the number of ttUNL_MODIFY Tx expected + * @param pre the PreVote function + * @return true if the number of ttUNL_MODIFY Txes created meet expectation + */ +template +bool +voteAndCheck( + NetworkHistory& history, + NodeID const& myId, + std::size_t expect, + PreVote const& pre = defaultPreVote) +{ + NegativeUNLVote vote(myId, history.env.journal); + pre(vote); + auto txSet = std::make_shared( + SHAMapType::TRANSACTION, history.env.app().getNodeFamily()); + vote.doVoting( + history.lastLedger(), history.UNLKeySet, history.validations, txSet); + return countTx(txSet) == expect; +} + +/** + * Test the private member functions of NegativeUNLVote + */ +class NegativeUNLVoteInternal_test : public beast::unit_test::suite +{ + void + testAddTx() + { + testcase("Create UNLModify Tx"); + jtx::Env env(*this); + + NodeID myId(0xA0); + NegativeUNLVote vote(myId, env.journal); + + // one add, one remove + auto txSet = std::make_shared( + SHAMapType::TRANSACTION, env.app().getNodeFamily()); + PublicKey toDisableKey; + PublicKey toReEnableKey; + LedgerIndex seq(1234); + BEAST_EXPECT(countTx(txSet) == 0); + vote.addTx(seq, toDisableKey, NegativeUNLVote::ToDisable, txSet); + BEAST_EXPECT(countTx(txSet) == 1); + vote.addTx(seq, toReEnableKey, NegativeUNLVote::ToReEnable, txSet); + BEAST_EXPECT(countTx(txSet) == 2); + // content of a tx is implicitly tested after applied to a ledger + // in later test cases + } + + void + testPickOneCandidate() + { + testcase("Pick One Candidate"); + jtx::Env env(*this); + + NodeID myId(0xA0); + NegativeUNLVote vote(myId, env.journal); + + uint256 pad_0(0); + uint256 pad_f = ~pad_0; + NodeID n_1(1); + NodeID n_2(2); + NodeID n_3(3); + std::vector candidates({n_1}); + BEAST_EXPECT(vote.choose(pad_0, candidates) == n_1); + BEAST_EXPECT(vote.choose(pad_f, candidates) == n_1); + 
candidates.emplace_back(2); + BEAST_EXPECT(vote.choose(pad_0, candidates) == n_1); + BEAST_EXPECT(vote.choose(pad_f, candidates) == n_2); + candidates.emplace_back(3); + BEAST_EXPECT(vote.choose(pad_0, candidates) == n_1); + BEAST_EXPECT(vote.choose(pad_f, candidates) == n_3); + } + + void + testBuildScoreTableSpecialCases() + { + testcase("Build Score Table"); + /* + * 1. no skip list + * 2. short skip list + * 3. local node not enough history + * 4. a node double validated some seq + * 5. local node had enough validations but on a wrong chain + * 6. a good case, long enough history and perfect scores + */ + { + // 1. no skip list + NetworkHistory history = {*this, {10, 0, false, false, 1}}; + BEAST_EXPECT(history.goodHistory); + if (history.goodHistory) + { + NegativeUNLVote vote( + history.UNLNodeIDs[3], history.env.journal); + BEAST_EXPECT(!vote.buildScoreTable( + history.lastLedger(), + history.UNLNodeIDSet, + history.validations)); + } + } + + { + // 2. short skip list + NetworkHistory history = {*this, {10, 0, false, false, 256 / 2}}; + BEAST_EXPECT(history.goodHistory); + if (history.goodHistory) + { + NegativeUNLVote vote( + history.UNLNodeIDs[3], history.env.journal); + BEAST_EXPECT(!vote.buildScoreTable( + history.lastLedger(), + history.UNLNodeIDSet, + history.validations)); + } + } + + { + // 3. local node not enough history + NetworkHistory history = {*this, {10, 0, false, false, 256 + 2}}; + BEAST_EXPECT(history.goodHistory); + if (history.goodHistory) + { + NodeID myId = history.UNLNodeIDs[3]; + history.walkHistoryAndAddValidations( + [&](std::shared_ptr const& l, + std::size_t idx) -> bool { + // skip half my validations. + return !( + history.UNLNodeIDs[idx] == myId && + l->seq() % 2 == 0); + }); + NegativeUNLVote vote(myId, history.env.journal); + BEAST_EXPECT(!vote.buildScoreTable( + history.lastLedger(), + history.UNLNodeIDSet, + history.validations)); + } + } + + { + // 4. a node double validated some seq + // 5. 
local node had enough validations but on a wrong chain + NetworkHistory history = {*this, {10, 0, false, false, 256 + 2}}; + // We need two chains for these tests + bool wrongChainSuccess = history.goodHistory; + BEAST_EXPECT(wrongChainSuccess); + NetworkHistory::LedgerHistory wrongChain = + std::move(history.history); + // Create a new chain and use it as the one that majority of nodes + // follow + history.createLedgerHistory(); + BEAST_EXPECT(history.goodHistory); + + if (history.goodHistory && wrongChainSuccess) + { + NodeID myId = history.UNLNodeIDs[3]; + NodeID badNode = history.UNLNodeIDs[4]; + history.walkHistoryAndAddValidations( + [&](std::shared_ptr const& l, + std::size_t idx) -> bool { + // everyone but me + return !(history.UNLNodeIDs[idx] == myId); + }); + + // local node validate wrong chain + // a node double validates + for (auto& l : wrongChain) + { + RCLValidation v1(history.createSTVal(l, myId)); + history.validations.add(myId, v1); + RCLValidation v2(history.createSTVal(l, badNode)); + history.validations.add(badNode, v2); + } + + NegativeUNLVote vote(myId, history.env.journal); + + // local node still on wrong chain, can build a scoreTable, + // but all other nodes' scores are zero + auto scoreTable = vote.buildScoreTable( + wrongChain.back(), + history.UNLNodeIDSet, + history.validations); + BEAST_EXPECT(scoreTable); + if (scoreTable) + { + for (auto const& [n, score] : *scoreTable) + { + if (n == myId) + BEAST_EXPECT(score == 256); + else + BEAST_EXPECT(score == 0); + } + } + + // if local node switched to right history, but cannot build + // scoreTable because not enough local validations + BEAST_EXPECT(!vote.buildScoreTable( + history.lastLedger(), + history.UNLNodeIDSet, + history.validations)); + } + } + + { + // 6. 
a good case + NetworkHistory history = {*this, {10, 0, false, false, 256 + 1}}; + BEAST_EXPECT(history.goodHistory); + if (history.goodHistory) + { + history.walkHistoryAndAddValidations( + [&](std::shared_ptr const& l, + std::size_t idx) -> bool { return true; }); + NegativeUNLVote vote( + history.UNLNodeIDs[3], history.env.journal); + auto scoreTable = vote.buildScoreTable( + history.lastLedger(), + history.UNLNodeIDSet, + history.validations); + BEAST_EXPECT(scoreTable); + if (scoreTable) + { + for (auto const& [_, score] : *scoreTable) + { + (void)_; + BEAST_EXPECT(score == 256); + } + } + } + } + } + + /** + * Find all candidates and check if the number of candidates meets + * expectation + * + * @param vote the NegativeUNLVote object + * @param unl the validators + * @param negUnl the negative UNL validators + * @param scoreTable the score table of validators + * @param numDisable number of Disable candidates expected + * @param numReEnable number of ReEnable candidates expected + * @return true if the number of candidates meets expectation + */ + bool + checkCandidateSizes( + NegativeUNLVote& vote, + hash_set const& unl, + hash_set const& negUnl, + hash_map const& scoreTable, + std::size_t numDisable, + std::size_t numReEnable) + { + auto [disableCandidates, reEnableCandidates] = + vote.findAllCandidates(unl, negUnl, scoreTable); + bool rightDisable = disableCandidates.size() == numDisable; + bool rightReEnable = reEnableCandidates.size() == numReEnable; + return rightDisable && rightReEnable; + }; + + void + testFindAllCandidates() + { + testcase("Find All Candidates"); + /* + * -- unl size: 35 + * -- negUnl size: 3 + * + * 0. all good scores + * 1. all bad scores + * 2. all between watermarks + * 3. 2 good scorers in negUnl + * 4. 2 bad scorers not in negUnl + * 5. 2 in negUnl but not in unl, have a remove candidate from score + * table + * 6. 2 in negUnl but not in unl, no remove candidate from score table + * 7. 
2 new validators have good scores, already in negUnl + * 8. 2 new validators have bad scores, not in negUnl + * 9. expired the new validators have bad scores, not in negUnl + */ + NetworkHistory history = {*this, {35, 0, false, false, 0}}; + + hash_set negUnl_012; + for (std::uint32_t i = 0; i < 3; ++i) + negUnl_012.insert(history.UNLNodeIDs[i]); + + // build a good scoreTable to use, or copy and modify + hash_map goodScoreTable; + for (auto const& n : history.UNLNodeIDs) + goodScoreTable[n] = NegativeUNLVote::negativeUnlHighWaterMark + 1; + + NegativeUNLVote vote(history.UNLNodeIDs[0], history.env.journal); + + { + // all good scores + BEAST_EXPECT(checkCandidateSizes( + vote, history.UNLNodeIDSet, negUnl_012, goodScoreTable, 0, 3)); + } + { + // all bad scores + hash_map scoreTable; + for (auto& n : history.UNLNodeIDs) + scoreTable[n] = NegativeUNLVote::negativeUnlLowWaterMark - 1; + BEAST_EXPECT(checkCandidateSizes( + vote, history.UNLNodeIDSet, negUnl_012, scoreTable, 35 - 3, 0)); + } + { + // all between watermarks + hash_map scoreTable; + for (auto& n : history.UNLNodeIDs) + scoreTable[n] = NegativeUNLVote::negativeUnlLowWaterMark + 1; + BEAST_EXPECT(checkCandidateSizes( + vote, history.UNLNodeIDSet, negUnl_012, scoreTable, 0, 0)); + } + + { + // 2 good scorers in negUnl + auto scoreTable = goodScoreTable; + scoreTable[*negUnl_012.begin()] = + NegativeUNLVote::negativeUnlLowWaterMark + 1; + BEAST_EXPECT(checkCandidateSizes( + vote, history.UNLNodeIDSet, negUnl_012, scoreTable, 0, 2)); + } + + { + // 2 bad scorers not in negUnl + auto scoreTable = goodScoreTable; + scoreTable[history.UNLNodeIDs[11]] = + NegativeUNLVote::negativeUnlLowWaterMark - 1; + scoreTable[history.UNLNodeIDs[12]] = + NegativeUNLVote::negativeUnlLowWaterMark - 1; + BEAST_EXPECT(checkCandidateSizes( + vote, history.UNLNodeIDSet, negUnl_012, scoreTable, 2, 3)); + } + + { + // 2 in negUnl but not in unl, have a remove candidate from score + // table + hash_set UNL_temp = history.UNLNodeIDSet; 
+ UNL_temp.erase(history.UNLNodeIDs[0]); + UNL_temp.erase(history.UNLNodeIDs[1]); + BEAST_EXPECT(checkCandidateSizes( + vote, UNL_temp, negUnl_012, goodScoreTable, 0, 3)); + } + + { + // 2 in negUnl but not in unl, no remove candidate from score table + auto scoreTable = goodScoreTable; + scoreTable.erase(history.UNLNodeIDs[0]); + scoreTable.erase(history.UNLNodeIDs[1]); + scoreTable[history.UNLNodeIDs[2]] = + NegativeUNLVote::negativeUnlLowWaterMark + 1; + hash_set UNL_temp = history.UNLNodeIDSet; + UNL_temp.erase(history.UNLNodeIDs[0]); + UNL_temp.erase(history.UNLNodeIDs[1]); + BEAST_EXPECT(checkCandidateSizes( + vote, UNL_temp, negUnl_012, scoreTable, 0, 2)); + } + + { + // 2 new validators + NodeID new_1(0xbead); + NodeID new_2(0xbeef); + hash_set nowTrusted = {new_1, new_2}; + hash_set UNL_temp = history.UNLNodeIDSet; + UNL_temp.insert(new_1); + UNL_temp.insert(new_2); + vote.newValidators(256, nowTrusted); + { + // 2 new validators have good scores, already in negUnl + auto scoreTable = goodScoreTable; + scoreTable[new_1] = + NegativeUNLVote::negativeUnlHighWaterMark + 1; + scoreTable[new_2] = + NegativeUNLVote::negativeUnlHighWaterMark + 1; + hash_set negUnl_temp = negUnl_012; + negUnl_temp.insert(new_1); + negUnl_temp.insert(new_2); + BEAST_EXPECT(checkCandidateSizes( + vote, UNL_temp, negUnl_temp, scoreTable, 0, 3 + 2)); + } + { + // 2 new validators have bad scores, not in negUnl + auto scoreTable = goodScoreTable; + scoreTable[new_1] = 0; + scoreTable[new_2] = 0; + BEAST_EXPECT(checkCandidateSizes( + vote, UNL_temp, negUnl_012, scoreTable, 0, 3)); + } + { + // expired the new validators have bad scores, not in negUnl + vote.purgeNewValidators( + 256 + NegativeUNLVote::newValidatorDisableSkip + 1); + auto scoreTable = goodScoreTable; + scoreTable[new_1] = 0; + scoreTable[new_2] = 0; + BEAST_EXPECT(checkCandidateSizes( + vote, UNL_temp, negUnl_012, scoreTable, 2, 3)); + } + } + } + + void + testFindAllCandidatesCombination() + { + testcase("Find All 
Candidates Combination"); + /* + * == combination 1: + * -- unl size: 34, 35, 80 + * -- nUnl size: 0, 50%, all + * -- score pattern: all 0, all negativeUnlLowWaterMark & +1 & -1, all + * negativeUnlHighWaterMark & +1 & -1, all 100% + * + * == combination 2: + * -- unl size: 34, 35, 80 + * -- negativeUnl size: 0, all + * -- nUnl size: one on, one off, one on, one off, + * -- score pattern: 2*(negativeUnlLowWaterMark, +1, -1) & + * 2*(negativeUnlHighWaterMark, +1, -1) & rest + * negativeUnlMinLocalValsToVote + */ + + jtx::Env env(*this); + + NodeID myId(0xA0); + NegativeUNLVote vote(myId, env.journal); + + std::array unlSizes = {34, 35, 80}; + std::array nUnlPercent = {0, 50, 100}; + std::array scores = { + 0, + NegativeUNLVote::negativeUnlLowWaterMark - 1, + NegativeUNLVote::negativeUnlLowWaterMark, + NegativeUNLVote::negativeUnlLowWaterMark + 1, + NegativeUNLVote::negativeUnlHighWaterMark - 1, + NegativeUNLVote::negativeUnlHighWaterMark, + NegativeUNLVote::negativeUnlHighWaterMark + 1, + NegativeUNLVote::negativeUnlMinLocalValsToVote}; + + //== combination 1: + { + auto fillScoreTable = + [&](std::uint32_t unl_size, + std::uint32_t nUnl_size, + std::uint32_t score, + hash_set& unl, + hash_set& negUnl, + hash_map& scoreTable) { + std::vector nodeIDs; + std::vector keys = createPublicKeys(unl_size); + for (auto const& k : keys) + { + nodeIDs.emplace_back(calcNodeID(k)); + unl.emplace(nodeIDs.back()); + scoreTable[nodeIDs.back()] = score; + } + for (std::uint32_t i = 0; i < nUnl_size; ++i) + negUnl.insert(nodeIDs[i]); + }; + + for (auto us : unlSizes) + { + for (auto np : nUnlPercent) + { + for (auto score : scores) + { + hash_set unl; + hash_set negUnl; + hash_map scoreTable; + fillScoreTable( + us, us * np / 100, score, unl, negUnl, scoreTable); + BEAST_EXPECT(unl.size() == us); + BEAST_EXPECT(negUnl.size() == us * np / 100); + BEAST_EXPECT(scoreTable.size() == us); + + std::size_t toDisable_expect = 0; + std::size_t toReEnable_expect = 0; + if (np == 0) + { + if 
(score < + NegativeUNLVote::negativeUnlLowWaterMark) + { + toDisable_expect = us; + } + } + else if (np == 50) + { + if (score > + NegativeUNLVote::negativeUnlHighWaterMark) + { + toReEnable_expect = us * np / 100; + } + } + else + { + if (score > + NegativeUNLVote::negativeUnlHighWaterMark) + { + toReEnable_expect = us; + } + } + BEAST_EXPECT(checkCandidateSizes( + vote, + unl, + negUnl, + scoreTable, + toDisable_expect, + toReEnable_expect)); + } + } + } + + //== combination 2: + { + auto fillScoreTable = + [&](std::uint32_t unl_size, + std::uint32_t nUnl_percent, + hash_set& unl, + hash_set& negUnl, + hash_map& scoreTable) { + std::vector nodeIDs; + std::vector keys = + createPublicKeys(unl_size); + for (auto const& k : keys) + { + nodeIDs.emplace_back(calcNodeID(k)); + unl.emplace(nodeIDs.back()); + } + + std::uint32_t nIdx = 0; + for (auto score : scores) + { + scoreTable[nodeIDs[nIdx++]] = score; + scoreTable[nodeIDs[nIdx++]] = score; + } + for (; nIdx < unl_size;) + { + scoreTable[nodeIDs[nIdx++]] = scores.back(); + } + + if (nUnl_percent == 100) + { + negUnl = unl; + } + else if (nUnl_percent == 50) + { + for (std::uint32_t i = 1; i < unl_size; i += 2) + negUnl.insert(nodeIDs[i]); + } + }; + + for (auto us : unlSizes) + { + for (auto np : nUnlPercent) + { + hash_set unl; + hash_set negUnl; + hash_map scoreTable; + + fillScoreTable(us, np, unl, negUnl, scoreTable); + BEAST_EXPECT(unl.size() == us); + BEAST_EXPECT(negUnl.size() == us * np / 100); + BEAST_EXPECT(scoreTable.size() == us); + + std::size_t toDisable_expect = 0; + std::size_t toReEnable_expect = 0; + if (np == 0) + { + toDisable_expect = 4; + } + else if (np == 50) + { + toReEnable_expect = negUnl.size() - 6; + } + else + { + toReEnable_expect = negUnl.size() - 12; + } + BEAST_EXPECT(checkCandidateSizes( + vote, + unl, + negUnl, + scoreTable, + toDisable_expect, + toReEnable_expect)); + } + } + } + } + } + + void + testNewValidators() + { + testcase("New Validators"); + jtx::Env env(*this); + + 
NodeID myId(0xA0); + NegativeUNLVote vote(myId, env.journal); + + // test cases: + // newValidators_ of the NegativeUNLVote empty, add one + // add a new one and one already added + // add a new one and some already added + // purge and see some are expired + + NodeID n1(0xA1); + NodeID n2(0xA2); + NodeID n3(0xA3); + + vote.newValidators(2, {n1}); + BEAST_EXPECT(vote.newValidators_.size() == 1); + if (vote.newValidators_.size() == 1) + { + BEAST_EXPECT(vote.newValidators_.begin()->first == n1); + BEAST_EXPECT(vote.newValidators_.begin()->second == 2); + } + + vote.newValidators(3, {n1, n2}); + BEAST_EXPECT(vote.newValidators_.size() == 2); + if (vote.newValidators_.size() == 2) + { + BEAST_EXPECT(vote.newValidators_[n1] == 2); + BEAST_EXPECT(vote.newValidators_[n2] == 3); + } + + vote.newValidators( + NegativeUNLVote::newValidatorDisableSkip, {n1, n2, n3}); + BEAST_EXPECT(vote.newValidators_.size() == 3); + if (vote.newValidators_.size() == 3) + { + BEAST_EXPECT(vote.newValidators_[n1] == 2); + BEAST_EXPECT(vote.newValidators_[n2] == 3); + BEAST_EXPECT( + vote.newValidators_[n3] == + NegativeUNLVote::newValidatorDisableSkip); + } + + vote.purgeNewValidators(NegativeUNLVote::newValidatorDisableSkip + 2); + BEAST_EXPECT(vote.newValidators_.size() == 3); + vote.purgeNewValidators(NegativeUNLVote::newValidatorDisableSkip + 3); + BEAST_EXPECT(vote.newValidators_.size() == 2); + vote.purgeNewValidators(NegativeUNLVote::newValidatorDisableSkip + 4); + BEAST_EXPECT(vote.newValidators_.size() == 1); + BEAST_EXPECT(vote.newValidators_.begin()->first == n3); + BEAST_EXPECT( + vote.newValidators_.begin()->second == + NegativeUNLVote::newValidatorDisableSkip); + } + + void + run() override + { + testAddTx(); + testPickOneCandidate(); + testBuildScoreTableSpecialCases(); + testFindAllCandidates(); + testFindAllCandidatesCombination(); + testNewValidators(); + } +}; + +/** + * Rest the build score table function of NegativeUNLVote. + * This was a part of NegativeUNLVoteInternal. 
It is redundant and has long + * runtime. So we separate it out as a manual test. + */ +class NegativeUNLVoteScoreTable_test : public beast::unit_test::suite +{ + void + testBuildScoreTableCombination() + { + testcase("Build Score Table Combination"); + /* + * local node good history, correct scores: + * == combination: + * -- unl size: 10, 34, 35, 50 + * -- score pattern: all 0, all 50%, all 100%, two 0% two 50% rest 100% + */ + std::array unlSizes = {10, 34, 35, 50}; + std::array, 4> scorePattern = { + {{{0, 0, 0}}, {{50, 50, 50}}, {{100, 100, 100}}, {{0, 50, 100}}}}; + + for (auto unlSize : unlSizes) + { + for (std::uint32_t sp = 0; sp < 4; ++sp) + { + NetworkHistory history = { + *this, {unlSize, 0, false, false, 256 + 2}}; + BEAST_EXPECT(history.goodHistory); + if (history.goodHistory) + { + NodeID myId = history.UNLNodeIDs[3]; + history.walkHistoryAndAddValidations( + [&](std::shared_ptr const& l, + std::size_t idx) -> bool { + std::size_t k; + if (idx < 2) + k = 0; + else if (idx < 4) + k = 1; + else + k = 2; + + bool add_50 = + scorePattern[sp][k] == 50 && l->seq() % 2 == 0; + bool add_100 = scorePattern[sp][k] == 100; + bool add_me = history.UNLNodeIDs[idx] == myId; + return add_50 || add_100 || add_me; + }); + + NegativeUNLVote vote(myId, history.env.journal); + auto scoreTable = vote.buildScoreTable( + history.lastLedger(), + history.UNLNodeIDSet, + history.validations); + BEAST_EXPECT(scoreTable); + if (scoreTable) + { + std::uint32_t i = 0; // looping unl + auto checkScores = [&](std::uint32_t score, + std::uint32_t k) -> bool { + if (history.UNLNodeIDs[i] == myId) + return score == 256; + if (scorePattern[sp][k] == 0) + return score == 0; + if (scorePattern[sp][k] == 50) + return score == 256 / 2; + if (scorePattern[sp][k] == 100) + return score == 256; + else + return false; + }; + for (; i < 2; ++i) + { + BEAST_EXPECT(checkScores( + (*scoreTable)[history.UNLNodeIDs[i]], 0)); + } + for (; i < 4; ++i) + { + BEAST_EXPECT(checkScores( + 
(*scoreTable)[history.UNLNodeIDs[i]], 1)); + } + for (; i < unlSize; ++i) + { + BEAST_EXPECT(checkScores( + (*scoreTable)[history.UNLNodeIDs[i]], 2)); + } + } + } + } + } + } + + void + run() override + { + testBuildScoreTableCombination(); + } +}; + +/* + * Test the doVoting function of NegativeUNLVote. + * The test cases are split to 5 classes for parallel execution. + * + * Voting tests: (use hasToDisable and hasToReEnable in some of the cases) + * + * == all good score, nUnl empty + * -- txSet.size = 0 + * == all good score, nUnl not empty (use hasToDisable) + * -- txSet.size = 1 + * + * == 2 nodes offline, nUnl empty (use hasToReEnable) + * -- txSet.size = 1 + * == 2 nodes offline, in nUnl + * -- txSet.size = 0 + * + * == 2 nodes offline, not in nUnl, but maxListed + * -- txSet.size = 0 + * + * == 2 nodes offline including me, not in nUnl + * -- txSet.size = 0 + * == 2 nodes offline, not in negativeUnl, but I'm not a validator + * -- txSet.size = 0 + * == 2 in nUnl, but not in unl, no other remove candidates + * -- txSet.size = 1 + * + * == 2 new validators have bad scores + * -- txSet.size = 0 + * == 2 expired new validators have bad scores + * -- txSet.size = 1 + */ + +class NegativeUNLVoteGoodScore_test : public beast::unit_test::suite +{ + void + testDoVoting() + { + testcase("Do Voting"); + + { + //== all good score, negativeUnl empty + //-- txSet.size = 0 + NetworkHistory history = {*this, {51, 0, false, false, {}}}; + BEAST_EXPECT(history.goodHistory); + if (history.goodHistory) + { + history.walkHistoryAndAddValidations( + [&](std::shared_ptr const& l, + std::size_t idx) -> bool { return true; }); + BEAST_EXPECT(voteAndCheck(history, history.UNLNodeIDs[0], 0)); + } + } + + { + // all good score, negativeUnl not empty (use hasToDisable) + //-- txSet.size = 1 + NetworkHistory history = {*this, {37, 0, true, false, {}}}; + BEAST_EXPECT(history.goodHistory); + if (history.goodHistory) + { + history.walkHistoryAndAddValidations( + [&](std::shared_ptr const& 
l, + std::size_t idx) -> bool { return true; }); + BEAST_EXPECT(voteAndCheck(history, history.UNLNodeIDs[0], 1)); + } + } + } + + void + run() override + { + testDoVoting(); + } +}; + +class NegativeUNLVoteOffline_test : public beast::unit_test::suite +{ + void + testDoVoting() + { + testcase("Do Voting"); + + { + //== 2 nodes offline, negativeUnl empty (use hasToReEnable) + //-- txSet.size = 1 + NetworkHistory history = {*this, {29, 1, false, true, {}}}; + BEAST_EXPECT(history.goodHistory); + if (history.goodHistory) + { + history.walkHistoryAndAddValidations( + [&](std::shared_ptr const& l, + std::size_t idx) -> bool { + // skip node 0 and node 1 + return idx > 1; + }); + BEAST_EXPECT( + voteAndCheck(history, history.UNLNodeIDs.back(), 1)); + } + } + + { + // 2 nodes offline, in negativeUnl + //-- txSet.size = 0 + NetworkHistory history = {*this, {30, 1, true, false, {}}}; + BEAST_EXPECT(history.goodHistory); + if (history.goodHistory) + { + NodeID n1 = + calcNodeID(*history.lastLedger()->negativeUnl().begin()); + NodeID n2 = + calcNodeID(*history.lastLedger()->negativeUnlToDisable()); + history.walkHistoryAndAddValidations( + [&](std::shared_ptr const& l, + std::size_t idx) -> bool { + // skip node 0 and node 1 + return history.UNLNodeIDs[idx] != n1 && + history.UNLNodeIDs[idx] != n2; + }); + BEAST_EXPECT( + voteAndCheck(history, history.UNLNodeIDs.back(), 0)); + } + } + } + + void + run() override + { + testDoVoting(); + } +}; + +class NegativeUNLVoteMaxListed_test : public beast::unit_test::suite +{ + void + testDoVoting() + { + testcase("Do Voting"); + + { + // 2 nodes offline, not in negativeUnl, but maxListed + //-- txSet.size = 0 + NetworkHistory history = {*this, {32, 8, true, true, {}}}; + BEAST_EXPECT(history.goodHistory); + if (history.goodHistory) + { + history.walkHistoryAndAddValidations( + [&](std::shared_ptr const& l, + std::size_t idx) -> bool { + // skip node 0 ~ 10 + return idx > 10; + }); + BEAST_EXPECT( + voteAndCheck(history, 
history.UNLNodeIDs.back(), 0)); + } + } + } + + void + run() override + { + testDoVoting(); + } +}; + +class NegativeUNLVoteRetiredValidator_test : public beast::unit_test::suite +{ + void + testDoVoting() + { + testcase("Do Voting"); + + { + //== 2 nodes offline including me, not in negativeUnl + //-- txSet.size = 0 + NetworkHistory history = {*this, {35, 0, false, false, {}}}; + BEAST_EXPECT(history.goodHistory); + if (history.goodHistory) + { + history.walkHistoryAndAddValidations( + [&](std::shared_ptr const& l, + std::size_t idx) -> bool { return idx > 1; }); + BEAST_EXPECT(voteAndCheck(history, history.UNLNodeIDs[0], 0)); + } + } + + { + // 2 nodes offline, not in negativeUnl, but I'm not a validator + //-- txSet.size = 0 + NetworkHistory history = {*this, {40, 0, false, false, {}}}; + BEAST_EXPECT(history.goodHistory); + if (history.goodHistory) + { + history.walkHistoryAndAddValidations( + [&](std::shared_ptr const& l, + std::size_t idx) -> bool { return idx > 1; }); + BEAST_EXPECT(voteAndCheck(history, NodeID(0xdeadbeef), 0)); + } + } + + { + //== 2 in negativeUnl, but not in unl, no other remove candidates + //-- txSet.size = 1 + NetworkHistory history = {*this, {25, 2, false, false, {}}}; + BEAST_EXPECT(history.goodHistory); + if (history.goodHistory) + { + history.walkHistoryAndAddValidations( + [&](std::shared_ptr const& l, + std::size_t idx) -> bool { return idx > 1; }); + BEAST_EXPECT(voteAndCheck( + history, + history.UNLNodeIDs.back(), + 1, + [&](NegativeUNLVote& vote) { + history.UNLKeySet.erase(history.UNLKeys[0]); + history.UNLKeySet.erase(history.UNLKeys[1]); + })); + } + } + } + + void + run() override + { + testDoVoting(); + } +}; + +class NegativeUNLVoteNewValidator_test : public beast::unit_test::suite +{ + void + testDoVoting() + { + testcase("Do Voting"); + + { + //== 2 new validators have bad scores + //-- txSet.size = 0 + NetworkHistory history = {*this, {15, 0, false, false, {}}}; + BEAST_EXPECT(history.goodHistory); + if 
(history.goodHistory) + { + history.walkHistoryAndAddValidations( + [&](std::shared_ptr const& l, + std::size_t idx) -> bool { return true; }); + BEAST_EXPECT(voteAndCheck( + history, + history.UNLNodeIDs[0], + 0, + [&](NegativeUNLVote& vote) { + auto extra_key_1 = + randomKeyPair(KeyType::ed25519).first; + auto extra_key_2 = + randomKeyPair(KeyType::ed25519).first; + history.UNLKeySet.insert(extra_key_1); + history.UNLKeySet.insert(extra_key_2); + hash_set nowTrusted; + nowTrusted.insert(calcNodeID(extra_key_1)); + nowTrusted.insert(calcNodeID(extra_key_2)); + vote.newValidators( + history.lastLedger()->seq(), nowTrusted); + })); + } + } + + { + //== 2 expired new validators have bad scores + //-- txSet.size = 1 + NetworkHistory history = { + *this, + {21, + 0, + false, + false, + NegativeUNLVote::newValidatorDisableSkip * 2}}; + BEAST_EXPECT(history.goodHistory); + if (history.goodHistory) + { + history.walkHistoryAndAddValidations( + [&](std::shared_ptr const& l, + std::size_t idx) -> bool { return true; }); + BEAST_EXPECT(voteAndCheck( + history, + history.UNLNodeIDs[0], + 1, + [&](NegativeUNLVote& vote) { + auto extra_key_1 = + randomKeyPair(KeyType::ed25519).first; + auto extra_key_2 = + randomKeyPair(KeyType::ed25519).first; + history.UNLKeySet.insert(extra_key_1); + history.UNLKeySet.insert(extra_key_2); + hash_set nowTrusted; + nowTrusted.insert(calcNodeID(extra_key_1)); + nowTrusted.insert(calcNodeID(extra_key_2)); + vote.newValidators(256, nowTrusted); + })); + } + } + } + + void + run() override + { + testDoVoting(); + } +}; + +class NegativeUNLVoteFilterValidations_test : public beast::unit_test::suite +{ + void + testFilterValidations() + { + testcase("Filter Validations"); + jtx::Env env(*this); + auto l = std::make_shared( + create_genesis, + env.app().config(), + std::vector{}, + env.app().getNodeFamily()); + + auto createSTVal = [&](std::pair const& keys) { + return std::make_shared( + env.app().timeKeeper().now(), + keys.first, + keys.second, + 
calcNodeID(keys.first), + [&](STValidation& v) { + v.setFieldH256(sfLedgerHash, l->info().hash); + v.setFieldU32(sfLedgerSequence, l->seq()); + v.setFlag(vfFullValidation); + }); + }; + + // create keys and validations + std::uint32_t numNodes = 10; + std::uint32_t negUnlSize = 3; + std::vector cfgKeys; + hash_set activeValidators; + hash_set nUnlKeys; + std::vector> vals; + for (int i = 0; i < numNodes; ++i) + { + auto keyPair = randomKeyPair(KeyType::secp256k1); + vals.emplace_back(createSTVal(keyPair)); + cfgKeys.push_back(toBase58(TokenType::NodePublic, keyPair.first)); + activeValidators.emplace(calcNodeID(keyPair.first)); + if (i < negUnlSize) + { + nUnlKeys.insert(keyPair.first); + } + } + + // setup the ValidatorList + auto& validators = env.app().validators(); + auto& local = *nUnlKeys.begin(); + std::vector cfgPublishers; + validators.load(local, cfgKeys, cfgPublishers); + validators.updateTrusted(activeValidators); + BEAST_EXPECT(validators.getTrustedMasterKeys().size() == numNodes); + validators.setNegativeUnl(nUnlKeys); + BEAST_EXPECT(validators.getNegativeUnl().size() == negUnlSize); + + // test the filter + BEAST_EXPECT(vals.size() == numNodes); + vals = validators.negativeUNLFilter(std::move(vals)); + BEAST_EXPECT(vals.size() == numNodes - negUnlSize); + } + + void + run() override + { + testFilterValidations(); + } +}; + +class NegativeUNLgRPC_test : public beast::unit_test::suite +{ + template + std::string + toByteString(T const& data) + { + const char* bytes = reinterpret_cast(data.data()); + return {bytes, data.size()}; + } + + void + testGRPC() + { + testcase("gRPC test"); + + auto gRpcTest = [this]( + std::uint32_t negUnlSize, + bool hasToDisable, + bool hasToReEnable) -> bool { + NetworkHistory history = { + *this, {20, negUnlSize, hasToDisable, hasToReEnable, {}}}; + if (!history.goodHistory) + return false; + + auto const& negUnlObject = + history.lastLedger()->read(keylet::negativeUNL()); + if (!negUnlSize && !hasToDisable && 
!hasToReEnable && !negUnlObject) + return true; + if (!negUnlObject) + return false; + + org::xrpl::rpc::v1::NegativeUnl to; + ripple::RPC::convert(to, *negUnlObject); + bool goodSize = to.negative_unl_entries_size() == negUnlSize && + to.has_validator_to_disable() == hasToDisable && + to.has_validator_to_re_enable() == hasToReEnable; + if (!goodSize) + return false; + + if (negUnlSize) + { + if (!negUnlObject->isFieldPresent(sfNegativeUNL)) + return false; + auto const& nUnlData = + negUnlObject->getFieldArray(sfNegativeUNL); + if (nUnlData.size() != negUnlSize) + return false; + int idx = 0; + for (auto const& n : nUnlData) + { + if (!n.isFieldPresent(sfPublicKey) || + !n.isFieldPresent(sfFirstLedgerSequence)) + return false; + + if (!to.negative_unl_entries(idx).has_ledger_sequence() || + !to.negative_unl_entries(idx).has_public_key()) + return false; + + if (to.negative_unl_entries(idx).public_key().value() != + toByteString(n.getFieldVL(sfPublicKey))) + return false; + + if (to.negative_unl_entries(idx) + .ledger_sequence() + .value() != n.getFieldU32(sfFirstLedgerSequence)) + return false; + + ++idx; + } + } + + if (hasToDisable) + { + if (!negUnlObject->isFieldPresent(sfNegativeUNLToDisable)) + return false; + if (to.validator_to_disable().value() != + toByteString( + negUnlObject->getFieldVL(sfNegativeUNLToDisable))) + return false; + } + + if (hasToReEnable) + { + if (!negUnlObject->isFieldPresent(sfNegativeUNLToReEnable)) + return false; + if (to.validator_to_re_enable().value() != + toByteString( + negUnlObject->getFieldVL(sfNegativeUNLToReEnable))) + return false; + } + + return true; + }; + + BEAST_EXPECT(gRpcTest(0, false, false)); + BEAST_EXPECT(gRpcTest(2, true, true)); + } + + void + run() override + { + testGRPC(); + } +}; + +BEAST_DEFINE_TESTSUITE(NegativeUNL, ledger, ripple); +BEAST_DEFINE_TESTSUITE(NegativeUNLNoAmendment, ledger, ripple); + +BEAST_DEFINE_TESTSUITE(NegativeUNLVoteInternal, consensus, ripple); 
+BEAST_DEFINE_TESTSUITE_MANUAL(NegativeUNLVoteScoreTable, consensus, ripple); +BEAST_DEFINE_TESTSUITE_PRIO(NegativeUNLVoteGoodScore, consensus, ripple, 1); +BEAST_DEFINE_TESTSUITE_PRIO(NegativeUNLVoteOffline, consensus, ripple, 1); +BEAST_DEFINE_TESTSUITE_PRIO(NegativeUNLVoteMaxListed, consensus, ripple, 1); +BEAST_DEFINE_TESTSUITE_PRIO( + NegativeUNLVoteRetiredValidator, + consensus, + ripple, + 1); +BEAST_DEFINE_TESTSUITE_PRIO(NegativeUNLVoteNewValidator, consensus, ripple, 1); +BEAST_DEFINE_TESTSUITE(NegativeUNLVoteFilterValidations, consensus, ripple); +BEAST_DEFINE_TESTSUITE(NegativeUNLgRPC, ledger, ripple); + +/////////////////////////////////////////////////////////////////////// +/////////////////////////////////////////////////////////////////////// +/////////////////////////////////////////////////////////////////////// +bool +negUnlSizeTest( + std::shared_ptr const& l, + size_t size, + bool hasToDisable, + bool hasToReEnable) +{ + bool sameSize = l->negativeUnl().size() == size; + bool sameToDisable = + (l->negativeUnlToDisable() != boost::none) == hasToDisable; + bool sameToReEnable = + (l->negativeUnlToReEnable() != boost::none) == hasToReEnable; + + return sameSize && sameToDisable && sameToReEnable; +} + +bool +applyAndTestResult(jtx::Env& env, OpenView& view, STTx const& tx, bool pass) +{ + auto res = apply(env.app(), view, tx, ApplyFlags::tapNONE, env.journal); + if (pass) + return res.first == tesSUCCESS; + else + return res.first == tefFAILURE || res.first == temDISABLED; +} + +bool +VerifyPubKeyAndSeq( + std::shared_ptr const& l, + hash_map nUnlLedgerSeq) +{ + auto sle = l->read(keylet::negativeUNL()); + if (!sle) + return false; + if (!sle->isFieldPresent(sfNegativeUNL)) + return false; + + auto const& nUnlData = sle->getFieldArray(sfNegativeUNL); + if (nUnlData.size() != nUnlLedgerSeq.size()) + return false; + + for (auto const& n : nUnlData) + { + if (!n.isFieldPresent(sfFirstLedgerSequence) || + !n.isFieldPresent(sfPublicKey)) + return 
false; + + auto seq = n.getFieldU32(sfFirstLedgerSequence); + auto d = n.getFieldVL(sfPublicKey); + auto s = makeSlice(d); + if (!publicKeyType(s)) + return false; + PublicKey pk(s); + auto it = nUnlLedgerSeq.find(pk); + if (it == nUnlLedgerSeq.end()) + return false; + if (it->second != seq) + return false; + nUnlLedgerSeq.erase(it); + } + return nUnlLedgerSeq.size() == 0; +} + +std::size_t +countTx(std::shared_ptr const& txSet) +{ + std::size_t count = 0; + for (auto i = txSet->begin(); i != txSet->end(); ++i) + { + ++count; + } + return count; +}; + +std::vector +createPublicKeys(std::size_t n) +{ + std::vector keys; + std::size_t ss = 33; + std::vector data(ss, 0); + data[0] = 0xED; + for (int i = 0; i < n; ++i) + { + data[1]++; + Slice s(data.data(), ss); + keys.emplace_back(s); + } + return keys; +} + +STTx +createTx(bool disabling, LedgerIndex seq, PublicKey const& txKey) +{ + auto fill = [&](auto& obj) { + obj.setFieldU8(sfUNLModifyDisabling, disabling ? 1 : 0); + obj.setFieldU32(sfLedgerSequence, seq); + obj.setFieldVL(sfUNLModifyValidator, txKey); + }; + return STTx(ttUNL_MODIFY, fill); +} + +} // namespace test +} // namespace ripple diff --git a/src/test/consensus/Validations_test.cpp b/src/test/consensus/Validations_test.cpp index 9455d3931bd..2473c9a7f3a 100644 --- a/src/test/consensus/Validations_test.cpp +++ b/src/test/consensus/Validations_test.cpp @@ -707,10 +707,18 @@ class Validations_test : public beast::unit_test::suite Node a = harness.makeNode(); Ledger ledgerA = h["a"]; - BEAST_EXPECT(ValStatus::current == harness.add(a.validate(ledgerA))); BEAST_EXPECT(harness.vals().numTrustedForLedger(ledgerA.id())); + + // Keep the validation from expire + harness.clock().advance(harness.parms().validationSET_EXPIRES); + harness.vals().setSeqToKeep(ledgerA.seq()); + harness.vals().expire(); + BEAST_EXPECT(harness.vals().numTrustedForLedger(ledgerA.id())); + + // Allow the validation to expire 
harness.clock().advance(harness.parms().validationSET_EXPIRES); + harness.vals().setSeqToKeep(++ledgerA.seq()); harness.vals().expire(); BEAST_EXPECT(!harness.vals().numTrustedForLedger(ledgerA.id())); } diff --git a/src/test/core/Config_test.cpp b/src/test/core/Config_test.cpp index 0e829642af8..03282fd59de 100644 --- a/src/test/core/Config_test.cpp +++ b/src/test/core/Config_test.cpp @@ -1014,6 +1014,57 @@ r.ripple.com 51235 } } + void + testAmendment() + { + testcase("amendment"); + struct ConfigUnit + { + std::string unit; + std::uint32_t numSeconds; + std::uint32_t configVal; + bool shouldPass; + }; + + std::vector units = { + {"seconds", 1, 15 * 60, false}, + {"minutes", 60, 14, false}, + {"minutes", 60, 15, true}, + {"hours", 3600, 10, true}, + {"days", 86400, 10, true}, + {"weeks", 604800, 2, true}, + {"months", 2592000, 1, false}, + {"years", 31536000, 1, false}}; + + std::string space = ""; + for (auto& [unit, sec, val, shouldPass] : units) + { + Config c; + std::string toLoad(R"rippleConfig( +[amendment_majority_time] +)rippleConfig"); + toLoad += std::to_string(val) + space + unit; + space = space == "" ? " " : ""; + + try + { + c.loadFromString(toLoad); + if (shouldPass) + BEAST_EXPECT( + c.AMENDMENT_MAJORITY_TIME.count() == val * sec); + else + fail(); + } + catch (std::runtime_error&) + { + if (!shouldPass) + pass(); + else + fail(); + } + } + } + void run() override { @@ -1027,6 +1078,7 @@ r.ripple.com 51235 testWhitespace(); testComments(); testGetters(); + testAmendment(); } }; diff --git a/src/test/jtx/CaptureLogs.h b/src/test/jtx/CaptureLogs.h new file mode 100644 index 00000000000..30a562e99d0 --- /dev/null +++ b/src/test/jtx/CaptureLogs.h @@ -0,0 +1,80 @@ +//------------------------------------------------------------------------------ +/* + This file is part of rippled: https://github.com/ripple/rippled + Copyright (c) 2020 Ripple Labs Inc. 
+ + Permission to use, copy, modify, and/or distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. +*/ +//============================================================================== + +#include + +namespace ripple { +namespace test { + +/** + * @brief Log manager for CaptureSinks. This class holds the stream + * instance that is written to by the sinks. Upon destruction, all + * contents of the stream are assigned to the string specified in the + * ctor + */ +class CaptureLogs : public Logs +{ + std::stringstream strm_; + std::string* pResult_; + + /** + * @brief sink for writing all log messages to a stringstream + */ + class CaptureSink : public beast::Journal::Sink + { + std::stringstream& strm_; + + public: + CaptureSink( + beast::severities::Severity threshold, + std::stringstream& strm) + : beast::Journal::Sink(threshold, false), strm_(strm) + { + } + + void + write(beast::severities::Severity level, std::string const& text) + override + { + strm_ << text; + } + }; + +public: + explicit CaptureLogs(std::string* pResult) + : Logs(beast::severities::kInfo), pResult_(pResult) + { + } + + ~CaptureLogs() override + { + *pResult_ = strm_.str(); + } + + std::unique_ptr + makeSink( + std::string const& partition, + beast::severities::Severity threshold) override + { + return std::make_unique(threshold, strm_); + } +}; + +} // namespace test +} // 
namespace ripple diff --git a/src/test/jtx/CheckMessageLogs.h b/src/test/jtx/CheckMessageLogs.h new file mode 100644 index 00000000000..66f5f7e106c --- /dev/null +++ b/src/test/jtx/CheckMessageLogs.h @@ -0,0 +1,75 @@ +//------------------------------------------------------------------------------ +/* + This file is part of rippled: https://github.com/ripple/rippled + Copyright (c) 2020 Ripple Labs Inc. + + Permission to use, copy, modify, and/or distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+*/ +//============================================================================== + +#include + +namespace ripple { +namespace test { + +/** Log manager that searches for a specific message substring + */ +class CheckMessageLogs : public Logs +{ + std::string msg_; + bool* pFound_; + + class CheckMessageSink : public beast::Journal::Sink + { + CheckMessageLogs& owner_; + + public: + CheckMessageSink( + beast::severities::Severity threshold, + CheckMessageLogs& owner) + : beast::Journal::Sink(threshold, false), owner_(owner) + { + } + + void + write(beast::severities::Severity level, std::string const& text) + override + { + if (text.find(owner_.msg_) != std::string::npos) + *owner_.pFound_ = true; + } + }; + +public: + /** Constructor + + @param msg The message string to search for + @param pFound Pointer to the variable to set to true if the message is + found + */ + CheckMessageLogs(std::string msg, bool* pFound) + : Logs{beast::severities::kDebug}, msg_{std::move(msg)}, pFound_{pFound} + { + } + + std::unique_ptr + makeSink( + std::string const& partition, + beast::severities::Severity threshold) override + { + return std::make_unique(threshold, *this); + } +}; + +} // namespace test +} // namespace ripple diff --git a/src/test/jtx/Env.h b/src/test/jtx/Env.h index f06cfbf7a9c..f2934bb5002 100644 --- a/src/test/jtx/Env.h +++ b/src/test/jtx/Env.h @@ -27,6 +27,7 @@ #include #include #include // +#include #include #include #include @@ -131,7 +132,8 @@ class Env AppBundle( beast::unit_test::suite& suite, std::unique_ptr config, - std::unique_ptr logs); + std::unique_ptr logs, + beast::severities::Severity thresh); ~AppBundle(); }; @@ -163,12 +165,10 @@ class Env Env(beast::unit_test::suite& suite_, std::unique_ptr config, FeatureBitset features, - std::unique_ptr logs = nullptr) + std::unique_ptr logs = nullptr, + beast::severities::Severity thresh = beast::severities::kError) : test(suite_) - , bundle_( - suite_, - std::move(config), - logs ? 
std::move(logs) : std::make_unique(suite_)) + , bundle_(suite_, std::move(config), std::move(logs), thresh) , journal{bundle_.app->journal("Env")} { memoize(Account::master); @@ -211,11 +211,13 @@ class Env */ Env(beast::unit_test::suite& suite_, std::unique_ptr config, - std::unique_ptr logs = nullptr) + std::unique_ptr logs = nullptr, + beast::severities::Severity thresh = beast::severities::kError) : Env(suite_, std::move(config), supported_amendments(), - std::move(logs)) + std::move(logs), + thresh) { } diff --git a/src/test/jtx/impl/Env.cpp b/src/test/jtx/impl/Env.cpp index a9b7c3430ff..855dfe7bbf0 100644 --- a/src/test/jtx/impl/Env.cpp +++ b/src/test/jtx/impl/Env.cpp @@ -59,12 +59,22 @@ namespace jtx { Env::AppBundle::AppBundle( beast::unit_test::suite& suite, std::unique_ptr config, - std::unique_ptr logs) + std::unique_ptr logs, + beast::severities::Severity thresh) : AppBundle() { using namespace beast::severities; - // Use kFatal threshold to reduce noise from STObject. - setDebugLogSink(std::make_unique("Debug", kFatal, suite)); + if (logs) + { + setDebugLogSink(logs->makeSink("Debug", kFatal)); + } + else + { + logs = std::make_unique(suite); + // Use kFatal threshold to reduce noise from STObject. 
+ setDebugLogSink( + std::make_unique("Debug", kFatal, suite)); + } auto timeKeeper_ = std::make_unique(); timeKeeper = timeKeeper_.get(); // Hack so we don't have to call Config::setup @@ -72,7 +82,7 @@ Env::AppBundle::AppBundle( owned = make_Application( std::move(config), std::move(logs), std::move(timeKeeper_)); app = owned.get(); - app->logs().threshold(kError); + app->logs().threshold(thresh); if (!app->setup()) Throw("Env::AppBundle: setup failed"); timeKeeper->set(app->getLedgerMaster().getClosedLedger()->info().closeTime); diff --git a/src/test/ledger/SkipList_test.cpp b/src/test/ledger/SkipList_test.cpp index 56c1efa0375..386a8027746 100644 --- a/src/test/ledger/SkipList_test.cpp +++ b/src/test/ledger/SkipList_test.cpp @@ -39,7 +39,7 @@ class SkipList_test : public beast::unit_test::suite create_genesis, config, std::vector{}, - env.app().family()); + env.app().getNodeFamily()); history.push_back(prev); for (auto i = 0; i < 1023; ++i) { diff --git a/src/test/ledger/View_test.cpp b/src/test/ledger/View_test.cpp index 3c462df280f..6822eb015c9 100644 --- a/src/test/ledger/View_test.cpp +++ b/src/test/ledger/View_test.cpp @@ -134,7 +134,10 @@ class View_test : public beast::unit_test::suite Env env(*this); Config config; std::shared_ptr const genesis = std::make_shared( - create_genesis, config, std::vector{}, env.app().family()); + create_genesis, + config, + std::vector{}, + env.app().getNodeFamily()); auto const ledger = std::make_shared( *genesis, env.app().timeKeeper().closeTime()); wipe(*ledger); @@ -388,7 +391,10 @@ class View_test : public beast::unit_test::suite Env env(*this); Config config; std::shared_ptr const genesis = std::make_shared( - create_genesis, config, std::vector{}, env.app().family()); + create_genesis, + config, + std::vector{}, + env.app().getNodeFamily()); auto const ledger = std::make_shared( *genesis, env.app().timeKeeper().closeTime()); auto setup123 = [&ledger, this]() { @@ -769,7 +775,7 @@ class View_test : public 
beast::unit_test::suite create_genesis, config, std::vector{}, - env.app().family()); + env.app().getNodeFamily()); auto const ledger = std::make_shared( *genesis, env.app().timeKeeper().closeTime()); wipe(*ledger); diff --git a/src/test/nodestore/DatabaseShard_test.cpp b/src/test/nodestore/DatabaseShard_test.cpp index 7e0b746cb62..c4606ecd7b4 100644 --- a/src/test/nodestore/DatabaseShard_test.cpp +++ b/src/test/nodestore/DatabaseShard_test.cpp @@ -19,6 +19,7 @@ #include #include +#include #include #include #include @@ -27,13 +28,135 @@ #include #include #include +#include +#include #include +#include +#include +#include #include #include namespace ripple { namespace NodeStore { +/** std::uniform_int_distribution is platform dependent. + * Unit test for deterministic shards is the following: it generates + * predictable accounts and transactions, packs them into ledgers + * and makes the shard. The hash of this shard should be equal to the + * given value. On different platforms (precisely, Linux and Mac) + * hashes of the resulting shard were different. It was investigated + * that the problem is in the class std::uniform_int_distribution + * which generates different pseudorandom sequences on different + * platforms, but we need a predictable sequence. 
+ */ +template +struct uniformIntDistribution +{ + using resultType = IntType; + + const resultType A, B; + + struct paramType + { + const resultType A, B; + + paramType(resultType aa, resultType bb) : A(aa), B(bb) + { + } + }; + + explicit uniformIntDistribution( + const resultType a = 0, + const resultType b = std::numeric_limits::max()) + : A(a), B(b) + { + } + + explicit uniformIntDistribution(const paramType& params) + : A(params.A), B(params.B) + { + } + + template + resultType + operator()(Generator& g) const + { + return rnd(g, A, B); + } + + template + resultType + operator()(Generator& g, const paramType& params) const + { + return rnd(g, params.A, params.B); + } + + resultType + a() const + { + return A; + } + + resultType + b() const + { + return B; + } + + resultType + min() const + { + return A; + } + + resultType + max() const + { + return B; + } + +private: + template + resultType + rnd(Generator& g, const resultType a, const resultType b) const + { + static_assert( + std::is_convertible:: + value, + "Ups..."); + static_assert( + Generator::min() == 0, "If non-zero we have handle the offset"); + const resultType range = b - a + 1; + assert(Generator::max() >= range); // Just for safety + const resultType rejectLim = g.max() % range; + resultType n; + do + n = g(); + while (n <= rejectLim); + return (n % range) + a; + } +}; + +template +Integral +randInt(Engine& engine, Integral min, Integral max) +{ + assert(max > min); + + // This should have no state and constructing it should + // be very cheap. If that turns out not to be the case + // it could be hand-optimized. 
+ return uniformIntDistribution(min, max)(engine); +} + +template +Integral +randInt(Engine& engine, Integral max) +{ + return randInt(engine, Integral(0), max); +} + // Tests DatabaseShard class // class DatabaseShard_test : public TestBase @@ -87,7 +210,7 @@ class DatabaseShard_test : public TestBase { int p; if (n >= 2) - p = rand_int(rng_, 2 * dataSize); + p = randInt(rng_, 2 * dataSize); else p = 0; @@ -99,27 +222,27 @@ class DatabaseShard_test : public TestBase int from, to; do { - from = rand_int(rng_, n - 1); - to = rand_int(rng_, n - 1); + from = randInt(rng_, n - 1); + to = randInt(rng_, n - 1); } while (from == to); pay.push_back(std::make_pair(from, to)); } - n += !rand_int(rng_, nLedgers / dataSize); + n += !randInt(rng_, nLedgers / dataSize); if (n > accounts_.size()) { char str[9]; for (int j = 0; j < 8; ++j) - str[j] = 'a' + rand_int(rng_, 'z' - 'a'); + str[j] = 'a' + randInt(rng_, 'z' - 'a'); str[8] = 0; accounts_.emplace_back(str); } nAccounts_.push_back(n); payAccounts_.push_back(std::move(pay)); - xrpAmount_.push_back(rand_int(rng_, 90) + 10); + xrpAmount_.push_back(randInt(rng_, 90) + 10); } } @@ -495,7 +618,7 @@ class DatabaseShard_test : public TestBase } std::optional - createShard(TestData& data, DatabaseShard& db, int maxShardNumber) + createShard(TestData& data, DatabaseShard& db, int maxShardNumber = 1) { int shardNumber = -1; @@ -669,7 +792,7 @@ class DatabaseShard_test : public TestBase for (std::uint32_t i = 0; i < nTestShards * 2; ++i) { - std::uint32_t n = rand_int(data.rng_, nTestShards - 1) + 1; + std::uint32_t n = randInt(data.rng_, nTestShards - 1) + 1; if (bitMask & (1ll << n)) { db->removePreShard(n); @@ -978,6 +1101,90 @@ class DatabaseShard_test : public TestBase } } + std::string + ripemd160File(std::string filename) + { + using beast::hash_append; + std::ifstream input(filename, std::ios::in | std::ios::binary); + char buf[4096]; + ripemd160_hasher h; + + while (input.read(buf, 4096), input.gcount() > 0) + hash_append(h, 
buf, input.gcount()); + + auto const binResult = static_cast(h); + const auto charDigest = binResult.data(); + std::string result; + boost::algorithm::hex( + charDigest, + charDigest + sizeof(binResult), + std::back_inserter(result)); + + return result; + } + + void + testDeterministicShard( + std::string const& backendType, + std::uint64_t const seedValue) + { + using namespace test::jtx; + + std::string ripemd160Key("4CFA8985836B549EC99D2E9705707F488DC91E4E"), + ripemd160Dat("8CC61F503C36339803F8C2FC652C1102DDB889F1"); + + for (int i = 0; i < 2; i++) + { + beast::temp_dir shardDir; + { + Env env{ + *this, + testConfig( + (i ? "" : "deterministicShard"), + backendType, + shardDir.path())}; + DatabaseShard* db = env.app().getShardStore(); + BEAST_EXPECT(db); + + TestData data(seedValue, 4); + if (!BEAST_EXPECT(data.makeLedgers(env))) + return; + + if (createShard(data, *db) < 0) + return; + } + { + Env env{*this, testConfig("", backendType, shardDir.path())}; + DatabaseShard* db = env.app().getShardStore(); + BEAST_EXPECT(db); + + TestData data(seedValue, 4); + if (!BEAST_EXPECT(data.makeLedgers(env))) + return; + + waitShard(*db, 1); + + for (std::uint32_t j = 0; j < ledgersPerShard; ++j) + checkLedger(data, *db, *data.ledgers_[j]); + } + + boost::filesystem::path path(shardDir.path()); + path /= "1"; + boost::filesystem::path keypath = path / (backendType + ".key"); + std::string key = ripemd160File(keypath.string()); + boost::filesystem::path datpath = path / (backendType + ".dat"); + std::string dat = ripemd160File(datpath.string()); + + std::cerr << "Iteration " << i << ": RIPEMD160[" << backendType + << ".key] = " << key << std::endl; + std::cerr << "Iteration " << i << ": RIPEMD160[" << backendType + << ".dat] = " << dat << std::endl; + + BEAST_EXPECT(key == ripemd160Key); + BEAST_EXPECT(dat == ripemd160Dat); + } + } + void testAll(std::string const& backendType) { @@ -991,6 +1198,7 @@ class DatabaseShard_test : public TestBase 
testCorruptedDatabase(backendType, seedValue + 40); testIllegalFinalKey(backendType, seedValue + 50); testImport(backendType, seedValue + 60); + testDeterministicShard(backendType, seedValue + 70); } public: diff --git a/src/test/nodestore/Database_test.cpp b/src/test/nodestore/Database_test.cpp index b1a88bea557..826f5ccf5bf 100644 --- a/src/test/nodestore/Database_test.cpp +++ b/src/test/nodestore/Database_test.cpp @@ -18,8 +18,12 @@ //============================================================================== #include +#include #include #include +#include +#include +#include #include #include @@ -35,6 +39,409 @@ class Database_test : public TestBase { } + void + testConfig() + { + testcase("Config"); + + using namespace ripple::test; + using namespace ripple::test::jtx; + + auto const integrityWarning = + "reducing the data integrity guarantees from the " + "default [sqlite] behavior is not recommended for " + "nodes storing large amounts of history, because of the " + "difficulty inherent in rebuilding corrupted data."; + { + // defaults + Env env(*this); + + auto const s = setup_DatabaseCon(env.app().config()); + + if (BEAST_EXPECT(s.globalPragma->size() == 3)) + { + BEAST_EXPECT( + s.globalPragma->at(0) == "PRAGMA journal_mode=wal;"); + BEAST_EXPECT( + s.globalPragma->at(1) == "PRAGMA synchronous=normal;"); + BEAST_EXPECT( + s.globalPragma->at(2) == "PRAGMA temp_store=file;"); + } + } + { + // High safety level + DatabaseCon::Setup::globalPragma.reset(); + + bool found = false; + Env env = [&]() { + auto p = test::jtx::envconfig(); + { + auto& section = p->section("sqlite"); + section.set("safety_level", "high"); + } + p->LEDGER_HISTORY = 100'000'000; + + return Env( + *this, + std::move(p), + std::make_unique( + integrityWarning, &found), + beast::severities::kWarning); + }(); + + BEAST_EXPECT(!found); + auto const s = setup_DatabaseCon(env.app().config()); + if (BEAST_EXPECT(s.globalPragma->size() == 3)) + { + BEAST_EXPECT( + s.globalPragma->at(0) == 
"PRAGMA journal_mode=wal;"); + BEAST_EXPECT( + s.globalPragma->at(1) == "PRAGMA synchronous=normal;"); + BEAST_EXPECT( + s.globalPragma->at(2) == "PRAGMA temp_store=file;"); + } + } + { + // Low safety level + DatabaseCon::Setup::globalPragma.reset(); + + bool found = false; + Env env = [&]() { + auto p = test::jtx::envconfig(); + { + auto& section = p->section("sqlite"); + section.set("safety_level", "low"); + } + p->LEDGER_HISTORY = 100'000'000; + + return Env( + *this, + std::move(p), + std::make_unique( + integrityWarning, &found), + beast::severities::kWarning); + }(); + + BEAST_EXPECT(found); + auto const s = setup_DatabaseCon(env.app().config()); + if (BEAST_EXPECT(s.globalPragma->size() == 3)) + { + BEAST_EXPECT( + s.globalPragma->at(0) == "PRAGMA journal_mode=memory;"); + BEAST_EXPECT( + s.globalPragma->at(1) == "PRAGMA synchronous=off;"); + BEAST_EXPECT( + s.globalPragma->at(2) == "PRAGMA temp_store=memory;"); + } + } + { + // Override individual settings + DatabaseCon::Setup::globalPragma.reset(); + + bool found = false; + Env env = [&]() { + auto p = test::jtx::envconfig(); + { + auto& section = p->section("sqlite"); + section.set("journal_mode", "off"); + section.set("synchronous", "extra"); + section.set("temp_store", "default"); + } + + return Env( + *this, + std::move(p), + std::make_unique( + integrityWarning, &found), + beast::severities::kWarning); + }(); + + // No warning, even though higher risk settings were used because + // LEDGER_HISTORY is small + BEAST_EXPECT(!found); + auto const s = setup_DatabaseCon(env.app().config()); + if (BEAST_EXPECT(s.globalPragma->size() == 3)) + { + BEAST_EXPECT( + s.globalPragma->at(0) == "PRAGMA journal_mode=off;"); + BEAST_EXPECT( + s.globalPragma->at(1) == "PRAGMA synchronous=extra;"); + BEAST_EXPECT( + s.globalPragma->at(2) == "PRAGMA temp_store=default;"); + } + } + { + // Override individual settings with large history + DatabaseCon::Setup::globalPragma.reset(); + + bool found = false; + Env env = [&]() 
{ + auto p = test::jtx::envconfig(); + { + auto& section = p->section("sqlite"); + section.set("journal_mode", "off"); + section.set("synchronous", "extra"); + section.set("temp_store", "default"); + } + p->LEDGER_HISTORY = 50'000'000; + + return Env( + *this, + std::move(p), + std::make_unique( + integrityWarning, &found), + beast::severities::kWarning); + }(); + + // Warning expected, because higher risk settings were used and + // LEDGER_HISTORY is large + BEAST_EXPECT(found); + auto const s = setup_DatabaseCon(env.app().config()); + if (BEAST_EXPECT(s.globalPragma->size() == 3)) + { + BEAST_EXPECT( + s.globalPragma->at(0) == "PRAGMA journal_mode=off;"); + BEAST_EXPECT( + s.globalPragma->at(1) == "PRAGMA synchronous=extra;"); + BEAST_EXPECT( + s.globalPragma->at(2) == "PRAGMA temp_store=default;"); + } + } + { + // Error: Mix safety_level and individual settings + DatabaseCon::Setup::globalPragma.reset(); + auto const expected = + "Failed to initialize SQLite databases: " + "Configuration file may not define both \"safety_level\" and " + "\"journal_mode\""; + bool found = false; + + auto p = test::jtx::envconfig(); + { + auto& section = p->section("sqlite"); + section.set("safety_level", "low"); + section.set("journal_mode", "off"); + section.set("synchronous", "extra"); + section.set("temp_store", "default"); + } + + try + { + Env env( + *this, + std::move(p), + std::make_unique(expected, &found), + beast::severities::kWarning); + fail(); + } + catch (...) 
+ { + BEAST_EXPECT(found); + } + } + { + // Error: Mix safety_level and one setting (gotta catch 'em all) + DatabaseCon::Setup::globalPragma.reset(); + auto const expected = + "Failed to initialize SQLite databases: Configuration file may " + "not define both \"safety_level\" and \"journal_mode\""; + bool found = false; + + auto p = test::jtx::envconfig(); + { + auto& section = p->section("sqlite"); + section.set("safety_level", "high"); + section.set("journal_mode", "off"); + } + + try + { + Env env( + *this, + std::move(p), + std::make_unique(expected, &found), + beast::severities::kWarning); + fail(); + } + catch (...) + { + BEAST_EXPECT(found); + } + } + { + // Error: Mix safety_level and one setting (gotta catch 'em all) + DatabaseCon::Setup::globalPragma.reset(); + auto const expected = + "Failed to initialize SQLite databases: Configuration file may " + "not define both \"safety_level\" and \"synchronous\""; + bool found = false; + + auto p = test::jtx::envconfig(); + { + auto& section = p->section("sqlite"); + section.set("safety_level", "low"); + section.set("synchronous", "extra"); + } + + try + { + Env env( + *this, + std::move(p), + std::make_unique(expected, &found), + beast::severities::kWarning); + fail(); + } + catch (...) + { + BEAST_EXPECT(found); + } + } + { + // Error: Mix safety_level and one setting (gotta catch 'em all) + DatabaseCon::Setup::globalPragma.reset(); + auto const expected = + "Failed to initialize SQLite databases: Configuration file may " + "not define both \"safety_level\" and \"temp_store\""; + bool found = false; + + auto p = test::jtx::envconfig(); + { + auto& section = p->section("sqlite"); + section.set("safety_level", "high"); + section.set("temp_store", "default"); + } + + try + { + Env env( + *this, + std::move(p), + std::make_unique(expected, &found), + beast::severities::kWarning); + fail(); + } + catch (...) 
+ { + BEAST_EXPECT(found); + } + } + { + // Error: Invalid value + DatabaseCon::Setup::globalPragma.reset(); + auto const expected = + "Failed to initialize SQLite databases: Invalid safety_level " + "value: slow"; + bool found = false; + + auto p = test::jtx::envconfig(); + { + auto& section = p->section("sqlite"); + section.set("safety_level", "slow"); + } + + try + { + Env env( + *this, + std::move(p), + std::make_unique(expected, &found), + beast::severities::kWarning); + fail(); + } + catch (...) + { + BEAST_EXPECT(found); + } + } + { + // Error: Invalid value + DatabaseCon::Setup::globalPragma.reset(); + auto const expected = + "Failed to initialize SQLite databases: Invalid journal_mode " + "value: fast"; + bool found = false; + + auto p = test::jtx::envconfig(); + { + auto& section = p->section("sqlite"); + section.set("journal_mode", "fast"); + } + + try + { + Env env( + *this, + std::move(p), + std::make_unique(expected, &found), + beast::severities::kWarning); + fail(); + } + catch (...) + { + BEAST_EXPECT(found); + } + } + { + // Error: Invalid value + DatabaseCon::Setup::globalPragma.reset(); + auto const expected = + "Failed to initialize SQLite databases: Invalid synchronous " + "value: instant"; + bool found = false; + + auto p = test::jtx::envconfig(); + { + auto& section = p->section("sqlite"); + section.set("synchronous", "instant"); + } + + try + { + Env env( + *this, + std::move(p), + std::make_unique(expected, &found), + beast::severities::kWarning); + fail(); + } + catch (...) 
+ { + BEAST_EXPECT(found); + } + } + { + // Error: Invalid value + DatabaseCon::Setup::globalPragma.reset(); + auto const expected = + "Failed to initialize SQLite databases: Invalid temp_store " + "value: network"; + bool found = false; + + auto p = test::jtx::envconfig(); + { + auto& section = p->section("sqlite"); + section.set("temp_store", "network"); + } + + try + { + Env env( + *this, + std::move(p), + std::make_unique(expected, &found), + beast::severities::kWarning); + fail(); + } + catch (...) + { + BEAST_EXPECT(found); + } + } + } + + //-------------------------------------------------------------------------- + void testImport( std::string const& destBackendType, @@ -221,6 +628,8 @@ class Database_test : public TestBase { std::int64_t const seedValue = 50; + testConfig(); + testNodeStore("memory", false, seedValue); // Persistent backend tests diff --git a/src/test/overlay/compression_test.cpp b/src/test/overlay/compression_test.cpp index a73225eb3b0..454b10136f5 100644 --- a/src/test/overlay/compression_test.cpp +++ b/src/test/overlay/compression_test.cpp @@ -84,21 +84,14 @@ class compression_test : public beast::unit_test::suite std::shared_ptr proto, protocol::MessageType mt, uint16_t nbuffers, - const char* msg, - bool log = false) + std::string msg) { - if (log) - printf("=== compress/decompress %s ===\n", msg); + testcase("Compress/Decompress: " + msg); + Message m(*proto, mt); auto& buffer = m.getBuffer(Compressed::On); - if (log) - printf( - "==> compressed, original %d bytes, compressed %d bytes\n", - (int)m.getBuffer(Compressed::Off).size(), - (int)m.getBuffer(Compressed::On).size()); - boost::beast::multi_buffer buffers; // simulate multi-buffer @@ -112,26 +105,15 @@ class compression_test : public beast::unit_test::suite buffers.commit(boost::asio::buffer_copy( buffers.prepare(slice.size()), boost::asio::buffer(slice))); } - auto header = - ripple::detail::parseMessageHeader(buffers.data(), buffer.size()); - - if (log) - printf( - "==> 
parsed header: buffers size %d, compressed %d, algorithm " - "%d, header size %d, payload size %d, buffer size %d\n", - (int)buffers.size(), - header->algorithm != Algorithm::None, - (int)header->algorithm, - (int)header->header_size, - (int)header->payload_wire_size, - (int)buffer.size()); + + boost::system::error_code ec; + auto header = ripple::detail::parseMessageHeader( + ec, buffers.data(), buffer.size()); + + BEAST_EXPECT(header); if (header->algorithm == Algorithm::None) - { - if (log) - printf("==> NOT COMPRESSED\n"); return; - } std::vector decompressed; decompressed.resize(header->uncompressed_size); @@ -157,8 +139,6 @@ class compression_test : public beast::unit_test::suite uncompressed.begin() + ripple::compression::headerBytes, uncompressed.end(), decompressed.begin())); - if (log) - printf("\n"); } std::shared_ptr @@ -460,4 +440,4 @@ class compression_test : public beast::unit_test::suite BEAST_DEFINE_TESTSUITE_MANUAL_PRIO(compression, ripple_data, ripple, 20); } // namespace test -} // namespace ripple \ No newline at end of file +} // namespace ripple diff --git a/src/test/protocol/BuildInfo_test.cpp b/src/test/protocol/BuildInfo_test.cpp new file mode 100644 index 00000000000..82ad4d67963 --- /dev/null +++ b/src/test/protocol/BuildInfo_test.cpp @@ -0,0 +1,118 @@ +//------------------------------------------------------------------------------ +/* + This file is part of rippled: https://github.com/ripple/rippled + Copyright (c) 2020 Ripple Labs Inc. + + Permission to use, copy, modify, and/or distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. +*/ +//============================================================================== + +#include +#include + +namespace ripple { + +class BuildInfo_test : public beast::unit_test::suite +{ +public: + void + testEncodeSoftwareVersion() + { + testcase("EncodeSoftwareVersion"); + + auto encodedVersion = BuildInfo::encodeSoftwareVersion("1.2.3-b7"); + + // the first two bytes identify the particular implementation, 0x183B + BEAST_EXPECT( + (encodedVersion & 0xFFFF'0000'0000'0000LLU) == + 0x183B'0000'0000'0000LLU); + + // the next three bytes: major version, minor version, patch version, + // 0x010203 + BEAST_EXPECT( + (encodedVersion & 0x0000'FFFF'FF00'0000LLU) == + 0x0000'0102'0300'0000LLU); + + // the next two bits: + { + // 01 if a beta + BEAST_EXPECT( + (encodedVersion & 0x0000'0000'00C0'0000LLU) >> 22 == 0b01); + // 10 if an RC + encodedVersion = BuildInfo::encodeSoftwareVersion("1.2.4-rc7"); + BEAST_EXPECT( + (encodedVersion & 0x0000'0000'00C0'0000LLU) >> 22 == 0b10); + // 11 if neither an RC nor a beta + encodedVersion = BuildInfo::encodeSoftwareVersion("1.2.5"); + BEAST_EXPECT( + (encodedVersion & 0x0000'0000'00C0'0000LLU) >> 22 == 0b11); + } + + // the next six bits: rc/beta number (1-63) + encodedVersion = BuildInfo::encodeSoftwareVersion("1.2.6-b63"); + BEAST_EXPECT((encodedVersion & 0x0000'0000'003F'0000LLU) >> 16 == 63); + + // the last two bytes are zeros + BEAST_EXPECT((encodedVersion & 0x0000'0000'0000'FFFFLLU) == 0); + + // Test some version strings with wrong formats: + // no rc/beta number + encodedVersion = BuildInfo::encodeSoftwareVersion("1.2.3-b"); + BEAST_EXPECT((encodedVersion & 0x0000'0000'00FF'0000LLU) == 0); + // 
rc/beta number out of range + encodedVersion = BuildInfo::encodeSoftwareVersion("1.2.3-b64"); + BEAST_EXPECT((encodedVersion & 0x0000'0000'00FF'0000LLU) == 0); + + // Check that the rc/beta number of a release is 0: + encodedVersion = BuildInfo::encodeSoftwareVersion("1.2.6"); + BEAST_EXPECT((encodedVersion & 0x0000'0000'003F'0000LLU) == 0); + } + + void + testIsRippledVersion() + { + testcase("IsRippledVersion"); + auto vFF = 0xFFFF'FFFF'FFFF'FFFFLLU; + BEAST_EXPECT(!BuildInfo::isRippledVersion(vFF)); + auto vRippled = 0x183B'0000'0000'0000LLU; + BEAST_EXPECT(BuildInfo::isRippledVersion(vRippled)); + } + + void + testIsNewerVersion() + { + testcase("IsNewerVersion"); + auto vFF = 0xFFFF'FFFF'FFFF'FFFFLLU; + BEAST_EXPECT(!BuildInfo::isNewerVersion(vFF)); + + auto v159 = BuildInfo::encodeSoftwareVersion("1.5.9"); + BEAST_EXPECT(!BuildInfo::isNewerVersion(v159)); + + auto vCurrent = BuildInfo::getEncodedVersion(); + BEAST_EXPECT(!BuildInfo::isNewerVersion(vCurrent)); + + auto vMax = BuildInfo::encodeSoftwareVersion("255.255.255"); + BEAST_EXPECT(BuildInfo::isNewerVersion(vMax)); + } + + void + run() override + { + testEncodeSoftwareVersion(); + testIsRippledVersion(); + testIsNewerVersion(); + } +}; + +BEAST_DEFINE_TESTSUITE(BuildInfo, protocol, ripple); +} // namespace ripple diff --git a/src/test/rpc/AccountTx_test.cpp b/src/test/rpc/AccountTx_test.cpp index 534dd8235f8..aa40d6c0b5d 100644 --- a/src/test/rpc/AccountTx_test.cpp +++ b/src/test/rpc/AccountTx_test.cpp @@ -20,6 +20,7 @@ #include #include #include +#include #include #include @@ -175,7 +176,8 @@ class AccountTx_test : public beast::unit_test::suite p[jss::ledger_index_max] = 1; BEAST_EXPECT(isErr( env.rpc("json", "account_tx", to_string(p)), - rpcLGR_IDXS_INVALID)); + (RPC::ApiMaximumSupportedVersion == 1 ? 
rpcLGR_IDXS_INVALID + : rpcINVALID_LGR_RANGE))); } // Ledger index min only @@ -190,7 +192,8 @@ class AccountTx_test : public beast::unit_test::suite p[jss::ledger_index_min] = env.current()->info().seq; BEAST_EXPECT(isErr( env.rpc("json", "account_tx", to_string(p)), - rpcLGR_IDXS_INVALID)); + (RPC::ApiMaximumSupportedVersion == 1 ? rpcLGR_IDXS_INVALID + : rpcINVALID_LGR_RANGE))); } // Ledger index max only diff --git a/src/test/rpc/Feature_test.cpp b/src/test/rpc/Feature_test.cpp index 731e3560dfc..5773f756667 100644 --- a/src/test/rpc/Feature_test.cpp +++ b/src/test/rpc/Feature_test.cpp @@ -218,10 +218,9 @@ class Feature_test : public beast::unit_test::suite BEAST_EXPECTS( feature.isMember(jss::validations), feature[jss::name].asString() + " validations"); - BEAST_EXPECTS( - feature.isMember(jss::vote), - feature[jss::name].asString() + " vote"); - BEAST_EXPECT(feature[jss::vote] == 256); + BEAST_EXPECT(feature[jss::count] == 1); + BEAST_EXPECT(feature[jss::threshold] == 1); + BEAST_EXPECT(feature[jss::validations] == 1); BEAST_EXPECT(feature[jss::majority] == 2740); } } diff --git a/src/test/rpc/LedgerRequestRPC_test.cpp b/src/test/rpc/LedgerRequestRPC_test.cpp index c7d84009969..eab8280517b 100644 --- a/src/test/rpc/LedgerRequestRPC_test.cpp +++ b/src/test/rpc/LedgerRequestRPC_test.cpp @@ -21,6 +21,7 @@ #include #include #include +#include #include namespace ripple { @@ -297,10 +298,19 @@ class LedgerRequestRPC_test : public beast::unit_test::suite // date check to trigger env.timeKeeper().adjustCloseTime(weeks{3}); result = env.rpc("ledger_request", "1")[jss::result]; - BEAST_EXPECT(result[jss::error] == "noCurrent"); BEAST_EXPECT(result[jss::status] == "error"); - BEAST_EXPECT( - result[jss::error_message] == "Current ledger is unavailable."); + if (RPC::ApiMaximumSupportedVersion == 1) + { + BEAST_EXPECT(result[jss::error] == "noCurrent"); + BEAST_EXPECT( + result[jss::error_message] == "Current ledger is unavailable."); + } + else + { + 
BEAST_EXPECT(result[jss::error] == "notSynced"); + BEAST_EXPECT( + result[jss::error_message] == "Not synced to the network."); + } } void diff --git a/src/test/rpc/RPCCall_test.cpp b/src/test/rpc/RPCCall_test.cpp index 174feb0b7f8..b0292020398 100644 --- a/src/test/rpc/RPCCall_test.cpp +++ b/src/test/rpc/RPCCall_test.cpp @@ -1437,7 +1437,8 @@ static RPCCallTestData const rpcCallTestArray[] = { __LINE__, {"account_tx", "rHb9CJAWyB4rj91VRWn96DkukG4bwdtyTh", "580", "579"}, RPCCallTestData::no_exception, - R"({ + RPC::ApiMaximumSupportedVersion == 1 ? + R"({ "method" : "account_tx", "params" : [ { @@ -1446,6 +1447,17 @@ static RPCCallTestData const rpcCallTestArray[] = { "error_message" : "Ledger indexes invalid." } ] + })" + : + R"({ + "method" : "account_tx", + "params" : [ + { + "error" : "notSynced", + "error_code" : 55, + "error_message" : "Not synced to the network." + } + ] })", }, { @@ -5905,7 +5917,8 @@ static RPCCallTestData const rpcCallTestArray[] = { __LINE__, {"tx_account", "rHb9CJAWyB4rj91VRWn96DkukG4bwdtyTh", "580", "579"}, RPCCallTestData::no_exception, - R"({ + RPC::ApiMaximumSupportedVersion == 1 ? + R"({ "method" : "tx_account", "params" : [ { @@ -5914,6 +5927,17 @@ static RPCCallTestData const rpcCallTestArray[] = { "error_message" : "Ledger indexes invalid." } ] + })" + : + R"({ + "method" : "tx_account", + "params" : [ + { + "error" : "notSynced", + "error_code" : 55, + "error_message" : "Not synced to the network." 
+ } + ] })", }, { diff --git a/src/test/rpc/ValidatorRPC_test.cpp b/src/test/rpc/ValidatorRPC_test.cpp index 51050c679d8..a43eba2932a 100644 --- a/src/test/rpc/ValidatorRPC_test.cpp +++ b/src/test/rpc/ValidatorRPC_test.cpp @@ -134,6 +134,39 @@ class ValidatorRPC_test : public beast::unit_test::suite auto const jrr = env.rpc("validator_list_sites")[jss::result]; BEAST_EXPECT(jrr[jss::validator_sites].size() == 0); } + // Negative UNL empty + { + auto const jrr = env.rpc("validators")[jss::result]; + BEAST_EXPECT(jrr[jss::NegativeUNL].isNull()); + } + // Negative UNL update + { + hash_set disabledKeys; + auto k1 = randomKeyPair(KeyType::ed25519).first; + auto k2 = randomKeyPair(KeyType::ed25519).first; + disabledKeys.insert(k1); + disabledKeys.insert(k2); + env.app().validators().setNegativeUnl(disabledKeys); + + auto const jrr = env.rpc("validators")[jss::result]; + auto& jrrnUnl = jrr[jss::NegativeUNL]; + auto jrrnUnlSize = jrrnUnl.size(); + BEAST_EXPECT(jrrnUnlSize == 2); + for (std::uint32_t x = 0; x < jrrnUnlSize; ++x) + { + auto parsedKey = parseBase58( + TokenType::NodePublic, jrrnUnl[x].asString()); + BEAST_EXPECT(parsedKey); + if (parsedKey) + BEAST_EXPECT( + disabledKeys.find(*parsedKey) != disabledKeys.end()); + } + + disabledKeys.clear(); + env.app().validators().setNegativeUnl(disabledKeys); + auto const jrrUpdated = env.rpc("validators")[jss::result]; + BEAST_EXPECT(jrrUpdated[jss::NegativeUNL].isNull()); + } } void diff --git a/src/test/server/Server_test.cpp b/src/test/server/Server_test.cpp index ef132d2eb0c..521661c5895 100644 --- a/src/test/server/Server_test.cpp +++ b/src/test/server/Server_test.cpp @@ -31,6 +31,7 @@ #include #include #include +#include #include #include #include @@ -375,60 +376,6 @@ class Server_test : public beast::unit_test::suite pass(); } - /** - * @brief sink for writing all log messages to a stringstream - */ - class CaptureSink : public beast::Journal::Sink - { - std::stringstream& strm_; - - public: - CaptureSink( - 
beast::severities::Severity threshold, - std::stringstream& strm) - : beast::Journal::Sink(threshold, false), strm_(strm) - { - } - - void - write(beast::severities::Severity level, std::string const& text) - override - { - strm_ << text; - } - }; - - /** - * @brief Log manager for CaptureSinks. This class holds the stream - * instance that is written to by the sinks. Upon destruction, all - * contents of the stream are assigned to the string specified in the - * ctor - */ - class CaptureLogs : public Logs - { - std::stringstream strm_; - std::string& result_; - - public: - explicit CaptureLogs(std::string& result) - : Logs(beast::severities::kInfo), result_(result) - { - } - - ~CaptureLogs() override - { - result_ = strm_.str(); - } - - std::unique_ptr - makeSink( - std::string const& partition, - beast::severities::Severity threshold) override - { - return std::make_unique(threshold, strm_); - } - }; - void testBadConfig() { @@ -444,7 +391,7 @@ class Server_test : public beast::unit_test::suite (*cfg).deprecatedClearSection("port_rpc"); return cfg; }), - std::make_unique(messages)}; + std::make_unique(&messages)}; }); BEAST_EXPECT( messages.find("Missing 'ip' in [port_rpc]") != std::string::npos); @@ -457,7 +404,7 @@ class Server_test : public beast::unit_test::suite (*cfg)["port_rpc"].set("ip", getEnvLocalhostAddr()); return cfg; }), - std::make_unique(messages)}; + std::make_unique(&messages)}; }); BEAST_EXPECT( messages.find("Missing 'port' in [port_rpc]") != std::string::npos); @@ -471,7 +418,7 @@ class Server_test : public beast::unit_test::suite (*cfg)["port_rpc"].set("port", "0"); return cfg; }), - std::make_unique(messages)}; + std::make_unique(&messages)}; }); BEAST_EXPECT( messages.find("Invalid value '0' for key 'port' in [port_rpc]") != @@ -487,7 +434,7 @@ class Server_test : public beast::unit_test::suite (*cfg)["port_rpc"].set("protocol", ""); return cfg; }), - std::make_unique(messages)}; + std::make_unique(&messages)}; }); BEAST_EXPECT( 
messages.find("Missing 'protocol' in [port_rpc]") != @@ -522,7 +469,7 @@ class Server_test : public beast::unit_test::suite (*cfg)["port_ws"].set("admin", getEnvLocalhostAddr()); return cfg; }), - std::make_unique(messages)}; + std::make_unique(&messages)}; }); BEAST_EXPECT( messages.find("Required section [server] is missing") != @@ -548,7 +495,7 @@ class Server_test : public beast::unit_test::suite (*cfg)["server"].append("port_ws"); return cfg; }), - std::make_unique(messages)}; + std::make_unique(&messages)}; }); BEAST_EXPECT( messages.find("Missing section: [port_peer]") != std::string::npos); diff --git a/src/test/shamap/FetchPack_test.cpp b/src/test/shamap/FetchPack_test.cpp index eaa4dbc79c3..348e59e704a 100644 --- a/src/test/shamap/FetchPack_test.cpp +++ b/src/test/shamap/FetchPack_test.cpp @@ -118,7 +118,7 @@ class FetchPack_test : public beast::unit_test::suite using namespace beast::severities; test::SuiteJournal journal("FetchPack_test", *this); - TestFamily f(journal); + TestNodeFamily f(journal); std::shared_ptr t1(std::make_shared
(SHAMapType::FREE, f)); pass(); diff --git a/src/test/shamap/SHAMapSync_test.cpp b/src/test/shamap/SHAMapSync_test.cpp index 78f295d2fc5..61b4eaae150 100644 --- a/src/test/shamap/SHAMapSync_test.cpp +++ b/src/test/shamap/SHAMapSync_test.cpp @@ -91,7 +91,7 @@ class SHAMapSync_test : public beast::unit_test::suite using namespace beast::severities; test::SuiteJournal journal("SHAMapSync_test", *this); - TestFamily f(journal), f2(journal); + TestNodeFamily f(journal), f2(journal); SHAMap source(SHAMapType::FREE, f); SHAMap destination(SHAMapType::FREE, f2); @@ -140,7 +140,6 @@ class SHAMapSync_test : public beast::unit_test::suite .addRootNode( source.getHash(), makeSlice(*gotNodes_a.begin()), - snfWIRE, nullptr) .isGood()); } diff --git a/src/test/shamap/SHAMap_test.cpp b/src/test/shamap/SHAMap_test.cpp index ae5ae575032..49d5d5638ec 100644 --- a/src/test/shamap/SHAMap_test.cpp +++ b/src/test/shamap/SHAMap_test.cpp @@ -139,7 +139,7 @@ class SHAMap_test : public beast::unit_test::suite else testcase("add/traverse unbacked"); - tests::TestFamily f(journal); + tests::TestNodeFamily f(journal); // h3 and h4 differ only in the leaf, same terminal node (level 19) uint256 h1, h2, h3, h4, h5; @@ -327,7 +327,7 @@ class SHAMap_test : public beast::unit_test::suite "292891fe4ef6cee585fdc6fda1e09eb4d386363158ec3321b8123e5a772c6c" "a8"); - tests::TestFamily tf{journal}; + tests::TestNodeFamily tf{journal}; SHAMap map{SHAMapType::FREE, tf}; if (!backed) map.setUnbacked(); diff --git a/src/test/shamap/common.h b/src/test/shamap/common.h index 45177da4b64..760f8ca4e82 100644 --- a/src/test/shamap/common.h +++ b/src/test/shamap/common.h @@ -29,22 +29,31 @@ namespace ripple { namespace tests { -class TestFamily : public Family +class TestNodeFamily : public Family { private: + std::unique_ptr db_; + + std::shared_ptr fbCache_; + std::shared_ptr tnCache_; + TestStopwatch clock_; NodeStore::DummyScheduler scheduler_; - TreeNodeCache treecache_; - FullBelowCache fullbelow_; RootStoppable 
parent_; - std::unique_ptr db_; - bool shardBacked_; - beast::Journal j_; + + beast::Journal const j_; public: - TestFamily(beast::Journal j) - : treecache_("TreeNodeCache", 65536, std::chrono::minutes{1}, clock_, j) - , fullbelow_("full_below", clock_) + TestNodeFamily(beast::Journal j) + : fbCache_(std::make_shared( + "App family full below cache", + clock_)) + , tnCache_(std::make_shared( + "App family tree node cache", + 65536, + std::chrono::minutes{1}, + clock_, + j)) , parent_("TestRootStoppable") , j_(j) { @@ -53,72 +62,57 @@ class TestFamily : public Family testSection.set("Path", "SHAMap_test"); db_ = NodeStore::Manager::instance().make_Database( "test", scheduler_, 1, parent_, testSection, j); - shardBacked_ = - dynamic_cast(db_.get()) != nullptr; - } - - beast::manual_clock - clock() - { - return clock_; - } - - beast::Journal const& - journal() override - { - return j_; } - FullBelowCache& - fullbelow() override + NodeStore::Database& + db() override { - return fullbelow_; + return *db_; } - FullBelowCache const& - fullbelow() const override + NodeStore::Database const& + db() const override { - return fullbelow_; + return *db_; } - TreeNodeCache& - treecache() override + beast::Journal const& + journal() override { - return treecache_; + return j_; } - TreeNodeCache const& - treecache() const override + std::shared_ptr getFullBelowCache(std::uint32_t) override { - return treecache_; + return fbCache_; } - NodeStore::Database& - db() override + std::shared_ptr getTreeNodeCache(std::uint32_t) override { - return *db_; + return tnCache_; } - NodeStore::Database const& - db() const override + void + sweep() override { - return *db_; + fbCache_->sweep(); + tnCache_->sweep(); } bool isShardBacked() const override { - return shardBacked_; + return true; } void - missing_node(std::uint32_t refNum) override + missingNode(std::uint32_t refNum) override { Throw("missing node"); } void - missing_node(uint256 const& refHash, std::uint32_t refNum) override + 
missingNode(uint256 const& refHash, std::uint32_t refNum) override { Throw("missing node"); } @@ -126,8 +120,14 @@ class TestFamily : public Family void reset() override { - fullbelow_.reset(); - treecache_.reset(); + fbCache_->reset(); + tnCache_->reset(); + } + + beast::manual_clock + clock() + { + return clock_; } };