diff --git a/.cargo/audit.toml b/.cargo/audit.toml index 9e638ec9df..c186ffc7b7 100644 --- a/.cargo/audit.toml +++ b/.cargo/audit.toml @@ -1,19 +1,9 @@ [advisories] ignore = [ - # All data structures using borsh ser/de have been reviewed for ZST's and we have found no reason for concern. - # TODO: Remove this line after borsh-rs has been upgraded to >=0.12.0 - # https://github.com/near/borsh-rs/pull/146 - "RUSTSEC-2023-0033", - - # Repo flagged as unmaintained but our clap dependency uses it - # TODO: Remove this if clap is upgraded to >=3.0.0 - "RUSTSEC-2021-0139", # We are not using a special allocator and will not suffer this issue "RUSTSEC-2021-0145", + # We are not using RSA + "RUSTSEC-2023-0071" - # PGP should be upgraded to 0.10.1 which removes the "unmaintained" dependency but we can't do this as pgp and snow - # specify different version dependencies for curve25519-dalek that are currently unresolvable. - # TODO: Check and see if pgp and snow can be resolved and if so, upgrade them and remove this ignore - "RUSTSEC-2023-0028", ] \ No newline at end of file diff --git a/.github/workflows/base_node_binaries.json b/.github/workflows/base_node_binaries.json index 983bb5db96..ce4c1c7675 100644 --- a/.github/workflows/base_node_binaries.json +++ b/.github/workflows/base_node_binaries.json @@ -32,7 +32,7 @@ }, { "name": "macos-arm64", - "runs-on": "macos-12", + "runs-on": "macos-14", "rust": "stable", "target": "aarch64-apple-darwin", "cross": false, diff --git a/.github/workflows/base_node_binaries.yml b/.github/workflows/base_node_binaries.yml index b0aa019e03..d7b4f0828e 100644 --- a/.github/workflows/base_node_binaries.yml +++ b/.github/workflows/base_node_binaries.yml @@ -19,13 +19,14 @@ name: Build Matrix of Binaries env: TBN_FILENAME: "tari_suite" TBN_BUNDLE_ID_BASE: "com.tarilabs.pkg" + TBN_SIG_FN: "sha256-unsigned.txt" toolchain: nightly-2023-06-04 matrix-json-file: ".github/workflows/base_node_binaries.json" CARGO_HTTP_MULTIPLEXING: false 
CARGO_UNSTABLE_SPARSE_REGISTRY: true CARGO: cargo - # CARGO_OPTIONS: "--verbose" CARGO_OPTIONS: "--release" + CARGO_CACHE: true concurrency: group: ${{ github.workflow }}-${{ github.ref }} @@ -98,9 +99,12 @@ jobs: - name: Declare TestNet for tags id: set-tari-network if: ${{ startsWith(github.ref, 'refs/tags/v') }} + env: + GHA_NETWORK: ${{ github.ref_name }} + # GHA_NETWORK: "v1.0.0-rc.4" shell: bash run: | - source buildtools/multinet_envs.sh ${{ github.ref_name }} + source buildtools/multinet_envs.sh ${{ env.GHA_NETWORK }} echo ${TARI_NETWORK} echo ${TARI_NETWORK_DIR} echo "TARI_NETWORK=${TARI_NETWORK}" >> $GITHUB_ENV @@ -180,6 +184,16 @@ jobs: echo "PLATFORM_SPECIFIC_DIR=osx" >> $GITHUB_ENV echo "LIB_EXT=.dylib" >> $GITHUB_ENV + # Hardcoded sdk for MacOSX on ARM64 + - name: Set environment variables - macOS - ARM64 (pin/sdk) + # Debug + if: ${{ false }} + # if: ${{ startsWith(runner.os,'macOS') && matrix.builds.name == 'macos-arm64' }} + run: | + xcrun --show-sdk-path + ls -la "/Library/Developer/CommandLineTools/SDKs/" + echo "RANDOMX_RS_CMAKE_OSX_SYSROOT=/Library/Developer/CommandLineTools/SDKs/MacOSX12.1.sdk" >> $GITHUB_ENV + - name: Set environment variables - Ubuntu if: startsWith(runner.os,'Linux') shell: bash @@ -206,7 +220,7 @@ jobs: echo "C:\Strawberry\perl\bin" >> $GITHUB_PATH - name: Cache cargo files and outputs - if: ${{ ( ! startsWith(github.ref, 'refs/tags/v') ) && ( ! matrix.builds.cross ) }} + if: ${{ ( ! startsWith(github.ref, 'refs/tags/v') ) && ( ! matrix.builds.cross ) && ( env.CARGO_CACHE ) }} uses: Swatinem/rust-cache@v2 with: key: ${{ matrix.builds.target }} @@ -269,6 +283,28 @@ jobs: fi ls -la ${{ env.MTS_SOURCE }} + - name: Build minotari_node metrics release binary for linux-x86_64 + if: ${{ startsWith(runner.os,'Linux') && ( ! 
matrix.builds.cross ) && matrix.builds.name == 'linux-x86_64' }} + shell: bash + run: | + ${{ env.CARGO }} build ${{ env.CARGO_OPTIONS }} \ + --target ${{ matrix.builds.target }} \ + --features "${{ matrix.builds.features }}, metrics" \ + --bin minotari_node \ + ${{ matrix.builds.flags }} --locked + cp -v "$GITHUB_WORKSPACE/target/${{ matrix.builds.target }}/release/minotari_node" "${{ env.MTS_SOURCE }}/minotari_node-metrics" + ls -la ${{ env.MTS_SOURCE }} + + - name: Pre/unsigned OSX Artifact upload for Archive + # Debug + if: ${{ false }} + # if: startsWith(runner.os,'macOS') + continue-on-error: true + uses: actions/upload-artifact@v4 + with: + name: ${{ env.TBN_FILENAME }}_unsigned-archive-${{ matrix.builds.name }} + path: "${{ env.MTS_SOURCE }}/*" + - name: Build the macOS pkg if: startsWith(runner.os,'macOS') continue-on-error: true @@ -302,8 +338,11 @@ jobs: "minotari_merge_mining_proxy" ) for FILE in "${FILES[@]}"; do - codesign --options runtime --force --verify --verbose --timestamp --sign "Developer ID Application: $MACOS_APPLICATION_ID" "/tmp/tari_testnet/runtime/$FILE" - codesign --verify --deep --display --verbose=4 "/tmp/tari_testnet/runtime/$FILE" + codesign --options runtime --force --verify --verbose --timestamp \ + --sign "Developer ID Application: $MACOS_APPLICATION_ID" \ + "/tmp/tari_testnet/runtime/$FILE" + codesign --verify --deep --display --verbose=4 \ + "/tmp/tari_testnet/runtime/$FILE" cp -vf "/tmp/tari_testnet/runtime/$FILE" "$GITHUB_WORKSPACE${{ env.TBN_DIST }}" done distDirPKG=$(mktemp -d -t ${{ env.TBN_FILENAME }}) @@ -360,7 +399,6 @@ jobs: cat "${{ env.TBN_FILENAME }}-${{ matrix.builds.name }}-${{ env.TARI_VERSION }}.pkg.sha256" echo "Checksum verification for pkg is " ${SHARUN} --check "${{ env.TBN_FILENAME }}-${{ matrix.builds.name }}-${{ env.TARI_VERSION }}.pkg.sha256" - cp -v *.pkg* ${{ env.MTS_SOURCE }} - name: Artifact upload for macOS pkg if: startsWith(runner.os,'macOS') @@ -416,26 +454,6 @@ jobs: name: ${{ env.TBN_FILENAME 
}}_archive-${{ matrix.builds.name }} path: "${{ github.workspace }}${{ env.TBN_DIST }}/${{ env.BINFILE }}.zip*" - - name: Prep Miner for upload - shell: bash - run: | - cd "${{ github.workspace }}${{ env.TBN_DIST }}" - cp -v "minotari_miner${{ env.TBN_EXT}}" \ - "minotari_miner-${{ matrix.builds.name }}${{ env.TBN_EXT}}" - echo "Compute miner shasum" - ${SHARUN} "minotari_miner-${{ matrix.builds.name }}${{ env.TBN_EXT}}" \ - >> "minotari_miner-${{ matrix.builds.name }}${{ env.TBN_EXT}}.sha256" - echo "Show the shasum" - cat "minotari_miner-${{ matrix.builds.name }}${{ env.TBN_EXT}}.sha256" - echo "Checksum verification for miner is " - ${SHARUN} --check "minotari_miner-${{ matrix.builds.name }}${{ env.TBN_EXT}}.sha256" - - - name: Artifact upload for Miner - uses: actions/upload-artifact@v4 - with: - name: minotari_miner-${{ matrix.builds.name }} - path: "${{ github.workspace }}${{ env.TBN_DIST }}/minotari_miner-${{ matrix.builds.name }}${{ env.TBN_EXT}}*" - - name: Prep diag-utils archive for upload continue-on-error: true shell: bash @@ -490,14 +508,14 @@ jobs: sudo apt-get update sudo apt-get --no-install-recommends --assume-yes install dos2unix ls -alhtR - if [ -f "${{ env.TBN_FILENAME }}-${{ env.TARI_VERSION }}.txt.sha256-unsigned" ] ; then - rm -fv "${{ env.TBN_FILENAME }}-${{ env.TARI_VERSION }}.txt.sha256-unsigned" + if [ -f "${{ env.TBN_FILENAME }}-${{ env.TARI_VERSION }}.${{ env.TBN_SIG_FN }}" ] ; then + rm -fv "${{ env.TBN_FILENAME }}-${{ env.TARI_VERSION }}.${{ env.TBN_SIG_FN }}" fi # Merge all sha256 files into one - find . -name "*.sha256" -type f -print | xargs cat >> "${{ env.TBN_FILENAME }}-${{ env.TARI_VERSION }}.txt.sha256-unsigned" - dos2unix --quiet "${{ env.TBN_FILENAME }}-${{ env.TARI_VERSION }}.txt.sha256-unsigned" - cat "${{ env.TBN_FILENAME }}-${{ env.TARI_VERSION }}.txt.sha256-unsigned" - sha256sum --ignore-missing --check "${{ env.TBN_FILENAME }}-${{ env.TARI_VERSION }}.txt.sha256-unsigned" + find . 
-name "*.sha256" -type f -print | xargs cat >> "${{ env.TBN_FILENAME }}-${{ env.TARI_VERSION }}.${{ env.TBN_SIG_FN }}" + dos2unix --quiet "${{ env.TBN_FILENAME }}-${{ env.TARI_VERSION }}.${{ env.TBN_SIG_FN }}" + cat "${{ env.TBN_FILENAME }}-${{ env.TARI_VERSION }}.${{ env.TBN_SIG_FN }}" + sha256sum --ignore-missing --check "${{ env.TBN_FILENAME }}-${{ env.TARI_VERSION }}.${{ env.TBN_SIG_FN }}" ls -alhtR - name: Create release @@ -512,7 +530,7 @@ jobs: replacesArtifacts: true - name: Sync assets to S3 - continue-on-error: true # Don't break if s3 upload fails + continue-on-error: true if: ${{ env.AWS_SECRET_ACCESS_KEY != '' && matrix.builds.runs-on != 'self-hosted' }} env: AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }} diff --git a/CODEOWNERS b/CODEOWNERS index 8adc61ce3b..ea022ec823 100644 --- a/CODEOWNERS +++ b/CODEOWNERS @@ -8,3 +8,7 @@ base_layer/core/src/consensus/**/* @tari-project/lead-maintainers base_layer/core/src/**/* @tari-project/tari-core-developers base_layer/key_manager/src/**/* @tari-project/tari-core-developers base_layer/wallet/src/**/* @tari-project/tari-core-developers + +# meta management requires approvals by lead maintainers +meta/crates.io/* @tari-project/lead-maintainers +meta/gpg_keys/* @tari-project/lead-maintainers diff --git a/Cargo.lock b/Cargo.lock index 222e3457d5..ba5cfb48eb 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -95,9 +95,9 @@ checksum = "4b46cbb362ab8752921c97e041f5e366ee6297bd428a31275b9fcf1e380f7299" [[package]] name = "anstream" -version = "0.6.4" +version = "0.6.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2ab91ebe16eb252986481c5b62f6098f3b698a45e34b5b98200cf20dd2484a44" +checksum = "6e2e1ebcb11de5c03c67de28a7df593d32191b44939c482e97702baaaa6ab6a5" dependencies = [ "anstyle", "anstyle-parse", @@ -115,30 +115,30 @@ checksum = "7079075b41f533b8c61d2a4d073c4676e1f8b249ff94a393b0595db304e0dd87" [[package]] name = "anstyle-parse" -version = "0.2.2" +version = "0.2.3" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "317b9a89c1868f5ea6ff1d9539a69f45dffc21ce321ac1fd1160dfa48c8e2140" +checksum = "c75ac65da39e5fe5ab759307499ddad880d724eed2f6ce5b5e8a26f4f387928c" dependencies = [ "utf8parse", ] [[package]] name = "anstyle-query" -version = "1.0.0" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ca11d4be1bab0c8bc8734a9aa7bf4ee8316d462a08c6ac5052f888fef5b494b" +checksum = "e28923312444cdd728e4738b3f9c9cac739500909bb3d3c94b43551b16517648" dependencies = [ - "windows-sys 0.48.0", + "windows-sys 0.52.0", ] [[package]] name = "anstyle-wincon" -version = "3.0.1" +version = "3.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f0699d10d2f4d628a98ee7b57b289abbc98ff3bad977cb3152709d4bf2330628" +checksum = "1cd54b81ec8d6180e24654d0b371ad22fc3dd083b6ff8ba325b72e00c87660a7" dependencies = [ "anstyle", - "windows-sys 0.48.0", + "windows-sys 0.52.0", ] [[package]] @@ -1300,7 +1300,7 @@ dependencies = [ "globwalk", "humantime 2.1.0", "inventory", - "itertools 0.12.0", + "itertools 0.12.1", "junit-report", "lazy-regex", "linked-hash-map", @@ -1321,7 +1321,7 @@ checksum = "01091e28d1f566c8b31b67948399d2efd6c0a8f6228a9785519ed7b73f7f0aef" dependencies = [ "cucumber-expressions", "inflections", - "itertools 0.12.0", + "itertools 0.12.1", "proc-macro2", "quote", "regex", @@ -2138,15 +2138,15 @@ dependencies = [ [[package]] name = "globset" -version = "0.4.13" +version = "0.4.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "759c97c1e17c55525b57192c06a267cda0ac5210b222d6b82189a2338fa1c13d" +checksum = "57da3b9b5b85bd66f31093f8c408b90a74431672542466497dcbdfdc02034be1" dependencies = [ "aho-corasick", "bstr", - "fnv", "log", - "regex", + "regex-automata 0.4.3", + "regex-syntax 0.8.2", ] [[package]] @@ -2482,17 +2482,16 @@ dependencies = [ [[package]] name = "ignore" -version = "0.4.20" +version = "0.4.22" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "dbe7873dab538a9a44ad79ede1faf5f30d49f9a5c883ddbab48bce81b64b7492" +checksum = "b46810df39e66e925525d6e38ce1e7f6e1d208f72dc39757880fcb66e2c58af1" dependencies = [ + "crossbeam-deque", "globset", - "lazy_static", "log", "memchr", - "regex", + "regex-automata 0.4.3", "same-file", - "thread_local", "walkdir", "winapi-util", ] @@ -2592,9 +2591,9 @@ checksum = "8bb03732005da905c88227371639bf1ad885cc712789c011c31c5fb3ab3ccf02" [[package]] name = "inventory" -version = "0.3.13" +version = "0.3.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0508c56cfe9bfd5dfeb0c22ab9a6abfda2f27bdca422132e494266351ed8d83c" +checksum = "f958d3d68f4167080a18141e10381e7634563984a537f2a49a30fd8e53ac5767" [[package]] name = "ipnet" @@ -2633,9 +2632,9 @@ dependencies = [ [[package]] name = "itertools" -version = "0.12.0" +version = "0.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "25db6b064527c5d482d0423354fcd07a89a2dfe07b67892e62411946db7f07b0" +checksum = "ba291022dbbd398a455acf126c1e341954079855bc60dfdda641363bd6922569" dependencies = [ "either", ] @@ -2748,9 +2747,9 @@ checksum = "a08173bc88b7955d1b3145aa561539096c421ac8debde8cbc3612ec635fee29b" [[package]] name = "libgit2-sys" -version = "0.16.1+1.7.1" +version = "0.16.2+1.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f2a2bb3680b094add03bb3732ec520ece34da31a8cd2d633d1389d0f0fb60d0c" +checksum = "ee4126d8b4ee5c9d9ea891dd875cfdc1e9d0950437179104b183d7d8a74d24e8" dependencies = [ "cc", "libc", @@ -2884,8 +2883,9 @@ checksum = "a94d21414c1f4a51209ad204c1776a3d0765002c76c6abcb602a6f09f1e881c7" [[package]] name = "log4rs" -version = "1.2.0" -source = "git+https://github.com/tari-project/log4rs.git#e1051fd3a1bec9c55d055f60176a96cf11e58505" +version = "1.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"0816135ae15bd0391cf284eab37e6e3ee0a6ee63d2ceeb659862bd8d0a984ca6" dependencies = [ "anyhow", "arc-swap", @@ -2896,11 +2896,13 @@ dependencies = [ "libc", "log", "log-mdc", + "once_cell", "parking_lot 0.12.1", + "rand", "serde", "serde-value", "serde_json", - "serde_yaml 0.8.26", + "serde_yaml", "thiserror", "thread-id", "typemap-ors", @@ -5073,18 +5075,6 @@ dependencies = [ "serde", ] -[[package]] -name = "serde_yaml" -version = "0.8.26" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "578a7433b776b56a35785ed5ce9a7e777ac0598aac5a6dd1b4b18a307c7fc71b" -dependencies = [ - "indexmap 1.9.3", - "ryu", - "serde", - "yaml-rust", -] - [[package]] name = "serde_yaml" version = "0.9.27" @@ -5261,9 +5251,9 @@ dependencies = [ [[package]] name = "snow" -version = "0.9.4" +version = "0.9.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "58021967fd0a5eeeb23b08df6cc244a4d4a5b4aec1d27c9e02fad1a58b4cd74e" +checksum = "2e87c18a6608909007e75a60e04d03eda77b601c94de1c74d9a9dc2c04ab789a" dependencies = [ "aes-gcm", "blake2", @@ -5602,7 +5592,7 @@ dependencies = [ "prost-build", "serde", "serde_json", - "serde_yaml 0.9.27", + "serde_yaml", "sha2 0.10.8", "structopt", "tari_crypto", @@ -5889,7 +5879,7 @@ version = "1.0.0-dan.5" [[package]] name = "tari_hash_domains" -version = "0.1.0" +version = "1.0.0-dan.5" dependencies = [ "tari_crypto", ] @@ -5979,7 +5969,7 @@ dependencies = [ [[package]] name = "tari_libtor" -version = "0.24.0" +version = "1.0.0-pre.8" dependencies = [ "derivative", "libtor", @@ -5988,14 +5978,13 @@ dependencies = [ "rand", "tari_common", "tari_p2p", - "tari_shutdown", "tempfile", "tor-hash-passwd", ] [[package]] name = "tari_metrics" -version = "0.1.0" +version = "1.0.0-pre.8" dependencies = [ "anyhow", "futures 0.3.29", @@ -6065,7 +6054,7 @@ dependencies = [ [[package]] name = "tari_script" -version = "0.12.0" +version = "1.0.0-pre.8" dependencies = [ "blake2", "borsh", @@ -7155,6 +7144,15 @@ 
dependencies = [ "windows-targets 0.48.5", ] +[[package]] +name = "windows-sys" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d" +dependencies = [ + "windows-targets 0.52.0", +] + [[package]] name = "windows-targets" version = "0.42.2" @@ -7185,6 +7183,21 @@ dependencies = [ "windows_x86_64_msvc 0.48.5", ] +[[package]] +name = "windows-targets" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8a18201040b24831fbb9e4eb208f8892e1f50a37feb53cc7ff887feb8f50e7cd" +dependencies = [ + "windows_aarch64_gnullvm 0.52.0", + "windows_aarch64_msvc 0.52.0", + "windows_i686_gnu 0.52.0", + "windows_i686_msvc 0.52.0", + "windows_x86_64_gnu 0.52.0", + "windows_x86_64_gnullvm 0.52.0", + "windows_x86_64_msvc 0.52.0", +] + [[package]] name = "windows_aarch64_gnullvm" version = "0.42.2" @@ -7197,6 +7210,12 @@ version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2b38e32f0abccf9987a4e3079dfb67dcd799fb61361e53e2882c3cbaf0d905d8" +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cb7764e35d4db8a7921e09562a0304bf2f93e0a51bfccee0bd0bb0b666b015ea" + [[package]] name = "windows_aarch64_msvc" version = "0.42.2" @@ -7209,6 +7228,12 @@ version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dc35310971f3b2dbbf3f0690a219f40e2d9afcf64f9ab7cc1be722937c26b4bc" +[[package]] +name = "windows_aarch64_msvc" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bbaa0368d4f1d2aaefc55b6fcfee13f41544ddf36801e793edbbfd7d7df075ef" + [[package]] name = "windows_i686_gnu" version = "0.42.2" @@ -7221,6 +7246,12 @@ version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"a75915e7def60c94dcef72200b9a8e58e5091744960da64ec734a6c6e9b3743e" +[[package]] +name = "windows_i686_gnu" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a28637cb1fa3560a16915793afb20081aba2c92ee8af57b4d5f28e4b3e7df313" + [[package]] name = "windows_i686_msvc" version = "0.42.2" @@ -7233,6 +7264,12 @@ version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8f55c233f70c4b27f66c523580f78f1004e8b5a8b659e05a4eb49d4166cca406" +[[package]] +name = "windows_i686_msvc" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ffe5e8e31046ce6230cc7215707b816e339ff4d4d67c65dffa206fd0f7aa7b9a" + [[package]] name = "windows_x86_64_gnu" version = "0.42.2" @@ -7245,6 +7282,12 @@ version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "53d40abd2583d23e4718fddf1ebec84dbff8381c07cae67ff7768bbf19c6718e" +[[package]] +name = "windows_x86_64_gnu" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3d6fa32db2bc4a2f5abeacf2b69f7992cd09dca97498da74a151a3132c26befd" + [[package]] name = "windows_x86_64_gnullvm" version = "0.42.2" @@ -7257,6 +7300,12 @@ version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0b7b52767868a23d5bab768e390dc5f5c55825b6d30b86c844ff2dc7414044cc" +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1a657e1e9d3f514745a572a6846d3c7aa7dbe1658c056ed9c3344c4109a6949e" + [[package]] name = "windows_x86_64_msvc" version = "0.42.2" @@ -7269,6 +7318,12 @@ version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ed94fce61571a4006852b7389a063ab983c02eb1bb37b47f8272ce92d06d9538" +[[package]] +name = "windows_x86_64_msvc" +version = "0.52.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "dff9641d1cd4be8d1a070daf9e3773c5f67e78b4d9d42263020c057706765c04" + [[package]] name = "winnow" version = "0.5.18" diff --git a/README.md b/README.md index ac3d27d6a2..2be0f65b07 100644 --- a/README.md +++ b/README.md @@ -23,8 +23,8 @@ The recommended running versions of each network are: | Network | Version | |-----------|-------------| | Stagenet | --- | -| Nextnet | 1.0.0-rc.2 | -| Esmeralda | 1.0.0-pre.5 | +| Nextnet | 1.0.0-rc.5 | +| Esmeralda | 1.0.0-pre.8 | For more detail about versioning, see [Release Ideology](https://github.com/tari-project/tari/blob/development/docs/src/branching_releases.md). diff --git a/applications/minotari_app_grpc/proto/base_node.proto b/applications/minotari_app_grpc/proto/base_node.proto index df0ef382ee..57aabd60f8 100644 --- a/applications/minotari_app_grpc/proto/base_node.proto +++ b/applications/minotari_app_grpc/proto/base_node.proto @@ -310,9 +310,9 @@ enum Sorting { message MetaData { // The current chain height, or the block number of the longest valid chain, or `None` if there is no chain - uint64 height_of_longest_chain = 1; + uint64 best_block_height = 1; // The block hash of the current tip of the longest valid chain, or `None` for an empty chain - bytes best_block = 2; + bytes best_block_hash = 2; // This is the min height this node can provide complete blocks for. A 0 here means this node is archival and can provide complete blocks for every height. uint64 pruned_height = 6; // The current geometric mean of the pow of the chain tip, or `None` if there is no chain @@ -347,6 +347,7 @@ message GetNewBlockResult{ // This is the completed block Block block = 2; bytes merge_mining_hash =3; + bytes tari_unique_id =4; } // This is the message that is returned for a miner after it asks for a new block. 
@@ -359,6 +360,7 @@ message GetNewBlockBlobResult{ bytes block_body = 3; bytes merge_mining_hash =4; bytes utxo_mr = 5; + bytes tari_unique_id =6; } // This is mining data for the miner asking for a new block diff --git a/applications/minotari_app_grpc/src/conversions/chain_metadata.rs b/applications/minotari_app_grpc/src/conversions/chain_metadata.rs index 3a5c3e7e69..5baa628e50 100644 --- a/applications/minotari_app_grpc/src/conversions/chain_metadata.rs +++ b/applications/minotari_app_grpc/src/conversions/chain_metadata.rs @@ -29,8 +29,8 @@ impl From for grpc::MetaData { let mut diff = [0u8; 32]; meta.accumulated_difficulty().to_big_endian(&mut diff); Self { - height_of_longest_chain: meta.height_of_longest_chain(), - best_block: meta.best_block().to_vec(), + best_block_height: meta.best_block_height(), + best_block_hash: meta.best_block_hash().to_vec(), pruned_height: meta.pruned_height(), accumulated_difficulty: diff.to_vec(), } diff --git a/applications/minotari_console_wallet/Cargo.toml b/applications/minotari_console_wallet/Cargo.toml index 49926fa2d2..3cdec9bb8d 100644 --- a/applications/minotari_console_wallet/Cargo.toml +++ b/applications/minotari_console_wallet/Cargo.toml @@ -39,7 +39,7 @@ digest = "0.10" futures = { version = "^0.3.16", default-features = false, features = [ "alloc", ] } -log4rs = { git = "https://github.com/tari-project/log4rs.git", default_features = false, features = [ +log4rs = { version = "1.3.0", default_features = false, features = [ "config_parsing", "threshold_filter", "yaml_format", diff --git a/applications/minotari_console_wallet/src/automation/commands.rs b/applications/minotari_console_wallet/src/automation/commands.rs index 96cc7906e9..c811e4fd14 100644 --- a/applications/minotari_console_wallet/src/automation/commands.rs +++ b/applications/minotari_console_wallet/src/automation/commands.rs @@ -38,7 +38,10 @@ use minotari_app_grpc::tls::certs::{generate_self_signed_certs, print_warning, w use minotari_wallet::{ 
connectivity_service::WalletConnectivityInterface, output_manager_service::{handle::OutputManagerHandle, UtxoSelectionCriteria}, - transaction_service::handle::{TransactionEvent, TransactionServiceHandle}, + transaction_service::{ + handle::{TransactionEvent, TransactionServiceHandle}, + storage::models::WalletTransaction, + }, TransactionStage, WalletConfig, WalletSqlite, @@ -90,6 +93,8 @@ pub enum WalletCommand { DiscoverPeer, Whois, ExportUtxos, + ExportTx, + ImportTx, ExportSpentUtxos, CountUtxos, SetBaseNode, @@ -800,6 +805,34 @@ pub async fn command_runner( }, Err(e) => eprintln!("ExportUtxos error! {}", e), }, + ExportTx(args) => match transaction_service.get_any_transaction(args.tx_id.into()).await { + Ok(Some(tx)) => { + if let Some(file) = args.output_file { + if let Err(e) = write_tx_to_csv_file(tx, file) { + eprintln!("ExportTx error! {}", e); + } + } else { + println!("Tx: {:?}", tx); + } + }, + Ok(None) => { + eprintln!("ExportTx error!, No tx found ") + }, + Err(e) => eprintln!("ExportTx error! {}", e), + }, + ImportTx(args) => { + match load_tx_from_csv_file(args.input_file) { + Ok(txs) => { + for tx in txs { + match transaction_service.import_transaction(tx).await { + Ok(id) => println!("imported tx: {}", id), + Err(e) => eprintln!("Could not import tx {}", e), + }; + } + }, + Err(e) => eprintln!("ImportTx error! 
{}", e), + }; + }, ExportSpentUtxos(args) => match output_service.get_spent_outputs().await { Ok(utxos) => { let utxos: Vec<(WalletOutput, Commitment)> = @@ -1081,6 +1114,29 @@ fn write_utxos_to_csv_file(utxos: Vec<(WalletOutput, Commitment)>, file_path: Pa } Ok(()) } + +fn write_tx_to_csv_file(tx: WalletTransaction, file_path: PathBuf) -> Result<(), CommandError> { + let file = File::create(file_path).map_err(|e| CommandError::CSVFile(e.to_string()))?; + let mut csv_file = LineWriter::new(file); + let tx_string = serde_json::to_string(&tx).map_err(|e| CommandError::CSVFile(e.to_string()))?; + writeln!(csv_file, "{}", tx_string).map_err(|e| CommandError::CSVFile(e.to_string()))?; + + Ok(()) +} + +fn load_tx_from_csv_file(file_path: PathBuf) -> Result, CommandError> { + let file_contents = fs::read_to_string(file_path).map_err(|e| CommandError::CSVFile(e.to_string()))?; + let mut results = Vec::new(); + for line in file_contents.lines() { + if let Ok(tx) = serde_json::from_str(line) { + results.push(tx); + } else { + return Err(CommandError::CSVFile("Could not read json file".to_string())); + } + } + Ok(results) +} + #[allow(dead_code)] fn write_json_file, T: Serialize>(path: P, data: &T) -> Result<(), CommandError> { fs::create_dir_all(path.as_ref().parent().unwrap()).map_err(|e| CommandError::JsonFile(e.to_string()))?; @@ -1109,7 +1165,7 @@ async fn get_tip_height(wallet: &WalletSqlite) -> Option { .await .ok() .and_then(|t| t.metadata) - .map(|m| m.height_of_longest_chain), + .map(|m| m.best_block_height), None => None, } } diff --git a/applications/minotari_console_wallet/src/cli.rs b/applications/minotari_console_wallet/src/cli.rs index 40cc5c8b85..6adf8d501d 100644 --- a/applications/minotari_console_wallet/src/cli.rs +++ b/applications/minotari_console_wallet/src/cli.rs @@ -123,6 +123,8 @@ pub enum CliCommands { DiscoverPeer(DiscoverPeerArgs), Whois(WhoisArgs), ExportUtxos(ExportUtxosArgs), + ExportTx(ExportTxArgs), + ImportTx(ImportTxArgs), 
ExportSpentUtxos(ExportUtxosArgs), CountUtxos, SetBaseNode(SetBaseNodeArgs), @@ -241,6 +243,19 @@ pub struct ExportUtxosArgs { pub output_file: Option, } +#[derive(Debug, Args, Clone)] +pub struct ExportTxArgs { + pub tx_id: u64, + #[clap(short, long)] + pub output_file: Option, +} + +#[derive(Debug, Args, Clone)] +pub struct ImportTxArgs { + #[clap(short, long)] + pub input_file: PathBuf, +} + #[derive(Debug, Args, Clone)] pub struct SetBaseNodeArgs { pub public_key: UniPublicKey, diff --git a/applications/minotari_console_wallet/src/grpc/wallet_grpc_server.rs b/applications/minotari_console_wallet/src/grpc/wallet_grpc_server.rs index 1f45fa9b53..54904d9b0b 100644 --- a/applications/minotari_console_wallet/src/grpc/wallet_grpc_server.rs +++ b/applications/minotari_console_wallet/src/grpc/wallet_grpc_server.rs @@ -165,7 +165,7 @@ impl WalletGrpcServer { .wallet .db .get_chain_metadata()? - .map(|m| m.height_of_longest_chain()) + .map(|m| m.best_block_height()) .unwrap_or_default(); Ok(self.rules.consensus_constants(height)) } diff --git a/applications/minotari_console_wallet/src/init/mod.rs b/applications/minotari_console_wallet/src/init/mod.rs index d5faff3b2a..d37acb7763 100644 --- a/applications/minotari_console_wallet/src/init/mod.rs +++ b/applications/minotari_console_wallet/src/init/mod.rs @@ -22,7 +22,7 @@ #![allow(dead_code, unused)] -use std::{fs, path::PathBuf, str::FromStr, sync::Arc}; +use std::{fs, path::PathBuf, str::FromStr, sync::Arc, time::Instant}; use log::*; use minotari_app_utilities::identity_management::setup_node_identity; @@ -442,6 +442,8 @@ pub async fn init_wallet( .map_err(|e| ExitError::new(ExitCode::WalletError, format!("Error consensus manager. 
{}", e)))?; let factories = CryptoFactories::default(); + let now = Instant::now(); + let mut wallet = Wallet::start( wallet_config, config.peer_seeds.clone(), @@ -463,12 +465,11 @@ pub async fn init_wallet( WalletError::CommsInitializationError(cie) => cie.to_exit_error(), e => ExitError::new(ExitCode::WalletError, format!("Error creating Wallet Container: {}", e)), })?; - if let Some(hs) = wallet.comms.hidden_service() { - wallet - .db - .set_tor_identity(hs.tor_identity().clone()) - .map_err(|e| ExitError::new(ExitCode::WalletError, format!("Problem writing tor identity. {}", e)))?; - } + + error!( + target: LOG_TARGET, + "Wallet started in {}ms", now.elapsed().as_millis() + ); if let Some(file_name) = seed_words_file_name { let seed_words = wallet.get_seed_words(&MnemonicLanguage::English)?.join(" "); diff --git a/applications/minotari_console_wallet/src/lib.rs b/applications/minotari_console_wallet/src/lib.rs index 96525d44d1..6f14065eb6 100644 --- a/applications/minotari_console_wallet/src/lib.rs +++ b/applications/minotari_console_wallet/src/lib.rs @@ -151,7 +151,7 @@ pub fn run_wallet_with_cli( if config.wallet.use_libtor && config.wallet.p2p.transport.is_tor() { let tor = Tor::initialize()?; tor.update_comms_transport(&mut config.wallet.p2p.transport)?; - runtime.spawn(tor.run(shutdown.to_signal())); + tor.run_background(); debug!( target: LOG_TARGET, "Updated Tor comms transport: {:?}", config.wallet.p2p.transport diff --git a/applications/minotari_console_wallet/src/ui/components/base_node.rs b/applications/minotari_console_wallet/src/ui/components/base_node.rs index c7a00acdf1..561e877606 100644 --- a/applications/minotari_console_wallet/src/ui/components/base_node.rs +++ b/applications/minotari_console_wallet/src/ui/components/base_node.rs @@ -68,7 +68,7 @@ impl Component for BaseNode { OnlineStatus::Online => { let base_node_state = app_state.get_base_node_state(); if let Some(ref metadata) = base_node_state.chain_metadata { - let tip = 
metadata.height_of_longest_chain(); + let tip = metadata.best_block_height(); let synced = base_node_state.is_synced.unwrap_or_default(); let (tip_color, sync_text) = if synced { diff --git a/applications/minotari_console_wallet/src/ui/components/transactions_tab.rs b/applications/minotari_console_wallet/src/ui/components/transactions_tab.rs index 50fa7ff213..98aa436e3b 100644 --- a/applications/minotari_console_wallet/src/ui/components/transactions_tab.rs +++ b/applications/minotari_console_wallet/src/ui/components/transactions_tab.rs @@ -198,10 +198,7 @@ impl TransactionsTab { .collect(); let base_node_state = app_state.get_base_node_state(); - let chain_height = base_node_state - .chain_metadata - .as_ref() - .map(|cm| cm.height_of_longest_chain()); + let chain_height = base_node_state.chain_metadata.as_ref().map(|cm| cm.best_block_height()); let mut column0_items = Vec::new(); let mut column1_items = Vec::new(); diff --git a/applications/minotari_console_wallet/src/ui/state/app_state.rs b/applications/minotari_console_wallet/src/ui/state/app_state.rs index cc2a3b5196..2c1fd54b4a 100644 --- a/applications/minotari_console_wallet/src/ui/state/app_state.rs +++ b/applications/minotari_console_wallet/src/ui/state/app_state.rs @@ -896,12 +896,12 @@ impl AppStateInner { }); self.data.contacts = ui_contacts; + self.refresh_network_id().await?; self.updated = true; Ok(()) } pub async fn refresh_burnt_proofs_state(&mut self) -> Result<(), UiError> { - // let db_burnt_proofs = self.wallet.db.get_burnt_proofs()?; let db_burnt_proofs = self.wallet.db.fetch_burnt_proofs()?; let mut ui_proofs: Vec = vec![]; @@ -921,7 +921,43 @@ impl AppStateInner { Ok(()) } + pub async fn refresh_network_id(&mut self) -> Result<(), UiError> { + let wallet_id = WalletIdentity::new(self.wallet.comms.node_identity(), self.wallet.network.as_network()); + let eid = wallet_id.address.to_emoji_string(); + let qr_link = format!( + "tari://{}/transactions/send?tariAddress={}", + wallet_id.network, + 
wallet_id.address.to_hex() + ); + let code = QrCode::new(qr_link).unwrap(); + let image = code + .render::() + .dark_color(unicode::Dense1x2::Dark) + .light_color(unicode::Dense1x2::Light) + .build() + .lines() + .skip(1) + .fold("".to_string(), |acc, l| format!("{}{}\n", acc, l)); + let identity = MyIdentity { + tari_address: wallet_id.address.to_hex(), + network_address: wallet_id + .node_identity + .public_addresses() + .iter() + .map(|a| a.to_string()) + .collect::>() + .join(", "), + emoji_id: eid, + qr_code: image, + node_id: wallet_id.node_identity.node_id().to_string(), + }; + self.data.my_identity = identity; + self.updated = true; + Ok(()) + } + pub async fn refresh_connected_peers_state(&mut self) -> Result<(), UiError> { + self.refresh_network_id().await?; let connections = self.wallet.comms.connectivity().get_active_connections().await?; let peer_manager = self.wallet.comms.peer_manager(); let mut peers = Vec::with_capacity(connections.len()); diff --git a/applications/minotari_console_wallet/src/ui/state/wallet_event_monitor.rs b/applications/minotari_console_wallet/src/ui/state/wallet_event_monitor.rs index da5a085e23..768d9c3b17 100644 --- a/applications/minotari_console_wallet/src/ui/state/wallet_event_monitor.rs +++ b/applications/minotari_console_wallet/src/ui/state/wallet_event_monitor.rs @@ -74,13 +74,6 @@ impl WalletEventMonitor { let mut base_node_changed = wallet_connectivity.get_current_base_node_watcher(); let mut base_node_events = self.app_state_inner.read().await.get_base_node_event_stream(); - // let mut software_update_notif = self - // .app_state_inner - // .read() - // .await - // .get_software_updater() - // .new_update_notifier() - // .clone(); let mut contacts_liveness_events = self.app_state_inner.read().await.get_contacts_liveness_event_stream(); diff --git a/applications/minotari_console_wallet/src/wallet_modes.rs b/applications/minotari_console_wallet/src/wallet_modes.rs index a518eff7f5..70fe202652 100644 --- 
a/applications/minotari_console_wallet/src/wallet_modes.rs +++ b/applications/minotari_console_wallet/src/wallet_modes.rs @@ -475,6 +475,7 @@ async fn run_grpc( #[cfg(test)] mod test { + use std::path::Path; use crate::{cli::CliCommands, wallet_modes::parse_command_file}; @@ -499,6 +500,10 @@ mod test { --start-time now --message Stressing_it_a_bit...!_(from_Feeling-a-bit-Generous) \ 5c4f2a4b3f3f84e047333218a84fd24f581a9d7e4f23b78e3714e9d174427d615e + export-tx 123456789 --output-file pie.txt + + import-tx --input-file pie_this_message.txt + # End of script file " .to_string(); @@ -511,6 +516,8 @@ mod test { let mut make_it_rain = false; let mut coin_split = false; let mut discover_peer = false; + let mut export_tx = false; + let mut import_tx = false; let mut whois = false; for command in commands { match command { @@ -524,6 +531,16 @@ mod test { CliCommands::DiscoverPeer(_) => discover_peer = true, CliCommands::Whois(_) => whois = true, CliCommands::ExportUtxos(_) => {}, + CliCommands::ExportTx(args) => { + if args.tx_id == 123456789 && args.output_file == Some("pie.txt".into()) { + export_tx = true + } + }, + CliCommands::ImportTx(args) => { + if args.input_file == Path::new("pie_this_message.txt") { + import_tx = true + } + }, CliCommands::ExportSpentUtxos(_) => {}, CliCommands::CountUtxos => {}, CliCommands::SetBaseNode(_) => {}, @@ -537,6 +554,16 @@ mod test { CliCommands::CreateTlsCerts => {}, } } - assert!(get_balance && send_tari && burn_tari && make_it_rain && coin_split && discover_peer && whois); + assert!( + get_balance && + send_tari && + burn_tari && + make_it_rain && + coin_split && + discover_peer && + whois && + export_tx && + import_tx + ); } } diff --git a/applications/minotari_merge_mining_proxy/src/block_template_protocol.rs b/applications/minotari_merge_mining_proxy/src/block_template_protocol.rs index 1c44b5bc9e..04e59852e4 100644 --- a/applications/minotari_merge_mining_proxy/src/block_template_protocol.rs +++ 
b/applications/minotari_merge_mining_proxy/src/block_template_protocol.rs @@ -179,7 +179,7 @@ impl BlockTemplateProtocol<'_> { .get_tip_info(grpc::Empty {}) .await? .into_inner(); - let tip_height = tip.metadata.as_ref().map(|m| m.height_of_longest_chain).unwrap_or(0); + let tip_height = tip.metadata.as_ref().map(|m| m.best_block_height).unwrap_or(0); if height <= tip_height { warn!( diff --git a/applications/minotari_merge_mining_proxy/src/proxy.rs b/applications/minotari_merge_mining_proxy/src/proxy.rs index 9476e5bcee..4c9edf8aba 100644 --- a/applications/minotari_merge_mining_proxy/src/proxy.rs +++ b/applications/minotari_merge_mining_proxy/src/proxy.rs @@ -195,7 +195,7 @@ impl InnerService { .get_ref() .metadata .as_ref() - .map(|meta| meta.height_of_longest_chain) + .map(|meta| meta.best_block_height) .ok_or(MmProxyError::GrpcResponseMissingField("base node metadata"))?; if result.get_ref().initial_sync_achieved != self.initial_sync_achieved.load(Ordering::SeqCst) { self.initial_sync_achieved @@ -416,7 +416,7 @@ impl InnerService { self.initial_sync_achieved.store(true, Ordering::SeqCst); let msg = format!( "Initial base node sync achieved. Ready to mine at height #{}", - metadata.as_ref().map(|h| h.height_of_longest_chain).unwrap_or_default(), + metadata.as_ref().map(|h| h.best_block_height).unwrap_or_default(), ); debug!(target: LOG_TARGET, "{}", msg); println!("{}", msg); @@ -424,7 +424,7 @@ impl InnerService { } else { let msg = format!( "Initial base node sync not achieved, current height at #{} ... 
(waiting = {})", - metadata.as_ref().map(|h| h.height_of_longest_chain).unwrap_or_default(), + metadata.as_ref().map(|h| h.best_block_height).unwrap_or_default(), self.config.wait_for_initial_sync_at_startup, ); debug!(target: LOG_TARGET, "{}", msg); @@ -590,7 +590,7 @@ impl InnerService { let tip_header = client .get_header_by_hash(grpc::GetHeaderByHashRequest { - hash: chain_metadata.best_block, + hash: chain_metadata.best_block_hash, }) .await?; diff --git a/applications/minotari_miner/Cargo.toml b/applications/minotari_miner/Cargo.toml index ae5853de5f..4d4f8693d3 100644 --- a/applications/minotari_miner/Cargo.toml +++ b/applications/minotari_miner/Cargo.toml @@ -28,7 +28,7 @@ derivative = "2.2.0" futures = "0.3" hex = "0.4.2" log = { version = "0.4", features = ["std"] } -log4rs = { git = "https://github.com/tari-project/log4rs.git", default_features = false, features = ["config_parsing", "threshold_filter", "yaml_format", "console_appender", "rolling_file_appender", "compound_policy", "size_trigger", "fixed_window_roller"] } +log4rs = { version = "1.3.0", default_features = false, features = ["config_parsing", "threshold_filter", "yaml_format", "console_appender", "rolling_file_appender", "compound_policy", "size_trigger", "fixed_window_roller"] } native-tls = "0.2" num_cpus = "1.13" rand = "0.8" diff --git a/applications/minotari_miner/src/run_miner.rs b/applications/minotari_miner/src/run_miner.rs index abe74eadca..98c4823c43 100644 --- a/applications/minotari_miner/src/run_miner.rs +++ b/applications/minotari_miner/src/run_miner.rs @@ -417,7 +417,7 @@ async fn validate_tip( .get_tip_info(minotari_app_grpc::tari_rpc::Empty {}) .await? 
.into_inner(); - let longest_height = tip.clone().metadata.unwrap().height_of_longest_chain; + let longest_height = tip.clone().metadata.unwrap().best_block_height; if let Some(height) = mine_until_height { if longest_height >= height { return Err(MinerError::MineUntilHeightReached(height)); diff --git a/applications/minotari_node/Cargo.toml b/applications/minotari_node/Cargo.toml index b13cf425f4..138ad1e680 100644 --- a/applications/minotari_node/Cargo.toml +++ b/applications/minotari_node/Cargo.toml @@ -38,7 +38,7 @@ futures = { version = "^0.3.16", default-features = false, features = ["alloc"] qrcode = { version = "0.12" } log = { version = "0.4.8", features = ["std"] } log-mdc = "0.1.0" -log4rs = { git = "https://github.com/tari-project/log4rs.git", default_features = false, features = ["config_parsing", "threshold_filter", "yaml_format", "console_appender", "rolling_file_appender", "compound_policy", "size_trigger", "fixed_window_roller"] } +log4rs = { version = "1.3.0", default_features = false, features = ["config_parsing", "threshold_filter", "yaml_format", "console_appender", "rolling_file_appender", "compound_policy", "size_trigger", "fixed_window_roller"] } nom = "7.1" rustyline = "9.0" rustyline-derive = "0.5" @@ -52,7 +52,7 @@ tonic = { version = "0.8.3", features = ["tls", "tls-roots" ] } tari_metrics = { path = "../../infrastructure/metrics", optional = true, features = ["server"] } [features] -default = ["metrics", "libtor"] +default = ["libtor"] metrics = ["tari_metrics", "tari_comms/metrics"] safe = [] libtor = ["tari_libtor"] diff --git a/applications/minotari_node/src/bootstrap.rs b/applications/minotari_node/src/bootstrap.rs index c2d0ce7753..45fa169623 100644 --- a/applications/minotari_node/src/bootstrap.rs +++ b/applications/minotari_node/src/bootstrap.rs @@ -28,7 +28,14 @@ use tari_common::{ configuration::bootstrap::ApplicationType, exit_codes::{ExitCode, ExitError}, }; -use tari_comms::{peer_manager::Peer, protocol::rpc::RpcServer, 
NodeIdentity, UnspawnedCommsNode}; +use tari_comms::{ + multiaddr::{Error as MultiaddrError, Multiaddr}, + peer_manager::Peer, + protocol::rpc::RpcServer, + tor::TorIdentity, + NodeIdentity, + UnspawnedCommsNode, +}; use tari_comms_dht::Dht; use tari_core::{ base_node, @@ -79,6 +86,7 @@ pub struct BaseNodeBootstrapper<'a, B> { impl BaseNodeBootstrapper<'_, B> where B: BlockchainBackend + 'static { + #[allow(clippy::too_many_lines)] pub async fn bootstrap(self) -> Result { let mut base_node_config = self.app_config.base_node.clone(); let mut p2p_config = self.app_config.base_node.p2p.clone(); @@ -164,10 +172,33 @@ where B: BlockchainBackend + 'static let comms = comms.add_protocol_extension(mempool_protocol); let comms = Self::setup_rpc_services(comms, &handles, self.db.into(), &p2p_config); - let comms = initialization::spawn_comms_using_transport(comms, p2p_config.transport.clone()) - .await - .map_err(|e| e.to_exit_error())?; + let comms = if p2p_config.transport.transport_type == TransportType::Tor { + let path = base_node_config.tor_identity_file.clone(); + let node_id = comms.node_identity(); + let after_comms = move |identity: TorIdentity| { + let address_string = format!("/onion3/{}:{}", identity.service_id, identity.onion_port); + if let Err(e) = identity_management::save_as_json(&path, &identity) { + error!(target: LOG_TARGET, "Failed to save tor identity{:?}", e); + } + trace!(target: LOG_TARGET, "resave the tor identity {:?}", identity); + let result: Result = address_string.parse(); + if result.is_err() { + error!(target: LOG_TARGET, "Failed to parse tor identity as multiaddr{:?}", result); + return; + } + let address = result.unwrap(); + if !node_id.public_addresses().contains(&address) { + node_id.add_public_address(address); + } + }; + initialization::spawn_comms_using_transport(comms, p2p_config.transport.clone(), after_comms).await + } else { + let after_comms = |_identity| {}; + initialization::spawn_comms_using_transport(comms, 
p2p_config.transport.clone(), after_comms).await + }; + + let comms = comms.map_err(|e| e.to_exit_error())?; // Save final node identity after comms has initialized. This is required because the public_address can be // changed by comms during initialization when using tor. match p2p_config.transport.transport_type { @@ -177,10 +208,6 @@ where B: BlockchainBackend + 'static .map_err(|e| ExitError::new(ExitCode::IdentityError, e))?; }, }; - if let Some(hs) = comms.hidden_service() { - identity_management::save_as_json(&base_node_config.tor_identity_file, hs.tor_identity()) - .map_err(|e| ExitError::new(ExitCode::IdentityError, e))?; - } handles.register(comms); diff --git a/applications/minotari_node/src/commands/command/check_db.rs b/applications/minotari_node/src/commands/command/check_db.rs index b6e59ac4fe..8858bb39bd 100644 --- a/applications/minotari_node/src/commands/command/check_db.rs +++ b/applications/minotari_node/src/commands/command/check_db.rs @@ -43,12 +43,12 @@ impl CommandContext { /// Function to process the check-db command pub async fn check_db(&mut self) -> Result<(), Error> { let meta = self.node_service.get_metadata().await?; - let mut height = meta.height_of_longest_chain(); + let mut height = meta.best_block_height(); let mut missing_blocks = Vec::new(); let mut missing_headers = Vec::new(); print!("Searching for height: "); // We need to check every header, but not every block. 
- let horizon_height = meta.horizon_block_height(height); + let horizon_height = meta.pruned_height_at_given_chain_tip(height); while height > 0 { print!("{}", height); io::stdout().flush().await?; diff --git a/applications/minotari_node/src/commands/command/get_network_stats.rs b/applications/minotari_node/src/commands/command/get_network_stats.rs index 73432c9378..19c86a52e4 100644 --- a/applications/minotari_node/src/commands/command/get_network_stats.rs +++ b/applications/minotari_node/src/commands/command/get_network_stats.rs @@ -25,6 +25,7 @@ use async_trait::async_trait; use clap::Parser; use super::{CommandContext, HandleCommand}; +#[cfg(feature = "metrics")] use crate::table::Table; /// Displays network stats diff --git a/applications/minotari_node/src/commands/command/list_connections.rs b/applications/minotari_node/src/commands/command/list_connections.rs index 61b162a13c..a4c9d5343d 100644 --- a/applications/minotari_node/src/commands/command/list_connections.rs +++ b/applications/minotari_node/src/commands/command/list_connections.rs @@ -64,7 +64,7 @@ impl CommandContext { let chain_height = peer .get_metadata(1) .and_then(|v| bincode::deserialize::(v).ok()) - .map(|metadata| format!("height: {}", metadata.metadata.height_of_longest_chain())); + .map(|metadata| format!("height: {}", metadata.metadata.best_block_height())); let ua = peer.user_agent; let rpc_sessions = self diff --git a/applications/minotari_node/src/commands/command/list_peers.rs b/applications/minotari_node/src/commands/command/list_peers.rs index 9199e32fb1..ab487b7ee7 100644 --- a/applications/minotari_node/src/commands/command/list_peers.rs +++ b/applications/minotari_node/src/commands/command/list_peers.rs @@ -86,7 +86,7 @@ impl CommandContext { .get_metadata(1) .and_then(|v| bincode::deserialize::(v).ok()) { - s.push(format!("chain height: {}", metadata.metadata.height_of_longest_chain())); + s.push(format!("chain height: {}", metadata.metadata.best_block_height())); } if let 
Some(last_seen) = peer.addresses.last_seen() { diff --git a/applications/minotari_node/src/commands/command/list_validator_nodes.rs b/applications/minotari_node/src/commands/command/list_validator_nodes.rs index 5e63a02728..93d4c7dd84 100644 --- a/applications/minotari_node/src/commands/command/list_validator_nodes.rs +++ b/applications/minotari_node/src/commands/command/list_validator_nodes.rs @@ -62,13 +62,11 @@ impl CommandContext { /// Function to process the list-connections command pub async fn list_validator_nodes(&mut self, args: Args) -> Result<(), Error> { let metadata = self.blockchain_db.get_chain_metadata().await?; - let constants = self - .consensus_rules - .consensus_constants(metadata.height_of_longest_chain()); + let constants = self.consensus_rules.consensus_constants(metadata.best_block_height()); let height = args .epoch .map(|epoch| constants.epoch_to_block_height(epoch)) - .unwrap_or_else(|| metadata.height_of_longest_chain()); + .unwrap_or_else(|| metadata.best_block_height()); let current_epoch = constants.block_height_to_epoch(height); let next_epoch = VnEpoch(current_epoch.as_u64() + 1); let next_epoch_height = constants.epoch_to_block_height(next_epoch); diff --git a/applications/minotari_node/src/commands/command/period_stats.rs b/applications/minotari_node/src/commands/command/period_stats.rs index 2870989a5b..edd749d699 100644 --- a/applications/minotari_node/src/commands/command/period_stats.rs +++ b/applications/minotari_node/src/commands/command/period_stats.rs @@ -58,7 +58,7 @@ impl CommandContext { ) -> Result<(), Error> { let meta = self.node_service.get_metadata().await?; - let mut height = meta.height_of_longest_chain(); + let mut height = meta.best_block_height(); // Currently gets the stats for: tx count, hash rate estimation, target difficulty, solvetime. 
let mut results: Vec<(usize, f64, u64, u64, usize)> = Vec::new(); diff --git a/applications/minotari_node/src/commands/command/status.rs b/applications/minotari_node/src/commands/command/status.rs index e63e256b7b..f4d101a104 100644 --- a/applications/minotari_node/src/commands/command/status.rs +++ b/applications/minotari_node/src/commands/command/status.rs @@ -64,7 +64,7 @@ impl CommandContext { status_line.add_field("State", self.state_machine_info.borrow().state_info.short_desc()); let metadata = self.node_service.get_metadata().await?; - let height = metadata.height_of_longest_chain(); + let height = metadata.best_block_height(); let last_header = self .node_service .get_header(height) @@ -76,16 +76,10 @@ impl CommandContext { ); status_line.add_field( "Tip", - format!( - "{} ({})", - metadata.height_of_longest_chain(), - last_block_time.to_rfc2822() - ), + format!("{} ({})", metadata.best_block_height(), last_block_time.to_rfc2822()), ); - let constants = self - .consensus_rules - .consensus_constants(metadata.height_of_longest_chain()); + let constants = self.consensus_rules.consensus_constants(metadata.best_block_height()); let fut = self.mempool_service.get_mempool_stats(); if let Ok(mempool_stats) = time::timeout(Duration::from_secs(5), fut).await? 
{ status_line.add_field( diff --git a/applications/minotari_node/src/grpc/base_node_grpc_server.rs b/applications/minotari_node/src/grpc/base_node_grpc_server.rs index f331237a31..e01bdbe248 100644 --- a/applications/minotari_node/src/grpc/base_node_grpc_server.rs +++ b/applications/minotari_node/src/grpc/base_node_grpc_server.rs @@ -379,7 +379,7 @@ impl tari_rpc::base_node_server::BaseNode for BaseNodeGrpcServer { Status::internal(err.to_string()), )); }, - Ok(data) => data.height_of_longest_chain(), + Ok(data) => data.best_block_height(), }; let sorting: Sorting = request.sorting(); @@ -635,6 +635,23 @@ impl tari_rpc::base_node_server::BaseNode for BaseNodeGrpcServer { )) }, }; + let gen_hash = handler + .get_header(0) + .await + .map_err(|_| { + obscure_error_if_true( + report_error_flag, + Status::invalid_argument("Tari genesis block not found".to_string()), + ) + })? + .ok_or_else(|| { + obscure_error_if_true( + report_error_flag, + Status::not_found("Tari genesis block not found".to_string()), + ) + })? + .hash() + .to_vec(); // construct response let block_hash = new_block.hash().to_vec(); let mining_hash = match new_block.header.pow.pow_algo { @@ -651,6 +668,7 @@ impl tari_rpc::base_node_server::BaseNode for BaseNodeGrpcServer { block_hash, block, merge_mining_hash: mining_hash, + tari_unique_id: gen_hash, }; debug!(target: LOG_TARGET, "Sending GetNewBlock response to client"); Ok(Response::new(response)) @@ -704,6 +722,23 @@ impl tari_rpc::base_node_server::BaseNode for BaseNodeGrpcServer { PowAlgorithm::Sha3x => new_block.header.mining_hash().to_vec(), PowAlgorithm::RandomX => new_block.header.merge_mining_hash().to_vec(), }; + let gen_hash = handler + .get_header(0) + .await + .map_err(|_| { + obscure_error_if_true( + report_error_flag, + Status::invalid_argument("Tari genesis block not found".to_string()), + ) + })? + .ok_or_else(|| { + obscure_error_if_true( + report_error_flag, + Status::not_found("Tari genesis block not found".to_string()), + ) + })? 
+ .hash() + .to_vec(); let (header, block_body) = new_block.into_header_body(); let mut header_bytes = Vec::new(); @@ -718,6 +753,7 @@ impl tari_rpc::base_node_server::BaseNode for BaseNodeGrpcServer { block_body: block_body_bytes, merge_mining_hash: mining_hash, utxo_mr: header.output_mr.to_vec(), + tari_unique_id: gen_hash, }; debug!(target: LOG_TARGET, "Sending GetNewBlockBlob response to client"); Ok(Response::new(response)) diff --git a/applications/minotari_node/src/grpc/blocks.rs b/applications/minotari_node/src/grpc/blocks.rs index 76167b77d5..e8448e7a64 100644 --- a/applications/minotari_node/src/grpc/blocks.rs +++ b/applications/minotari_node/src/grpc/blocks.rs @@ -55,7 +55,7 @@ pub async fn block_heights( .get_metadata() .await .map_err(|e| Status::internal(e.to_string()))?; - let tip = metadata.height_of_longest_chain(); + let tip = metadata.best_block_height(); // Avoid overflow let height_from_tip = cmp::min(tip, from_tip); let start = cmp::max(tip - height_from_tip, 0); diff --git a/applications/minotari_node/src/lib.rs b/applications/minotari_node/src/lib.rs index 780f97ad19..7e4cc087d2 100644 --- a/applications/minotari_node/src/lib.rs +++ b/applications/minotari_node/src/lib.rs @@ -55,10 +55,9 @@ use tokio::task; use tonic::transport::{Identity, Server, ServerTlsConfig}; use crate::cli::Cli; -pub use crate::{ - config::{ApplicationConfig, BaseNodeConfig, DatabaseType}, - metrics::MetricsConfig, -}; +pub use crate::config::{ApplicationConfig, BaseNodeConfig, DatabaseType}; +#[cfg(feature = "metrics")] +pub use crate::metrics::MetricsConfig; const LOG_TARGET: &str = "minotari::base_node::app"; diff --git a/applications/minotari_node/src/main.rs b/applications/minotari_node/src/main.rs index e02f2ac9aa..60fd7b1889 100644 --- a/applications/minotari_node/src/main.rs +++ b/applications/minotari_node/src/main.rs @@ -152,7 +152,7 @@ fn main_inner() -> Result<(), ExitError> { if config.base_node.use_libtor && config.base_node.p2p.transport.is_tor() { let 
tor = Tor::initialize()?; tor.update_comms_transport(&mut config.base_node.p2p.transport)?; - runtime.spawn(tor.run(shutdown.to_signal())); + tor.run_background(); debug!( target: LOG_TARGET, "Updated Tor comms transport: {:?}", config.base_node.p2p.transport diff --git a/applications/minotari_node/src/recovery.rs b/applications/minotari_node/src/recovery.rs index e47feab973..a346e83865 100644 --- a/applications/minotari_node/src/recovery.rs +++ b/applications/minotari_node/src/recovery.rs @@ -152,7 +152,7 @@ async fn do_recovery( let max_height = source_database .get_chain_metadata() .map_err(|e| anyhow!("Could not get max chain height: {}", e))? - .height_of_longest_chain(); + .best_block_height(); // we start at height 1 let mut counter = 1; print!("Starting recovery at height: "); diff --git a/base_layer/chat_ffi/Cargo.toml b/base_layer/chat_ffi/Cargo.toml index 73e6b54c01..c886374a73 100644 --- a/base_layer/chat_ffi/Cargo.toml +++ b/base_layer/chat_ffi/Cargo.toml @@ -19,7 +19,7 @@ tari_utilities = { version = "0.7" } libc = "0.2.65" libsqlite3-sys = { version = "0.25.1", features = ["bundled"], optional = true } log = "0.4.6" -log4rs = { git = "https://github.com/tari-project/log4rs.git", features = ["console_appender", "file_appender", "yaml_format"] } +log4rs = { version = "1.3.0", features = ["console_appender", "file_appender", "yaml_format"] } thiserror = "1.0.26" tokio = "1.23" diff --git a/base_layer/common_types/src/chain_metadata.rs b/base_layer/common_types/src/chain_metadata.rs index 6c0d444a89..0ab3d7285b 100644 --- a/base_layer/common_types/src/chain_metadata.rs +++ b/base_layer/common_types/src/chain_metadata.rs @@ -25,22 +25,22 @@ use std::fmt::{Display, Error, Formatter}; use primitive_types::U256; use serde::{Deserialize, Serialize}; -use crate::types::{BlockHash, FixedHash}; +use crate::types::BlockHash; #[derive(Debug, Clone, Eq, PartialEq, Serialize, Deserialize, Hash)] pub struct ChainMetadata { - /// The current chain height, or the block 
number of the longest valid chain, or `None` if there is no chain - height_of_longest_chain: u64, + /// The current chain height, or the block number of the longest valid chain + best_block_height: u64, /// The block hash of the current tip of the longest valid chain - best_block: BlockHash, + best_block_hash: BlockHash, /// The configured number of blocks back from the tip that this database tracks. A value of 0 indicates that /// pruning mode is disabled and the node will keep full blocks from the time it was set. If pruning horizon /// was previously enabled, previously pruned blocks will remain pruned. If set from initial sync, full blocks /// are preserved from genesis (i.e. the database is in full archival mode). pruning_horizon: u64, /// The height of the pruning horizon. This indicates from what height a full block can be provided - /// (exclusive). If `pruned_height` is equal to the `height_of_longest_chain` no blocks can be - /// provided. Archival nodes wil always have an `pruned_height` of zero. + /// (exclusive). If `pruned_height` is equal to the `best_block_height` no blocks can be + /// provided. Archival nodes wil always have a `pruned_height` of zero. 
pruned_height: u64, /// The total accumulated proof of work of the longest chain accumulated_difficulty: U256, @@ -50,16 +50,16 @@ pub struct ChainMetadata { impl ChainMetadata { pub fn new( - height: u64, - hash: BlockHash, + best_block_height: u64, + best_block_hash: BlockHash, pruning_horizon: u64, pruned_height: u64, accumulated_difficulty: U256, timestamp: u64, ) -> ChainMetadata { ChainMetadata { - height_of_longest_chain: height, - best_block: hash, + best_block_height, + best_block_hash, pruning_horizon, pruned_height, accumulated_difficulty, @@ -67,37 +67,16 @@ impl ChainMetadata { } } - pub fn empty() -> ChainMetadata { - ChainMetadata { - height_of_longest_chain: 0, - best_block: FixedHash::zero(), - pruning_horizon: 0, - pruned_height: 0, - accumulated_difficulty: 0.into(), - timestamp: 0, - } - } - /// The block height at the pruning horizon, given the chain height of the network. Typically database backends /// cannot provide any block data earlier than this point. /// Zero is returned if the blockchain still hasn't reached the pruning horizon. - pub fn horizon_block_height(&self, chain_tip: u64) -> u64 { + pub fn pruned_height_at_given_chain_tip(&self, chain_tip: u64) -> u64 { match self.pruning_horizon { 0 => 0, - horizon => chain_tip.saturating_sub(horizon), + pruning_horizon => chain_tip.saturating_sub(pruning_horizon), } } - /// Set the pruning horizon to indicate that the chain is in archival mode (i.e. a pruning horizon of zero) - pub fn archival_mode(&mut self) { - self.pruning_horizon = 0; - } - - /// Set the pruning horizon - pub fn set_pruning_horizon(&mut self, pruning_horizon: u64) { - self.pruning_horizon = pruning_horizon; - } - /// The configured number of blocks back from the tip that this database tracks. A value of 0 indicates that /// pruning mode is disabled and the node will keep full blocks from the time it was set. If pruning horizon /// was previously enabled, previously pruned blocks will remain pruned. 
If set from initial sync, full blocks @@ -117,13 +96,13 @@ impl ChainMetadata { } /// Returns the height of longest chain. - pub fn height_of_longest_chain(&self) -> u64 { - self.height_of_longest_chain + pub fn best_block_height(&self) -> u64 { + self.best_block_height } /// The height of the pruning horizon. This indicates from what height a full block can be provided - /// (exclusive). If `pruned_height` is equal to the `height_of_longest_chain` no blocks can be - /// provided. Archival nodes wil always have an `pruned_height` of zero. + /// (exclusive). If `pruned_height` is equal to the `best_block_height` no blocks can be + /// provided. Archival nodes wil always have a `pruned_height` of zero. pub fn pruned_height(&self) -> u64 { self.pruned_height } @@ -132,8 +111,8 @@ impl ChainMetadata { self.accumulated_difficulty } - pub fn best_block(&self) -> &BlockHash { - &self.best_block + pub fn best_block_hash(&self) -> &BlockHash { + &self.best_block_hash } pub fn timestamp(&self) -> u64 { @@ -143,14 +122,11 @@ impl ChainMetadata { impl Display for ChainMetadata { fn fmt(&self, f: &mut Formatter<'_>) -> Result<(), Error> { - let height = self.height_of_longest_chain; - let best_block = self.best_block; - let accumulated_difficulty = self.accumulated_difficulty; - writeln!(f, "Height of longest chain: {}", height)?; - writeln!(f, "Total accumulated difficulty: {}", accumulated_difficulty)?; - writeln!(f, "Best block: {}", best_block)?; + writeln!(f, "Best block height: {}", self.best_block_height)?; + writeln!(f, "Total accumulated difficulty: {}", self.accumulated_difficulty)?; + writeln!(f, "Best block hash: {}", self.best_block_hash)?; writeln!(f, "Pruning horizon: {}", self.pruning_horizon)?; - writeln!(f, "Effective pruned height: {}", self.pruned_height)?; + writeln!(f, "Pruned height: {}", self.pruned_height)?; Ok(()) } } @@ -161,33 +137,53 @@ mod test { #[test] fn horizon_block_on_default() { - let metadata = ChainMetadata::empty(); - 
assert_eq!(metadata.horizon_block_height(0), 0); + let metadata = ChainMetadata { + best_block_height: 0, + best_block_hash: Default::default(), + pruning_horizon: 0, + pruned_height: 0, + accumulated_difficulty: Default::default(), + timestamp: 0, + }; + assert_eq!(metadata.pruned_height_at_given_chain_tip(0), 0); } #[test] fn pruned_mode() { - let mut metadata = ChainMetadata::empty(); + let mut metadata = ChainMetadata { + best_block_height: 0, + best_block_hash: Default::default(), + pruning_horizon: 0, + pruned_height: 0, + accumulated_difficulty: Default::default(), + timestamp: 0, + }; assert!(!metadata.is_pruned_node()); assert!(metadata.is_archival_node()); - metadata.set_pruning_horizon(2880); + metadata.pruning_horizon = 2880; assert!(metadata.is_pruned_node()); assert!(!metadata.is_archival_node()); - assert_eq!(metadata.horizon_block_height(0), 0); - assert_eq!(metadata.horizon_block_height(100), 0); - assert_eq!(metadata.horizon_block_height(2880), 0); - assert_eq!(metadata.horizon_block_height(2881), 1); + assert_eq!(metadata.pruned_height_at_given_chain_tip(0), 0); + assert_eq!(metadata.pruned_height_at_given_chain_tip(100), 0); + assert_eq!(metadata.pruned_height_at_given_chain_tip(2880), 0); + assert_eq!(metadata.pruned_height_at_given_chain_tip(2881), 1); } #[test] fn archival_node() { - let mut metadata = ChainMetadata::empty(); - metadata.archival_mode(); + let metadata = ChainMetadata { + best_block_height: 0, + best_block_hash: Default::default(), + pruning_horizon: 0, + pruned_height: 0, + accumulated_difficulty: Default::default(), + timestamp: 0, + }; // Chain is still empty - assert_eq!(metadata.horizon_block_height(0), 0); + assert_eq!(metadata.pruned_height_at_given_chain_tip(0), 0); // When pruning horizon is zero, the horizon block is always 0, the genesis block - assert_eq!(metadata.horizon_block_height(0), 0); - assert_eq!(metadata.horizon_block_height(100), 0); - assert_eq!(metadata.horizon_block_height(2881), 0); + 
assert_eq!(metadata.pruned_height_at_given_chain_tip(0), 0); + assert_eq!(metadata.pruned_height_at_given_chain_tip(100), 0); + assert_eq!(metadata.pruned_height_at_given_chain_tip(2881), 0); } } diff --git a/base_layer/contacts/src/chat_client/src/networking.rs b/base_layer/contacts/src/chat_client/src/networking.rs index fa84a20e9f..0dc3a0f124 100644 --- a/base_layer/contacts/src/chat_client/src/networking.rs +++ b/base_layer/contacts/src/chat_client/src/networking.rs @@ -22,14 +22,14 @@ use std::{str::FromStr, sync::Arc, time::Duration}; -use log::trace; +use log::{error, trace}; use minotari_app_utilities::{identity_management, identity_management::load_from_json}; // Re-exports pub use tari_comms::{ - multiaddr::Multiaddr, + multiaddr::{Error as MultiaddrError, Multiaddr}, peer_manager::{NodeIdentity, PeerFeatures}, }; -use tari_comms::{peer_manager::Peer, CommsNode, UnspawnedCommsNode}; +use tari_comms::{peer_manager::Peer, tor::TorIdentity, CommsNode, UnspawnedCommsNode}; use tari_contacts::contacts_service::{handle::ContactsServiceHandle, ContactsServiceInitializer}; use tari_p2p::{ comms_connector::pubsub_connector, @@ -109,10 +109,30 @@ pub async fn start( for peer in seed_peers { peer_manager.add_peer(peer).await?; } - - let comms = spawn_comms_using_transport(comms, p2p_config.transport.clone()).await?; - - // Save final node identity after comms has initialized. 
This is required because the public_address can be + let comms = if p2p_config.transport.transport_type == TransportType::Tor { + let path = config.chat_client.tor_identity_file.clone(); + let node_id = comms.node_identity(); + let after_comms = move |identity: TorIdentity| { + let address_string = format!("/onion3/{}:{}", identity.service_id, identity.onion_port); + if let Err(e) = identity_management::save_as_json(&path, &identity) { + error!(target: LOG_TARGET, "Failed to save tor identity{:?}", e); + } + let result: Result = address_string.parse(); + if result.is_err() { + error!(target: LOG_TARGET, "Failed to parse tor identity as multiaddr{:?}", result); + return; + } + let address = result.unwrap(); + trace!(target: LOG_TARGET, "resave the chat tor identity {:?}", identity); + if !node_id.public_addresses().contains(&address) { + node_id.add_public_address(address); + } + }; + spawn_comms_using_transport(comms, p2p_config.transport.clone(), after_comms).await? + } else { + let after_comms = |_identity| {}; + spawn_comms_using_transport(comms, p2p_config.transport.clone(), after_comms).await? + }; // changed by comms during initialization when using tor. match p2p_config.transport.transport_type { TransportType::Tcp => {}, // Do not overwrite TCP public_address in the base_node_id! 
@@ -121,10 +141,7 @@ pub async fn start( trace!(target: LOG_TARGET, "save chat identity file"); }, }; - if let Some(hs) = comms.hidden_service() { - identity_management::save_as_json(&config.chat_client.tor_identity_file, hs.tor_identity())?; - trace!(target: LOG_TARGET, "resave the chat tor identity {:?}", hs.tor_identity()); - } + handles.register(comms); let comms = handles.expect_handle::(); diff --git a/base_layer/core/src/base_node/chain_metadata_service/service.rs b/base_layer/core/src/base_node/chain_metadata_service/service.rs index 1253c167e4..3b0e88f082 100644 --- a/base_layer/core/src/base_node/chain_metadata_service/service.rs +++ b/base_layer/core/src/base_node/chain_metadata_service/service.rs @@ -202,7 +202,7 @@ impl ChainMetadataService { target: LOG_TARGET, "Received chain metadata from NodeId '{}' #{}, Acc_diff {}", event.node_id, - chain_metadata.height_of_longest_chain(), + chain_metadata.best_block_height(), chain_metadata.accumulated_difficulty(), ); @@ -257,8 +257,8 @@ mod test { let mut bytes = [0u8; 32]; diff.to_big_endian(&mut bytes); proto::ChainMetadata { - height_of_longest_chain: 1, - best_block: vec![ + best_block_height: 1, + best_block_hash: vec![ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, ], @@ -293,7 +293,7 @@ mod test { let (mut service, liveness_mock_state, mut base_node_receiver, _) = setup(); let mut proto_chain_metadata = create_sample_proto_chain_metadata(); - proto_chain_metadata.height_of_longest_chain = 123; + proto_chain_metadata.best_block_height = 123; let chain_metadata = proto_chain_metadata.clone().try_into().unwrap(); task::spawn(async move { @@ -311,7 +311,7 @@ mod test { unpack_enum!(LivenessRequest::SetMetadataEntry(metadata_key, data) = last_call); assert_eq!(metadata_key, MetadataKey::ChainMetadata); let chain_metadata = proto::ChainMetadata::decode(data.as_slice()).unwrap(); - assert_eq!(chain_metadata.height_of_longest_chain, 123); + 
assert_eq!(chain_metadata.best_block_height, 123); } #[tokio::test] async fn handle_liveness_event_ok() { @@ -333,8 +333,8 @@ mod test { let metadata = events_rx.recv().await.unwrap().peer_metadata().unwrap(); assert_eq!(*metadata.node_id(), node_id); assert_eq!( - metadata.claimed_chain_metadata().height_of_longest_chain(), - proto_chain_metadata.height_of_longest_chain + metadata.claimed_chain_metadata().best_block_height(), + proto_chain_metadata.best_block_height ); } diff --git a/base_layer/core/src/base_node/comms_interface/inbound_handlers.rs b/base_layer/core/src/base_node/comms_interface/inbound_handlers.rs index 4acbbff490..d5fd4f932b 100644 --- a/base_layer/core/src/base_node/comms_interface/inbound_handlers.rs +++ b/base_layer/core/src/base_node/comms_interface/inbound_handlers.rs @@ -602,15 +602,15 @@ where B: BlockchainBackend + 'static // We check the current tip and orphan status of the block because we cannot guarantee that mempool state is // correct and the mmr root calculation is only valid if the block is building on the tip. let current_meta = self.blockchain_db.get_chain_metadata().await?; - if header.prev_hash != *current_meta.best_block() { + if header.prev_hash != *current_meta.best_block_hash() { debug!( target: LOG_TARGET, "Orphaned block #{}: ({}), current tip is: #{} ({}). 
We need to fetch the complete block from peer: \ ({})", header.height, block_hash.to_hex(), - current_meta.height_of_longest_chain(), - current_meta.best_block().to_hex(), + current_meta.best_block_height(), + current_meta.best_block_hash().to_hex(), source_peer, ); #[cfg(feature = "metrics")] diff --git a/base_layer/core/src/base_node/proto/chain_metadata.proto b/base_layer/core/src/base_node/proto/chain_metadata.proto index 8f77ffac0c..bdd6061b53 100644 --- a/base_layer/core/src/base_node/proto/chain_metadata.proto +++ b/base_layer/core/src/base_node/proto/chain_metadata.proto @@ -9,14 +9,14 @@ package tari.base_node; message ChainMetadata { // The current chain height, or the block number of the longest valid chain, or `None` if there is no chain - uint64 height_of_longest_chain = 1; + uint64 best_block_height = 1; // The block hash of the current tip of the longest valid chain, or `None` for an empty chain - bytes best_block = 2; + bytes best_block_hash = 2; // The current geometric mean of the pow of the chain tip, or `None` if there is no chain bytes accumulated_difficulty = 5; // The effective height of the pruning horizon. This indicates from what height // a full block can be provided (exclusive). - // If `pruned_height` is equal to the `height_of_longest_chain` no blocks can be provided. + // If `pruned_height` is equal to the `best_block_height` no blocks can be provided. // Archival nodes wil always have an `pruned_height` of zero. 
uint64 pruned_height = 6; // Timestamp of the last block in the chain, or `None` if there is no chain diff --git a/base_layer/core/src/base_node/proto/chain_metadata.rs b/base_layer/core/src/base_node/proto/chain_metadata.rs index 702141d288..333f80aceb 100644 --- a/base_layer/core/src/base_node/proto/chain_metadata.rs +++ b/base_layer/core/src/base_node/proto/chain_metadata.rs @@ -41,23 +41,23 @@ impl TryFrom for ChainMetadata { } let accumulated_difficulty = U256::from_big_endian(&metadata.accumulated_difficulty); - let height_of_longest_chain = metadata.height_of_longest_chain; + let best_block_height = metadata.best_block_height; let pruning_horizon = if metadata.pruned_height == 0 { metadata.pruned_height } else { - height_of_longest_chain.saturating_sub(metadata.pruned_height) + best_block_height.saturating_sub(metadata.pruned_height) }; - if metadata.best_block.is_empty() { + if metadata.best_block_hash.is_empty() { return Err("Best block is missing".to_string()); } let hash: FixedHash = metadata - .best_block + .best_block_hash .try_into() .map_err(|e| format!("Malformed best block: {}", e))?; Ok(ChainMetadata::new( - height_of_longest_chain, + best_block_height, hash, pruning_horizon, metadata.pruned_height, @@ -74,8 +74,8 @@ impl From for proto::ChainMetadata { .accumulated_difficulty() .to_big_endian(&mut accumulated_difficulty); Self { - height_of_longest_chain: metadata.height_of_longest_chain(), - best_block: metadata.best_block().to_vec(), + best_block_height: metadata.best_block_height(), + best_block_hash: metadata.best_block_hash().to_vec(), pruned_height: metadata.pruned_height(), accumulated_difficulty: accumulated_difficulty.to_vec(), timestamp: metadata.timestamp(), @@ -84,7 +84,7 @@ impl From for proto::ChainMetadata { } impl proto::ChainMetadata { - pub fn height_of_longest_chain(&self) -> u64 { - self.height_of_longest_chain + pub fn best_block_height(&self) -> u64 { + self.best_block_height } } diff --git 
a/base_layer/core/src/base_node/proto/rpc.proto b/base_layer/core/src/base_node/proto/rpc.proto index bbc6aea8b3..00532116b0 100644 --- a/base_layer/core/src/base_node/proto/rpc.proto +++ b/base_layer/core/src/base_node/proto/rpc.proto @@ -57,16 +57,20 @@ message SyncKernelsRequest { } message SyncUtxosRequest { + // Start header hash to sync UTXOs from bytes start_header_hash = 1; + // End header hash to sync UTXOs to bytes end_header_hash = 2; } -message SyncUtxosResponse { - tari.types.TransactionOutput output = 1; - bytes mined_header = 2; -} -message PrunedOutput { - bytes hash = 1; +message SyncUtxosResponse { + oneof txo { + // The unspent transaction output + tari.types.TransactionOutput output = 1; + // If the TXO is spent, the commitment bytes are returned + bytes commitment = 2; + } + bytes mined_header = 3; } message SyncUtxosByBlockRequest { diff --git a/base_layer/core/src/base_node/proto/wallet_rpc.proto b/base_layer/core/src/base_node/proto/wallet_rpc.proto index e00dcce3c3..fb9c852ea3 100644 --- a/base_layer/core/src/base_node/proto/wallet_rpc.proto +++ b/base_layer/core/src/base_node/proto/wallet_rpc.proto @@ -35,27 +35,27 @@ enum TxLocation { message TxQueryResponse { TxLocation location = 1; - bytes block_hash = 2; + bytes best_block_hash = 2; uint64 confirmations = 3; bool is_synced = 4; - uint64 height_of_longest_chain = 5; + uint64 best_block_height = 5; uint64 mined_timestamp = 6; } message TxQueryBatchResponse { tari.types.Signature signature = 1; TxLocation location = 2; - bytes block_hash = 3; + bytes best_block_hash = 3; uint64 confirmations = 4; - uint64 block_height = 5; + uint64 best_block_height = 5; uint64 mined_timestamp = 6; } message TxQueryBatchResponses { repeated TxQueryBatchResponse responses = 1; bool is_synced = 2; - bytes tip_hash = 3; - uint64 height_of_longest_chain = 4; + bytes best_block_hash = 3; + uint64 best_block_height = 4; uint64 tip_mined_timestamp = 5; } diff --git 
a/base_layer/core/src/base_node/proto/wallet_rpc.rs b/base_layer/core/src/base_node/proto/wallet_rpc.rs index 6159694a11..1b92892655 100644 --- a/base_layer/core/src/base_node/proto/wallet_rpc.rs +++ b/base_layer/core/src/base_node/proto/wallet_rpc.rs @@ -128,10 +128,10 @@ impl From for proto::TxSubmissionResponse { #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct TxQueryResponse { pub location: TxLocation, - pub block_hash: Option, + pub best_block_hash: Option, pub confirmations: u64, pub is_synced: bool, - pub height_of_longest_chain: u64, + pub best_block_height: u64, pub mined_timestamp: Option, } @@ -139,9 +139,9 @@ pub struct TxQueryResponse { pub struct TxQueryBatchResponse { pub signature: Signature, pub location: TxLocation, - pub block_hash: Option, + pub best_block_hash: Option, pub confirmations: u64, - pub block_height: u64, + pub best_block_height: u64, pub mined_timestamp: Option, } @@ -192,10 +192,10 @@ impl TryFrom for TxQueryResponse { type Error = String; fn try_from(proto_response: proto::TxQueryResponse) -> Result { - let hash = if proto_response.block_hash.is_empty() { + let hash = if proto_response.best_block_hash.is_empty() { None } else { - Some(match BlockHash::try_from(proto_response.block_hash.clone()) { + Some(match BlockHash::try_from(proto_response.best_block_hash.clone()) { Ok(h) => h, Err(e) => { return Err(format!("Malformed block hash: {}", e)); @@ -213,10 +213,10 @@ impl TryFrom for TxQueryResponse { proto::TxLocation::from_i32(proto_response.location) .ok_or_else(|| "Invalid or unrecognised `TxLocation` enum".to_string())?, )?, - block_hash: hash, + best_block_hash: hash, confirmations: proto_response.confirmations, is_synced: proto_response.is_synced, - height_of_longest_chain: proto_response.height_of_longest_chain, + best_block_height: proto_response.best_block_height, mined_timestamp, }) } @@ -226,10 +226,10 @@ impl From for proto::TxQueryResponse { fn from(response: TxQueryResponse) -> Self { Self { 
location: proto::TxLocation::from(response.location) as i32, - block_hash: response.block_hash.map(|v| v.to_vec()).unwrap_or(vec![]), + best_block_hash: response.best_block_hash.map(|v| v.to_vec()).unwrap_or(vec![]), confirmations: response.confirmations, is_synced: response.is_synced, - height_of_longest_chain: response.height_of_longest_chain, + best_block_height: response.best_block_height, mined_timestamp: response.mined_timestamp.unwrap_or_default(), } } @@ -239,10 +239,10 @@ impl TryFrom for TxQueryBatchResponse { type Error = String; fn try_from(proto_response: proto::TxQueryBatchResponse) -> Result { - let hash = if proto_response.block_hash.is_empty() { + let hash = if proto_response.best_block_hash.is_empty() { None } else { - Some(match BlockHash::try_from(proto_response.block_hash.clone()) { + Some(match BlockHash::try_from(proto_response.best_block_hash.clone()) { Ok(h) => h, Err(e) => { return Err(format!("Malformed block hash: {}", e)); @@ -263,8 +263,8 @@ impl TryFrom for TxQueryBatchResponse { proto::TxLocation::from_i32(proto_response.location) .ok_or_else(|| "Invalid or unrecognised `TxLocation` enum".to_string())?, )?, - block_hash: hash, - block_height: proto_response.block_height, + best_block_hash: hash, + best_block_height: proto_response.best_block_height, confirmations: proto_response.confirmations, mined_timestamp, }) diff --git a/base_layer/core/src/base_node/rpc/service.rs b/base_layer/core/src/base_node/rpc/service.rs index 4a6d62032d..34b8a30d46 100644 --- a/base_layer/core/src/base_node/rpc/service.rs +++ b/base_layer/core/src/base_node/rpc/service.rs @@ -124,13 +124,13 @@ impl BaseNodeWalletRpcService { { None => (), Some(header) => { - let confirmations = chain_metadata.height_of_longest_chain().saturating_sub(header.height); + let confirmations = chain_metadata.best_block_height().saturating_sub(header.height); let response = TxQueryResponse { location: TxLocation::Mined as i32, - block_hash: block_hash.to_vec(), + 
best_block_hash: block_hash.to_vec(), confirmations, is_synced, - height_of_longest_chain: chain_metadata.height_of_longest_chain(), + best_block_height: chain_metadata.best_block_height(), mined_timestamp: header.timestamp.as_u64(), }; return Ok(response); @@ -148,10 +148,10 @@ impl BaseNodeWalletRpcService { { TxStorageResponse::UnconfirmedPool => TxQueryResponse { location: TxLocation::InMempool as i32, - block_hash: vec![], + best_block_hash: vec![], confirmations: 0, is_synced, - height_of_longest_chain: chain_metadata.height_of_longest_chain(), + best_block_height: chain_metadata.best_block_height(), mined_timestamp: 0, }, TxStorageResponse::ReorgPool | @@ -163,10 +163,10 @@ impl BaseNodeWalletRpcService { TxStorageResponse::NotStoredFeeTooLow | TxStorageResponse::NotStoredAlreadyMined => TxQueryResponse { location: TxLocation::NotStored as i32, - block_hash: vec![], + best_block_hash: vec![], confirmations: 0, is_synced, - height_of_longest_chain: chain_metadata.height_of_longest_chain(), + best_block_height: chain_metadata.best_block_height(), mined_timestamp: 0, }, }; @@ -311,17 +311,17 @@ impl BaseNodeWalletService for BaseNodeWalletRpc responses.push(TxQueryBatchResponse { signature: Some(SignatureProto::from(signature)), location: response.location, - block_hash: response.block_hash, + best_block_hash: response.best_block_hash, confirmations: response.confirmations, - block_height: response.height_of_longest_chain.saturating_sub(response.confirmations), + best_block_height: response.best_block_height.saturating_sub(response.confirmations), mined_timestamp: response.mined_timestamp, }); } Ok(Response::new(TxQueryBatchResponses { responses, is_synced, - tip_hash: metadata.best_block().to_vec(), - height_of_longest_chain: metadata.height_of_longest_chain(), + best_block_hash: metadata.best_block_hash().to_vec(), + best_block_height: metadata.best_block_height(), tip_mined_timestamp: metadata.timestamp(), })) } @@ -421,8 +421,8 @@ impl BaseNodeWalletService 
for BaseNodeWalletRpc .rpc_status_internal_error(LOG_TARGET)?; Ok(Response::new(UtxoQueryResponses { - best_block_height: metadata.height_of_longest_chain(), - best_block_hash: metadata.best_block().to_vec(), + best_block_height: metadata.best_block_height(), + best_block_hash: metadata.best_block_hash().to_vec(), responses: mined_info_resp .into_iter() .flatten() @@ -520,8 +520,8 @@ impl BaseNodeWalletService for BaseNodeWalletRpc .rpc_status_internal_error(LOG_TARGET)?; Ok(Response::new(QueryDeletedResponse { - best_block_height: metadata.height_of_longest_chain(), - best_block_hash: metadata.best_block().to_vec(), + best_block_height: metadata.best_block_height(), + best_block_hash: metadata.best_block_hash().to_vec(), data: return_data, })) } @@ -671,7 +671,7 @@ impl BaseNodeWalletService for BaseNodeWalletRpc .rpc_status_internal_error(LOG_TARGET)?; let stats = self .mempool() - .get_fee_per_gram_stats(count, metadata.height_of_longest_chain()) + .get_fee_per_gram_stats(count, metadata.best_block_height()) .await .rpc_status_internal_error(LOG_TARGET)?; diff --git a/base_layer/core/src/base_node/state_machine_service/states/events_and_states.rs b/base_layer/core/src/base_node/state_machine_service/states/events_and_states.rs index 5f0ef9c9f5..e44673939a 100644 --- a/base_layer/core/src/base_node/state_machine_service/states/events_and_states.rs +++ b/base_layer/core/src/base_node/state_machine_service/states/events_and_states.rs @@ -126,7 +126,7 @@ impl Display for SyncStatus { f, "Lagging behind {} peers (#{}, Difficulty: {})", sync_peers.len(), - network.height_of_longest_chain(), + network.best_block_height(), network.accumulated_difficulty(), ), UpToDate => f.write_str("UpToDate"), diff --git a/base_layer/core/src/base_node/state_machine_service/states/horizon_state_sync.rs b/base_layer/core/src/base_node/state_machine_service/states/horizon_state_sync.rs index ec17dcba56..0620e84bc3 100644 --- 
a/base_layer/core/src/base_node/state_machine_service/states/horizon_state_sync.rs +++ b/base_layer/core/src/base_node/state_machine_service/states/horizon_state_sync.rs @@ -57,26 +57,33 @@ impl HorizonStateSync { Err(err) => return err.into(), }; + let sync_peers = &mut self.sync_peers; + // Order sync peers according to accumulated difficulty + sync_peers.sort_by(|a, b| { + b.claimed_chain_metadata() + .accumulated_difficulty() + .cmp(&a.claimed_chain_metadata().accumulated_difficulty()) + }); + + // Target horizon sync height based on the last header we have synced let last_header = match shared.db.fetch_last_header().await { Ok(h) => h, Err(err) => return err.into(), }; + let target_horizon_sync_height = local_metadata.pruned_height_at_given_chain_tip(last_header.height); - let horizon_sync_height = local_metadata.horizon_block_height(last_header.height); - if local_metadata.pruned_height() >= horizon_sync_height { - info!(target: LOG_TARGET, "Horizon state was already synchronized."); + // Determine if we need to sync horizon state + if local_metadata.pruned_height() >= target_horizon_sync_height { + info!(target: LOG_TARGET, "Horizon state is already synchronized."); return StateEvent::HorizonStateSynchronized; } - - // We're already synced because we have full blocks higher than our target pruned height - if local_metadata.height_of_longest_chain() >= horizon_sync_height { + if local_metadata.best_block_height() >= target_horizon_sync_height { info!( target: LOG_TARGET, - "Tip height is higher than our pruned height. Horizon state is already synchronized." + "Our tip height is higher than our target pruned height. Horizon state is already synchronized." 
); return StateEvent::HorizonStateSynchronized; } - let sync_peers = &mut self.sync_peers; let db = shared.db.clone(); let config = shared.config.blockchain_sync_config.clone(); @@ -90,7 +97,7 @@ impl HorizonStateSync { connectivity, rules, sync_peers, - horizon_sync_height, + target_horizon_sync_height, prover, validator, ); diff --git a/base_layer/core/src/base_node/state_machine_service/states/listening.rs b/base_layer/core/src/base_node/state_machine_service/states/listening.rs index 9e030714fc..20d307fc08 100644 --- a/base_layer/core/src/base_node/state_machine_service/states/listening.rs +++ b/base_layer/core/src/base_node/state_machine_service/states/listening.rs @@ -330,8 +330,8 @@ fn determine_sync_mode( let network_tip_accum_difficulty = network.claimed_chain_metadata().accumulated_difficulty(); let local_tip_accum_difficulty = local.accumulated_difficulty(); if local_tip_accum_difficulty < network_tip_accum_difficulty { - let local_tip_height = local.height_of_longest_chain(); - let network_tip_height = network.claimed_chain_metadata().height_of_longest_chain(); + let local_tip_height = local.best_block_height(); + let network_tip_height = network.claimed_chain_metadata().best_block_height(); info!( target: LOG_TARGET, "Our local blockchain accumulated difficulty is a little behind that of the network. 
We're at block #{} \ @@ -350,7 +350,7 @@ fn determine_sync_mode( let pruned_mode = local.pruning_horizon() > 0; let pruning_horizon_check = network.claimed_chain_metadata().pruning_horizon() > 0 && network.claimed_chain_metadata().pruning_horizon() < local.pruning_horizon(); - let pruning_height_check = network.claimed_chain_metadata().pruned_height() > local.height_of_longest_chain(); + let pruning_height_check = network.claimed_chain_metadata().pruned_height() > local.best_block_height(); let sync_able_peer = match (pruned_mode, pruning_horizon_check, pruning_height_check) { (true, true, _) => { info!( @@ -366,7 +366,7 @@ fn determine_sync_mode( target: LOG_TARGET, "The remote peer is a pruned node, and it cannot supply the blocks we need. Remote pruned height # {}, current local tip #{}", network.claimed_chain_metadata().pruned_height(), - local.height_of_longest_chain(), + local.best_block_height(), ); false }, @@ -421,9 +421,9 @@ fn determine_sync_mode( // Equals "Our blockchain is up-to-date." 
}, - local.height_of_longest_chain(), + local.best_block_height(), local_tip_accum_difficulty, - network.claimed_chain_metadata().height_of_longest_chain(), + network.claimed_chain_metadata().best_block_height(), network_tip_accum_difficulty, ); UpToDate diff --git a/base_layer/core/src/base_node/state_machine_service/states/sync_decide.rs b/base_layer/core/src/base_node/state_machine_service/states/sync_decide.rs index 674af6eb33..ad853601c8 100644 --- a/base_layer/core/src/base_node/state_machine_service/states/sync_decide.rs +++ b/base_layer/core/src/base_node/state_machine_service/states/sync_decide.rs @@ -62,63 +62,63 @@ impl DecideNextSync { ); if local_metadata.pruning_horizon() > 0 { - let last_header = match shared.db.fetch_last_header().await { - Ok(h) => h, - Err(err) => return err.into(), - }; - - let horizon_sync_height = local_metadata.horizon_block_height(last_header.height); // Filter sync peers that claim to be able to provide blocks up until our pruned height - let sync_peers = self - .sync_peers + debug!(target: LOG_TARGET, "Local metadata: {}", local_metadata); + let mut sync_peers = self.sync_peers.clone(); + let sync_peers = sync_peers .drain(..) .filter(|sync_peer| { let remote_metadata = sync_peer.claimed_chain_metadata(); - remote_metadata.height_of_longest_chain() >= horizon_sync_height + debug!(target: LOG_TARGET, "Peer metadata: {}", remote_metadata); + // Must be able to provide the correct amount of full blocks past the pruned height (i.e. 
the + // pruning horizon), otherwise our horizon spec will not be met + remote_metadata.best_block_height().saturating_sub(remote_metadata.pruned_height()) >= + local_metadata.pruning_horizon() && + // Must have a better blockchain tip than us + remote_metadata.best_block_height() > local_metadata.best_block_height() }) .collect::>(); if sync_peers.is_empty() { warn!( target: LOG_TARGET, - "Unable to find any appropriate sync peers for horizon sync" + "Unable to find any appropriate sync peers for horizon sync, trying for block sync" ); - return Continue; - } - - debug!( - target: LOG_TARGET, - "Proceeding to horizon sync with {} sync peer(s) with a best latency of {:.2?}", - sync_peers.len(), - sync_peers.first().map(|p| p.latency()).unwrap_or_default() - ); - ProceedToHorizonSync(sync_peers) - } else { - // Filter sync peers that are able to provide full blocks from our current tip - let sync_peers = self - .sync_peers - .drain(..) - .filter(|sync_peer| { - sync_peer.claimed_chain_metadata().pruned_height() <= local_metadata.height_of_longest_chain() - }) - .collect::>(); - - if sync_peers.is_empty() { - warn!( + } else { + debug!( target: LOG_TARGET, - "Unable to find any appropriate sync peers for block sync" + "Proceeding to horizon sync with {} sync peer(s) with a best latency of {:.2?}", + sync_peers.len(), + sync_peers.first().map(|p| p.latency()).unwrap_or_default() ); - return Continue; + return ProceedToHorizonSync(sync_peers); } + } + + // This is not a pruned node or horizon sync is not possible, try for block sync + + // Filter sync peers that are able to provide full blocks from our current tip + let sync_peers = self + .sync_peers + .drain(..) 
+ .filter(|sync_peer| { + let remote_metadata = sync_peer.claimed_chain_metadata(); + remote_metadata.pruned_height() <= local_metadata.best_block_height() + }) + .collect::>(); - debug!( - target: LOG_TARGET, - "Proceeding to block sync with {} sync peer(s) with a best latency of {:.2?}", - sync_peers.len(), - sync_peers.first().map(|p| p.latency()).unwrap_or_default() - ); - ProceedToBlockSync(sync_peers) + if sync_peers.is_empty() { + warn!(target: LOG_TARGET, "Unable to find any appropriate sync peers for block sync"); + return Continue; } + + debug!( + target: LOG_TARGET, + "Proceeding to block sync with {} sync peer(s) with a best latency of {:.2?}", + sync_peers.len(), + sync_peers.first().map(|p| p.latency()).unwrap_or_default() + ); + ProceedToBlockSync(sync_peers) } } diff --git a/base_layer/core/src/base_node/sync/block_sync/synchronizer.rs b/base_layer/core/src/base_node/sync/block_sync/synchronizer.rs index c06325f2af..89eed1ce79 100644 --- a/base_layer/core/src/base_node/sync/block_sync/synchronizer.rs +++ b/base_layer/core/src/base_node/sync/block_sync/synchronizer.rs @@ -233,7 +233,7 @@ impl<'a, B: BlockchainBackend + 'static> BlockSynchronizer<'a, B> { let tip_header = self.db.fetch_last_header().await?; let local_metadata = self.db.get_chain_metadata().await?; - if tip_header.height <= local_metadata.height_of_longest_chain() { + if tip_header.height <= local_metadata.best_block_height() { debug!( target: LOG_TARGET, "Blocks already synchronized to height {}.", tip_header.height @@ -243,7 +243,7 @@ impl<'a, B: BlockchainBackend + 'static> BlockSynchronizer<'a, B> { let tip_hash = tip_header.hash(); let tip_height = tip_header.height; - let best_height = local_metadata.height_of_longest_chain(); + let best_height = local_metadata.best_block_height(); let chain_header = self.db.fetch_chain_header(best_height).await?; let best_full_block_hash = chain_header.accumulated_data().hash; diff --git 
a/base_layer/core/src/base_node/sync/header_sync/synchronizer.rs b/base_layer/core/src/base_node/sync/header_sync/synchronizer.rs index 9df904a4f7..12514a63bf 100644 --- a/base_layer/core/src/base_node/sync/header_sync/synchronizer.rs +++ b/base_layer/core/src/base_node/sync/header_sync/synchronizer.rs @@ -259,13 +259,12 @@ impl<'a, B: BlockchainBackend + 'static> HeaderSynchronizer<'a, B> { let best_header = self.db.fetch_last_chain_header().await?; let best_block_header = self .db - .fetch_chain_header(best_block_metadata.height_of_longest_chain()) + .fetch_chain_header(best_block_metadata.best_block_height()) .await?; let best_header_height = best_header.height(); let best_block_height = best_block_header.height(); - if best_header_height < best_block_height || - best_block_height < self.local_cached_metadata.height_of_longest_chain() + if best_header_height < best_block_height || best_block_height < self.local_cached_metadata.best_block_height() { return Err(BlockHeaderSyncError::ChainStorageError( ChainStorageError::CorruptedDatabase("Inconsistent block and header data".to_string()), @@ -301,7 +300,7 @@ impl<'a, B: BlockchainBackend + 'static> HeaderSynchronizer<'a, B> { .height() .checked_sub(split_info.reorg_steps_back) .unwrap_or_default(), - sync_peer.claimed_chain_metadata().height_of_longest_chain(), + sync_peer.claimed_chain_metadata().best_block_height(), sync_peer, ); self.synchronize_headers(sync_peer.clone(), &mut client, *split_info, max_latency) @@ -647,6 +646,11 @@ impl<'a, B: BlockchainBackend + 'static> HeaderSynchronizer<'a, B> { header.hash().to_hex(), latency ); + trace!( + target: LOG_TARGET, + "{}", + header + ); if let Some(prev_header_height) = prev_height { if header.height != prev_header_height.saturating_add(1) { warn!( @@ -694,7 +698,7 @@ impl<'a, B: BlockchainBackend + 'static> HeaderSynchronizer<'a, B> { sync_peer.add_sample(last_sync_timer.elapsed()); self.hooks.call_on_progress_header_hooks( current_height, - 
sync_peer.claimed_chain_metadata().height_of_longest_chain(), + sync_peer.claimed_chain_metadata().best_block_height(), &sync_peer, ); diff --git a/base_layer/core/src/base_node/sync/horizon_state_sync/error.rs b/base_layer/core/src/base_node/sync/horizon_state_sync/error.rs index 4f1a40ff89..6aff7e4510 100644 --- a/base_layer/core/src/base_node/sync/horizon_state_sync/error.rs +++ b/base_layer/core/src/base_node/sync/horizon_state_sync/error.rs @@ -30,6 +30,7 @@ use tari_comms::{ }; use tari_crypto::errors::RangeProofError; use tari_mmr::{error::MerkleMountainRangeError, sparse_merkle_tree::SMTError}; +use tari_utilities::ByteArrayError; use thiserror::Error; use tokio::task; @@ -97,6 +98,14 @@ pub enum HorizonSyncError { PeerNotFound, #[error("Sparse Merkle Tree error: {0}")] SMTError(#[from] SMTError), + #[error("ByteArrayError error: {0}")] + ByteArrayError(String), +} + +impl From for HorizonSyncError { + fn from(e: ByteArrayError) -> Self { + HorizonSyncError::ByteArrayError(e.to_string()) + } } impl From for HorizonSyncError { @@ -142,7 +151,8 @@ impl HorizonSyncError { err @ HorizonSyncError::ConversionError(_) | err @ HorizonSyncError::MerkleMountainRangeError(_) | err @ HorizonSyncError::FixedHashSizeError(_) | - err @ HorizonSyncError::TransactionError(_) => Some(BanReason { + err @ HorizonSyncError::TransactionError(_) | + err @ HorizonSyncError::ByteArrayError(_) => Some(BanReason { reason: format!("{}", err), ban_duration: BanPeriod::Long, }), diff --git a/base_layer/core/src/base_node/sync/horizon_state_sync/synchronizer.rs b/base_layer/core/src/base_node/sync/horizon_state_sync/synchronizer.rs index 3a4a528841..302ca53a4b 100644 --- a/base_layer/core/src/base_node/sync/horizon_state_sync/synchronizer.rs +++ b/base_layer/core/src/base_node/sync/horizon_state_sync/synchronizer.rs @@ -43,6 +43,7 @@ use crate::{ hooks::Hooks, horizon_state_sync::{HorizonSyncInfo, HorizonSyncStatus}, rpc, + rpc::BaseNodeSyncRpcClient, BlockchainSyncConfig, SyncPeer, }, 
@@ -50,13 +51,15 @@ use crate::{ chain_storage::{async_db::AsyncBlockchainDb, BlockchainBackend, ChainStorageError, MmrTree}, common::{rolling_avg::RollingAverageTime, BanPeriod}, consensus::ConsensusManager, - proto::base_node::{SyncKernelsRequest, SyncUtxosRequest, SyncUtxosResponse}, + proto::base_node::{sync_utxos_response::Txo, SyncKernelsRequest, SyncUtxosRequest, SyncUtxosResponse}, transactions::transaction_components::{ transaction_output::batch_verify_range_proofs, + OutputType, TransactionKernel, TransactionOutput, }, validation::{helpers, FinalHorizonStateValidation}, + OutputSmt, PrunedKernelMmr, }; @@ -129,7 +132,7 @@ impl<'a, B: BlockchainBackend + 'static> HorizonStateSynchronization<'a, B> { target: LOG_TARGET, "Preparing database for horizon sync to height #{}", self.horizon_sync_height ); - let header = self.db().fetch_header(self.horizon_sync_height).await?.ok_or_else(|| { + let to_header = self.db().fetch_header(self.horizon_sync_height).await?.ok_or_else(|| { ChainStorageError::ValueNotFound { entity: "Header", field: "height", @@ -139,7 +142,7 @@ impl<'a, B: BlockchainBackend + 'static> HorizonStateSynchronization<'a, B> { let mut latency_increases_counter = 0; loop { - match self.sync(&header).await { + match self.sync(&to_header).await { Ok(()) => return Ok(()), Err(err @ HorizonSyncError::AllSyncPeersExceedLatency) => { // If we don't have many sync peers to select from, return the listening state and see if we can get @@ -167,7 +170,7 @@ impl<'a, B: BlockchainBackend + 'static> HorizonStateSynchronization<'a, B> { } } - async fn sync(&mut self, header: &BlockHeader) -> Result<(), HorizonSyncError> { + async fn sync(&mut self, to_header: &BlockHeader) -> Result<(), HorizonSyncError> { let sync_peer_node_ids = self.sync_peers.iter().map(|p| p.node_id()).cloned().collect::>(); info!( target: LOG_TARGET, @@ -176,7 +179,7 @@ impl<'a, B: BlockchainBackend + 'static> HorizonStateSynchronization<'a, B> { ); let mut latency_counter = 0usize; for 
node_id in sync_peer_node_ids { - match self.connect_and_attempt_sync(&node_id, header).await { + match self.connect_and_attempt_sync(&node_id, to_header).await { Ok(_) => return Ok(()), // Try another peer Err(err) => { @@ -213,8 +216,27 @@ impl<'a, B: BlockchainBackend + 'static> HorizonStateSynchronization<'a, B> { async fn connect_and_attempt_sync( &mut self, node_id: &NodeId, - header: &BlockHeader, + to_header: &BlockHeader, ) -> Result<(), HorizonSyncError> { + // Connect + let (mut client, sync_peer) = self.connect_sync_peer(node_id).await?; + + // Perform horizon sync + debug!(target: LOG_TARGET, "Check if pruning is needed"); + self.prune_if_needed().await?; + self.sync_kernels_and_outputs(sync_peer.clone(), &mut client, to_header) + .await?; + + // Validate and finalize horizon sync + self.finalize_horizon_sync(&sync_peer).await?; + + Ok(()) + } + + async fn connect_sync_peer( + &mut self, + node_id: &NodeId, + ) -> Result<(BaseNodeSyncRpcClient, SyncPeer), HorizonSyncError> { let peer_index = self .get_sync_peer_index(node_id) .ok_or(HorizonSyncError::PeerNotFound)?; @@ -246,14 +268,9 @@ impl<'a, B: BlockchainBackend + 'static> HorizonStateSynchronization<'a, B> { max_latency: self.max_latency, }); } - debug!(target: LOG_TARGET, "Sync peer latency is {:.2?}", latency); - let sync_peer = self.sync_peers[peer_index].clone(); - self.begin_sync(sync_peer.clone(), &mut client, header).await?; - self.finalize_horizon_sync(&sync_peer).await?; - - Ok(()) + Ok((client, self.sync_peers[peer_index].clone())) } async fn dial_sync_peer(&self, node_id: &NodeId) -> Result { @@ -269,30 +286,100 @@ impl<'a, B: BlockchainBackend + 'static> HorizonStateSynchronization<'a, B> { Ok(conn) } - async fn begin_sync( + async fn sync_kernels_and_outputs( &mut self, sync_peer: SyncPeer, client: &mut rpc::BaseNodeSyncRpcClient, to_header: &BlockHeader, ) -> Result<(), HorizonSyncError> { - debug!(target: LOG_TARGET, "Initializing"); - self.initialize().await?; - + // Note: We do 
not need to rewind kernels if the sync fails due to it being validated when inserted into + // the database. Furthermore, these kernels will also be successfully removed when we need to rewind + // the blockchain for whatever reason. debug!(target: LOG_TARGET, "Synchronizing kernels"); self.synchronize_kernels(sync_peer.clone(), client, to_header).await?; debug!(target: LOG_TARGET, "Synchronizing outputs"); - self.synchronize_outputs(sync_peer, client, to_header).await?; - Ok(()) + match self.synchronize_outputs(sync_peer, client, to_header).await { + Ok(_) => Ok(()), + Err(err) => { + // We need to clean up the outputs + let _ = self.clean_up_failed_output_sync(to_header).await; + Err(err) + }, + } } - async fn initialize(&mut self) -> Result<(), HorizonSyncError> { - let db = self.db(); - let local_metadata = db.get_chain_metadata().await?; + /// We clean up a failed output sync attempt and ignore any errors that occur during the clean up process. + async fn clean_up_failed_output_sync(&mut self, to_header: &BlockHeader) { + let tip_header = if let Ok(header) = self.db.fetch_tip_header().await { + header + } else { + return; + }; + let db = self.db().clone(); + let mut txn = db.write_transaction(); + let mut current_header = to_header.clone(); + loop { + if let Ok(outputs) = self.db.fetch_outputs_in_block(current_header.hash()).await { + for (count, output) in (1..=outputs.len()).zip(outputs.iter()) { + // Note: We do not need to clean up the SMT as it was not saved in the database yet, however, we + // need to clean up the outputs + txn.prune_output_from_all_dbs( + output.hash(), + output.commitment.clone(), + output.features.output_type, + ); + if let Err(e) = txn.commit().await { + warn!( + target: LOG_TARGET, + "Clean up failed sync - prune output from all dbs for header '{}': {}", + current_header.hash(), e + ); + } + if count % 100 == 0 || count == outputs.len() { + if let Err(e) = txn.commit().await { + warn!( + target: LOG_TARGET, + "Clean up failed sync 
- commit prune outputs for header '{}': {}", + current_header.hash(), e + ); + } + } + } + } + if let Err(e) = txn.commit().await { + warn!( + target: LOG_TARGET, "Clean up failed output sync - commit delete kernels for header '{}': {}", + current_header.hash(), e + ); + } + if let Ok(header) = db.fetch_header_by_block_hash(current_header.prev_hash).await { + if let Some(previous_header) = header { + current_header = previous_header; + } else { + warn!(target: LOG_TARGET, "Could not clean up failed output sync, previous_header link missing frm db"); + break; + } + } else { + warn!( + target: LOG_TARGET, + "Could not clean up failed output sync, header '{}' not in db", + current_header.prev_hash.to_hex() + ); + break; + } + if ¤t_header.hash() == tip_header.hash() { + debug!(target: LOG_TARGET, "Finished cleaning up failed output sync"); + break; + } + } + } - let new_prune_height = cmp::min(local_metadata.height_of_longest_chain(), self.horizon_sync_height); + async fn prune_if_needed(&mut self) -> Result<(), HorizonSyncError> { + let local_metadata = self.db.get_chain_metadata().await?; + let new_prune_height = cmp::min(local_metadata.best_block_height(), self.horizon_sync_height); if local_metadata.pruned_height() < new_prune_height { debug!(target: LOG_TARGET, "Pruning block chain to height {}", new_prune_height); - db.prune_to_height(new_prune_height).await?; + self.db.prune_to_height(new_prune_height).await?; } Ok(()) @@ -328,7 +415,7 @@ impl<'a, B: BlockchainBackend + 'static> HorizonStateSynchronization<'a, B> { "Requesting kernels from {} to {} ({} remaining)", local_num_kernels, remote_num_kernels, - remote_num_kernels - local_num_kernels, + remote_num_kernels.saturating_sub(local_num_kernels), ); let latency = client.get_last_request_latency(); @@ -374,7 +461,7 @@ impl<'a, B: BlockchainBackend + 'static> HorizonStateSynchronization<'a, B> { } txn.insert_kernel_via_horizon_sync(kernel, *current_header.hash(), mmr_position); - if mmr_position == 
current_header.header().kernel_mmr_size - 1 { + if mmr_position == current_header.header().kernel_mmr_size.saturating_sub(1) { let num_kernels = kernel_hashes.len(); debug!( target: LOG_TARGET, @@ -425,9 +512,9 @@ impl<'a, B: BlockchainBackend + 'static> HorizonStateSynchronization<'a, B> { num_kernels, mmr_position + 1, end, - end - (mmr_position + 1) + end.saturating_sub(mmr_position + 1) ); - if mmr_position < end - 1 { + if mmr_position < end.saturating_sub(1) { current_header = db.fetch_chain_header(current_header.height() + 1).await?; } } @@ -471,6 +558,7 @@ impl<'a, B: BlockchainBackend + 'static> HorizonStateSynchronization<'a, B> { Ok(()) } + // Synchronize outputs, returning true if any keys were deleted from the output SMT. #[allow(clippy::too_many_lines)] async fn synchronize_outputs( &mut self, @@ -479,9 +567,26 @@ impl<'a, B: BlockchainBackend + 'static> HorizonStateSynchronization<'a, B> { to_header: &BlockHeader, ) -> Result<(), HorizonSyncError> { info!(target: LOG_TARGET, "Starting output sync from peer {}", sync_peer); + let db = self.db().clone(); + let tip_header = db.fetch_tip_header().await?; - let remote_num_outputs = to_header.output_smt_size; - self.num_outputs = remote_num_outputs; + // Estimate the number of outputs to be downloaded; this cannot be known exactly until the sync is complete. + let mut current_header = to_header.clone(); + self.num_outputs = 0; + loop { + current_header = + if let Some(previous_header) = db.fetch_header_by_block_hash(current_header.prev_hash).await? 
{ + self.num_outputs += current_header + .output_smt_size + .saturating_sub(previous_header.output_smt_size); + previous_header + } else { + break; + }; + if ¤t_header.hash() == tip_header.hash() { + break; + } + } let info = HorizonSyncInfo::new(vec![sync_peer.node_id().clone()], HorizonSyncStatus::Outputs { current: 0, @@ -490,86 +595,115 @@ impl<'a, B: BlockchainBackend + 'static> HorizonStateSynchronization<'a, B> { }); self.hooks.call_on_progress_horizon_hooks(info); - debug!( - target: LOG_TARGET, - "Requesting outputs from {}", - remote_num_outputs, - ); - let db = self.db().clone(); - - let end = remote_num_outputs; - let end_hash = to_header.hash(); - let start_hash = db.fetch_chain_header(1).await?; - let gen_block = db.fetch_chain_header(0).await?; - let latency = client.get_last_request_latency(); debug!( target: LOG_TARGET, - "Initiating output sync with peer `{}` (latency = {}ms)", + "Initiating output sync with peer `{}`, requesting ~{} outputs, tip_header height `{}`, \ + last_chain_header height `{}` (latency = {}ms)", sync_peer.node_id(), - latency.unwrap_or_default().as_millis() + self.num_outputs, + tip_header.height(), + db.fetch_last_chain_header().await?.height(), + latency.unwrap_or_default().as_millis(), ); + let start_chain_header = db.fetch_chain_header(tip_header.height() + 1).await?; let req = SyncUtxosRequest { - start_header_hash: start_hash.hash().to_vec(), - end_header_hash: end_hash.to_vec(), + start_header_hash: start_chain_header.hash().to_vec(), + end_header_hash: to_header.hash().to_vec(), }; - let mut output_stream = client.sync_utxos(req).await?; let mut txn = db.write_transaction(); - let mut utxo_counter = gen_block.header().output_smt_size; + let mut utxo_counter = 0u64; + let mut stxo_counter = 0u64; let timer = Instant::now(); let mut output_smt = db.fetch_tip_smt().await?; let mut last_sync_timer = Instant::now(); let mut avg_latency = RollingAverageTime::new(20); + let mut inputs_to_delete = Vec::new(); while let 
Some(response) = output_stream.next().await { let latency = last_sync_timer.elapsed(); avg_latency.add_sample(latency); let res: SyncUtxosResponse = response?; - utxo_counter += 1; - if utxo_counter > end { - return Err(HorizonSyncError::IncorrectResponse( - "Peer sent too many outputs".to_string(), - )); - } - let output = res - .output - .ok_or_else(|| HorizonSyncError::IncorrectResponse("Peer sent no transaction output data".into()))?; - let output_header = FixedHash::try_from(res.mined_header) + let output_header_hash = FixedHash::try_from(res.mined_header) .map_err(|_| HorizonSyncError::IncorrectResponse("Peer sent no mined header".into()))?; let current_header = self .db() - .fetch_header_by_block_hash(output_header) + .fetch_header_by_block_hash(output_header_hash) .await? .ok_or_else(|| { HorizonSyncError::IncorrectResponse("Peer sent mined header we do not know of".into()) })?; - let constants = self.rules.consensus_constants(current_header.height).clone(); - let output = TransactionOutput::try_from(output).map_err(HorizonSyncError::ConversionError)?; - trace!( + let proto_output = res + .txo + .ok_or_else(|| HorizonSyncError::IncorrectResponse("Peer sent no transaction output data".into()))?; + match proto_output { + Txo::Output(output) => { + utxo_counter += 1; + // Increase the estimate number of outputs to be downloaded (for display purposes only). 
+ if utxo_counter >= self.num_outputs { + self.num_outputs = utxo_counter + u64::from(current_header.hash() != to_header.hash()); + } + + let constants = self.rules.consensus_constants(current_header.height).clone(); + let output = TransactionOutput::try_from(output).map_err(HorizonSyncError::ConversionError)?; + debug!( target: LOG_TARGET, - "UTXO {} received from sync peer", + "UTXO `{}` received from sync peer ({} of {})", output.hash(), - ); - helpers::check_tari_script_byte_size(&output.script, constants.max_script_byte_size())?; - - batch_verify_range_proofs(&self.prover, &[&output])?; - let smt_key = NodeKey::try_from(output.commitment.as_bytes())?; - let smt_node = ValueHash::try_from(output.smt_hash(current_header.height).as_slice())?; - output_smt.insert(smt_key, smt_node)?; - txn.insert_output_via_horizon_sync( - output, - current_header.hash(), - current_header.height, - current_header.timestamp.as_u64(), - ); + utxo_counter, + self.num_outputs, + ); + helpers::check_tari_script_byte_size(&output.script, constants.max_script_byte_size())?; + + batch_verify_range_proofs(&self.prover, &[&output])?; + let smt_key = NodeKey::try_from(output.commitment.as_bytes())?; + let smt_node = ValueHash::try_from(output.smt_hash(current_header.height).as_slice())?; + output_smt.insert(smt_key, smt_node)?; + txn.insert_output_via_horizon_sync( + output, + current_header.hash(), + current_header.height, + current_header.timestamp.as_u64(), + ); - // we have checked the range proof, and we have checked that the linked to header exists. - txn.commit().await?; + // We have checked the range proof, and we have checked that the linked to header exists. + txn.commit().await?; + }, + Txo::Commitment(commitment_bytes) => { + stxo_counter += 1; + + let commitment = Commitment::from_canonical_bytes(commitment_bytes.as_slice())?; + match self + .db() + .fetch_unspent_output_hash_by_commitment(commitment.clone()) + .await? 
+ { + Some(output_hash) => { + debug!( + target: LOG_TARGET, + "STXO hash `{}` received from sync peer ({})", + output_hash, + stxo_counter, + ); + let smt_key = NodeKey::try_from(commitment_bytes.as_slice())?; + output_smt.delete(&smt_key)?; + // This will only be committed once the SMT has been verified due to rewind difficulties if + // we need to abort the sync + inputs_to_delete.push((output_hash, commitment)); + }, + None => { + return Err(HorizonSyncError::IncorrectResponse( + "Peer sent unknown commitment hash".into(), + )) + }, + } + }, + } if utxo_counter % 100 == 0 { let info = HorizonSyncInfo::new(vec![sync_peer.node_id().clone()], HorizonSyncStatus::Outputs { @@ -583,33 +717,63 @@ impl<'a, B: BlockchainBackend + 'static> HorizonStateSynchronization<'a, B> { sync_peer.add_sample(last_sync_timer.elapsed()); last_sync_timer = Instant::now(); } - if utxo_counter != end { - return Err(HorizonSyncError::IncorrectResponse( - "Peer did not send enough outputs".to_string(), - )); + // The SMT can only be verified after all outputs have been downloaded, due to the way we optimize fetching + // outputs from the sync peer. As an example: + // 1. Initial sync: + // - We request outputs from height 0 to 100 (the tranche) + // - The sync peer only returns outputs per block that would still be unspent at height 100 and all inputs + // per block. All outputs that were created and spent within the tranche are never returned. + // - For example, an output is created in block 50 and spent in block 70. It would be included in the SMT for + // headers from height 50 to 69, but due to the optimization, the sync peer would never know about it. + // 2. Consecutive sync: + // - We request outputs from height 101 to 200 (the tranche) + // - The sync peer only returns outputs per block that would still be unspent at height 200, as well as all + // inputs per block, but in this case, only those inputs that are not an output of the current tranche of + // outputs. 
Similarly, all outputs created and spent within the tranche are never returned. + // - For example, an output is created in block 110 and spent in block 180. It would be included in the SMT + // for headers from height 110 to 179, but due to the optimization, the sync peer would never know about + // it. + // 3. In both cases it would be impossible to verify the SMT per block, as we would not be able to update the + // SMT with the outputs that were created and spent within the tranche. + HorizonStateSynchronization::::check_output_smt_root_hash(&mut output_smt, to_header)?; + + // Commit in chunks to avoid locking the database for too long + let inputs_to_delete_len = inputs_to_delete.len(); + for (count, (output_hash, commitment)) in (1..=inputs_to_delete_len).zip(inputs_to_delete.into_iter()) { + txn.prune_output_from_all_dbs(output_hash, commitment, OutputType::default()); + if count % 100 == 0 || count == inputs_to_delete_len { + txn.commit().await?; + } } + // This has a very low probability of failure + db.set_tip_smt(output_smt).await?; debug!( target: LOG_TARGET, - "finished syncing UTXOs: {} downloaded in {:.2?}", - end, + "Finished syncing TXOs: {} unspent and {} spent downloaded in {:.2?}", + utxo_counter, + stxo_counter, timer.elapsed() ); + Ok(()) + } + + // Helper function to check the output SMT root hash against the expected root hash. 
+ fn check_output_smt_root_hash(output_smt: &mut OutputSmt, header: &BlockHeader) -> Result<(), HorizonSyncError> { let root = FixedHash::try_from(output_smt.hash().as_slice())?; - if root != to_header.output_mr { + if root != header.output_mr { warn!( target: LOG_TARGET, - "Final target root(#{}) did not match expected (#{})", - to_header.output_mr.to_hex(), + "Target root(#{}) did not match expected (#{})", + header.output_mr.to_hex(), root.to_hex(), ); return Err(HorizonSyncError::InvalidMrRoot { mr_tree: "UTXO SMT".to_string(), - at_height: to_header.height, - expected_hex: to_header.output_mr.to_hex(), + at_height: header.height, + expected_hex: header.output_mr.to_hex(), actual_hex: root.to_hex(), }); } - db.set_tip_smt(output_smt).await?; Ok(()) } @@ -647,7 +811,7 @@ impl<'a, B: BlockchainBackend + 'static> HorizonStateSynchronization<'a, B> { header.height(), *header.hash(), header.accumulated_data().total_accumulated_difficulty, - *metadata.best_block(), + *metadata.best_block_hash(), header.timestamp(), ) .set_pruned_height(header.height()) @@ -693,7 +857,7 @@ impl<'a, B: BlockchainBackend + 'static> HorizonStateSynchronization<'a, B> { curr_header.height(), curr_header.header().kernel_mmr_size, prev_kernel_mmr, - curr_header.header().kernel_mmr_size - 1 + curr_header.header().kernel_mmr_size.saturating_sub(1) ); trace!(target: LOG_TARGET, "Number of utxos returned: {}", utxos.len()); diff --git a/base_layer/core/src/base_node/sync/rpc/sync_utxos_task.rs b/base_layer/core/src/base_node/sync/rpc/sync_utxos_task.rs index 8b03e476a4..67890f56be 100644 --- a/base_layer/core/src/base_node/sync/rpc/sync_utxos_task.rs +++ b/base_layer/core/src/base_node/sync/rpc/sync_utxos_task.rs @@ -20,7 +20,11 @@ // WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE // USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-use std::{convert::TryInto, sync::Arc, time::Instant}; +use std::{ + convert::{TryFrom, TryInto}, + sync::Arc, + time::Instant, +}; use log::*; use tari_comms::{ @@ -28,7 +32,7 @@ use tari_comms::{ protocol::rpc::{Request, RpcStatus, RpcStatusResultExt}, utils, }; -use tari_utilities::hex::Hex; +use tari_utilities::{hex::Hex, ByteArray}; use tokio::{sync::mpsc, task}; #[cfg(feature = "metrics")] @@ -36,7 +40,8 @@ use crate::base_node::metrics; use crate::{ blocks::BlockHeader, chain_storage::{async_db::AsyncBlockchainDb, BlockchainBackend}, - proto::base_node::{SyncUtxosRequest, SyncUtxosResponse}, + proto, + proto::base_node::{sync_utxos_response::Txo, SyncUtxosRequest, SyncUtxosResponse}, }; const LOG_TARGET: &str = "c::base_node::sync_rpc::sync_utxo_task"; @@ -70,7 +75,7 @@ where B: BlockchainBackend + 'static .fetch_header_by_block_hash(start_hash) .await .rpc_status_internal_error(LOG_TARGET)? - .ok_or_else(|| RpcStatus::not_found("Start header hash is was not found"))?; + .ok_or_else(|| RpcStatus::not_found("Start header hash was not found"))?; let end_hash = msg .end_header_hash @@ -83,7 +88,7 @@ where B: BlockchainBackend + 'static .fetch_header_by_block_hash(end_hash) .await .rpc_status_internal_error(LOG_TARGET)? 
- .ok_or_else(|| RpcStatus::not_found("End header hash is was not found"))?; + .ok_or_else(|| RpcStatus::not_found("End header hash was not found"))?; if start_header.height > end_header.height { return Err(RpcStatus::bad_request(&format!( "Start header height({}) cannot be greater than the end header height({})", @@ -123,78 +128,180 @@ where B: BlockchainBackend + 'static ) -> Result<(), RpcStatus> { debug!( target: LOG_TARGET, - "Starting stream task with current_header: {}, end_header: {},", + "Starting stream task with current_header: {}, end_header: {}", current_header.hash().to_hex(), end_header.hash().to_hex(), ); + + // If this is a pruned node and outputs have been requested for an initial sync, we need to discover and send + // the outputs from the genesis block that have been pruned as well + let mut pruned_genesis_block_outputs = Vec::new(); + let metadata = self + .db + .get_chain_metadata() + .await + .rpc_status_internal_error(LOG_TARGET)?; + if current_header.height == 1 && metadata.is_pruned_node() { + let genesis_block = self.db.fetch_genesis_block(); + for output in genesis_block.block().body.outputs() { + let output_hash = output.hash(); + if self + .db + .fetch_output(output_hash) + .await + .rpc_status_internal_error(LOG_TARGET)? 
+ .is_none() + { + trace!( + target: LOG_TARGET, + "Spent genesis TXO (commitment '{}') to peer", + output.commitment.to_hex() + ); + pruned_genesis_block_outputs.push(Ok(SyncUtxosResponse { + txo: Some(Txo::Commitment(output.commitment.as_bytes().to_vec())), + mined_header: current_header.hash().to_vec(), + })); + } + } + } + + let start_header = current_header.clone(); loop { let timer = Instant::now(); let current_header_hash = current_header.hash(); - debug!( target: LOG_TARGET, - "current header = {} ({})", + "Streaming TXO(s) for block #{} ({})", current_header.height, current_header_hash.to_hex() ); - if tx.is_closed() { - debug!( - target: LOG_TARGET, - "Peer '{}' exited UTXO sync session early", self.peer_node_id - ); + debug!(target: LOG_TARGET, "Peer '{}' exited TXO sync session early", self.peer_node_id); break; } let outputs_with_statuses = self .db - .fetch_outputs_in_block_with_spend_state(current_header.hash(), Some(end_header.hash())) + .fetch_outputs_in_block_with_spend_state(current_header_hash, Some(end_header.hash())) .await .rpc_status_internal_error(LOG_TARGET)?; + if tx.is_closed() { + debug!(target: LOG_TARGET, "Peer '{}' exited TXO sync session early", self.peer_node_id); + break; + } + + let mut outputs = Vec::with_capacity(outputs_with_statuses.len()); + for (output, spent) in outputs_with_statuses { + if !spent { + match proto::types::TransactionOutput::try_from(output.clone()) { + Ok(tx_ouput) => { + trace!( + target: LOG_TARGET, + "Unspent TXO (commitment '{}') to peer", + output.commitment.to_hex() + ); + outputs.push(Ok(SyncUtxosResponse { + txo: Some(Txo::Output(tx_ouput)), + mined_header: current_header_hash.to_vec(), + })); + }, + Err(e) => { + return Err(RpcStatus::general(&format!( + "Output '{}' RPC conversion error ({})", + output.hash().to_hex(), + e + ))) + }, + } + } + } debug!( target: LOG_TARGET, - "Streaming UTXO(s) for block #{}.", + "Adding {} outputs in response for block #{} '{}'", outputs.len(), 
current_header.height, + current_header_hash ); + + let inputs_in_block = self + .db + .fetch_inputs_in_block(current_header_hash) + .await + .rpc_status_internal_error(LOG_TARGET)?; if tx.is_closed() { + debug!(target: LOG_TARGET, "Peer '{}' exited TXO sync session early", self.peer_node_id); + break; + } + + let mut inputs = Vec::with_capacity(inputs_in_block.len()); + for input in inputs_in_block { + let output_from_current_tranche = if let Some(mined_info) = self + .db + .fetch_output(input.output_hash()) + .await + .rpc_status_internal_error(LOG_TARGET)? + { + mined_info.mined_height >= start_header.height + } else { + false + }; + + if output_from_current_tranche { + trace!(target: LOG_TARGET, "Spent TXO (hash '{}') not sent to peer", input.output_hash().to_hex()); + } else { + let input_commitment = match self.db.fetch_output(input.output_hash()).await { + Ok(Some(o)) => o.output.commitment, + Ok(None) => { + return Err(RpcStatus::general(&format!( + "Mined info for input '{}' not found", + input.output_hash().to_hex() + ))) + }, + Err(e) => { + return Err(RpcStatus::general(&format!( + "Input '{}' not found ({})", + input.output_hash().to_hex(), + e + ))) + }, + }; + trace!(target: LOG_TARGET, "Spent TXO (commitment '{}') to peer", input_commitment.to_hex()); + inputs.push(Ok(SyncUtxosResponse { + txo: Some(Txo::Commitment(input_commitment.as_bytes().to_vec())), + mined_header: current_header_hash.to_vec(), + })); + } + } + debug!( + target: LOG_TARGET, + "Adding {} inputs in response for block #{} '{}'", inputs.len(), + current_header.height, + current_header_hash + ); + + let mut txos = Vec::with_capacity(outputs.len() + inputs.len()); + txos.append(&mut outputs); + txos.append(&mut inputs); + if start_header == current_header { debug!( target: LOG_TARGET, - "Peer '{}' exited UTXO sync session early", self.peer_node_id + "Adding {} genesis block pruned inputs in response for block #{} '{}'", pruned_genesis_block_outputs.len(), + current_header.height, + 
current_header_hash ); - break; + txos.append(&mut pruned_genesis_block_outputs); } - - let utxos = outputs_with_statuses - .into_iter() - .filter_map(|(output, spent)| { - // We only send unspent utxos - if spent { - None - } else { - match output.try_into() { - Ok(tx_ouput) => Some(Ok(SyncUtxosResponse { - output: Some(tx_ouput), - mined_header: current_header.hash().to_vec(), - })), - Err(err) => Some(Err(err)), - } - } - }) - .collect::, String>>() - .map_err(|err| RpcStatus::bad_request(&err))? - .into_iter() - .map(Ok); + let txos = txos.into_iter(); // Ensure task stops if the peer prematurely stops their RPC session - let utxos_len = utxos.len(); - if utils::mpsc::send_all(tx, utxos).await.is_err() { + let txos_len = txos.len(); + if utils::mpsc::send_all(tx, txos).await.is_err() { break; } debug!( target: LOG_TARGET, - "Streamed {} utxos in {:.2?} (including stream backpressure)", - utxos_len, + "Streamed {} TXOs in {:.2?} (including stream backpressure)", + txos_len, timer.elapsed() ); @@ -217,7 +324,7 @@ where B: BlockchainBackend + 'static debug!( target: LOG_TARGET, - "UTXO sync completed to Header hash = {}", + "TXO sync completed to Header hash = {}", current_header.hash().to_hex() ); diff --git a/base_layer/core/src/base_node/sync/sync_peer.rs b/base_layer/core/src/base_node/sync/sync_peer.rs index 70d9b83df5..52877c627c 100644 --- a/base_layer/core/src/base_node/sync/sync_peer.rs +++ b/base_layer/core/src/base_node/sync/sync_peer.rs @@ -135,6 +135,8 @@ mod test { use super::*; mod sort_by_latency { + use primitive_types::U256; + use tari_common_types::types::FixedHash; use tari_comms::types::{CommsPublicKey, CommsSecretKey}; use tari_crypto::keys::{PublicKey, SecretKey}; @@ -147,7 +149,12 @@ mod test { let pk = CommsPublicKey::from_secret_key(&sk); let node_id = NodeId::from_key(&pk); let latency_option = latency.map(|latency| Duration::from_millis(latency as u64)); - PeerChainMetadata::new(node_id, ChainMetadata::empty(), latency_option).into() + 
PeerChainMetadata::new( + node_id, + ChainMetadata::new(0, FixedHash::zero(), 0, 0, U256::zero(), 0), + latency_option, + ) + .into() } #[test] diff --git a/base_layer/core/src/chain_storage/async_db.rs b/base_layer/core/src/chain_storage/async_db.rs index e108dae80a..71af766fca 100644 --- a/base_layer/core/src/chain_storage/async_db.rs +++ b/base_layer/core/src/chain_storage/async_db.rs @@ -26,7 +26,7 @@ use primitive_types::U256; use rand::{rngs::OsRng, RngCore}; use tari_common_types::{ chain_metadata::ChainMetadata, - types::{BlockHash, Commitment, FixedHash, HashOutput, PublicKey, Signature}, + types::{BlockHash, Commitment, HashOutput, PublicKey, Signature}, }; use tari_utilities::epoch_time::EpochTime; @@ -59,9 +59,10 @@ use crate::{ }, common::rolling_vec::RollingVec, proof_of_work::{PowAlgorithm, TargetDifficultyWindow}, - transactions::transaction_components::{TransactionKernel, TransactionOutput}, + transactions::transaction_components::{OutputType, TransactionInput, TransactionKernel, TransactionOutput}, OutputSmt, }; + const LOG_TARGET: &str = "c::bn::async_db"; fn trace_log(name: &str, f: F) -> R @@ -142,6 +143,10 @@ impl AsyncBlockchainDb { pub fn inner(&self) -> &BlockchainDatabase { &self.db } + + pub fn fetch_genesis_block(&self) -> ChainBlock { + self.db.fetch_genesis_block() + } } impl AsyncBlockchainDb { @@ -154,15 +159,23 @@ impl AsyncBlockchainDb { //---------------------------------- TXO --------------------------------------------// + make_async_fn!(fetch_output(output_hash: HashOutput) -> Option, "fetch_output"); + + make_async_fn!(fetch_input(output_hash: HashOutput) -> Option, "fetch_input"); + + make_async_fn!(fetch_unspent_output_hash_by_commitment(commitment: Commitment) -> Option, "fetch_unspent_output_by_commitment"); + make_async_fn!(fetch_outputs_with_spend_status_at_tip(hashes: Vec) -> Vec>, "fetch_outputs_with_spend_status_at_tip"); make_async_fn!(fetch_outputs_mined_info(hashes: Vec) -> Vec>, "fetch_outputs_mined_info"); 
make_async_fn!(fetch_inputs_mined_info(hashes: Vec) -> Vec>, "fetch_inputs_mined_info"); - make_async_fn!(fetch_outputs_in_block_with_spend_state(hash: HashOutput, spend_header: Option) -> Vec<(TransactionOutput, bool)>, "fetch_outputs_in_block_with_spend_state"); + make_async_fn!(fetch_outputs_in_block_with_spend_state(header_hash: HashOutput, spend_status_at_header: Option) -> Vec<(TransactionOutput, bool)>, "fetch_outputs_in_block_with_spend_state"); - make_async_fn!(fetch_outputs_in_block(hash: HashOutput) -> Vec, "fetch_outputs_in_block"); + make_async_fn!(fetch_outputs_in_block(header_hash: HashOutput) -> Vec, "fetch_outputs_in_block"); + + make_async_fn!(fetch_inputs_in_block(header_hash: HashOutput) -> Vec, "fetch_inputs_in_block"); make_async_fn!(utxo_count() -> usize, "utxo_count"); @@ -350,6 +363,22 @@ impl<'a, B: BlockchainBackend + 'static> AsyncDbTransaction<'a, B> { self } + pub fn prune_output_from_all_dbs( + &mut self, + output_hash: HashOutput, + commitment: Commitment, + output_type: OutputType, + ) -> &mut Self { + self.transaction + .prune_output_from_all_dbs(output_hash, commitment, output_type); + self + } + + pub fn delete_all_kernerls_in_block(&mut self, block_hash: BlockHash) -> &mut Self { + self.transaction.delete_all_kernerls_in_block(block_hash); + self + } + pub fn update_block_accumulated_data_via_horizon_sync( &mut self, header_hash: HashOutput, diff --git a/base_layer/core/src/chain_storage/blockchain_backend.rs b/base_layer/core/src/chain_storage/blockchain_backend.rs index d291a136a6..895982a371 100644 --- a/base_layer/core/src/chain_storage/blockchain_backend.rs +++ b/base_layer/core/src/chain_storage/blockchain_backend.rs @@ -3,7 +3,7 @@ use tari_common_types::{ chain_metadata::ChainMetadata, - types::{Commitment, FixedHash, HashOutput, PublicKey, Signature}, + types::{Commitment, HashOutput, PublicKey, Signature}, }; use super::TemplateRegistrationEntry; @@ -91,7 +91,7 @@ pub trait BlockchainBackend: Send + Sync { fn 
fetch_outputs_in_block_with_spend_state( &self, header_hash: &HashOutput, - spend_status_at_header: Option, + spend_status_at_header: Option, ) -> Result, ChainStorageError>; /// Fetch a specific output. Returns the output diff --git a/base_layer/core/src/chain_storage/blockchain_database.rs b/base_layer/core/src/chain_storage/blockchain_database.rs index fffa20a275..41d4655f78 100644 --- a/base_layer/core/src/chain_storage/blockchain_database.rs +++ b/base_layer/core/src/chain_storage/blockchain_database.rs @@ -301,6 +301,11 @@ where B: BlockchainBackend Ok(blockchain_db) } + /// Get the genesis block form the consensus manager + pub fn fetch_genesis_block(&self) -> ChainBlock { + self.consensus_manager.get_genesis_block() + } + /// Returns a reference to the consensus cosntants at the current height pub fn consensus_constants(&self) -> Result<&ConsensusConstants, ChainStorageError> { let height = self.get_height()?; @@ -367,7 +372,7 @@ where B: BlockchainBackend /// that case to re-sync the metadata; or else just exit the program. pub fn get_height(&self) -> Result { let db = self.db_read_access()?; - Ok(db.fetch_chain_metadata()?.height_of_longest_chain()) + Ok(db.fetch_chain_metadata()?.best_block_height()) } /// Return the accumulated proof of work of the longest chain. 
@@ -383,12 +388,24 @@ where B: BlockchainBackend db.fetch_chain_metadata() } - pub fn fetch_unspent_output_by_commitment( + /// Returns a copy of the current output mined info + pub fn fetch_output(&self, output_hash: HashOutput) -> Result, ChainStorageError> { + let db = self.db_read_access()?; + db.fetch_output(&output_hash) + } + + /// Returns a copy of the current input mined info + pub fn fetch_input(&self, output_hash: HashOutput) -> Result, ChainStorageError> { + let db = self.db_read_access()?; + db.fetch_input(&output_hash) + } + + pub fn fetch_unspent_output_hash_by_commitment( &self, - commitment: &Commitment, + commitment: Commitment, ) -> Result, ChainStorageError> { let db = self.db_read_access()?; - db.fetch_unspent_output_hash_by_commitment(commitment) + db.fetch_unspent_output_hash_by_commitment(&commitment) } /// Return a list of matching utxos, with each being `None` if not found. If found, the transaction @@ -456,16 +473,21 @@ where B: BlockchainBackend pub fn fetch_outputs_in_block_with_spend_state( &self, - hash: HashOutput, - spend_status_at_header: Option, + header_hash: HashOutput, + spend_status_at_header: Option, ) -> Result, ChainStorageError> { let db = self.db_read_access()?; - db.fetch_outputs_in_block_with_spend_state(&hash, spend_status_at_header) + db.fetch_outputs_in_block_with_spend_state(&header_hash, spend_status_at_header) + } + + pub fn fetch_outputs_in_block(&self, header_hash: HashOutput) -> Result, ChainStorageError> { + let db = self.db_read_access()?; + db.fetch_outputs_in_block(&header_hash) } - pub fn fetch_outputs_in_block(&self, hash: HashOutput) -> Result, ChainStorageError> { + pub fn fetch_inputs_in_block(&self, header_hash: HashOutput) -> Result, ChainStorageError> { let db = self.db_read_access()?; - db.fetch_outputs_in_block(&hash) + db.fetch_inputs_in_block(&header_hash) } /// Returns the number of UTXOs in the current unspent set @@ -983,7 +1005,7 @@ where B: BlockchainBackend info!( target: LOG_TARGET, "Best 
chain is now at height: {}", - db.fetch_chain_metadata()?.height_of_longest_chain() + db.fetch_chain_metadata()?.best_block_height() ); // If blocks were added and the node is in pruned mode, perform pruning prune_database_if_needed(&mut *db, self.config.pruning_horizon, self.config.pruning_interval)?; @@ -1074,12 +1096,12 @@ where B: BlockchainBackend } if end.is_none() { // `(n..)` means fetch blocks until this node's tip - end = Some(metadata.height_of_longest_chain()); + end = Some(metadata.best_block_height()); } let (start, end) = (start.unwrap(), end.unwrap()); - if end > metadata.height_of_longest_chain() { + if end > metadata.best_block_height() { return Err(ChainStorageError::ValueNotFound { entity: "Block", field: "end height", @@ -1282,13 +1304,13 @@ pub fn calculate_mmr_roots( let body = &block.body; let metadata = db.fetch_chain_metadata()?; - if header.prev_hash != *metadata.best_block() { + if header.prev_hash != *metadata.best_block_hash() { return Err(ChainStorageError::CannotCalculateNonTipMmr(format!( "Block (#{}) is not building on tip, previous hash is {} but the current tip is #{} {}", header.height, header.prev_hash, - metadata.height_of_longest_chain(), - metadata.best_block(), + metadata.best_block_height(), + metadata.best_block_hash(), ))); } @@ -1655,7 +1677,7 @@ fn fetch_block_by_hash( fn check_for_valid_height(db: &T, height: u64) -> Result<(u64, bool), ChainStorageError> { let metadata = db.fetch_chain_metadata()?; - let tip_height = metadata.height_of_longest_chain(); + let tip_height = metadata.best_block_height(); if height > tip_height { return Err(ChainStorageError::InvalidQuery(format!( "Cannot get block at height {}. 
Chain tip is at {}", @@ -1678,7 +1700,7 @@ fn rewind_to_height( // Delete headers let last_header_height = last_header.height; let metadata = db.fetch_chain_metadata()?; - let last_block_height = metadata.height_of_longest_chain(); + let last_block_height = metadata.best_block_height(); // We use the cmp::max value here because we'll only delete headers here and leave remaining headers to be deleted // with the whole block let steps_back = last_header_height @@ -1725,9 +1747,7 @@ fn rewind_to_height( target_height ); - let effective_pruning_horizon = metadata - .height_of_longest_chain() - .saturating_sub(metadata.pruned_height()); + let effective_pruning_horizon = metadata.best_block_height().saturating_sub(metadata.pruned_height()); let prune_past_horizon = metadata.is_pruned_node() && steps_back > effective_pruning_horizon; if prune_past_horizon { warn!( @@ -1761,7 +1781,7 @@ fn rewind_to_height( last_block_height - h - 1 })?; let metadata = db.fetch_chain_metadata()?; - let expected_block_hash = *metadata.best_block(); + let expected_block_hash = *metadata.best_block_hash(); txn.set_best_block( chain_header.height(), chain_header.accumulated_data().hash, @@ -1927,7 +1947,7 @@ fn swap_to_highest_pow_chain( // lets clear out all remaining headers that dont have a matching block // rewind to height will first delete the headers, then try delete from blocks, if we call this to the current // height it will only trim the extra headers with no blocks - rewind_to_height(db, metadata.height_of_longest_chain())?; + rewind_to_height(db, metadata.best_block_height())?; let strongest_orphan_tips = db.fetch_strongest_orphan_chain_tips()?; if strongest_orphan_tips.is_empty() { // we have no orphan chain tips, we have trimmed remaining headers, we are on the best tip we have, so lets @@ -2333,7 +2353,7 @@ fn find_strongest_orphan_tip( // block height will also be discarded. 
fn cleanup_orphans(db: &mut T, orphan_storage_capacity: usize) -> Result<(), ChainStorageError> { let metadata = db.fetch_chain_metadata()?; - let horizon_height = metadata.horizon_block_height(metadata.height_of_longest_chain()); + let horizon_height = metadata.pruned_height_at_given_chain_tip(metadata.best_block_height()); db.delete_oldest_orphans(horizon_height, orphan_storage_capacity) } @@ -2348,18 +2368,18 @@ fn prune_database_if_needed( return Ok(()); } - let db_height = metadata.height_of_longest_chain(); - let abs_pruning_horizon = db_height.saturating_sub(pruning_horizon); - + let prune_to_height_target = metadata.best_block_height().saturating_sub(pruning_horizon); debug!( target: LOG_TARGET, - "Current pruned height is: {}, pruning horizon is: {}, while the pruning interval is: {}", + "Blockchain height: {}, pruning horizon: {}, pruned height: {}, prune to height target: {}, pruning interval: {}", + metadata.best_block_height(), + metadata.pruning_horizon(), metadata.pruned_height(), - abs_pruning_horizon, + prune_to_height_target, pruning_interval, ); - if metadata.pruned_height() < abs_pruning_horizon.saturating_sub(pruning_interval) { - prune_to_height(db, abs_pruning_horizon)?; + if metadata.pruned_height() < prune_to_height_target.saturating_sub(pruning_interval) { + prune_to_height(db, prune_to_height_target)?; } Ok(()) @@ -2387,14 +2407,14 @@ fn prune_to_height(db: &mut T, target_horizon_height: u64) return Ok(()); } - if target_horizon_height > metadata.height_of_longest_chain() { + if target_horizon_height > metadata.best_block_height() { return Err(ChainStorageError::InvalidArguments { func: "prune_to_height", arg: "target_horizon_height", message: format!( "Target pruning horizon {} is greater than current block height {}", target_horizon_height, - metadata.height_of_longest_chain() + metadata.best_block_height() ), }); } @@ -2825,8 +2845,8 @@ mod test { let tip = access.fetch_tip_header().unwrap(); assert_eq!(tip.hash(), block.hash()); let 
metadata = access.fetch_chain_metadata().unwrap(); - assert_eq!(metadata.best_block(), block.hash()); - assert_eq!(metadata.height_of_longest_chain(), block.height()); + assert_eq!(metadata.best_block_hash(), block.hash()); + assert_eq!(metadata.best_block_height(), block.height()); assert!(access.contains(&DbKey::HeaderHash(*block.hash())).unwrap()); let mut all_blocks = main_chain @@ -2920,8 +2940,8 @@ mod test { let tip = access.fetch_tip_header().unwrap(); assert_eq!(tip.hash(), block.hash()); let metadata = access.fetch_chain_metadata().unwrap(); - assert_eq!(metadata.best_block(), block.hash()); - assert_eq!(metadata.height_of_longest_chain(), block.height()); + assert_eq!(metadata.best_block_hash(), block.hash()); + assert_eq!(metadata.best_block_height(), block.height()); assert!(access.contains(&DbKey::HeaderHash(*block.hash())).unwrap()); let mut all_blocks = main_chain.into_iter().chain(orphan_chain_b).collect::>(); @@ -3348,7 +3368,7 @@ mod test { } fn check_whole_chain(db: &mut TempDatabase) { - let mut h = db.fetch_chain_metadata().unwrap().height_of_longest_chain(); + let mut h = db.fetch_chain_metadata().unwrap().best_block_height(); while h > 0 { // fetch_chain_header_by_height will error if there are internal inconsistencies db.fetch_chain_header_by_height(h).unwrap(); diff --git a/base_layer/core/src/chain_storage/db_transaction.rs b/base_layer/core/src/chain_storage/db_transaction.rs index 6ca0d7bf52..8ad752a8ee 100644 --- a/base_layer/core/src/chain_storage/db_transaction.rs +++ b/base_layer/core/src/chain_storage/db_transaction.rs @@ -33,7 +33,7 @@ use tari_utilities::hex::Hex; use crate::{ blocks::{Block, BlockHeader, BlockHeaderAccumulatedData, ChainBlock, ChainHeader, UpdateBlockAccumulatedData}, chain_storage::{error::ChainStorageError, HorizonData, Reorg}, - transactions::transaction_components::{TransactionKernel, TransactionOutput}, + transactions::transaction_components::{OutputType, TransactionKernel, TransactionOutput}, OutputSmt, }; 
@@ -132,6 +132,26 @@ impl DbTransaction { self } + pub fn prune_output_from_all_dbs( + &mut self, + output_hash: HashOutput, + commitment: Commitment, + output_type: OutputType, + ) -> &mut Self { + self.operations.push(WriteOperation::PruneOutputFromAllDbs { + output_hash, + commitment, + output_type, + }); + self + } + + pub fn delete_all_kernerls_in_block(&mut self, block_hash: BlockHash) -> &mut Self { + self.operations + .push(WriteOperation::DeleteAllKernelsInBlock { block_hash }); + self + } + pub fn delete_all_inputs_in_block(&mut self, block_hash: BlockHash) -> &mut Self { self.operations .push(WriteOperation::DeleteAllInputsInBlock { block_hash }); @@ -304,6 +324,14 @@ pub enum WriteOperation { PruneOutputsSpentAtHash { block_hash: BlockHash, }, + PruneOutputFromAllDbs { + output_hash: HashOutput, + commitment: Commitment, + output_type: OutputType, + }, + DeleteAllKernelsInBlock { + block_hash: BlockHash, + }, DeleteAllInputsInBlock { block_hash: BlockHash, }, @@ -387,6 +415,18 @@ impl fmt::Display for WriteOperation { write!(f, "Update Block data for block {}", header_hash) }, PruneOutputsSpentAtHash { block_hash } => write!(f, "Prune output(s) at hash: {}", block_hash), + PruneOutputFromAllDbs { + output_hash, + commitment, + output_type, + } => write!( + f, + "Prune output from all dbs, hash : {}, commitment: {},output_type: {}", + output_hash, + commitment.to_hex(), + output_type, + ), + DeleteAllKernelsInBlock { block_hash } => write!(f, "Delete kernels in block {}", block_hash), DeleteAllInputsInBlock { block_hash } => write!(f, "Delete outputs in block {}", block_hash), SetAccumulatedDataForOrphan(accumulated_data) => { write!(f, "Set accumulated data for orphan {}", accumulated_data) diff --git a/base_layer/core/src/chain_storage/lmdb_db/lmdb.rs b/base_layer/core/src/chain_storage/lmdb_db/lmdb.rs index 991b9bdef1..ce660df04a 100644 --- a/base_layer/core/src/chain_storage/lmdb_db/lmdb.rs +++ b/base_layer/core/src/chain_storage/lmdb_db/lmdb.rs @@ 
-82,7 +82,7 @@ where }) }, err @ Err(lmdb_zero::Error::Code(lmdb_zero::error::MAP_FULL)) => { - error!( + info!( target: LOG_TARGET, "Could not insert {} bytes with key '{}' into '{}' ({:?})", val_buf.len(), to_hex(key.as_lmdb_bytes()), table_name, err ); diff --git a/base_layer/core/src/chain_storage/lmdb_db/lmdb_db.rs b/base_layer/core/src/chain_storage/lmdb_db/lmdb_db.rs index 66c9273c36..781aaed34b 100644 --- a/base_layer/core/src/chain_storage/lmdb_db/lmdb_db.rs +++ b/base_layer/core/src/chain_storage/lmdb_db/lmdb_db.rs @@ -97,6 +97,7 @@ use crate::{ transactions::{ aggregated_body::AggregateBody, transaction_components::{ + OutputType, SpentOutput, TransactionInput, TransactionKernel, @@ -391,6 +392,16 @@ impl LMDBDatabase { PruneOutputsSpentAtHash { block_hash } => { self.prune_outputs_spent_at_hash(&write_txn, block_hash)?; }, + PruneOutputFromAllDbs { + output_hash, + commitment, + output_type, + } => { + self.prune_output_from_all_dbs(&write_txn, output_hash, commitment, *output_type)?; + }, + DeleteAllKernelsInBlock { block_hash } => { + self.delete_all_kernels_in_block(&write_txn, block_hash)?; + }, DeleteAllInputsInBlock { block_hash } => { self.delete_all_inputs_in_block(&write_txn, block_hash)?; }, @@ -516,11 +527,6 @@ impl LMDBDatabase { ] } - fn prune_output(&self, txn: &WriteTransaction<'_>, key: OutputKey) -> Result<(), ChainStorageError> { - lmdb_delete(txn, &self.utxos_db, &key.convert_to_comp_key(), "utxos_db")?; - Ok(()) - } - fn insert_output( &self, txn: &WriteTransaction<'_>, @@ -1418,21 +1424,93 @@ impl LMDBDatabase { let inputs = lmdb_fetch_matching_after::(write_txn, &self.inputs_db, block_hash.as_slice())?; - for input in inputs { + for input_data in inputs { + let input = input_data.input; + // From 'utxo_commitment_index::utxo_commitment_index' + if let SpentOutput::OutputData { commitment, .. 
} = input.spent_output.clone() { + debug!(target: LOG_TARGET, "Pruning output from 'utxo_commitment_index': key '{}'", commitment.to_hex()); + lmdb_delete( + write_txn, + &self.utxo_commitment_index, + commitment.as_bytes(), + "utxo_commitment_index", + )?; + } + // From 'utxos_db::utxos_db' + if let Some(key_bytes) = + lmdb_get::<_, Vec>(write_txn, &self.txos_hash_to_index_db, input.output_hash().as_slice())? + { + let mut buffer = [0u8; 32]; + buffer.copy_from_slice(&key_bytes[0..32]); + let key = OutputKey::new(&FixedHash::from(buffer), &input.output_hash())?; + debug!(target: LOG_TARGET, "Pruning output from 'utxos_db': key '{}'", key.0); + lmdb_delete(write_txn, &self.utxos_db, &key.convert_to_comp_key(), "utxos_db")?; + }; + // From 'txos_hash_to_index_db::utxos_db' + debug!( + target: LOG_TARGET, + "Pruning output from 'txos_hash_to_index_db': key '{}'", + input.output_hash().to_hex() + ); lmdb_delete( write_txn, &self.txos_hash_to_index_db, - input.hash.as_slice(), + input.output_hash().as_slice(), "utxos_db", )?; - let key = OutputKey::new(block_hash, &input.hash)?; - debug!(target: LOG_TARGET, "Pruning output: {:?}", key); - self.prune_output(write_txn, key)?; } Ok(()) } + fn prune_output_from_all_dbs( + &self, + write_txn: &WriteTransaction<'_>, + output_hash: &HashOutput, + commitment: &Commitment, + output_type: OutputType, + ) -> Result<(), ChainStorageError> { + match lmdb_get::<_, Vec>(write_txn, &self.txos_hash_to_index_db, output_hash.as_slice())? 
{ + Some(key_bytes) => { + if !matches!(output_type, OutputType::Burn) { + debug!(target: LOG_TARGET, "Pruning output from 'utxo_commitment_index': key '{}'", commitment.to_hex()); + lmdb_delete( + write_txn, + &self.utxo_commitment_index, + commitment.as_bytes(), + "utxo_commitment_index", + )?; + } + debug!(target: LOG_TARGET, "Pruning output from 'txos_hash_to_index_db': key '{}'", output_hash.to_hex()); + lmdb_delete( + write_txn, + &self.txos_hash_to_index_db, + output_hash.as_slice(), + "utxos_db", + )?; + + let mut buffer = [0u8; 32]; + buffer.copy_from_slice(&key_bytes[0..32]); + let key = OutputKey::new(&FixedHash::from(buffer), output_hash)?; + debug!(target: LOG_TARGET, "Pruning output from 'utxos_db': key '{}'", key.0); + lmdb_delete(write_txn, &self.utxos_db, &key.convert_to_comp_key(), "utxos_db")?; + }, + None => return Err(ChainStorageError::InvalidOperation("Output key not found".to_string())), + } + + Ok(()) + } + + fn delete_all_kernels_in_block( + &self, + txn: &WriteTransaction<'_>, + block_hash: &BlockHash, + ) -> Result<(), ChainStorageError> { + self.delete_block_kernels(txn, block_hash.as_slice())?; + debug!(target: LOG_TARGET, "Deleted kernels in block {}", block_hash.to_hex()); + Ok(()) + } + #[allow(clippy::ptr_arg)] fn fetch_orphan(&self, txn: &ConstTransaction<'_>, hash: &HashOutput) -> Result, ChainStorageError> { let val: Option = lmdb_get(txn, &self.orphans_db, hash.deref())?; @@ -1484,7 +1562,7 @@ impl LMDBDatabase { // Clean up bad blocks that are far from the tip let metadata = fetch_metadata(txn, &self.metadata_db)?; let deleted_before_height = metadata - .height_of_longest_chain() + .best_block_height() .saturating_sub(CLEAN_BAD_BLOCKS_BEFORE_REL_HEIGHT); if deleted_before_height == 0 { return Ok(()); @@ -1892,23 +1970,23 @@ impl BlockchainBackend for LMDBDatabase { fn fetch_outputs_in_block_with_spend_state( &self, - header_hash: &HashOutput, - spend_status_at_header: Option, + previous_header_hash: &HashOutput, + 
spend_status_at_header: Option, ) -> Result, ChainStorageError> { let txn = self.read_transaction()?; let mut outputs: Vec<(TransactionOutput, bool)> = - lmdb_fetch_matching_after::(&txn, &self.utxos_db, header_hash.deref())? + lmdb_fetch_matching_after::(&txn, &self.utxos_db, previous_header_hash.deref())? .into_iter() .map(|row| (row.output, false)) .collect(); - if let Some(header) = spend_status_at_header { + if let Some(header_hash) = spend_status_at_header { let header_height = - self.fetch_height_from_hash(&txn, header_hash)? + self.fetch_height_from_hash(&txn, &header_hash)? .ok_or(ChainStorageError::ValueNotFound { entity: "Header", field: "hash", - value: header.to_hex(), + value: header_hash.to_hex(), })?; for output in &mut outputs { let hash = output.0.hash(); @@ -1919,7 +1997,7 @@ impl BlockchainBackend for LMDBDatabase { ChainStorageError::ValueNotFound { entity: "input", field: "hash", - value: header.to_hex(), + value: header_hash.to_hex(), }, )?; if input.spent_height <= header_height { @@ -1958,10 +2036,13 @@ impl BlockchainBackend for LMDBDatabase { lmdb_fetch_matching_after(&txn, &self.utxos_db, header_hash.as_slice()) } - fn fetch_inputs_in_block(&self, header_hash: &HashOutput) -> Result, ChainStorageError> { + fn fetch_inputs_in_block( + &self, + previous_header_hash: &HashOutput, + ) -> Result, ChainStorageError> { let txn = self.read_transaction()?; Ok( - lmdb_fetch_matching_after(&txn, &self.inputs_db, header_hash.as_slice())? + lmdb_fetch_matching_after(&txn, &self.inputs_db, previous_header_hash.as_slice())? 
.into_iter() .map(|f: TransactionInputRowData| f.input) .collect(), @@ -2020,14 +2101,14 @@ impl BlockchainBackend for LMDBDatabase { let txn = self.read_transaction()?; let metadata = self.fetch_chain_metadata()?; - let height = metadata.height_of_longest_chain(); + let height = metadata.best_block_height(); let header = lmdb_get(&txn, &self.headers_db, &height)?.ok_or_else(|| ChainStorageError::ValueNotFound { entity: "Header", field: "height", value: height.to_string(), })?; let accumulated_data = self - .fetch_header_accumulated_data_by_height(&txn, metadata.height_of_longest_chain())? + .fetch_header_accumulated_data_by_height(&txn, metadata.best_block_height())? .ok_or_else(|| ChainStorageError::ValueNotFound { entity: "BlockHeaderAccumulatedData", field: "height", @@ -2288,11 +2369,11 @@ impl BlockchainBackend for LMDBDatabase { }; let metadata = fetch_metadata(&txn, &self.metadata_db)?; - if metadata.height_of_longest_chain() == last_header.height { + if metadata.best_block_height() == last_header.height { return Ok(0); } - let start = metadata.height_of_longest_chain() + 1; + let start = metadata.best_block_height() + 1; let end = last_header.height; let mut num_deleted = 0; diff --git a/base_layer/core/src/proof_of_work/monero_rx/helpers.rs b/base_layer/core/src/proof_of_work/monero_rx/helpers.rs index ab5b34150b..0ec008c626 100644 --- a/base_layer/core/src/proof_of_work/monero_rx/helpers.rs +++ b/base_layer/core/src/proof_of_work/monero_rx/helpers.rs @@ -105,8 +105,11 @@ pub fn verify_header( ) -> Result { let monero_data = MoneroPowData::from_header(header, consensus)?; let expected_merge_mining_hash = header.merge_mining_hash(); - let extra_field = ExtraField::try_parse(&monero_data.coinbase_tx_extra) - .map_err(|_| MergeMineError::DeserializeError("Invalid extra field".to_string()))?; + let extra_field = ExtraField::try_parse(&monero_data.coinbase_tx_extra); + let extra_field = extra_field.unwrap_or_else(|ex_field| { + warn!(target: LOG_TARGET, "Error 
deserializing, Monero extra field"); + ex_field + }); // Check that the Tari MM hash is found in the Monero coinbase transaction // and that only 1 Tari header is found @@ -1248,4 +1251,27 @@ mod test { ); assert_eq!(difficulty.as_u64(), 430603); } + + #[test] + fn test_extra_field_deserialize() { + let bytes = vec![ + 3, 33, 0, 149, 5, 198, 66, 174, 39, 113, 243, 68, 202, 221, 222, 116, 10, 209, 194, 56, 247, 252, 23, 248, + 28, 44, 81, 91, 44, 214, 211, 242, 3, 12, 70, 0, 0, 0, 1, 251, 88, 0, 0, 96, 49, 163, 82, 175, 205, 74, + 138, 126, 250, 226, 106, 10, 255, 139, 49, 41, 168, 110, 203, 150, 252, 208, 234, 140, 2, 17, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + ]; + let raw_extra_field = RawExtraField(bytes); + let res = ExtraField::try_parse(&raw_extra_field); + assert!(res.is_err()); + let field = res.unwrap_err(); + let mm_tag = SubField::MergeMining( + Some(VarInt(0)), + Hash::from_slice( + hex::decode("9505c642ae2771f344caddde740ad1c238f7fc17f81c2c515b2cd6d3f2030c46") + .unwrap() + .as_slice(), + ), + ); + assert_eq!(field.0[0], mm_tag); + } } diff --git a/base_layer/core/src/test_helpers/blockchain.rs b/base_layer/core/src/test_helpers/blockchain.rs index d60d6cfa03..87338619ea 100644 --- a/base_layer/core/src/test_helpers/blockchain.rs +++ b/base_layer/core/src/test_helpers/blockchain.rs @@ -32,7 +32,7 @@ use tari_common::configuration::Network; use tari_common_types::{ chain_metadata::ChainMetadata, tari_address::TariAddress, - types::{Commitment, FixedHash, HashOutput, PublicKey, Signature}, + types::{Commitment, HashOutput, PublicKey, Signature}, }; use tari_storage::lmdb_store::LMDBConfig; use tari_test_utils::paths::create_temporary_data_path; @@ -277,7 +277,7 @@ impl BlockchainBackend for TempDatabase { fn fetch_outputs_in_block_with_spend_state( &self, header_hash: &HashOutput, - spend_status_at_header: Option, + spend_status_at_header: Option, ) -> Result, ChainStorageError> { self.db .as_ref() diff --git 
a/base_layer/core/src/validation/block_body/block_body_full_validator.rs b/base_layer/core/src/validation/block_body/block_body_full_validator.rs index 7175b23b18..c16924fc25 100644 --- a/base_layer/core/src/validation/block_body/block_body_full_validator.rs +++ b/base_layer/core/src/validation/block_body/block_body_full_validator.rs @@ -105,15 +105,15 @@ impl BlockBodyValidator for BlockBodyFullValidator { } fn validate_block_metadata(block: &Block, metadata: &ChainMetadata) -> Result<(), ValidationError> { - if block.header.prev_hash != *metadata.best_block() { + if block.header.prev_hash != *metadata.best_block_hash() { return Err(ValidationError::IncorrectPreviousHash { - expected: metadata.best_block().to_hex(), + expected: metadata.best_block_hash().to_hex(), block_hash: block.hash().to_hex(), }); } - if block.header.height != metadata.height_of_longest_chain() + 1 { + if block.header.height != metadata.best_block_height() + 1 { return Err(ValidationError::IncorrectHeight { - expected: metadata.height_of_longest_chain() + 1, + expected: metadata.best_block_height() + 1, block_height: block.header.height, }); } diff --git a/base_layer/core/src/validation/transaction/transaction_chain_validator.rs b/base_layer/core/src/validation/transaction/transaction_chain_validator.rs index fd4f6535b2..7879cd50f5 100644 --- a/base_layer/core/src/validation/transaction/transaction_chain_validator.rs +++ b/base_layer/core/src/validation/transaction/transaction_chain_validator.rs @@ -62,7 +62,7 @@ impl TransactionValidator for TransactionChainLinkedValida { let db = self.db.db_read_access()?; - let tip_height = db.fetch_chain_metadata()?.height_of_longest_chain(); + let tip_height = db.fetch_chain_metadata()?.best_block_height(); self.aggregate_body_validator.validate(&tx.body, tip_height, &*db)?; }; diff --git a/base_layer/core/src/validation/transaction/transaction_internal_validator.rs b/base_layer/core/src/validation/transaction/transaction_internal_validator.rs index 
74725418f4..7991352d06 100644 --- a/base_layer/core/src/validation/transaction/transaction_internal_validator.rs +++ b/base_layer/core/src/validation/transaction/transaction_internal_validator.rs @@ -86,8 +86,8 @@ impl TransactionInternalConsistencyValidator { &tx.offset, &tx.script_offset, None, - Some(*tip_metadata.best_block()), - tip_metadata.height_of_longest_chain(), + Some(*tip_metadata.best_block_hash()), + tip_metadata.best_block_height(), ) } } diff --git a/base_layer/core/tests/chain_storage_tests/chain_storage.rs b/base_layer/core/tests/chain_storage_tests/chain_storage.rs index c5e35ff216..b3f2018dad 100644 --- a/base_layer/core/tests/chain_storage_tests/chain_storage.rs +++ b/base_layer/core/tests/chain_storage_tests/chain_storage.rs @@ -133,9 +133,9 @@ fn test_store_and_retrieve_block() { let hash = blocks[0].hash(); // Check the metadata let metadata = db.get_chain_metadata().unwrap(); - assert_eq!(metadata.height_of_longest_chain(), 0); + assert_eq!(metadata.best_block_height(), 0); assert_eq!(metadata.best_block(), hash); - assert_eq!(metadata.horizon_block(metadata.height_of_longest_chain()), 0); + assert_eq!(metadata.horizon_block(metadata.best_block_height()), 0); // Fetch the block back let block0 = db.fetch_block(0, true).unwrap(); assert_eq!(block0.confirmations(), 1); @@ -151,7 +151,7 @@ fn test_add_multiple_blocks() { let consensus_manager = ConsensusManagerBuilder::new(network).build(); let store = create_store_with_consensus(consensus_manager.clone()); let metadata = store.get_chain_metadata().unwrap(); - assert_eq!(metadata.height_of_longest_chain(), 0); + assert_eq!(metadata.best_block_height(), 0); let block0 = store.fetch_block(0, true).unwrap(); assert_eq!(metadata.best_block(), block0.hash()); // Add another block @@ -165,7 +165,7 @@ fn test_add_multiple_blocks() { .unwrap(); let metadata = store.get_chain_metadata().unwrap(); let hash = block1.hash(); - assert_eq!(metadata.height_of_longest_chain(), 1); + 
assert_eq!(metadata.best_block_height(), 1); assert_eq!(metadata.best_block(), hash); // Adding blocks is idempotent assert_eq!( @@ -174,7 +174,7 @@ fn test_add_multiple_blocks() { ); // Check the metadata let metadata = store.get_chain_metadata().unwrap(); - assert_eq!(metadata.height_of_longest_chain(), 1); + assert_eq!(metadata.best_block_height(), 1); assert_eq!(metadata.best_block(), hash); } @@ -309,15 +309,15 @@ fn test_rewind_past_horizon_height() { let _block4 = append_block(&store, &block3, vec![], &consensus_manager, Difficulty::min()).unwrap(); let metadata = store.get_chain_metadata().unwrap(); - assert_eq!(metadata.height_of_longest_chain(), 4); + assert_eq!(metadata.best_block_height(), 4); // we should not be able to rewind to the future - assert!(store.rewind_to_height(metadata.height_of_longest_chain() + 1).is_err()); + assert!(store.rewind_to_height(metadata.best_block_height() + 1).is_err()); let horizon_height = metadata.pruned_height(); assert_eq!(horizon_height, 2); // rewinding past pruning horizon should set us to height 0 so we can resync from gen block. assert!(store.rewind_to_height(horizon_height - 1).is_ok()); let metadata = store.get_chain_metadata().unwrap(); - assert_eq!(metadata.height_of_longest_chain(), 0); + assert_eq!(metadata.best_block_height(), 0); } #[test] @@ -366,7 +366,7 @@ fn test_handle_tip_reorg_with_zero_conf() { &consensus_manager ) .is_ok()); - assert_eq!(store.get_chain_metadata().unwrap().height_of_longest_chain(), 3); + assert_eq!(store.get_chain_metadata().unwrap().best_block_height(), 3); // Create Forked Chain @@ -406,7 +406,7 @@ fn test_handle_tip_reorg_with_zero_conf() { // Check that B2 was removed from the block orphans and A2 has been orphaned. 
assert!(store.fetch_orphan(*orphan_blocks[2].hash()).is_err()); assert!(store.fetch_orphan(*blocks[2].hash()).is_ok()); - assert_eq!(store.get_chain_metadata().unwrap().height_of_longest_chain(), 2); + assert_eq!(store.get_chain_metadata().unwrap().best_block_height(), 2); // Block B3 let txs = vec![ @@ -470,7 +470,7 @@ fn test_handle_tip_reorg_with_zero_conf() { } else { panic!(); } - assert_eq!(store.get_chain_metadata().unwrap().height_of_longest_chain(), 5); + assert_eq!(store.get_chain_metadata().unwrap().best_block_height(), 5); } #[test] #[allow(clippy::too_many_lines)] @@ -1374,7 +1374,7 @@ fn test_restore_metadata_and_pruning_horizon_update() { db.add_block(block1.to_arc_block()).unwrap(); block_hash = *block1.hash(); let metadata = db.get_chain_metadata().unwrap(); - assert_eq!(metadata.height_of_longest_chain(), 1); + assert_eq!(metadata.best_block_height(), 1); assert_eq!(metadata.best_block(), &block_hash); assert_eq!(metadata.pruning_horizon(), 1000); } @@ -1394,7 +1394,7 @@ fn test_restore_metadata_and_pruning_horizon_update() { .unwrap(); let metadata = db.get_chain_metadata().unwrap(); - assert_eq!(metadata.height_of_longest_chain(), 1); + assert_eq!(metadata.best_block_height(), 1); assert_eq!(metadata.best_block(), &block_hash); assert_eq!(metadata.pruning_horizon(), 2000); } @@ -1412,7 +1412,7 @@ fn test_restore_metadata_and_pruning_horizon_update() { .unwrap(); let metadata = db.get_chain_metadata().unwrap(); - assert_eq!(metadata.height_of_longest_chain(), 1); + assert_eq!(metadata.best_block_height(), 1); assert_eq!(metadata.best_block(), &block_hash); assert_eq!(metadata.pruning_horizon(), 900); } @@ -1439,7 +1439,7 @@ fn test_invalid_block() { let mut outputs = vec![vec![output]]; let block0_hash = *blocks[0].hash(); let metadata = store.get_chain_metadata().unwrap(); - assert_eq!(metadata.height_of_longest_chain(), 0); + assert_eq!(metadata.best_block_height(), 0); assert_eq!(metadata.best_block(), &block0_hash); 
assert_eq!(store.fetch_block(0, true).unwrap().block().hash(), block0_hash); assert!(store.fetch_block(1, true).is_err()); @@ -1464,7 +1464,7 @@ fn test_invalid_block() { ); let block1_hash = *blocks[1].hash(); let metadata = store.get_chain_metadata().unwrap(); - assert_eq!(metadata.height_of_longest_chain(), 1); + assert_eq!(metadata.best_block_height(), 1); assert_eq!(metadata.best_block(), &block1_hash); assert_eq!(store.fetch_block(0, true).unwrap().hash(), &block0_hash); assert_eq!(store.fetch_block(1, true).unwrap().hash(), &block1_hash); @@ -1487,7 +1487,7 @@ fn test_invalid_block() { .unwrap_err() ); let metadata = store.get_chain_metadata().unwrap(); - assert_eq!(metadata.height_of_longest_chain(), 1); + assert_eq!(metadata.best_block_height(), 1); assert_eq!(metadata.best_block(), &block1_hash); assert_eq!(store.fetch_block(0, true).unwrap().hash(), &block0_hash); assert_eq!(store.fetch_block(1, true).unwrap().hash(), &block1_hash); @@ -1511,7 +1511,7 @@ fn test_invalid_block() { ); let block2_hash = blocks[2].hash(); let metadata = store.get_chain_metadata().unwrap(); - assert_eq!(metadata.height_of_longest_chain(), 2); + assert_eq!(metadata.best_block_height(), 2); assert_eq!(metadata.best_block(), block2_hash); assert_eq!(store.fetch_block(0, true).unwrap().hash(), &block0_hash); assert_eq!(store.fetch_block(1, true).unwrap().hash(), &block1_hash); @@ -1934,7 +1934,7 @@ fn test_fails_validation() { unpack_enum!(ValidationError::CustomError(_s) = source); let metadata = store.get_chain_metadata().unwrap(); - assert_eq!(metadata.height_of_longest_chain(), 0); + assert_eq!(metadata.best_block_height(), 0); } #[test] @@ -1974,7 +1974,7 @@ fn pruned_mode_cleanup_and_fetch_block() { let metadata = store.get_chain_metadata().unwrap(); assert_eq!(metadata.pruned_height(), 2); - assert_eq!(metadata.height_of_longest_chain(), 5); + assert_eq!(metadata.best_block_height(), 5); assert_eq!(metadata.pruning_horizon(), 3); } diff --git 
a/base_layer/core/tests/helpers/block_builders.rs b/base_layer/core/tests/helpers/block_builders.rs index b4f11bc64b..1c9f038df1 100644 --- a/base_layer/core/tests/helpers/block_builders.rs +++ b/base_layer/core/tests/helpers/block_builders.rs @@ -191,6 +191,7 @@ fn update_genesis_block_mmr_roots(template: NewBlockTemplate) -> Result( consensus: &ConsensusManager, achieved_difficulty: Difficulty, key_manager: &MemoryDbKeyManager, -) -> Result { +) -> Result<(ChainBlock, WalletOutput), ChainStorageError> { append_block_with_coinbase(db, prev_block, txns, consensus, achieved_difficulty, key_manager) .await - .map(|(b, _)| b) + .map(|(b, wo)| (b, wo)) } /// Create a new block with the provided transactions and add a coinbase output. The new MMR roots are calculated, and @@ -577,7 +578,7 @@ pub async fn construct_chained_blocks( let mut prev_block = block0; let mut blocks = Vec::new(); for _i in 0..n { - let block = append_block(db, &prev_block, vec![], consensus, Difficulty::min(), key_manager) + let (block, _) = append_block(db, &prev_block, vec![], consensus, Difficulty::min(), key_manager) .await .unwrap(); prev_block = block.clone(); diff --git a/base_layer/core/tests/helpers/nodes.rs b/base_layer/core/tests/helpers/nodes.rs index 98702db9d8..207373969c 100644 --- a/base_layer/core/tests/helpers/nodes.rs +++ b/base_layer/core/tests/helpers/nodes.rs @@ -41,7 +41,7 @@ use tari_core::{ LocalNodeCommsInterface, StateMachineHandle, }, - chain_storage::{BlockchainDatabase, Validators}, + chain_storage::{BlockchainDatabase, BlockchainDatabaseConfig, Validators}, consensus::{ConsensusManager, ConsensusManagerBuilder, NetworkConsensus}, mempool::{ service::{LocalMempoolService, MempoolHandle}, @@ -52,7 +52,7 @@ use tari_core::{ OutboundMempoolServiceInterface, }, proof_of_work::randomx_factory::RandomXFactory, - test_helpers::blockchain::{create_store_with_consensus_and_validators, TempDatabase}, + 
test_helpers::blockchain::{create_store_with_consensus_and_validators_and_config, TempDatabase}, validation::{ mocks::MockValidator, transaction::TransactionChainLinkedValidator, @@ -186,7 +186,11 @@ impl BaseNodeBuilder { /// Build the test base node and start its services. #[allow(clippy::redundant_closure)] - pub async fn start(self, data_path: &str) -> (NodeInterfaces, ConsensusManager) { + pub async fn start( + self, + data_path: &str, + blockchain_db_config: BlockchainDatabaseConfig, + ) -> (NodeInterfaces, ConsensusManager) { let validators = self.validators.unwrap_or_else(|| { Validators::new( MockValidator::new(true), @@ -198,7 +202,11 @@ impl BaseNodeBuilder { let consensus_manager = self .consensus_manager .unwrap_or_else(|| ConsensusManagerBuilder::new(network).build().unwrap()); - let blockchain_db = create_store_with_consensus_and_validators(consensus_manager.clone(), validators); + let blockchain_db = create_store_with_consensus_and_validators_and_config( + consensus_manager.clone(), + validators, + blockchain_db_config, + ); let mempool_validator = TransactionChainLinkedValidator::new(blockchain_db.clone(), consensus_manager.clone()); let mempool = Mempool::new( self.mempool_config.unwrap_or_default(), @@ -234,127 +242,53 @@ pub async fn wait_until_online(nodes: &[&NodeInterfaces]) { } } -// Creates a network with two Base Nodes where each node in the network knows the other nodes in the network. 
-#[allow(dead_code)] -pub async fn create_network_with_2_base_nodes(data_path: &str) -> (NodeInterfaces, NodeInterfaces, ConsensusManager) { - let alice_node_identity = random_node_identity(); - let bob_node_identity = random_node_identity(); - - let network = Network::LocalNet; - let (alice_node, consensus_manager) = BaseNodeBuilder::new(network.into()) - .with_node_identity(alice_node_identity.clone()) - .with_peers(vec![bob_node_identity.clone()]) - .start(data_path) - .await; - let (bob_node, consensus_manager) = BaseNodeBuilder::new(network.into()) - .with_node_identity(bob_node_identity) - .with_peers(vec![alice_node_identity]) - .with_consensus_manager(consensus_manager) - .start(data_path) - .await; - - wait_until_online(&[&alice_node, &bob_node]).await; - - (alice_node, bob_node, consensus_manager) -} - -// Creates a network with two Base Nodes where each node in the network knows the other nodes in the network. -#[allow(dead_code)] -pub async fn create_network_with_2_base_nodes_with_config>( - mempool_service_config: MempoolServiceConfig, - liveness_service_config: LivenessConfig, - p2p_config: P2pConfig, +// Creates a network with multiple Base Nodes where each node in the network knows the other nodes in the network. 
+pub async fn create_network_with_multiple_base_nodes_with_config>( + mempool_service_configs: Vec, + liveness_service_configs: Vec, + blockchain_db_configs: Vec, + p2p_configs: Vec, consensus_manager: ConsensusManager, data_path: P, -) -> (NodeInterfaces, NodeInterfaces, ConsensusManager) { - let alice_node_identity = random_node_identity(); - let bob_node_identity = random_node_identity(); - let network = Network::LocalNet; - let (alice_node, consensus_manager) = BaseNodeBuilder::new(network.into()) - .with_node_identity(alice_node_identity.clone()) - .with_mempool_service_config(mempool_service_config.clone()) - .with_liveness_service_config(liveness_service_config.clone()) - .with_p2p_config(p2p_config.clone()) - .with_consensus_manager(consensus_manager) - .start(data_path.as_ref().join("alice").as_os_str().to_str().unwrap()) - .await; - let (bob_node, consensus_manager) = BaseNodeBuilder::new(network.into()) - .with_node_identity(bob_node_identity) - .with_peers(vec![alice_node_identity]) - .with_mempool_service_config(mempool_service_config) - .with_liveness_service_config(liveness_service_config) - .with_p2p_config(p2p_config.clone()) - .with_consensus_manager(consensus_manager) - .start(data_path.as_ref().join("bob").as_os_str().to_str().unwrap()) - .await; - - wait_until_online(&[&alice_node, &bob_node]).await; - - (alice_node, bob_node, consensus_manager) -} - -// Creates a network with three Base Nodes where each node in the network knows the other nodes in the network. 
-#[allow(dead_code)] -pub async fn create_network_with_3_base_nodes( - data_path: &str, -) -> (NodeInterfaces, NodeInterfaces, NodeInterfaces, ConsensusManager) { - let network = Network::LocalNet; - let consensus_manager = ConsensusManagerBuilder::new(network).build().unwrap(); - create_network_with_3_base_nodes_with_config( - MempoolServiceConfig::default(), - LivenessConfig::default(), - consensus_manager, - data_path, - ) - .await -} - -// Creates a network with three Base Nodes where each node in the network knows the other nodes in the network. -#[allow(dead_code)] -pub async fn create_network_with_3_base_nodes_with_config>( - mempool_service_config: MempoolServiceConfig, - liveness_service_config: LivenessConfig, - consensus_manager: ConsensusManager, - data_path: P, -) -> (NodeInterfaces, NodeInterfaces, NodeInterfaces, ConsensusManager) { - let alice_node_identity = random_node_identity(); - let bob_node_identity = random_node_identity(); - let carol_node_identity = random_node_identity(); - let network = Network::LocalNet; - - log::info!( - "Alice = {}, Bob = {}, Carol = {}", - alice_node_identity.node_id().short_str(), - bob_node_identity.node_id().short_str(), - carol_node_identity.node_id().short_str() - ); - let (carol_node, consensus_manager) = BaseNodeBuilder::new(network.into()) - .with_node_identity(carol_node_identity.clone()) - .with_mempool_service_config(mempool_service_config.clone()) - .with_liveness_service_config(liveness_service_config.clone()) - .with_consensus_manager(consensus_manager) - .start(data_path.as_ref().join("carol").as_os_str().to_str().unwrap()) - .await; - let (bob_node, consensus_manager) = BaseNodeBuilder::new(network.into()) - .with_node_identity(bob_node_identity.clone()) - .with_peers(vec![carol_node_identity.clone()]) - .with_mempool_service_config(mempool_service_config.clone()) - .with_liveness_service_config(liveness_service_config.clone()) - .with_consensus_manager(consensus_manager) - 
.start(data_path.as_ref().join("bob").as_os_str().to_str().unwrap()) - .await; - let (alice_node, consensus_manager) = BaseNodeBuilder::new(network.into()) - .with_node_identity(alice_node_identity) - .with_peers(vec![bob_node_identity, carol_node_identity]) - .with_mempool_service_config(mempool_service_config) - .with_liveness_service_config(liveness_service_config) - .with_consensus_manager(consensus_manager) - .start(data_path.as_ref().join("alice").as_os_str().to_str().unwrap()) - .await; + network: Network, +) -> (Vec, ConsensusManager) { + let num_of_nodes = mempool_service_configs.len(); + if num_of_nodes != liveness_service_configs.len() || + num_of_nodes != blockchain_db_configs.len() || + num_of_nodes != p2p_configs.len() + { + panic!("create_network_with_multiple_base_nodes_with_config: All configs must be the same length"); + } + let mut node_identities = Vec::with_capacity(num_of_nodes); + for i in 0..num_of_nodes { + node_identities.push(random_node_identity()); + log::info!( + "node identity {} = `{}`", + i + 1, + node_identities[node_identities.len() - 1].node_id().short_str() + ); + } + let mut node_interfaces = Vec::with_capacity(num_of_nodes); + for i in 0..num_of_nodes { + let (node, _) = BaseNodeBuilder::new(network.into()) + .with_node_identity(node_identities[i].clone()) + .with_peers(node_identities.iter().take(i).cloned().collect()) + .with_mempool_service_config(mempool_service_configs[i].clone()) + .with_liveness_service_config(liveness_service_configs[i].clone()) + .with_p2p_config(p2p_configs[i].clone()) + .with_consensus_manager(consensus_manager.clone()) + .start( + data_path.as_ref().join(i.to_string()).as_os_str().to_str().unwrap(), + blockchain_db_configs[i], + ) + .await; + node_interfaces.push(node); + } - wait_until_online(&[&alice_node, &bob_node, &carol_node]).await; + let node_interface_refs = node_interfaces.iter().collect::>(); + wait_until_online(node_interface_refs.as_slice()).await; - (alice_node, bob_node, carol_node, 
consensus_manager) + (node_interfaces, consensus_manager) } // Helper function for creating a random node indentity. @@ -445,15 +379,18 @@ async fn setup_base_node_services( blockchain_db.clone().into(), base_node_service, )); - let comms = comms + let mut comms = comms .add_protocol_extension(rpc_server) .spawn_with_transport(MemoryTransport) .await .unwrap(); // Set the public address for tests - comms - .node_identity() - .add_public_address(comms.listening_address().clone()); + let address = comms + .connection_manager_requester() + .wait_until_listening() + .await + .unwrap(); + comms.node_identity().add_public_address(address.bind_address().clone()); let outbound_nci = handles.expect_handle::(); let local_nci = handles.expect_handle::(); diff --git a/base_layer/core/tests/helpers/sync.rs b/base_layer/core/tests/helpers/sync.rs index c3af805031..a15cf7981f 100644 --- a/base_layer/core/tests/helpers/sync.rs +++ b/base_layer/core/tests/helpers/sync.rs @@ -20,7 +20,7 @@ // WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE // USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-use std::time::Duration; +use std::{sync::Arc, time::Duration}; use tari_common::configuration::Network; use tari_common_types::types::HashOutput; @@ -28,19 +28,32 @@ use tari_comms::peer_manager::NodeId; use tari_core::{ base_node::{ chain_metadata_service::PeerChainMetadata, - state_machine_service::states::{BlockSync, HeaderSyncState, StateEvent, StatusInfo}, + state_machine_service::states::{ + BlockSync, + DecideNextSync, + HeaderSyncState, + HorizonStateSync, + StateEvent, + StatusInfo, + }, sync::SyncPeer, BaseNodeStateMachine, BaseNodeStateMachineConfig, SyncValidators, }, blocks::ChainBlock, - chain_storage::DbTransaction, + chain_storage::{BlockchainDatabaseConfig, DbTransaction}, consensus::{ConsensusConstantsBuilder, ConsensusManager, ConsensusManagerBuilder}, mempool::MempoolServiceConfig, proof_of_work::{randomx_factory::RandomXFactory, Difficulty}, test_helpers::blockchain::TempDatabase, - transactions::key_manager::{create_memory_db_key_manager, MemoryDbKeyManager}, + transactions::{ + key_manager::{create_memory_db_key_manager, MemoryDbKeyManager}, + tari_amount::T, + test_helpers::schema_to_transaction, + transaction_components::{Transaction, WalletOutput}, + }, + txn_schema, validation::mocks::MockValidator, }; use tari_p2p::{services::liveness::LivenessConfig, P2pConfig}; @@ -50,11 +63,12 @@ use tokio::sync::{broadcast, watch}; use crate::helpers::{ block_builders::{append_block, create_genesis_block}, - nodes::{create_network_with_2_base_nodes_with_config, NodeInterfaces}, + nodes::{create_network_with_multiple_base_nodes_with_config, NodeInterfaces}, }; static EMISSION: [u64; 2] = [10, 10]; +/// Helper function to initialize header sync with a single peer pub fn initialize_sync_headers_with_ping_pong_data( local_node_interfaces: &NodeInterfaces, peer_node_interfaces: &NodeInterfaces, @@ -69,6 +83,7 @@ pub fn initialize_sync_headers_with_ping_pong_data( ) } +/// Helper function to initialize header sync with a single peer pub async fn 
sync_headers_execute( state_machine: &mut BaseNodeStateMachine, header_sync: &mut HeaderSyncState, @@ -76,6 +91,7 @@ pub async fn sync_headers_execute( header_sync.next_event(state_machine).await } +/// Helper function to initialize block sync with a single peer pub fn initialize_sync_blocks(peer_node_interfaces: &NodeInterfaces) -> BlockSync { BlockSync::from(vec![SyncPeer::from(PeerChainMetadata::new( peer_node_interfaces.node_identity.node_id().clone(), @@ -84,6 +100,7 @@ pub fn initialize_sync_blocks(peer_node_interfaces: &NodeInterfaces) -> BlockSyn ))]) } +/// Helper function to initialize block sync with a single peer pub async fn sync_blocks_execute( state_machine: &mut BaseNodeStateMachine, block_sync: &mut BlockSync, @@ -91,67 +108,108 @@ pub async fn sync_blocks_execute( block_sync.next_event(state_machine).await } -pub async fn create_network_with_local_and_peer_nodes() -> ( - BaseNodeStateMachine, - NodeInterfaces, - NodeInterfaces, +/// Helper function to decide what to do next +pub async fn decide_horizon_sync( + local_state_machine: &mut BaseNodeStateMachine, + local_header_sync: HeaderSyncState, +) -> StateEvent { + let mut next_sync = DecideNextSync::from(local_header_sync.clone()); + next_sync.next_event(local_state_machine).await +} + +/// Helper function to initialize horizon state sync with a single peer +pub fn initialize_horizon_sync_without_header_sync(peer_node_interfaces: &NodeInterfaces) -> HorizonStateSync { + HorizonStateSync::from(vec![SyncPeer::from(PeerChainMetadata::new( + peer_node_interfaces.node_identity.node_id().clone(), + peer_node_interfaces.blockchain_db.get_chain_metadata().unwrap(), + None, + ))]) +} + +/// Helper function to initialize horizon state sync with a single peer +pub async fn horizon_sync_execute( + state_machine: &mut BaseNodeStateMachine, + horizon_sync: &mut HorizonStateSync, +) -> StateEvent { + horizon_sync.next_event(state_machine).await +} + +/// Helper function to create a network with multiple nodes 
+pub async fn create_network_with_multiple_nodes( + blockchain_db_configs: Vec, +) -> ( + Vec>, + Vec, ChainBlock, ConsensusManager, MemoryDbKeyManager, + WalletOutput, ) { + let num_nodes = blockchain_db_configs.len(); + if num_nodes < 2 { + panic!("Must have at least 2 nodes"); + } let network = Network::LocalNet; let temp_dir = tempdir().unwrap(); let key_manager = create_memory_db_key_manager(); let consensus_constants = ConsensusConstantsBuilder::new(network) .with_emission_amounts(100_000_000.into(), &EMISSION, 100.into()) .build(); - let (initial_block, _) = create_genesis_block(&consensus_constants, &key_manager).await; + let (initial_block, coinbase_wallet_output) = create_genesis_block(&consensus_constants, &key_manager).await; let consensus_manager = ConsensusManagerBuilder::new(network) .add_consensus_constants(consensus_constants) .with_block(initial_block.clone()) .build() .unwrap(); - let (local_node, peer_node, consensus_manager) = create_network_with_2_base_nodes_with_config( - MempoolServiceConfig::default(), - LivenessConfig { - auto_ping_interval: Some(Duration::from_millis(100)), - ..Default::default() - }, - P2pConfig::default(), + let (node_interfaces, consensus_manager) = create_network_with_multiple_base_nodes_with_config( + vec![MempoolServiceConfig::default(); num_nodes], + vec![ + LivenessConfig { + auto_ping_interval: Some(Duration::from_millis(100)), + ..Default::default() + }; + num_nodes + ], + blockchain_db_configs, + vec![P2pConfig::default(); num_nodes], consensus_manager, temp_dir.path().to_str().unwrap(), + network, ) .await; let shutdown = Shutdown::new(); - let (state_change_event_publisher, _) = broadcast::channel(10); - let (status_event_sender, _status_event_receiver) = watch::channel(StatusInfo::new()); - // Alice needs a state machine for header sync - let local_state_machine = BaseNodeStateMachine::new( - local_node.blockchain_db.clone().into(), - local_node.local_nci.clone(), - local_node.comms.connectivity(), - 
local_node.comms.peer_manager(), - local_node.chain_metadata_handle.get_event_stream(), - BaseNodeStateMachineConfig::default(), - SyncValidators::new(MockValidator::new(true), MockValidator::new(true)), - status_event_sender, - state_change_event_publisher, - RandomXFactory::default(), - consensus_manager.clone(), - shutdown.to_signal(), - ); + let mut state_machines = Vec::with_capacity(num_nodes); + for node_interface in node_interfaces.iter().take(num_nodes) { + let (state_change_event_publisher, _) = broadcast::channel(10); + let (status_event_sender, _status_event_receiver) = watch::channel(StatusInfo::new()); + state_machines.push(BaseNodeStateMachine::new( + node_interface.blockchain_db.clone().into(), + node_interface.local_nci.clone(), + node_interface.comms.connectivity(), + node_interface.comms.peer_manager(), + node_interface.chain_metadata_handle.get_event_stream(), + BaseNodeStateMachineConfig::default(), + SyncValidators::new(MockValidator::new(true), MockValidator::new(true)), + status_event_sender, + state_change_event_publisher, + RandomXFactory::default(), + consensus_manager.clone(), + shutdown.to_signal(), + )); + } ( - local_state_machine, - local_node, - peer_node, + state_machines, + node_interfaces, initial_block, consensus_manager, key_manager, + coinbase_wallet_output, ) } +/// Helper enum to specify what to delete #[allow(dead_code)] #[derive(Debug)] pub enum WhatToDelete { @@ -169,12 +227,12 @@ fn delete_block(txn: &mut DbTransaction, node: &NodeInterfaces, blocks: &[ChainB blocks[index + 1].height(), blocks[index + 1].accumulated_data().hash, blocks[index + 1].accumulated_data().total_accumulated_difficulty, - *node.blockchain_db.get_chain_metadata().unwrap().best_block(), + *node.blockchain_db.get_chain_metadata().unwrap().best_block_hash(), blocks[index + 1].to_chain_header().timestamp(), ); } -// Delete blocks and headers in reverse order; the first block in the slice wil not be deleted +/// Delete blocks and headers in reverse 
order; the first block in the slice wil not be deleted pub fn delete_some_blocks_and_headers( blocks_with_anchor: &[ChainBlock], instruction: WhatToDelete, @@ -229,6 +287,7 @@ pub fn delete_some_blocks_and_headers( } } +/// Set the best block in the blockchain_db #[allow(dead_code)] pub fn set_best_block(block: &ChainBlock, previous_block_hash: &HashOutput, node: &NodeInterfaces) { let mut txn = DbTransaction::new(); @@ -242,47 +301,59 @@ pub fn set_best_block(block: &ChainBlock, previous_block_hash: &HashOutput, node node.blockchain_db.write(txn).unwrap(); } +/// Add some existing blocks to the blockchain_db pub fn add_some_existing_blocks(blocks: &[ChainBlock], node: &NodeInterfaces) { for block in blocks { let _res = node.blockchain_db.add_block(block.block().clone().into()).unwrap(); } } -// Return blocks added, including the start block +/// Return blocks and coinbases added, including the start block and coinbase pub async fn create_and_add_some_blocks( node: &NodeInterfaces, start_block: &ChainBlock, + start_coinbase: &WalletOutput, number_of_blocks: usize, consensus_manager: &ConsensusManager, key_manager: &MemoryDbKeyManager, difficulties: &[u64], -) -> Vec { - if number_of_blocks != difficulties.len() { + transactions: &Option>>, +) -> (Vec, Vec) { + let transactions = if let Some(val) = transactions { + val.clone() + } else { + vec![vec![]; number_of_blocks] + }; + if number_of_blocks != difficulties.len() || number_of_blocks != transactions.len() { panic!( - "Number of blocks ({}) and difficulties length ({}) must be equal", + "Number of blocks ({}), transactions length ({}) and difficulties length ({}) must be equal", number_of_blocks, + transactions.len(), difficulties.len() ); } let mut blocks = vec![start_block.clone()]; + let mut coinbases = vec![start_coinbase.clone()]; let mut prev_block = start_block.clone(); - for item in difficulties.iter().take(number_of_blocks) { - prev_block = append_block( + for (item, txns) in 
difficulties.iter().zip(transactions.iter()) { + let (new_block, coinbase) = append_block( &node.blockchain_db, &prev_block, - vec![], + txns.clone(), consensus_manager, Difficulty::from_u64(*item).unwrap(), key_manager, ) .await .unwrap(); - blocks.push(prev_block.clone()); + prev_block = new_block.clone(); + blocks.push(new_block.clone()); + coinbases.push(coinbase.clone()); } - blocks + (blocks, coinbases) } -// We give some time for the peer to be banned as it is an async process +/// We give some time for the peer to be banned as it is an async process pub async fn wait_for_is_peer_banned(this_node: &NodeInterfaces, peer_node_id: &NodeId, seconds: u64) -> bool { let interval_ms = 100; let intervals = seconds * 1000 / interval_ms; @@ -300,3 +371,143 @@ pub async fn wait_for_is_peer_banned(this_node: &NodeInterfaces, peer_node_id: & } false } + +/// Condensed format of the state machine state for display +pub fn state_event(event: &StateEvent) -> String { + match event { + StateEvent::Initialized => "Initialized".to_string(), + StateEvent::HeadersSynchronized(_, _) => "HeadersSynchronized".to_string(), + StateEvent::HeaderSyncFailed(_) => "HeaderSyncFailed".to_string(), + StateEvent::ProceedToHorizonSync(_) => "ProceedToHorizonSync".to_string(), + StateEvent::ProceedToBlockSync(_) => "ProceedToBlockSync".to_string(), + StateEvent::HorizonStateSynchronized => "HorizonStateSynchronized".to_string(), + StateEvent::HorizonStateSyncFailure => "HorizonStateSyncFailure".to_string(), + StateEvent::BlocksSynchronized => "BlocksSynchronized".to_string(), + StateEvent::BlockSyncFailed => "BlockSyncFailed".to_string(), + StateEvent::FallenBehind(_) => "FallenBehind".to_string(), + StateEvent::NetworkSilence => "NetworkSilence".to_string(), + StateEvent::FatalError(_) => "FatalError".to_string(), + StateEvent::Continue => "Continue".to_string(), + StateEvent::UserQuit => "UserQuit".to_string(), + } +} + +/// Return blocks and coinbases added, including the start block and 
coinbase +pub async fn create_block_chain_with_transactions( + node: &NodeInterfaces, + initial_block: &ChainBlock, + initial_coinbase: &WalletOutput, + consensus_manager: &ConsensusManager, + key_manager: &MemoryDbKeyManager, + intermediate_height: u64, + number_of_blocks: usize, + spend_genesis_coinbase_in_block: usize, + follow_up_transaction_in_block: usize, + follow_up_coinbases_to_spend: usize, +) -> (Vec, Vec) { + assert!(spend_genesis_coinbase_in_block > 1); + assert!((spend_genesis_coinbase_in_block as u64) < intermediate_height); + assert!(follow_up_transaction_in_block > spend_genesis_coinbase_in_block + 1); + assert!((follow_up_transaction_in_block as u64) > intermediate_height); + assert!(number_of_blocks as u64 > follow_up_transaction_in_block as u64 + intermediate_height + 1); + let add_blocks_a = spend_genesis_coinbase_in_block - 1; + let add_blocks_b = follow_up_transaction_in_block - 1 - add_blocks_a; + let add_blocks_c = number_of_blocks - add_blocks_a - add_blocks_b; + assert!(follow_up_coinbases_to_spend > add_blocks_a); + assert!(follow_up_coinbases_to_spend < follow_up_transaction_in_block); + + // Create a blockchain with some blocks to enable spending the genesys coinbase early on + let (blocks_a, coinbases_a) = create_and_add_some_blocks( + node, + initial_block, + initial_coinbase, + add_blocks_a, + consensus_manager, + key_manager, + &vec![3; add_blocks_a], + &None, + ) + .await; + assert_eq!(node.blockchain_db.get_height().unwrap(), add_blocks_a as u64); + assert_eq!( + node.blockchain_db.fetch_last_header().unwrap().height, + add_blocks_a as u64 + ); + // Add a transaction to spend the genesys coinbase + let schema = txn_schema!( + from: vec![initial_coinbase.clone()], + to: vec![1 * T; 10] + ); + let (txns_genesis_coinbase, _outputs) = schema_to_transaction(&[schema], key_manager).await; + let mut txns_all = vec![vec![]; add_blocks_b]; + txns_all[0] = txns_genesis_coinbase + .into_iter() + .map(|t| Arc::try_unwrap(t).unwrap()) + 
.collect::>(); + // Expand the blockchain with the genesys coinbase spend transaction + let (blocks_b, coinbases_b) = create_and_add_some_blocks( + node, + &blocks_a[blocks_a.len() - 1], + &coinbases_a[coinbases_a.len() - 1], + add_blocks_b, + consensus_manager, + key_manager, + &vec![3; add_blocks_b], + &Some(txns_all), + ) + .await; + assert_eq!( + node.blockchain_db.get_height().unwrap(), + (add_blocks_a + add_blocks_b) as u64 + ); + assert_eq!( + node.blockchain_db.fetch_last_header().unwrap().height, + (add_blocks_a + add_blocks_b) as u64 + ); + // Add a transaction to spend some more coinbase outputs + let mut coinbases_to_spend = Vec::with_capacity(follow_up_coinbases_to_spend); + for coinbase in coinbases_a.iter().skip(1) + // Skip the genesys coinbase + { + coinbases_to_spend.push(coinbase.clone()); + } + for coinbase in coinbases_b + .iter() + .skip(1) // Skip the last coinbase of the previously added blocks + .take(follow_up_coinbases_to_spend - coinbases_to_spend.len()) + { + coinbases_to_spend.push(coinbase.clone()); + } + assert_eq!(coinbases_to_spend.len(), follow_up_coinbases_to_spend); + let schema = txn_schema!( + from: coinbases_to_spend, + to: vec![1 * T; 20] + ); + let (txns_additional_coinbases, _outputs) = schema_to_transaction(&[schema], key_manager).await; + let mut txns_all = vec![vec![]; add_blocks_c]; + txns_all[0] = txns_additional_coinbases + .into_iter() + .map(|t| Arc::try_unwrap(t).unwrap()) + .collect::>(); + // Expand the blockchain with the spend transaction + let (blocks_c, coinbases_c) = create_and_add_some_blocks( + node, + &blocks_b[blocks_b.len() - 1], + &coinbases_b[coinbases_b.len() - 1], + add_blocks_c, + consensus_manager, + key_manager, + &vec![3; add_blocks_c], + &Some(txns_all), + ) + .await; + assert_eq!(node.blockchain_db.get_height().unwrap(), number_of_blocks as u64); + assert_eq!( + node.blockchain_db.fetch_last_header().unwrap().height, + number_of_blocks as u64 + ); + let blocks = [&blocks_a[..], 
&blocks_b[1..], &blocks_c[1..]].concat(); + let coinbases = [&coinbases_a[..], &coinbases_b[1..], &coinbases_c[1..]].concat(); + + (blocks, coinbases) +} diff --git a/base_layer/core/tests/tests/base_node_rpc.rs b/base_layer/core/tests/tests/base_node_rpc.rs index ec11eff7c3..bc8f0c39fb 100644 --- a/base_layer/core/tests/tests/base_node_rpc.rs +++ b/base_layer/core/tests/tests/base_node_rpc.rs @@ -41,6 +41,7 @@ use tari_core::{ sync::rpc::BaseNodeSyncRpcService, }, blocks::ChainBlock, + chain_storage::BlockchainDatabaseConfig, consensus::{ConsensusConstantsBuilder, ConsensusManager, ConsensusManagerBuilder, NetworkConsensus}, proto::{ base_node::{FetchMatchingUtxos, Signatures as SignaturesProto, SyncUtxosByBlockRequest}, @@ -94,7 +95,7 @@ async fn setup() -> ( .unwrap(); let (mut base_node, _consensus_manager) = BaseNodeBuilder::new(network) .with_consensus_manager(consensus_manager.clone()) - .start(temp_dir.path().to_str().unwrap()) + .start(temp_dir.path().to_str().unwrap(), BlockchainDatabaseConfig::default()) .await; base_node.mock_base_node_state_machine.publish_status(StatusInfo { bootstrapped: true, @@ -160,7 +161,7 @@ async fn test_base_node_wallet_rpc() { let resp = TxQueryResponse::try_from(resp).unwrap(); assert_eq!(resp.confirmations, 0); - assert_eq!(resp.block_hash, None); + assert_eq!(resp.best_block_hash, None); assert_eq!(resp.location, TxLocation::NotStored); // First lets try submit tx2 which will be an orphan tx @@ -178,7 +179,7 @@ async fn test_base_node_wallet_rpc() { let resp = TxQueryResponse::try_from(service.transaction_query(req).await.unwrap().into_message()).unwrap(); assert_eq!(resp.confirmations, 0); - assert_eq!(resp.block_hash, None); + assert_eq!(resp.best_block_hash, None); assert_eq!(resp.location, TxLocation::NotStored); // Now submit a block with Tx1 in it so that Tx2 is no longer an orphan @@ -201,7 +202,7 @@ async fn test_base_node_wallet_rpc() { let resp = 
TxQueryResponse::try_from(service.transaction_query(req).await.unwrap().into_message()).unwrap(); assert_eq!(resp.confirmations, 0); - assert_eq!(resp.block_hash, None); + assert_eq!(resp.best_block_hash, None); assert_eq!(resp.location, TxLocation::InMempool); // Now if we submit Tx1 is should return as rejected as AlreadyMined as Tx1's kernel is present @@ -245,7 +246,7 @@ async fn test_base_node_wallet_rpc() { let resp = TxQueryResponse::try_from(service.transaction_query(req).await.unwrap().into_message()).unwrap(); assert_eq!(resp.confirmations, 1); - assert_eq!(resp.block_hash, Some(block1.hash())); + assert_eq!(resp.best_block_hash, Some(block1.hash())); assert_eq!(resp.location, TxLocation::Mined); // try a batch query let msg = SignaturesProto { diff --git a/base_layer/core/tests/tests/block_sync.rs b/base_layer/core/tests/tests/block_sync.rs index 9011a4b276..ae22cb32b6 100644 --- a/base_layer/core/tests/tests/block_sync.rs +++ b/base_layer/core/tests/tests/block_sync.rs @@ -20,21 +20,40 @@ // WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE // USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-use tari_core::base_node::state_machine_service::states::StateEvent; +use tari_core::{base_node::state_machine_service::states::StateEvent, chain_storage::BlockchainDatabaseConfig}; -use crate::helpers::{sync, sync::WhatToDelete}; +use crate::helpers::{ + sync, + sync::{state_event, WhatToDelete}, +}; #[tokio::test(flavor = "multi_thread", worker_threads = 1)] async fn test_block_sync_happy_path() { // env_logger::init(); // Set `$env:RUST_LOG = "trace"` // Create the network with Alice node and Bob node - let (mut alice_state_machine, alice_node, bob_node, initial_block, consensus_manager, key_manager) = - sync::create_network_with_local_and_peer_nodes().await; + let (mut state_machines, mut peer_nodes, initial_block, consensus_manager, key_manager, initial_coinbase) = + sync::create_network_with_multiple_nodes(vec![ + BlockchainDatabaseConfig::default(), + BlockchainDatabaseConfig::default(), + ]) + .await; + let mut alice_state_machine = state_machines.remove(0); + let alice_node = peer_nodes.remove(0); + let bob_node = peer_nodes.remove(0); // Add some block to Bob's chain - let _bob_blocks = - sync::create_and_add_some_blocks(&bob_node, &initial_block, 5, &consensus_manager, &key_manager, &[3; 5]).await; + let (_blocks, _coinbases) = sync::create_and_add_some_blocks( + &bob_node, + &initial_block, + &initial_coinbase, + 5, + &consensus_manager, + &key_manager, + &[3; 5], + &None, + ) + .await; assert_eq!(bob_node.blockchain_db.get_height().unwrap(), 5); // Alice attempts header sync @@ -78,17 +97,26 @@ async fn test_block_sync_peer_supplies_no_blocks_with_ban() { // env_logger::init(); // Set `$env:RUST_LOG = "trace"` // Create the network with Alice node and Bob node - let (mut alice_state_machine, alice_node, bob_node, initial_block, consensus_manager, key_manager) = - sync::create_network_with_local_and_peer_nodes().await; + let (mut state_machines, mut peer_nodes, initial_block, consensus_manager, key_manager, initial_coinbase) = + 
sync::create_network_with_multiple_nodes(vec![ + BlockchainDatabaseConfig::default(), + BlockchainDatabaseConfig::default(), + ]) + .await; + let mut alice_state_machine = state_machines.remove(0); + let alice_node = peer_nodes.remove(0); + let bob_node = peer_nodes.remove(0); // Add some block to Bob's chain - let blocks = sync::create_and_add_some_blocks( + let (blocks, _coinbases) = sync::create_and_add_some_blocks( &bob_node, &initial_block, + &initial_coinbase, 10, &consensus_manager, &key_manager, &[3; 10], + &None, ) .await; assert_eq!(bob_node.blockchain_db.get_height().unwrap(), 10); @@ -129,17 +157,26 @@ async fn test_block_sync_peer_supplies_not_all_blocks_with_ban() { // env_logger::init(); // Set `$env:RUST_LOG = "trace"` // Create the network with Alice node and Bob node - let (mut alice_state_machine, alice_node, bob_node, initial_block, consensus_manager, key_manager) = - sync::create_network_with_local_and_peer_nodes().await; + let (mut state_machines, mut peer_nodes, initial_block, consensus_manager, key_manager, initial_coinbase) = + sync::create_network_with_multiple_nodes(vec![ + BlockchainDatabaseConfig::default(), + BlockchainDatabaseConfig::default(), + ]) + .await; + let mut alice_state_machine = state_machines.remove(0); + let alice_node = peer_nodes.remove(0); + let bob_node = peer_nodes.remove(0); // Add some block to Bob's chain - let blocks = sync::create_and_add_some_blocks( + let (blocks, _coinbases) = sync::create_and_add_some_blocks( &bob_node, &initial_block, + &initial_coinbase, 10, &consensus_manager, &key_manager, &[3; 10], + &None, ) .await; assert_eq!(bob_node.blockchain_db.get_height().unwrap(), 10); @@ -174,3 +211,176 @@ async fn test_block_sync_peer_supplies_not_all_blocks_with_ban() { // Bob will be banned assert!(sync::wait_for_is_peer_banned(&alice_node, bob_node.node_identity.node_id(), 1).await); } + +#[allow(clippy::too_many_lines)] +#[tokio::test(flavor = "multi_thread", worker_threads = 1)] +async fn 
test_block_sync_with_conbase_spend_happy_path_1() { + //` cargo test --release --test core_integration_tests + //` tests::block_sync::test_block_sync_with_conbase_spend_happy_path_1 > .\target\output.txt 2>&1 + // env_logger::init(); // Set `$env:RUST_LOG = "trace"` + + // Create the network with Bob (archival node) and Carol (archival node) + let (mut state_machines, mut peer_nodes, initial_block, consensus_manager, key_manager, initial_coinbase) = + sync::create_network_with_multiple_nodes(vec![ + // Carol is an archival node + BlockchainDatabaseConfig::default(), + // Bob is an archival node + BlockchainDatabaseConfig::default(), + ]) + .await; + let mut carol_state_machine = state_machines.remove(0); + let carol_node = peer_nodes.remove(0); + let bob_node = peer_nodes.remove(0); + + // Create a blockchain that spends the genesis coinbase early on and then later spends some more coinbase outputs + let follow_up_coinbases_to_spend = 4; + let (blocks, _coinbases) = sync::create_block_chain_with_transactions( + &bob_node, + &initial_block, + &initial_coinbase, + &consensus_manager, + &key_manager, + 3, + 10, // > follow_up_transaction_in_block + intermediate_height + 1 + 2, // < intermediate_height, + 5, // > intermediate_height + follow_up_coinbases_to_spend, // > spend_genesis_coinbase_in_block - 1, < follow_up_transaction_in_block + ) + .await; + + // Now rewind Bob's chain to height 1 (> pruning_horizon, < follow_up_transaction_in_block) + sync::delete_some_blocks_and_headers(&blocks[1..=10], WhatToDelete::BlocksAndHeaders, &bob_node); + assert_eq!(bob_node.blockchain_db.get_height().unwrap(), 1); + assert_eq!(bob_node.blockchain_db.fetch_last_header().unwrap().height, 1); + println!( + "\nBob's blockchain height: {}\n", + bob_node.blockchain_db.get_height().unwrap() + ); + + // 1. Carol attempts header sync from Bob + println!("\n1. 
Carol attempts header sync sync from Bob\n"); + + let mut header_sync_carol_from_bob = sync::initialize_sync_headers_with_ping_pong_data(&carol_node, &bob_node); + let event = sync::sync_headers_execute(&mut carol_state_machine, &mut header_sync_carol_from_bob).await; + let carol_header_height = carol_node.blockchain_db.fetch_last_header().unwrap().height; + println!("Event: {} to header {}", state_event(&event), carol_header_height); + assert_eq!(carol_header_height, 1); + + // 2. Carol attempts block sync from Bob to the tip (to height 1) + println!("\n2. Carol attempts block sync from Bob to the tip (to height 1)\n"); + + let mut block_sync = sync::initialize_sync_blocks(&bob_node); + let event = sync::sync_blocks_execute(&mut carol_state_machine, &mut block_sync).await; + println!( + "Event: {} to block {}", + state_event(&event), + carol_node.blockchain_db.get_height().unwrap() + ); + assert_eq!(event, StateEvent::BlocksSynchronized); + assert_eq!( + carol_node.blockchain_db.get_height().unwrap(), + carol_node.blockchain_db.fetch_last_header().unwrap().height + ); + // Bob will not be banned + assert!(!sync::wait_for_is_peer_banned(&carol_node, bob_node.node_identity.node_id(), 1).await); + + // Give Bob some more blocks + sync::add_some_existing_blocks(&blocks[2..=2], &bob_node); + assert_eq!(bob_node.blockchain_db.get_height().unwrap(), 2); + assert_eq!(bob_node.blockchain_db.fetch_last_header().unwrap().height, 2); + println!( + "\nBob's blockchain height: {}\n", + bob_node.blockchain_db.get_height().unwrap() + ); + + // 3. Carol attempts header sync sync from Bob + println!("\n3. 
Carol attempts header sync sync from Bob\n"); + + let mut header_sync_carol_from_bob = sync::initialize_sync_headers_with_ping_pong_data(&carol_node, &bob_node); + let event = sync::sync_headers_execute(&mut carol_state_machine, &mut header_sync_carol_from_bob).await; + let carol_header_height = carol_node.blockchain_db.fetch_last_header().unwrap().height; + println!("Event: {} to header {}", state_event(&event), carol_header_height); + assert_eq!(carol_header_height, 2); + + // 4. Carol attempts block sync from Bob to the tip (to height 2) + println!("\n4. Carol attempts block sync from Bob to the tip (to height 2)\n"); + + let mut block_sync = sync::initialize_sync_blocks(&bob_node); + let event = sync::sync_blocks_execute(&mut carol_state_machine, &mut block_sync).await; + println!( + "Event: {} to block {}", + state_event(&event), + carol_node.blockchain_db.get_height().unwrap() + ); + assert_eq!(event, StateEvent::BlocksSynchronized); + assert_eq!( + carol_node.blockchain_db.get_height().unwrap(), + carol_node.blockchain_db.fetch_last_header().unwrap().height + ); + // Bob will not be banned + assert!(!sync::wait_for_is_peer_banned(&carol_node, bob_node.node_identity.node_id(), 1).await); +} + +#[allow(clippy::too_many_lines)] +#[tokio::test(flavor = "multi_thread", worker_threads = 1)] +async fn test_block_sync_with_conbase_spend_happy_path_2() { + //` cargo test --release --test core_integration_tests + //` tests::horizon_sync::test_block_sync_with_conbase_spend_happy_path_2 > .\target\output.txt 2>&1 + // env_logger::init(); // Set `$env:RUST_LOG = "trace"` + + // Create the network with Bob (archival node) and Carol (archival node) + let (mut state_machines, mut peer_nodes, initial_block, consensus_manager, key_manager, initial_coinbase) = + sync::create_network_with_multiple_nodes(vec![ + // Carol is an archival node + BlockchainDatabaseConfig::default(), + // Bob is an archival node + BlockchainDatabaseConfig::default(), + ]) + .await; + let mut 
carol_state_machine = state_machines.remove(0); + let carol_node = peer_nodes.remove(0); + let bob_node = peer_nodes.remove(0); + + // Create a blockchain that spends the genesis coinbase early on and then later spends some more coinbase outputs + let follow_up_coinbases_to_spend = 4; + let (_blocks, _coinbases) = sync::create_block_chain_with_transactions( + &bob_node, + &initial_block, + &initial_coinbase, + &consensus_manager, + &key_manager, + 3, + 10, // > follow_up_transaction_in_block + intermediate_height + 1 + 2, // < intermediate_height, + 5, // > intermediate_height + follow_up_coinbases_to_spend, // > spend_genesis_coinbase_in_block - 1, < follow_up_transaction_in_block + ) + .await; + + // 1. Carol attempts header sync from Bob + println!("\n1. Carol attempts header sync sync from Bob\n"); + + let mut header_sync_carol_from_bob = sync::initialize_sync_headers_with_ping_pong_data(&carol_node, &bob_node); + let event = sync::sync_headers_execute(&mut carol_state_machine, &mut header_sync_carol_from_bob).await; + let carol_header_height = carol_node.blockchain_db.fetch_last_header().unwrap().height; + println!("Event: {} to header {}", state_event(&event), carol_header_height); + assert_eq!(carol_header_height, 10); + + // 2. Carol attempts block sync from Bob to the tip (to height 10) + println!("\n2. 
Carol attempts block sync from Bob to the tip (to height 10)\n"); + + let mut block_sync = sync::initialize_sync_blocks(&bob_node); + let event = sync::sync_blocks_execute(&mut carol_state_machine, &mut block_sync).await; + println!( + "Event: {} to block {}", + state_event(&event), + carol_node.blockchain_db.get_height().unwrap() + ); + assert_eq!(event, StateEvent::BlocksSynchronized); + assert_eq!( + carol_node.blockchain_db.get_height().unwrap(), + carol_node.blockchain_db.fetch_last_header().unwrap().height + ); + // Bob will not be banned + assert!(!sync::wait_for_is_peer_banned(&carol_node, bob_node.node_identity.node_id(), 1).await); +} diff --git a/base_layer/core/tests/tests/header_sync.rs b/base_layer/core/tests/tests/header_sync.rs index 5745f24125..7e5125b75d 100644 --- a/base_layer/core/tests/tests/header_sync.rs +++ b/base_layer/core/tests/tests/header_sync.rs @@ -20,7 +20,10 @@ // WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE // USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-use tari_core::base_node::{state_machine_service::states::StateEvent, sync::HeaderSyncStatus}; +use tari_core::{ + base_node::{state_machine_service::states::StateEvent, sync::HeaderSyncStatus}, + chain_storage::BlockchainDatabaseConfig, +}; use crate::helpers::{sync, sync::WhatToDelete}; @@ -30,12 +33,28 @@ async fn test_header_sync_happy_path() { // env_logger::init(); // Set `$env:RUST_LOG = "trace"` // Create the network with Alice node and Bob node - let (mut alice_state_machine, alice_node, bob_node, initial_block, consensus_manager, key_manager) = - sync::create_network_with_local_and_peer_nodes().await; + let (mut state_machines, mut peer_nodes, initial_block, consensus_manager, key_manager, initial_coinbase) = + sync::create_network_with_multiple_nodes(vec![ + BlockchainDatabaseConfig::default(), + BlockchainDatabaseConfig::default(), + ]) + .await; + let mut alice_state_machine = state_machines.remove(0); + let alice_node = peer_nodes.remove(0); + let bob_node = peer_nodes.remove(0); // Add 1 block to Bob's chain - let bob_blocks = - sync::create_and_add_some_blocks(&bob_node, &initial_block, 1, &consensus_manager, &key_manager, &[3]).await; + let (bob_blocks, bob_coinbases) = sync::create_and_add_some_blocks( + &bob_node, + &initial_block, + &initial_coinbase, + 1, + &consensus_manager, + &key_manager, + &[3], + &None, + ) + .await; assert_eq!(bob_node.blockchain_db.get_height().unwrap(), 1); // Alice attempts header sync, still on the genesys block, headers will be lagging @@ -74,8 +93,17 @@ async fn test_header_sync_happy_path() { } // Bob adds another block - let _bob_blocks = - sync::create_and_add_some_blocks(&bob_node, &bob_blocks[1], 1, &consensus_manager, &key_manager, &[3]).await; + let (_blocks, _coinbases) = sync::create_and_add_some_blocks( + &bob_node, + &bob_blocks[1], + &bob_coinbases[1], + 1, + &consensus_manager, + &key_manager, + &[3], + &None, + ) + .await; assert_eq!(bob_node.blockchain_db.get_height().unwrap(), 2); // Alice attempts 
header sync, still on the genesys block, headers will be lagging @@ -102,25 +130,56 @@ async fn test_header_sync_with_fork_happy_path() { // env_logger::init(); // Set `$env:RUST_LOG = "trace"` // Create the network with Alice node and Bob node - let (mut alice_state_machine, alice_node, bob_node, initial_block, consensus_manager, key_manager) = - sync::create_network_with_local_and_peer_nodes().await; + let (mut state_machines, mut peer_nodes, initial_block, consensus_manager, key_manager, initial_coinbase) = + sync::create_network_with_multiple_nodes(vec![ + BlockchainDatabaseConfig::default(), + BlockchainDatabaseConfig::default(), + ]) + .await; + let mut alice_state_machine = state_machines.remove(0); + let alice_node = peer_nodes.remove(0); + let bob_node = peer_nodes.remove(0); // Add 1 block to Bob's chain - let bob_blocks = - sync::create_and_add_some_blocks(&bob_node, &initial_block, 1, &consensus_manager, &key_manager, &[3]).await; + let (bob_blocks, bob_coinbases) = sync::create_and_add_some_blocks( + &bob_node, + &initial_block, + &initial_coinbase, + 1, + &consensus_manager, + &key_manager, + &[3], + &None, + ) + .await; assert_eq!(bob_node.blockchain_db.get_height().unwrap(), 1); // Bob adds another block - let bob_blocks = - sync::create_and_add_some_blocks(&bob_node, &bob_blocks[1], 1, &consensus_manager, &key_manager, &[3]).await; + let (bob_blocks, bob_coinbases) = sync::create_and_add_some_blocks( + &bob_node, + &bob_blocks[1], + &bob_coinbases[1], + 1, + &consensus_manager, + &key_manager, + &[3], + &None, + ) + .await; assert_eq!(bob_node.blockchain_db.get_height().unwrap(), 2); // Alice adds 3 (different) blocks, with POW on par with Bob's chain, but with greater height - let _alice_blocks = - sync::create_and_add_some_blocks(&alice_node, &initial_block, 3, &consensus_manager, &key_manager, &[ - 3, 2, 1, - ]) - .await; + let _alice_blocks = sync::create_and_add_some_blocks( + &alice_node, + &initial_block, + &initial_coinbase, + 3, + 
&consensus_manager, + &key_manager, + &[3, 2, 1], + &None, + ) + .await; assert_eq!(alice_node.blockchain_db.get_height().unwrap(), 3); assert_eq!( alice_node @@ -148,8 +207,17 @@ async fn test_header_sync_with_fork_happy_path() { assert!(!sync::wait_for_is_peer_banned(&alice_node, bob_node.node_identity.node_id(), 1).await); // Bob adds more blocks and draws ahead of Alice - let _bob_blocks = - sync::create_and_add_some_blocks(&bob_node, &bob_blocks[1], 2, &consensus_manager, &key_manager, &[3; 2]).await; + let _blocks = sync::create_and_add_some_blocks( + &bob_node, + &bob_blocks[1], + &bob_coinbases[1], + 2, + &consensus_manager, + &key_manager, + &[3; 2], + &None, + ) + .await; assert_eq!(bob_node.blockchain_db.get_height().unwrap(), 4); // Alice attempts header sync to Bob's chain with higher POW, headers will be lagging with reorg steps @@ -176,17 +244,26 @@ async fn test_header_sync_uneven_headers_and_blocks_happy_path() { // env_logger::init(); // Set `$env:RUST_LOG = "trace"` // Create the network with Alice node and Bob node - let (mut alice_state_machine, alice_node, bob_node, initial_block, consensus_manager, key_manager) = - sync::create_network_with_local_and_peer_nodes().await; + let (mut state_machines, mut peer_nodes, initial_block, consensus_manager, key_manager, initial_coinbase) = + sync::create_network_with_multiple_nodes(vec![ + BlockchainDatabaseConfig::default(), + BlockchainDatabaseConfig::default(), + ]) + .await; + let mut alice_state_machine = state_machines.remove(0); + let alice_node = peer_nodes.remove(0); + let bob_node = peer_nodes.remove(0); // Add blocks and headers to Bob's chain, with more headers than blocks - let blocks = sync::create_and_add_some_blocks( + let (blocks, _coinbases) = sync::create_and_add_some_blocks( &bob_node, &initial_block, + &initial_coinbase, 10, &consensus_manager, &key_manager, &[3; 10], + &None, ) .await; sync::delete_some_blocks_and_headers(&blocks[5..=10], WhatToDelete::Blocks, &bob_node); @@ -224,17 
+301,26 @@ async fn test_header_sync_uneven_headers_and_blocks_peer_lies_about_pow_no_ban() // env_logger::init(); // Set `$env:RUST_LOG = "trace"` // Create the network with Alice node and Bob node - let (mut alice_state_machine, alice_node, bob_node, initial_block, consensus_manager, key_manager) = - sync::create_network_with_local_and_peer_nodes().await; + let (mut state_machines, mut peer_nodes, initial_block, consensus_manager, key_manager, initial_coinbase) = + sync::create_network_with_multiple_nodes(vec![ + BlockchainDatabaseConfig::default(), + BlockchainDatabaseConfig::default(), + ]) + .await; + let mut alice_state_machine = state_machines.remove(0); + let alice_node = peer_nodes.remove(0); + let bob_node = peer_nodes.remove(0); // Add blocks and headers to Bob's chain, with more headers than blocks - let blocks = sync::create_and_add_some_blocks( + let (blocks, _coinbases) = sync::create_and_add_some_blocks( &bob_node, &initial_block, + &initial_coinbase, 10, &consensus_manager, &key_manager, &[3; 10], + &None, ) .await; sync::delete_some_blocks_and_headers(&blocks[5..=10], WhatToDelete::Blocks, &bob_node); @@ -287,12 +373,28 @@ async fn test_header_sync_even_headers_and_blocks_peer_lies_about_pow_with_ban() // env_logger::init(); // Set `$env:RUST_LOG = "trace"` // Create the network with Alice node and Bob node - let (mut alice_state_machine, alice_node, bob_node, initial_block, consensus_manager, key_manager) = - sync::create_network_with_local_and_peer_nodes().await; + let (mut state_machines, mut peer_nodes, initial_block, consensus_manager, key_manager, initial_coinbase) = + sync::create_network_with_multiple_nodes(vec![ + BlockchainDatabaseConfig::default(), + BlockchainDatabaseConfig::default(), + ]) + .await; + let mut alice_state_machine = state_machines.remove(0); + let alice_node = peer_nodes.remove(0); + let bob_node = peer_nodes.remove(0); // Add blocks and headers to Bob's chain - let blocks = - sync::create_and_add_some_blocks(&bob_node, 
&initial_block, 6, &consensus_manager, &key_manager, &[3; 6]).await; + let (blocks, _coinbases) = sync::create_and_add_some_blocks( + &bob_node, + &initial_block, + &initial_coinbase, + 6, + &consensus_manager, + &key_manager, + &[3; 6], + &None, + ) + .await; assert_eq!(bob_node.blockchain_db.get_height().unwrap(), 6); assert_eq!(bob_node.blockchain_db.fetch_last_header().unwrap().height, 6); @@ -333,12 +435,28 @@ async fn test_header_sync_even_headers_and_blocks_peer_metadata_improve_with_reo // env_logger::init(); // Set `$env:RUST_LOG = "trace"` // Create the network with Alice node and Bob node - let (mut alice_state_machine, alice_node, bob_node, initial_block, consensus_manager, key_manager) = - sync::create_network_with_local_and_peer_nodes().await; + let (mut state_machines, mut peer_nodes, initial_block, consensus_manager, key_manager, initial_coinbase) = + sync::create_network_with_multiple_nodes(vec![ + BlockchainDatabaseConfig::default(), + BlockchainDatabaseConfig::default(), + ]) + .await; + let mut alice_state_machine = state_machines.remove(0); + let alice_node = peer_nodes.remove(0); + let bob_node = peer_nodes.remove(0); // Add blocks and headers to Bob's chain - let blocks = - sync::create_and_add_some_blocks(&bob_node, &initial_block, 6, &consensus_manager, &key_manager, &[3; 6]).await; + let (blocks, coinbases) = sync::create_and_add_some_blocks( + &bob_node, + &initial_block, + &initial_coinbase, + 6, + &consensus_manager, + &key_manager, + &[3; 6], + &None, + ) + .await; assert_eq!(bob_node.blockchain_db.get_height().unwrap(), 6); assert_eq!(bob_node.blockchain_db.fetch_last_header().unwrap().height, 6); @@ -351,8 +469,17 @@ async fn test_header_sync_even_headers_and_blocks_peer_metadata_improve_with_reo let mut header_sync = sync::initialize_sync_headers_with_ping_pong_data(&alice_node, &bob_node); // Bob's chain will reorg with improved metadata sync::delete_some_blocks_and_headers(&blocks[4..=6], WhatToDelete::Blocks, &bob_node); - let 
_blocks = - sync::create_and_add_some_blocks(&bob_node, &blocks[4], 3, &consensus_manager, &key_manager, &[3; 3]).await; + let _blocks = sync::create_and_add_some_blocks( + &bob_node, + &blocks[4], + &coinbases[4], + 3, + &consensus_manager, + &key_manager, + &[3; 3], + &None, + ) + .await; assert_eq!(bob_node.blockchain_db.get_height().unwrap(), 7); assert_eq!(bob_node.blockchain_db.fetch_last_header().unwrap().height, 7); let event = sync::sync_headers_execute(&mut alice_state_machine, &mut header_sync).await; diff --git a/base_layer/core/tests/tests/horizon_sync.rs b/base_layer/core/tests/tests/horizon_sync.rs new file mode 100644 index 0000000000..c1be254adf --- /dev/null +++ b/base_layer/core/tests/tests/horizon_sync.rs @@ -0,0 +1,841 @@ +// Copyright 2022. The Tari Project +// +// Redistribution and use in source and binary forms, with or without modification, are permitted provided that the +// following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following +// disclaimer. +// +// 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the +// following disclaimer in the documentation and/or other materials provided with the distribution. +// +// 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote +// products derived from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, +// INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, +// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +// USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +use std::cmp::min; + +use tari_core::{ + base_node::state_machine_service::states::{HorizonStateSync, StateEvent}, + chain_storage::BlockchainDatabaseConfig, +}; + +use crate::helpers::{ + sync, + sync::{decide_horizon_sync, state_event, WhatToDelete}, +}; + +#[allow(clippy::too_many_lines)] +#[tokio::test(flavor = "multi_thread", worker_threads = 1)] +async fn test_initial_horizon_sync_from_archival_node_happy_path() { + //` cargo test --release --test core_integration_tests + //` tests::horizon_sync::test_initial_horizon_sync_from_archival_node_happy_path > .\target\output.txt 2>&1 + // env_logger::init(); // Set `$env:RUST_LOG = "trace"` + + // Create the network with Alice (pruning node) and Bob (archival node) + let pruning_horizon = 5; + let (mut state_machines, mut peer_nodes, initial_block, consensus_manager, key_manager, initial_coinbase) = + sync::create_network_with_multiple_nodes(vec![ + BlockchainDatabaseConfig { + orphan_storage_capacity: 5, + pruning_horizon, + pruning_interval: 5, + track_reorgs: false, + cleanup_orphans_at_startup: false, + }, + BlockchainDatabaseConfig::default(), + ]) + .await; + let mut alice_state_machine = state_machines.remove(0); + let alice_node = peer_nodes.remove(0); + let bob_node = peer_nodes.remove(0); + + // Create a blockchain that spends the genesys coinbase early on and then later spends some more coinbase outputs + let follow_up_coinbases_to_spend = 15; + let (blocks, coinbases) = 
sync::create_block_chain_with_transactions( + &bob_node, + &initial_block, + &initial_coinbase, + &consensus_manager, + &key_manager, + pruning_horizon, + 30, // > follow_up_transaction_in_block + pruning_horizon + 1 + 3, // < pruning_horizon + 16, // > pruning_horizon + follow_up_coinbases_to_spend, // > spend_genesis_coinbase_in_block - 1, < follow_up_transaction_in_block + ) + .await; + + // Now rewind Bob's chain to height 10 (> pruning_horizon, < follow_up_transaction_in_block) + sync::delete_some_blocks_and_headers(&blocks[10..=30], WhatToDelete::BlocksAndHeaders, &bob_node); + assert_eq!(bob_node.blockchain_db.get_height().unwrap(), 10); + assert_eq!(bob_node.blockchain_db.fetch_last_header().unwrap().height, 10); + + // 1. Alice attempts horizon sync without having done header sync + println!("\n1. Alice attempts horizon sync without having done header sync\n"); + + let mut horizon_sync = sync::initialize_horizon_sync_without_header_sync(&bob_node); + let event = sync::horizon_sync_execute(&mut alice_state_machine, &mut horizon_sync).await; + + println!( + "Event: {} to block {}", + state_event(&event), + alice_node.blockchain_db.get_height().unwrap() + ); + assert_eq!(event, StateEvent::HorizonStateSynchronized); + assert_eq!(alice_node.blockchain_db.get_height().unwrap(), 0); + // Bob will not be banned + assert!(!sync::wait_for_is_peer_banned(&alice_node, bob_node.node_identity.node_id(), 1).await); + + // 2. Alice does header sync (to height 10) + println!("\n2. Alice does header sync (to height 10)\n"); + + let mut header_sync = sync::initialize_sync_headers_with_ping_pong_data(&alice_node, &bob_node); + let _event = sync::sync_headers_execute(&mut alice_state_machine, &mut header_sync).await; + assert_eq!(alice_node.blockchain_db.fetch_last_header().unwrap().height, 10); + // Bob will not be banned + assert!(!sync::wait_for_is_peer_banned(&alice_node, bob_node.node_identity.node_id(), 1).await); + + // 3. 
Alice attempts horizon sync after header sync (to height 5; includes genesys block UTXO spend) + println!("\n3. Alice attempts horizon sync after header sync (to height 5; includes genesys block UTXO spend)\n"); + let output_hash = initial_coinbase.hash(&key_manager).await.unwrap(); + assert!(alice_node.blockchain_db.fetch_output(output_hash).unwrap().is_some()); + let commitment = initial_coinbase.commitment(&key_manager).await.unwrap(); + assert!(alice_node + .blockchain_db + .fetch_unspent_output_hash_by_commitment(commitment.clone()) + .unwrap() + .is_some()); + + let event = decide_horizon_sync(&mut alice_state_machine, header_sync.clone()).await; + let mut horizon_sync = match event { + StateEvent::ProceedToHorizonSync(sync_peers) => HorizonStateSync::from(sync_peers), + _ => panic!("3. Alice should proceed to horizon sync"), + }; + let event = sync::horizon_sync_execute(&mut alice_state_machine, &mut horizon_sync).await; + + println!( + "Event: {} to block {}", + state_event(&event), + alice_node.blockchain_db.get_height().unwrap() + ); + assert_eq!(event, StateEvent::HorizonStateSynchronized); + assert_eq!( + alice_node.blockchain_db.get_height().unwrap(), + alice_node.blockchain_db.fetch_last_header().unwrap().height - pruning_horizon + ); + assert!(alice_node.blockchain_db.fetch_output(output_hash).unwrap().is_none()); + assert!(alice_node + .blockchain_db + .fetch_unspent_output_hash_by_commitment(commitment) + .unwrap() + .is_none()); + // Bob will not be banned + assert!(!sync::wait_for_is_peer_banned(&alice_node, bob_node.node_identity.node_id(), 1).await); + + // 4. Alice attempts horizon sync again without any change in the blockchain + println!("\n4. Alice attempts horizon sync again without any change in the blockchain\n"); + + let event = decide_horizon_sync(&mut alice_state_machine, header_sync).await; + let mut horizon_sync = match event { + StateEvent::ProceedToHorizonSync(sync_peers) => HorizonStateSync::from(sync_peers), + _ => panic!("4. 
Alice should proceed to horizon sync"), + }; + let event = sync::horizon_sync_execute(&mut alice_state_machine, &mut horizon_sync).await; + + println!( + "Event: {} to block {}", + state_event(&event), + alice_node.blockchain_db.get_height().unwrap() + ); + assert_eq!(event, StateEvent::HorizonStateSynchronized); + assert_eq!( + alice_node.blockchain_db.get_height().unwrap(), + alice_node.blockchain_db.fetch_last_header().unwrap().height - pruning_horizon + ); + // Bob will not be banned + assert!(!sync::wait_for_is_peer_banned(&alice_node, bob_node.node_identity.node_id(), 1).await); + + // 5. Alice attempts block sync to the tip (to height 10) + println!("\n5. Alice attempts block sync to the tip (to height 10)\n"); + + let mut block_sync = sync::initialize_sync_blocks(&bob_node); + let event = sync::sync_blocks_execute(&mut alice_state_machine, &mut block_sync).await; + println!( + "Event: {} to block {}", + state_event(&event), + alice_node.blockchain_db.get_height().unwrap() + ); + assert_eq!(event, StateEvent::BlocksSynchronized); + assert_eq!( + alice_node.blockchain_db.get_height().unwrap(), + alice_node.blockchain_db.fetch_last_header().unwrap().height + ); + // Bob will not be banned + assert!(!sync::wait_for_is_peer_banned(&alice_node, bob_node.node_identity.node_id(), 1).await); + + // Give Bob some more blocks (containing the block with the spend transaction at height 16) + sync::add_some_existing_blocks(&blocks[11..=25], &bob_node); + assert_eq!(bob_node.blockchain_db.get_height().unwrap(), 25); + assert_eq!(bob_node.blockchain_db.fetch_last_header().unwrap().height, 25); + + // 6. Alice does header sync to the new height (to height 25) + println!("\n6. 
Alice does header sync to the new height (to height 25)\n"); + + let mut header_sync = sync::initialize_sync_headers_with_ping_pong_data(&alice_node, &bob_node); + let _event = sync::sync_headers_execute(&mut alice_state_machine, &mut header_sync).await; + assert_eq!(alice_node.blockchain_db.fetch_last_header().unwrap().height, 25); + // Bob will not be banned + assert!(!sync::wait_for_is_peer_banned(&alice_node, bob_node.node_identity.node_id(), 1).await); + + // 7. Alice attempts horizon sync to the new pruning height (to height 20 - STXOs should be pruned) Outputs created + // after height 10 and spent up to height 20 with corresponding inputs should not be streamed; we do not have way + // to verify this except looking at the detail log files. + println!("\n7. Alice attempts horizon sync to the new pruning height (to height 20 - STXOs should be pruned)\n"); + let spent_coinbases = coinbases + .iter() + .skip(1) + .take(10) // To current height + .collect::>(); + for output in &spent_coinbases { + let output_hash = output.hash(&key_manager).await.unwrap(); + assert!(alice_node.blockchain_db.fetch_output(output_hash).unwrap().is_some()); + let commitment = output.commitment(&key_manager).await.unwrap(); + assert!(alice_node + .blockchain_db + .fetch_unspent_output_hash_by_commitment(commitment) + .unwrap() + .is_some()); + } + + let event = decide_horizon_sync(&mut alice_state_machine, header_sync).await; + let mut horizon_sync = match event { + StateEvent::ProceedToHorizonSync(sync_peers) => HorizonStateSync::from(sync_peers), + _ => panic!("7. 
Alice should proceed to horizon sync"), + }; + let event = sync::horizon_sync_execute(&mut alice_state_machine, &mut horizon_sync).await; + + println!( + "Event: {} to block {}", + state_event(&event), + alice_node.blockchain_db.get_height().unwrap() + ); + assert_eq!(event, StateEvent::HorizonStateSynchronized); + assert_eq!( + alice_node.blockchain_db.get_height().unwrap(), + alice_node.blockchain_db.fetch_last_header().unwrap().height - pruning_horizon + ); + for output in &spent_coinbases { + let output_hash = output.hash(&key_manager).await.unwrap(); + assert!(alice_node.blockchain_db.fetch_output(output_hash).unwrap().is_none()); + let commitment = output.commitment(&key_manager).await.unwrap(); + assert!(alice_node + .blockchain_db + .fetch_unspent_output_hash_by_commitment(commitment) + .unwrap() + .is_none()); + } + // Bob will not be banned + assert!(!sync::wait_for_is_peer_banned(&alice_node, bob_node.node_identity.node_id(), 1).await); + + // Give Bob some more blocks (containing the block with the spend transaction at height 16) + sync::add_some_existing_blocks(&blocks[26..=30], &bob_node); + assert_eq!(bob_node.blockchain_db.get_height().unwrap(), 30); + assert_eq!(bob_node.blockchain_db.fetch_last_header().unwrap().height, 30); + + // 8. Alice does header sync to the new height (to height 30) + println!("\n8. Alice does header sync to the new height (to height 30)\n"); + + let mut header_sync = sync::initialize_sync_headers_with_ping_pong_data(&alice_node, &bob_node); + let _event = sync::sync_headers_execute(&mut alice_state_machine, &mut header_sync).await; + assert_eq!(alice_node.blockchain_db.fetch_last_header().unwrap().height, 30); + // Bob will not be banned + assert!(!sync::wait_for_is_peer_banned(&alice_node, bob_node.node_identity.node_id(), 1).await); + + // 9. Alice attempts horizon sync to the new pruning height (to height 25) + println!("\n9. 
Alice attempts horizon sync to the new pruning height (to height 25)\n"); + + let event = decide_horizon_sync(&mut alice_state_machine, header_sync).await; + let mut horizon_sync = match event { + StateEvent::ProceedToHorizonSync(sync_peers) => HorizonStateSync::from(sync_peers), + _ => panic!("9. Alice should proceed to horizon sync"), + }; + let event = sync::horizon_sync_execute(&mut alice_state_machine, &mut horizon_sync).await; + + println!( + "Event: {} to block {}", + state_event(&event), + alice_node.blockchain_db.get_height().unwrap() + ); + assert_eq!(event, StateEvent::HorizonStateSynchronized); + assert_eq!( + alice_node.blockchain_db.get_height().unwrap(), + alice_node.blockchain_db.fetch_last_header().unwrap().height - pruning_horizon + ); + // Bob will not be banned + assert!(!sync::wait_for_is_peer_banned(&alice_node, bob_node.node_identity.node_id(), 1).await); +} + +#[allow(clippy::too_many_lines)] +#[tokio::test(flavor = "multi_thread", worker_threads = 1)] +async fn test_consecutive_horizon_sync_from_prune_node_happy_path() { + //` cargo test --release --test core_integration_tests + //` tests::horizon_sync::test_initial_horizon_sync_from_prune_node_happy_path > .\target\output.txt 2>&1 + // env_logger::init(); // Set `$env:RUST_LOG = "trace"` + + // Create the network with Alice (pruning node) and Bob (archival node) and Carol (pruning node) + let pruning_horizon_alice = 4; + let pruning_horizon_carol = 12; + let (mut state_machines, mut peer_nodes, initial_block, consensus_manager, key_manager, initial_coinbase) = + sync::create_network_with_multiple_nodes(vec![ + // Alice is a pruned node + BlockchainDatabaseConfig { + orphan_storage_capacity: 5, + pruning_horizon: pruning_horizon_alice, + pruning_interval: 5, + track_reorgs: false, + cleanup_orphans_at_startup: false, + }, + // Carol is a pruned node + BlockchainDatabaseConfig { + orphan_storage_capacity: 5, + pruning_horizon: pruning_horizon_carol, + pruning_interval: 5, + track_reorgs: 
false, + cleanup_orphans_at_startup: false, + }, + // Bob is an archival node + BlockchainDatabaseConfig::default(), + ]) + .await; + let mut alice_state_machine = state_machines.remove(0); + let mut carol_state_machine = state_machines.remove(0); + let alice_node = peer_nodes.remove(0); + let carol_node = peer_nodes.remove(0); + let bob_node = peer_nodes.remove(0); + + // Create a blockchain that spends the genesys coinbase early on and then later spends some more coinbase outputs + let follow_up_coinbases_to_spend = 5; + let (blocks, _coinbases) = sync::create_block_chain_with_transactions( + &bob_node, + &initial_block, + &initial_coinbase, + &consensus_manager, + &key_manager, + min(pruning_horizon_alice, pruning_horizon_carol), + 28, // > follow_up_transaction_in_block + pruning_horizon_carol + 1 + 2, // < pruning_horizon_alice, < pruning_horizon_carol + 14, // > pruning_horizon_alice, > pruning_horizon_carol + follow_up_coinbases_to_spend, // > spend_genesis_coinbase_in_block - 1, < follow_up_transaction_in_block + ) + .await; + + // Now rewind Bob's chain to height 8 (> pruning_horizon, < follow_up_transaction_in_block) + sync::delete_some_blocks_and_headers(&blocks[8..=28], WhatToDelete::BlocksAndHeaders, &bob_node); + assert_eq!(bob_node.blockchain_db.get_height().unwrap(), 8); + assert_eq!(bob_node.blockchain_db.fetch_last_header().unwrap().height, 8); + println!( + "\nBob's blockchain height: {}\n", + bob_node.blockchain_db.get_height().unwrap() + ); + + // 1. Alice attempts initial horizon sync from Bob (to pruning height 4; includes genesys block UTXO spend) + println!( + "\n1. 
Alice attempts initial horizon sync from Bob (to pruning height 4; includes genesys block UTXO spend)\n" + ); + let output_hash = initial_coinbase.hash(&key_manager).await.unwrap(); + assert!(alice_node.blockchain_db.fetch_output(output_hash).unwrap().is_some()); + let commitment = initial_coinbase.commitment(&key_manager).await.unwrap(); + assert!(alice_node + .blockchain_db + .fetch_unspent_output_hash_by_commitment(commitment.clone()) + .unwrap() + .is_some()); + + let header_sync_alice_from_bob = sync::initialize_sync_headers_with_ping_pong_data(&alice_node, &bob_node); + let event = sync::sync_headers_execute(&mut alice_state_machine, &mut header_sync_alice_from_bob.clone()).await; + let alice_header_height = alice_node.blockchain_db.fetch_last_header().unwrap().height; + println!("Event: {} to header {}", state_event(&event), alice_header_height); + assert_eq!(alice_header_height, 8); + let event = decide_horizon_sync(&mut alice_state_machine, header_sync_alice_from_bob).await; + let mut horizon_sync = match event { + StateEvent::ProceedToHorizonSync(sync_peers) => HorizonStateSync::from(sync_peers), + _ => panic!("1. Alice should proceed to horizon sync"), + }; + let event = sync::horizon_sync_execute(&mut alice_state_machine, &mut horizon_sync).await; + + println!( + "Event: {} to block {}", + state_event(&event), + alice_node.blockchain_db.get_height().unwrap() + ); + assert_eq!(event, StateEvent::HorizonStateSynchronized); + assert_eq!( + alice_node.blockchain_db.get_height().unwrap(), + alice_header_height - pruning_horizon_alice + ); + assert!(alice_node.blockchain_db.fetch_output(output_hash).unwrap().is_none()); + assert!(alice_node + .blockchain_db + .fetch_unspent_output_hash_by_commitment(commitment) + .unwrap() + .is_none()); + // Bob will not be banned + assert!(!sync::wait_for_is_peer_banned(&alice_node, bob_node.node_identity.node_id(), 1).await); + + // 2. Carol attempts initial horizon sync from Bob with inadequate height + println!("\n2. 
Carol attempts initial horizon sync from Bob with inadequate height\n"); + + let mut header_sync_carol_from_bob = sync::initialize_sync_headers_with_ping_pong_data(&carol_node, &bob_node); + let event = sync::sync_headers_execute(&mut carol_state_machine, &mut header_sync_carol_from_bob).await; + let carol_header_height = carol_node.blockchain_db.fetch_last_header().unwrap().height; + println!("Event: {} to header {}", state_event(&event), carol_header_height); + assert_eq!(carol_header_height, 8); + let event = decide_horizon_sync(&mut carol_state_machine, header_sync_carol_from_bob).await; + match event { + StateEvent::ProceedToBlockSync(_) => println!("Carol chose `ProceedToBlockSync` instead"), + _ => panic!("2. Carol should not choose '{:?}'", event), + } + + // Give Bob some more blocks + sync::add_some_existing_blocks(&blocks[9..=13], &bob_node); + assert_eq!(bob_node.blockchain_db.get_height().unwrap(), 13); + assert_eq!(bob_node.blockchain_db.fetch_last_header().unwrap().height, 13); + println!( + "\nBob's blockchain height: {}\n", + bob_node.blockchain_db.get_height().unwrap() + ); + + // 3. Alice attempts horizon sync from Bob (to pruning height 9) + println!("\n3. Alice attempts horizon sync from Bob (to pruning height 9)\n"); + + let mut header_sync_alice_from_bob = sync::initialize_sync_headers_with_ping_pong_data(&alice_node, &bob_node); + let event = sync::sync_headers_execute(&mut alice_state_machine, &mut header_sync_alice_from_bob).await; + let alice_header_height = alice_node.blockchain_db.fetch_last_header().unwrap().height; + println!("Event: {} to header {}", state_event(&event), alice_header_height); + assert_eq!(alice_header_height, 13); + let event = decide_horizon_sync(&mut alice_state_machine, header_sync_alice_from_bob).await; + let mut horizon_sync = match event { + StateEvent::ProceedToHorizonSync(sync_peers) => HorizonStateSync::from(sync_peers), + _ => panic!("3. 
Alice should proceed to horizon sync"), + }; + let event = sync::horizon_sync_execute(&mut alice_state_machine, &mut horizon_sync).await; + + println!( + "Event: {} to block {}", + state_event(&event), + alice_node.blockchain_db.get_height().unwrap() + ); + assert_eq!(event, StateEvent::HorizonStateSynchronized); + assert_eq!( + alice_node.blockchain_db.get_height().unwrap(), + alice_header_height - pruning_horizon_alice + ); + // Bob will not be banned + assert!(!sync::wait_for_is_peer_banned(&alice_node, bob_node.node_identity.node_id(), 1).await); + + // 4. Alice attempts block sync from Bob to the tip (to height 13) + println!("\n4. Alice attempts block sync from Bob to the tip (to height 13)\n"); + + let mut block_sync = sync::initialize_sync_blocks(&bob_node); + let event = sync::sync_blocks_execute(&mut alice_state_machine, &mut block_sync).await; + println!( + "Event: {} to block {}", + state_event(&event), + alice_node.blockchain_db.get_height().unwrap() + ); + assert_eq!(event, StateEvent::BlocksSynchronized); + assert_eq!( + alice_node.blockchain_db.get_height().unwrap(), + alice_node.blockchain_db.fetch_last_header().unwrap().height + ); + // Bob will not be banned + assert!(!sync::wait_for_is_peer_banned(&alice_node, bob_node.node_identity.node_id(), 1).await); + + // 5 Carol attempts initial horizon sync from Alice with adequate height (but Alice is not an archival node) + println!( + "\n5. 
Carol attempts initial horizon sync from Alice with adequate height (but Alice is not an archival \ + node)\n" + ); + + let mut header_sync_carol_from_alice = sync::initialize_sync_headers_with_ping_pong_data(&carol_node, &alice_node); + let event = sync::sync_headers_execute(&mut carol_state_machine, &mut header_sync_carol_from_alice).await; + let carol_header_height = carol_node.blockchain_db.fetch_last_header().unwrap().height; + println!("Event: {} to header {}", state_event(&event), carol_header_height); + assert_eq!(carol_header_height, 13); + let event = decide_horizon_sync(&mut carol_state_machine, header_sync_carol_from_alice).await; + match event { + StateEvent::Continue => println!("Carol chose `Continue` instead"), + _ => panic!("5. Carol should not choose '{:?}'", event), + } + // Alice will not be banned + assert!(!sync::wait_for_is_peer_banned(&carol_node, alice_node.node_identity.node_id(), 1).await); + + // 6. Carol attempts initial horizon sync from Bob with adequate height (to pruning height 1) + println!("\n6. Carol attempts initial horizon sync from Bob with adequate height (to height 1)\n"); + + let mut header_sync_carol_from_bob = sync::initialize_sync_headers_with_ping_pong_data(&carol_node, &bob_node); + let event = sync::sync_headers_execute(&mut carol_state_machine, &mut header_sync_carol_from_bob).await; + let carol_header_height = carol_node.blockchain_db.fetch_last_header().unwrap().height; + println!("Event: {} to header {}", state_event(&event), carol_header_height); + assert_eq!(carol_header_height, 13); + let event = decide_horizon_sync(&mut carol_state_machine, header_sync_carol_from_bob).await; + let mut horizon_sync = match event { + StateEvent::ProceedToHorizonSync(sync_peers) => HorizonStateSync::from(sync_peers), + _ => panic!("6. 
Carol should proceed to horizon sync"), + }; + let event = sync::horizon_sync_execute(&mut carol_state_machine, &mut horizon_sync).await; + + println!( + "Event: {} to block {}", + state_event(&event), + carol_node.blockchain_db.get_height().unwrap() + ); + assert_eq!(event, StateEvent::HorizonStateSynchronized); + assert_eq!( + carol_node.blockchain_db.get_height().unwrap(), + carol_header_height - pruning_horizon_carol + ); + // Bob will not be banned + assert!(!sync::wait_for_is_peer_banned(&carol_node, bob_node.node_identity.node_id(), 1).await); + + // Give Bob some more blocks + sync::add_some_existing_blocks(&blocks[14..=18], &bob_node); + assert_eq!(bob_node.blockchain_db.get_height().unwrap(), 18); + assert_eq!(bob_node.blockchain_db.fetch_last_header().unwrap().height, 18); + println!( + "\nBob's blockchain height: {}\n", + bob_node.blockchain_db.get_height().unwrap() + ); + + // 7. Alice attempts horizon sync from Bob (to pruning height 14) + println!("\n7. Alice attempts horizon sync from Bob (to pruning height 14)\n"); + + let mut header_sync_alice_from_bob = sync::initialize_sync_headers_with_ping_pong_data(&alice_node, &bob_node); + let event = sync::sync_headers_execute(&mut alice_state_machine, &mut header_sync_alice_from_bob).await; + let alice_header_height = alice_node.blockchain_db.fetch_last_header().unwrap().height; + println!("Event: {} to header {}", state_event(&event), alice_header_height); + assert_eq!(alice_header_height, 18); + let event = decide_horizon_sync(&mut alice_state_machine, header_sync_alice_from_bob).await; + let mut horizon_sync = match event { + StateEvent::ProceedToHorizonSync(sync_peers) => HorizonStateSync::from(sync_peers), + _ => panic!("7. 
Alice should proceed to horizon sync"), + }; + let event = sync::horizon_sync_execute(&mut alice_state_machine, &mut horizon_sync).await; + + println!( + "Event: {} to block {}", + state_event(&event), + alice_node.blockchain_db.get_height().unwrap() + ); + assert_eq!(event, StateEvent::HorizonStateSynchronized); + assert_eq!( + alice_node.blockchain_db.get_height().unwrap(), + alice_header_height - pruning_horizon_alice + ); + // Bob will not be banned + assert!(!sync::wait_for_is_peer_banned(&alice_node, bob_node.node_identity.node_id(), 1).await); + + // 8. Alice attempts block sync from Bob to the tip (to height 18) + println!("\n8. Alice attempts block sync from Bob to the tip (to height 18)\n"); + + let mut block_sync = sync::initialize_sync_blocks(&bob_node); + let event = sync::sync_blocks_execute(&mut alice_state_machine, &mut block_sync).await; + println!( + "Event: {} to block {}", + state_event(&event), + alice_node.blockchain_db.get_height().unwrap() + ); + assert_eq!(event, StateEvent::BlocksSynchronized); + assert_eq!( + alice_node.blockchain_db.get_height().unwrap(), + alice_node.blockchain_db.fetch_last_header().unwrap().height + ); + // Bob will not be banned + assert!(!sync::wait_for_is_peer_banned(&alice_node, bob_node.node_identity.node_id(), 1).await); + + // 9. Carol attempts horizon sync from Alice with inadequate pruning horizon (to height 6) + println!("\n9. 
Carol attempts horizon sync from Alice with inadequate pruning horizon (to height 6)\n"); + + let mut header_sync_carol_from_alice = sync::initialize_sync_headers_with_ping_pong_data(&carol_node, &alice_node); + let event = sync::sync_headers_execute(&mut carol_state_machine, &mut header_sync_carol_from_alice).await; + let carol_header_height = carol_node.blockchain_db.fetch_last_header().unwrap().height; + println!("Event: {} to header {}", state_event(&event), carol_header_height); + assert_eq!(carol_header_height, 18); + let event = decide_horizon_sync(&mut carol_state_machine, header_sync_carol_from_alice).await; + match event { + StateEvent::Continue => println!("Carol chose `Continue` instead"), + _ => panic!("9. Carol should not choose '{:?}'", event), + } + // Alice will not be banned + assert!(!sync::wait_for_is_peer_banned(&carol_node, alice_node.node_identity.node_id(), 1).await); + + // Give Bob some more blocks + sync::add_some_existing_blocks(&blocks[14..=22], &bob_node); + assert_eq!(bob_node.blockchain_db.get_height().unwrap(), 22); + assert_eq!(bob_node.blockchain_db.fetch_last_header().unwrap().height, 22); + println!( + "\nBob's blockchain height: {}\n", + bob_node.blockchain_db.get_height().unwrap() + ); + + // 10. Carol attempts horizon sync from Bob (to pruning height 10) + println!("\n10. 
Carol attempts horizon sync from Bob (to pruning height 10)\n"); + + let mut header_sync_carol_from_bob = sync::initialize_sync_headers_with_ping_pong_data(&carol_node, &bob_node); + let event = sync::sync_headers_execute(&mut carol_state_machine, &mut header_sync_carol_from_bob).await; + let carol_header_height = carol_node.blockchain_db.fetch_last_header().unwrap().height; + println!("Event: {} to header {}", state_event(&event), carol_header_height); + assert_eq!(carol_header_height, 22); + let event = decide_horizon_sync(&mut carol_state_machine, header_sync_carol_from_bob).await; + let mut horizon_sync = match event { + StateEvent::ProceedToHorizonSync(sync_peers) => HorizonStateSync::from(sync_peers), + _ => panic!("10. Carol should proceed to horizon sync"), + }; + let event = sync::horizon_sync_execute(&mut carol_state_machine, &mut horizon_sync).await; + + println!( + "Event: {} to block {}", + state_event(&event), + carol_node.blockchain_db.get_height().unwrap() + ); + assert_eq!(event, StateEvent::HorizonStateSynchronized); + assert_eq!( + carol_node.blockchain_db.get_height().unwrap(), + carol_header_height - pruning_horizon_carol + ); + // Bob will not be banned + assert!(!sync::wait_for_is_peer_banned(&carol_node, bob_node.node_identity.node_id(), 1).await); + + // 11. Carol attempts block sync from Bob to the tip (to height 22) + println!("\n11. 
Carol attempts block sync from Bob to the tip (to height 22)\n"); + + let mut block_sync = sync::initialize_sync_blocks(&bob_node); + let event = sync::sync_blocks_execute(&mut carol_state_machine, &mut block_sync).await; + println!( + "Event: {} to block {}", + state_event(&event), + carol_node.blockchain_db.get_height().unwrap() + ); + assert_eq!(event, StateEvent::BlocksSynchronized); + assert_eq!( + carol_node.blockchain_db.get_height().unwrap(), + carol_node.blockchain_db.fetch_last_header().unwrap().height + ); + // Bob will not be banned + assert!(!sync::wait_for_is_peer_banned(&carol_node, bob_node.node_identity.node_id(), 1).await); + + // 12. Alice attempts horizon sync from Carol with adequate pruning horizon (to height 18) + println!("\n12. Alice attempts horizon sync from Carol with adequate pruning horizon (to height 18)\n"); + + let mut header_sync_alice_from_carol = sync::initialize_sync_headers_with_ping_pong_data(&alice_node, &carol_node); + let event = sync::sync_headers_execute(&mut alice_state_machine, &mut header_sync_alice_from_carol).await; + let alice_header_height = alice_node.blockchain_db.fetch_last_header().unwrap().height; + println!("Event: {} to header {}", state_event(&event), alice_header_height); + assert_eq!(alice_header_height, 22); + let event = decide_horizon_sync(&mut alice_state_machine, header_sync_alice_from_carol).await; + let mut horizon_sync = match event { + StateEvent::ProceedToHorizonSync(sync_peers) => HorizonStateSync::from(sync_peers), + _ => panic!("12. 
Alice should proceed to horizon sync"), + }; + let event = sync::horizon_sync_execute(&mut alice_state_machine, &mut horizon_sync).await; + + println!( + "Event: {} to block {}", + state_event(&event), + alice_node.blockchain_db.get_height().unwrap() + ); + assert_eq!(event, StateEvent::HorizonStateSynchronized); + assert_eq!( + alice_node.blockchain_db.get_height().unwrap(), + alice_header_height - pruning_horizon_alice + ); + // Carol will not be banned + assert!(!sync::wait_for_is_peer_banned(&alice_node, carol_node.node_identity.node_id(), 1).await); +} + +#[allow(clippy::too_many_lines)] +#[tokio::test(flavor = "multi_thread", worker_threads = 1)] +async fn test_initial_horizon_sync_from_prune_node_happy_path() { + //` cargo test --release --test core_integration_tests + //` tests::horizon_sync::test_initial_horizon_sync_from_prune_node_happy_path > .\target\output.txt 2>&1 + // env_logger::init(); // Set `$env:RUST_LOG = "trace"` + + // Create the network with Alice (pruning node) and Bob (archival node) and Carol (pruning node) + let pruning_horizon_alice = 4; + let pruning_horizon_carol = 12; + let (mut state_machines, mut peer_nodes, initial_block, consensus_manager, key_manager, initial_coinbase) = + sync::create_network_with_multiple_nodes(vec![ + // Alice is a pruned node + BlockchainDatabaseConfig { + orphan_storage_capacity: 5, + pruning_horizon: pruning_horizon_alice, + pruning_interval: 5, + track_reorgs: false, + cleanup_orphans_at_startup: false, + }, + // Carol is a pruned node + BlockchainDatabaseConfig { + orphan_storage_capacity: 5, + pruning_horizon: pruning_horizon_carol, + pruning_interval: 5, + track_reorgs: false, + cleanup_orphans_at_startup: false, + }, + // Bob is an archival node + BlockchainDatabaseConfig::default(), + ]) + .await; + let mut alice_state_machine = state_machines.remove(0); + let mut carol_state_machine = state_machines.remove(0); + let alice_node = peer_nodes.remove(0); + let carol_node = peer_nodes.remove(0); + let 
bob_node = peer_nodes.remove(0); + + // Create a blockchain that spends the genesys coinbase early on and then later spends some more coinbase outputs + let follow_up_coinbases_to_spend = 5; + let (_blocks, _coinbases) = sync::create_block_chain_with_transactions( + &bob_node, + &initial_block, + &initial_coinbase, + &consensus_manager, + &key_manager, + min(pruning_horizon_alice, pruning_horizon_carol), + 28, // > follow_up_transaction_in_block + pruning_horizon_carol + 1 + 2, // < pruning_horizon_alice, < pruning_horizon_carol + 14, // > pruning_horizon_alice, > pruning_horizon_carol + follow_up_coinbases_to_spend, // > spend_genesis_coinbase_in_block - 1, < follow_up_transaction_in_block + ) + .await; + + // 1. Carol attempts initial horizon sync from Bob archival node (to pruning height 16) + println!("\n1. Carol attempts initial horizon sync from Bob archival node (to pruning height 16)\n"); + + let output_hash = initial_coinbase.hash(&key_manager).await.unwrap(); + assert!(carol_node.blockchain_db.fetch_output(output_hash).unwrap().is_some()); + let commitment = initial_coinbase.commitment(&key_manager).await.unwrap(); + assert!(carol_node + .blockchain_db + .fetch_unspent_output_hash_by_commitment(commitment.clone()) + .unwrap() + .is_some()); + + let mut header_sync_carol_from_bob = sync::initialize_sync_headers_with_ping_pong_data(&carol_node, &bob_node); + let event = sync::sync_headers_execute(&mut carol_state_machine, &mut header_sync_carol_from_bob).await; + let carol_header_height = carol_node.blockchain_db.fetch_last_header().unwrap().height; + println!("Event: {} to header {}", state_event(&event), carol_header_height); + assert_eq!(carol_header_height, 28); + let event = decide_horizon_sync(&mut carol_state_machine, header_sync_carol_from_bob).await; + let mut horizon_sync = match event { + StateEvent::ProceedToHorizonSync(sync_peers) => HorizonStateSync::from(sync_peers), + _ => panic!("1. 
Carol should proceed to horizon sync"), + }; + let event = sync::horizon_sync_execute(&mut carol_state_machine, &mut horizon_sync).await; + + println!( + "Event: {} to block {}", + state_event(&event), + carol_node.blockchain_db.get_height().unwrap() + ); + assert_eq!(event, StateEvent::HorizonStateSynchronized); + assert_eq!( + carol_node.blockchain_db.get_height().unwrap(), + carol_header_height - pruning_horizon_carol + ); + + assert!(carol_node.blockchain_db.fetch_output(output_hash).unwrap().is_none()); + assert!(carol_node + .blockchain_db + .fetch_unspent_output_hash_by_commitment(commitment.clone()) + .unwrap() + .is_none()); + + // Bob will not be banned + assert!(!sync::wait_for_is_peer_banned(&carol_node, bob_node.node_identity.node_id(), 1).await); + + // 2. Carol attempts block sync from Bob to the tip (to height 28) + println!("\n2. Carol attempts block sync from Bob to the tip (to height 28)\n"); + + let mut block_sync = sync::initialize_sync_blocks(&bob_node); + let event = sync::sync_blocks_execute(&mut carol_state_machine, &mut block_sync).await; + println!( + "Event: {} to block {}", + state_event(&event), + carol_node.blockchain_db.get_height().unwrap() + ); + assert_eq!(event, StateEvent::BlocksSynchronized); + assert_eq!( + carol_node.blockchain_db.get_height().unwrap(), + carol_node.blockchain_db.fetch_last_header().unwrap().height + ); + // Bob will not be banned + assert!(!sync::wait_for_is_peer_banned(&carol_node, bob_node.node_identity.node_id(), 1).await); + + // 3. Alice attempts initial horizon sync from Carol prune node (to height 24) + println!("\n3. 
Alice attempts initial horizon sync from Carol prune node (to height 24)\n"); + + assert!(alice_node.blockchain_db.fetch_output(output_hash).unwrap().is_some()); + assert!(alice_node + .blockchain_db + .fetch_unspent_output_hash_by_commitment(commitment.clone()) + .unwrap() + .is_some()); + + let mut header_sync_alice_from_carol = sync::initialize_sync_headers_with_ping_pong_data(&alice_node, &carol_node); + let event = sync::sync_headers_execute(&mut alice_state_machine, &mut header_sync_alice_from_carol).await; + let alice_header_height = alice_node.blockchain_db.fetch_last_header().unwrap().height; + println!("Event: {} to header {}", state_event(&event), alice_header_height); + assert_eq!(alice_header_height, 28); + let event = decide_horizon_sync(&mut alice_state_machine, header_sync_alice_from_carol).await; + let mut horizon_sync = match event { + StateEvent::ProceedToHorizonSync(sync_peers) => HorizonStateSync::from(sync_peers), + _ => panic!("3. Alice should proceed to horizon sync"), + }; + let event = sync::horizon_sync_execute(&mut alice_state_machine, &mut horizon_sync).await; + + println!( + "Event: {} to block {}", + state_event(&event), + alice_node.blockchain_db.get_height().unwrap() + ); + assert_eq!(event, StateEvent::HorizonStateSynchronized); + assert_eq!( + alice_node.blockchain_db.get_height().unwrap(), + alice_header_height - pruning_horizon_alice + ); + + assert!(alice_node.blockchain_db.fetch_output(output_hash).unwrap().is_none()); + assert!(alice_node + .blockchain_db + .fetch_unspent_output_hash_by_commitment(commitment.clone()) + .unwrap() + .is_none()); + + // Carol will not be banned + assert!(!sync::wait_for_is_peer_banned(&alice_node, carol_node.node_identity.node_id(), 1).await); + + // 4. Alice attempts block sync from Carol prune node to the tip (to height 28) + println!("\n4. 
Alice attempts block sync from Carol prune node to the tip (to height 28)\n"); + + let mut block_sync = sync::initialize_sync_blocks(&carol_node); + let event = sync::sync_blocks_execute(&mut alice_state_machine, &mut block_sync).await; + println!( + "Event: {} to block {}", + state_event(&event), + alice_node.blockchain_db.get_height().unwrap() + ); + assert_eq!(event, StateEvent::BlocksSynchronized); + assert_eq!( + alice_node.blockchain_db.get_height().unwrap(), + alice_node.blockchain_db.fetch_last_header().unwrap().height + ); + // Carol will not be banned + assert!(!sync::wait_for_is_peer_banned(&alice_node, carol_node.node_identity.node_id(), 1).await); +} diff --git a/base_layer/core/tests/tests/mempool.rs b/base_layer/core/tests/tests/mempool.rs index 0bb1d7a6d3..8601ea4c95 100644 --- a/base_layer/core/tests/tests/mempool.rs +++ b/base_layer/core/tests/tests/mempool.rs @@ -28,6 +28,7 @@ use tari_common_types::types::{Commitment, PrivateKey, PublicKey, Signature}; use tari_comms_dht::domain_message::OutboundDomainMessage; use tari_core::{ base_node::state_machine_service::states::{ListeningInfo, StateInfo, StatusInfo}, + chain_storage::BlockchainDatabaseConfig, consensus::{ConsensusConstantsBuilder, ConsensusManager}, mempool::{Mempool, MempoolConfig, MempoolServiceConfig, TxStorageResponse}, proof_of_work::Difficulty, @@ -87,7 +88,7 @@ use crate::helpers::{ generate_block, generate_new_block, }, - nodes::{create_network_with_2_base_nodes_with_config, create_network_with_3_base_nodes_with_config}, + nodes::create_network_with_multiple_base_nodes_with_config, sample_blockchains::{create_new_blockchain, create_new_blockchain_with_constants}, }; @@ -1053,14 +1054,21 @@ async fn receive_and_propagate_transaction() { .with_block(block0) .build() .unwrap(); - let (mut alice_node, mut bob_node, mut carol_node, _consensus_manager) = - create_network_with_3_base_nodes_with_config( - MempoolServiceConfig::default(), - LivenessConfig::default(), - consensus_manager, - 
temp_dir.path().to_str().unwrap(), - ) - .await; + + let (mut node_interfaces, _consensus_manager) = create_network_with_multiple_base_nodes_with_config( + vec![MempoolServiceConfig::default(); 3], + vec![LivenessConfig::default(); 3], + vec![BlockchainDatabaseConfig::default(); 3], + vec![P2pConfig::default(); 3], + consensus_manager, + temp_dir.path().to_str().unwrap(), + network, + ) + .await; + let mut alice_node = node_interfaces.remove(0); + let mut bob_node = node_interfaces.remove(0); + let mut carol_node = node_interfaces.remove(0); + alice_node.mock_base_node_state_machine.publish_status(StatusInfo { bootstrapped: true, state_info: StateInfo::Listening(ListeningInfo::new(true)), @@ -1722,14 +1730,20 @@ async fn block_event_and_reorg_event_handling() { .with_block(block0.clone()) .build() .unwrap(); - let (mut alice, mut bob, consensus_manager) = create_network_with_2_base_nodes_with_config( - MempoolServiceConfig::default(), - LivenessConfig::default(), - P2pConfig::default(), + + let (mut node_interfaces, consensus_manager) = create_network_with_multiple_base_nodes_with_config( + vec![MempoolServiceConfig::default(); 2], + vec![LivenessConfig::default(); 2], + vec![BlockchainDatabaseConfig::default(); 2], + vec![P2pConfig::default(); 2], consensus_manager, temp_dir.path().to_str().unwrap(), + network, ) .await; + let mut alice = node_interfaces.remove(0); + let mut bob = node_interfaces.remove(0); + alice.mock_base_node_state_machine.publish_status(StatusInfo { bootstrapped: true, state_info: StateInfo::Listening(ListeningInfo::new(true)), diff --git a/base_layer/core/tests/tests/mod.rs b/base_layer/core/tests/tests/mod.rs index e36b646680..5e3ade249b 100644 --- a/base_layer/core/tests/tests/mod.rs +++ b/base_layer/core/tests/tests/mod.rs @@ -27,6 +27,7 @@ mod base_node_rpc; mod block_sync; mod block_validation; mod header_sync; +mod horizon_sync; mod mempool; mod node_comms_interface; mod node_service; diff --git 
a/base_layer/core/tests/tests/node_comms_interface.rs b/base_layer/core/tests/tests/node_comms_interface.rs index 5f02572356..4480cfce56 100644 --- a/base_layer/core/tests/tests/node_comms_interface.rs +++ b/base_layer/core/tests/tests/node_comms_interface.rs @@ -100,8 +100,8 @@ async fn inbound_get_metadata() { if let Ok(NodeCommsResponse::ChainMetadata(received_metadata)) = inbound_nch.handle_request(NodeCommsRequest::GetChainMetadata).await { - assert_eq!(received_metadata.height_of_longest_chain(), 0); - assert_eq!(received_metadata.best_block(), &block.hash()); + assert_eq!(received_metadata.best_block_height(), 0); + assert_eq!(received_metadata.best_block_hash(), &block.hash()); assert_eq!(received_metadata.pruning_horizon(), 0); } else { panic!(); @@ -464,7 +464,7 @@ async fn inbound_fetch_blocks_before_horizon_height() { randomx_factory, ); - let block1 = append_block( + let (block1, _) = append_block( &store, &block0, vec![], @@ -474,7 +474,7 @@ async fn inbound_fetch_blocks_before_horizon_height() { ) .await .unwrap(); - let block2 = append_block( + let (block2, _) = append_block( &store, &block1, vec![], @@ -484,7 +484,7 @@ async fn inbound_fetch_blocks_before_horizon_height() { ) .await .unwrap(); - let block3 = append_block( + let (block3, _) = append_block( &store, &block2, vec![], @@ -494,7 +494,7 @@ async fn inbound_fetch_blocks_before_horizon_height() { ) .await .unwrap(); - let block4 = append_block( + let (block4, _) = append_block( &store, &block3, vec![], @@ -504,7 +504,7 @@ async fn inbound_fetch_blocks_before_horizon_height() { ) .await .unwrap(); - let _block5 = append_block( + let (_block5, _) = append_block( &store, &block4, vec![], diff --git a/base_layer/core/tests/tests/node_service.rs b/base_layer/core/tests/tests/node_service.rs index 9877d99b44..c03a0d7b23 100644 --- a/base_layer/core/tests/tests/node_service.rs +++ b/base_layer/core/tests/tests/node_service.rs @@ -31,6 +31,7 @@ use tari_core::{ 
state_machine_service::states::{ListeningInfo, StateInfo, StatusInfo}, }, blocks::{ChainBlock, NewBlock}, + chain_storage::BlockchainDatabaseConfig, consensus::{ConsensusConstantsBuilder, ConsensusManager, ConsensusManagerBuilder, NetworkConsensus}, mempool::TxStorageResponse, proof_of_work::{randomx_factory::RandomXFactory, Difficulty, PowAlgorithm}, @@ -104,25 +105,37 @@ async fn propagate_and_forward_many_valid_blocks() { let (mut alice_node, rules) = BaseNodeBuilder::new(network.into()) .with_node_identity(alice_node_identity.clone()) .with_consensus_manager(rules) - .start(temp_dir.path().join("alice").to_str().unwrap()) + .start( + temp_dir.path().join("alice").to_str().unwrap(), + BlockchainDatabaseConfig::default(), + ) .await; let (mut bob_node, rules) = BaseNodeBuilder::new(network.into()) .with_node_identity(bob_node_identity.clone()) .with_peers(vec![alice_node_identity]) .with_consensus_manager(rules) - .start(temp_dir.path().join("bob").to_str().unwrap()) + .start( + temp_dir.path().join("bob").to_str().unwrap(), + BlockchainDatabaseConfig::default(), + ) .await; let (mut carol_node, rules) = BaseNodeBuilder::new(network.into()) .with_node_identity(carol_node_identity.clone()) .with_peers(vec![bob_node_identity.clone()]) .with_consensus_manager(rules) - .start(temp_dir.path().join("carol").to_str().unwrap()) + .start( + temp_dir.path().join("carol").to_str().unwrap(), + BlockchainDatabaseConfig::default(), + ) .await; let (mut dan_node, rules) = BaseNodeBuilder::new(network.into()) .with_node_identity(dan_node_identity) .with_peers(vec![carol_node_identity, bob_node_identity]) .with_consensus_manager(rules) - .start(temp_dir.path().join("dan").to_str().unwrap()) + .start( + temp_dir.path().join("dan").to_str().unwrap(), + BlockchainDatabaseConfig::default(), + ) .await; wait_until_online(&[&alice_node, &bob_node, &carol_node, &dan_node]).await; @@ -166,7 +179,8 @@ async fn propagate_and_forward_many_valid_blocks() { &key_manager, ) .await - .unwrap(), 
+ .unwrap() + .0, ); blocks .extend(construct_chained_blocks(&alice_node.blockchain_db, blocks[0].clone(), &rules, 5, &key_manager).await); @@ -210,6 +224,7 @@ async fn propagate_and_forward_many_valid_blocks() { static EMISSION: [u64; 2] = [10, 10]; #[tokio::test(flavor = "multi_thread", worker_threads = 1)] +#[allow(clippy::too_many_lines)] async fn propagate_and_forward_invalid_block_hash() { // Alice will propagate a "made up" block hash to Bob, Bob will request the block from Alice. Alice will not be able // to provide the block and so Bob will not propagate the hash further to Carol. @@ -234,19 +249,28 @@ async fn propagate_and_forward_invalid_block_hash() { let (mut alice_node, rules) = BaseNodeBuilder::new(network.into()) .with_node_identity(alice_node_identity.clone()) .with_consensus_manager(rules) - .start(temp_dir.path().join("alice").to_str().unwrap()) + .start( + temp_dir.path().join("alice").to_str().unwrap(), + BlockchainDatabaseConfig::default(), + ) .await; let (mut bob_node, rules) = BaseNodeBuilder::new(network.into()) .with_node_identity(bob_node_identity.clone()) .with_peers(vec![alice_node_identity]) .with_consensus_manager(rules) - .start(temp_dir.path().join("bob").to_str().unwrap()) + .start( + temp_dir.path().join("bob").to_str().unwrap(), + BlockchainDatabaseConfig::default(), + ) .await; let (mut carol_node, rules) = BaseNodeBuilder::new(network.into()) .with_node_identity(carol_node_identity) .with_peers(vec![bob_node_identity]) .with_consensus_manager(rules) - .start(temp_dir.path().join("carol").to_str().unwrap()) + .start( + temp_dir.path().join("carol").to_str().unwrap(), + BlockchainDatabaseConfig::default(), + ) .await; wait_until_online(&[&alice_node, &bob_node, &carol_node]).await; @@ -276,7 +300,7 @@ async fn propagate_and_forward_invalid_block_hash() { ) .await; let txs = txs.into_iter().map(|tx| (*tx).clone()).collect(); - let block1 = append_block( + let (block1, _) = append_block( &alice_node.blockchain_db, &block0, txs, 
@@ -361,7 +385,10 @@ async fn propagate_and_forward_invalid_block() { let (mut dan_node, rules) = BaseNodeBuilder::new(network.into()) .with_node_identity(dan_node_identity.clone()) .with_consensus_manager(rules) - .start(temp_dir.path().join("dan").to_str().unwrap()) + .start( + temp_dir.path().join("dan").to_str().unwrap(), + BlockchainDatabaseConfig::default(), + ) .await; let (mut carol_node, rules) = BaseNodeBuilder::new(network.into()) .with_node_identity(carol_node_identity.clone()) @@ -372,20 +399,29 @@ async fn propagate_and_forward_invalid_block() { mock_validator.clone(), stateless_block_validator.clone(), ) - .start(temp_dir.path().join("carol").to_str().unwrap()) + .start( + temp_dir.path().join("carol").to_str().unwrap(), + BlockchainDatabaseConfig::default(), + ) .await; let (mut bob_node, rules) = BaseNodeBuilder::new(network.into()) .with_node_identity(bob_node_identity.clone()) .with_peers(vec![dan_node_identity]) .with_consensus_manager(rules) .with_validators(mock_validator.clone(), mock_validator, stateless_block_validator) - .start(temp_dir.path().join("bob").to_str().unwrap()) + .start( + temp_dir.path().join("bob").to_str().unwrap(), + BlockchainDatabaseConfig::default(), + ) .await; let (mut alice_node, rules) = BaseNodeBuilder::new(network.into()) .with_node_identity(alice_node_identity) .with_peers(vec![bob_node_identity, carol_node_identity]) .with_consensus_manager(rules) - .start(temp_dir.path().join("alice").to_str().unwrap()) + .start( + temp_dir.path().join("alice").to_str().unwrap(), + BlockchainDatabaseConfig::default(), + ) .await; alice_node @@ -423,7 +459,7 @@ async fn propagate_and_forward_invalid_block() { // This is a valid block, however Bob, Carol and Dan's block validator is set to always reject the block // after fetching it. 
- let block1 = append_block( + let (block1, _) = append_block( &alice_node.blockchain_db, &block0, vec![], @@ -485,20 +521,20 @@ async fn local_get_metadata() { let network = Network::LocalNet; let key_manager = create_memory_db_key_manager(); let (mut node, consensus_manager) = BaseNodeBuilder::new(network.into()) - .start(temp_dir.path().to_str().unwrap()) + .start(temp_dir.path().to_str().unwrap(), BlockchainDatabaseConfig::default()) .await; let db = &node.blockchain_db; let block0 = db.fetch_block(0, true).unwrap().try_into_chain_block().unwrap(); - let block1 = append_block(db, &block0, vec![], &consensus_manager, Difficulty::min(), &key_manager) + let (block1, _) = append_block(db, &block0, vec![], &consensus_manager, Difficulty::min(), &key_manager) .await .unwrap(); - let block2 = append_block(db, &block1, vec![], &consensus_manager, Difficulty::min(), &key_manager) + let (block2, _) = append_block(db, &block1, vec![], &consensus_manager, Difficulty::min(), &key_manager) .await .unwrap(); let metadata = node.local_nci.get_metadata().await.unwrap(); - assert_eq!(metadata.height_of_longest_chain(), 2); - assert_eq!(metadata.best_block(), block2.hash()); + assert_eq!(metadata.best_block_height(), 2); + assert_eq!(metadata.best_block_hash(), block2.hash()); node.shutdown().await; } @@ -517,7 +553,7 @@ async fn local_get_new_block_template_and_get_new_block() { .unwrap(); let (mut node, _rules) = BaseNodeBuilder::new(network.into()) .with_consensus_manager(rules) - .start(temp_dir.path().to_str().unwrap()) + .start(temp_dir.path().to_str().unwrap(), BlockchainDatabaseConfig::default()) .await; let schema = [ @@ -566,7 +602,7 @@ async fn local_get_new_block_with_zero_conf() { HeaderFullValidator::new(rules.clone(), difficulty_calculator), BlockBodyInternalConsistencyValidator::new(rules, true, factories.clone()), ) - .start(temp_dir.path().to_str().unwrap()) + .start(temp_dir.path().to_str().unwrap(), BlockchainDatabaseConfig::default()) .await; let (tx01, 
tx01_out) = spend_utxos( @@ -652,7 +688,7 @@ async fn local_get_new_block_with_combined_transaction() { HeaderFullValidator::new(rules.clone(), difficulty_calculator), BlockBodyInternalConsistencyValidator::new(rules, true, factories.clone()), ) - .start(temp_dir.path().to_str().unwrap()) + .start(temp_dir.path().to_str().unwrap(), BlockchainDatabaseConfig::default()) .await; let (tx01, tx01_out) = spend_utxos( @@ -718,7 +754,7 @@ async fn local_submit_block() { let network = Network::LocalNet; let key_manager = create_memory_db_key_manager(); let (mut node, consensus_manager) = BaseNodeBuilder::new(network.into()) - .start(temp_dir.path().to_str().unwrap()) + .start(temp_dir.path().to_str().unwrap(), BlockchainDatabaseConfig::default()) .await; let db = &node.blockchain_db; diff --git a/base_layer/core/tests/tests/node_state_machine.rs b/base_layer/core/tests/tests/node_state_machine.rs index 55e68c79de..01761fba4f 100644 --- a/base_layer/core/tests/tests/node_state_machine.rs +++ b/base_layer/core/tests/tests/node_state_machine.rs @@ -36,6 +36,7 @@ use tari_core::{ }, SyncValidators, }, + chain_storage::BlockchainDatabaseConfig, consensus::{ConsensusConstantsBuilder, ConsensusManagerBuilder}, mempool::MempoolServiceConfig, proof_of_work::{randomx_factory::RandomXFactory, Difficulty}, @@ -58,8 +59,7 @@ use crate::helpers::{ block_builders::{append_block, chain_block, create_genesis_block}, chain_metadata::MockChainMetadata, nodes::{ - create_network_with_2_base_nodes_with_config, - create_network_with_3_base_nodes_with_config, + create_network_with_multiple_base_nodes_with_config, random_node_identity, wait_until_online, BaseNodeBuilder, @@ -81,17 +81,26 @@ async fn test_listening_lagging() { .with_block(prev_block.clone()) .build() .unwrap(); - let (alice_node, bob_node, consensus_manager) = create_network_with_2_base_nodes_with_config( - MempoolServiceConfig::default(), - LivenessConfig { - auto_ping_interval: Some(Duration::from_millis(100)), - 
..Default::default() - }, - P2pConfig::default(), + + let (mut node_interfaces, consensus_manager) = create_network_with_multiple_base_nodes_with_config( + vec![MempoolServiceConfig::default(); 2], + vec![ + LivenessConfig { + auto_ping_interval: Some(Duration::from_millis(100)), + ..Default::default() + }; + 2 + ], + vec![BlockchainDatabaseConfig::default(); 2], + vec![P2pConfig::default(); 2], consensus_manager, temp_dir.path().to_str().unwrap(), + network, ) .await; + let alice_node = node_interfaces.remove(0); + let bob_node = node_interfaces.remove(0); + let shutdown = Shutdown::new(); let (state_change_event_publisher, _) = broadcast::channel(10); let (status_event_sender, _status_event_receiver) = watch::channel(StatusInfo::new()); @@ -117,7 +126,7 @@ async fn test_listening_lagging() { let mut bob_local_nci = bob_node.local_nci; // Bob Block 1 - no block event - let prev_block = append_block( + let (prev_block, _) = append_block( &bob_db, &prev_block, vec![], @@ -143,6 +152,7 @@ async fn test_listening_lagging() { assert!(matches!(next_event, StateEvent::FallenBehind(_))); } +#[allow(clippy::too_many_lines)] #[tokio::test(flavor = "multi_thread", worker_threads = 1)] async fn test_listening_initial_fallen_behind() { let network = Network::LocalNet; @@ -157,23 +167,34 @@ async fn test_listening_initial_fallen_behind() { .with_block(gen_block.clone()) .build() .unwrap(); - let (alice_node, bob_node, charlie_node, consensus_manager) = create_network_with_3_base_nodes_with_config( - MempoolServiceConfig::default(), - LivenessConfig { - auto_ping_interval: Some(Duration::from_millis(100)), - ..Default::default() - }, + + let (mut node_interfaces, consensus_manager) = create_network_with_multiple_base_nodes_with_config( + vec![MempoolServiceConfig::default(); 3], + vec![ + LivenessConfig { + auto_ping_interval: Some(Duration::from_millis(100)), + ..Default::default() + }; + 3 + ], + vec![BlockchainDatabaseConfig::default(); 3], + vec![P2pConfig::default(); 3], 
consensus_manager, temp_dir.path().to_str().unwrap(), + network, ) .await; + let alice_node = node_interfaces.remove(0); + let bob_node = node_interfaces.remove(0); + let charlie_node = node_interfaces.remove(0); + let shutdown = Shutdown::new(); let bob_db = bob_node.blockchain_db; let mut bob_local_nci = bob_node.local_nci; // Bob Block 1 - no block event - let prev_block = append_block( + let (prev_block, _) = append_block( &bob_db, &gen_block, vec![], @@ -196,7 +217,7 @@ async fn test_listening_initial_fallen_behind() { let mut charlie_local_nci = charlie_node.local_nci; // charlie Block 1 - no block event - let prev_block = append_block( + let (prev_block, _) = append_block( &charlie_db, &gen_block, vec![], @@ -256,7 +277,7 @@ async fn test_listening_initial_fallen_behind() { async fn test_event_channel() { let temp_dir = tempdir().unwrap(); let (node, consensus_manager) = BaseNodeBuilder::new(Network::Esmeralda.into()) - .start(temp_dir.path().to_str().unwrap()) + .start(temp_dir.path().to_str().unwrap(), BlockchainDatabaseConfig::default()) .await; // let shutdown = Shutdown::new(); let db = create_test_blockchain_db(); diff --git a/base_layer/key_manager/src/cipher_seed.rs b/base_layer/key_manager/src/cipher_seed.rs index 0b5f7a63fa..acae2f54c1 100644 --- a/base_layer/key_manager/src/cipher_seed.rs +++ b/base_layer/key_manager/src/cipher_seed.rs @@ -283,16 +283,16 @@ impl CipherSeed { let expected_mac = Self::generate_mac(&birthday_bytes, entropy.reveal(), version, salt.as_ref(), &mac_key)?; // Verify the MAC in constant time to avoid leaking data - if mac.ct_eq(&expected_mac).unwrap_u8() == 0 { - return Err(KeyManagerError::DecryptionFailed); + if mac.ct_eq(&expected_mac).into() { + Ok(Self { + version, + birthday, + entropy: Box::from(*entropy.reveal()), + salt, + }) + } else { + Err(KeyManagerError::DecryptionFailed) } - - Ok(Self { - version, - birthday, - entropy: Box::from(*entropy.reveal()), - salt, - }) } /// Encrypt or decrypt data using ChaCha20 
diff --git a/base_layer/mmr/src/balanced_binary_merkle_proof.rs b/base_layer/mmr/src/balanced_binary_merkle_proof.rs index 23fe5d8522..eb8ee789ce 100644 --- a/base_layer/mmr/src/balanced_binary_merkle_proof.rs +++ b/base_layer/mmr/src/balanced_binary_merkle_proof.rs @@ -20,7 +20,11 @@ // WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE // USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -use std::{collections::HashMap, convert::TryFrom, marker::PhantomData}; +use std::{ + collections::{HashMap, HashSet}, + convert::TryFrom, + marker::PhantomData, +}; use borsh::{BorshDeserialize, BorshSerialize}; use digest::Digest; @@ -198,22 +202,38 @@ where D: Digest + DomainDigest .iter() .max() .ok_or(BalancedBinaryMerkleProofError::CantMergeZeroProofs)?; - + let mut consumed = HashSet::new(); // We need to compute the hashes row by row to be sure they are processed correctly. for height in (0..max_height).rev() { let hashes = computed_hashes.clone(); - for (index, leaf) in computed_hashes.iter_mut().enumerate() { + let mut dangling_paths = HashSet::new(); + for (index, leaf) in computed_hashes.iter_mut().enumerate().rev() { if self.heights[index] <= height { continue; } let Some(hash_or_index) = self.paths[index].pop() else { + // Check if we already joined with other path. + if !consumed.contains(&index) { + // If the path ended, it's going to be merged to some other path. + if !dangling_paths.insert(index) { + return Err(BalancedBinaryMerkleProofError::BadProofSemantics); + } + } // Path at this index already completely processed continue; }; let hash = match hash_or_index { MergedBalancedBinaryMerkleIndexOrHash::Index(index) => { + if !dangling_paths + .remove(&usize::try_from(index).map_err(|_| BalancedBinaryMerkleProofError::MathOverflow)?) + { + // If some path is joining our path, that path should have ended. 
+ return Err(BalancedBinaryMerkleProofError::BadProofSemantics); + } + consumed + .insert(usize::try_from(index).map_err(|_| BalancedBinaryMerkleProofError::MathOverflow)?); let index = usize::try_from(index).map_err(|_| BalancedBinaryMerkleProofError::MathOverflow)?; // The index must also point to one of the proofs @@ -235,6 +255,14 @@ where D: Digest + DomainDigest // Parent self.node_indices[index] = (self.node_indices[index] - 1) >> 1; } + if !dangling_paths.is_empty() { + // Something path ended, but it's not joined with any other path. + return Err(BalancedBinaryMerkleProofError::BadProofSemantics); + } + } + if consumed.len() + 1 < self.paths.len() { + // If the proof is valid then all but one paths will be consumed by other paths. + return Err(BalancedBinaryMerkleProofError::BadProofSemantics); } Ok(computed_hashes[0] == *root) } @@ -292,7 +320,8 @@ mod test { heights: vec![1], _phantom: PhantomData, }; - assert!(!proof.verify_consume(&vec![0u8; 32], vec![vec![]]).unwrap()); + // This will fail because the node height is 1 and it's empty, so it's not going to compute the root hash. + proof.verify_consume(&vec![0u8; 32], vec![vec![]]).unwrap_err(); } #[test] @@ -334,10 +363,10 @@ mod test { #[test] fn test_merge_proof_full_tree() { - let leaves = (0..255).map(|i| vec![i; 32]).collect::>(); + let leaves = (0..=255).map(|i| vec![i; 32]).collect::>(); let bmt = BalancedBinaryMerkleTree::::create(leaves.clone()); let root = bmt.get_merkle_root(); - let proofs = (0..255) + let proofs = (0..=255) .map(|i| BalancedBinaryMerkleProof::generate_proof(&bmt, i)) .collect::, _>>() .unwrap(); @@ -382,11 +411,23 @@ mod test { heights: vec![0, 0], _phantom: PhantomData, }; - // This should fail but does not - // proof .verify_consume(&vec![5u8; 32], vec![vec![5u8; 32], vec![2u8; 32]]) .unwrap_err(); - assert!(proof + // This will fail because there are more hashes on the same level as there can be. 
+ proof .verify_consume(&vec![5u8; 32], vec![vec![5u8; 32], vec![2u8; 32]]) - .unwrap()); + .unwrap_err(); + + let proof = MergedBalancedBinaryMerkleProof:: { + paths: vec![vec![MergedBalancedBinaryMerkleIndexOrHash::Hash(vec![5u8; 32])], vec![ + MergedBalancedBinaryMerkleIndexOrHash::Index(1), + ]], + node_indices: vec![1, 1], + heights: vec![0, 1], + _phantom: PhantomData, + }; + // This will fail because we can't have any more nodes if we have leaf at the root. + proof + .verify_consume(&vec![5u8; 32], vec![vec![5u8; 32], vec![2u8; 32]]) + .unwrap_err(); } #[test] diff --git a/base_layer/mmr/src/sparse_merkle_tree/tree.rs b/base_layer/mmr/src/sparse_merkle_tree/tree.rs index caa2b38102..922b4396b4 100644 --- a/base_layer/mmr/src/sparse_merkle_tree/tree.rs +++ b/base_layer/mmr/src/sparse_merkle_tree/tree.rs @@ -229,7 +229,7 @@ impl> SparseMerkleTree { Ok(result) } - /// Update and existing node at location `key` in the tree, or, if the key does not exist, insert a new node at + /// Update an existing node at location `key` in the tree, or, if the key does not exist, insert a new node at /// location `key` instead. Returns `Ok(UpdateResult::Updated)` if the node was updated, or /// `Ok(UpdateResult::Inserted)` if the node was inserted. 
/// diff --git a/base_layer/p2p/src/auto_update/mod.rs b/base_layer/p2p/src/auto_update/mod.rs index c67ea11a41..27b1c6cd88 100644 --- a/base_layer/p2p/src/auto_update/mod.rs +++ b/base_layer/p2p/src/auto_update/mod.rs @@ -205,10 +205,8 @@ async fn http_download(url: T) -> Result impl Iterator { diff --git a/base_layer/p2p/src/auto_update/signature.rs b/base_layer/p2p/src/auto_update/signature.rs index b8bae51e9c..fb770e9992 100644 --- a/base_layer/p2p/src/auto_update/signature.rs +++ b/base_layer/p2p/src/auto_update/signature.rs @@ -64,7 +64,60 @@ mod test { use pgp::Deserializable; use super::*; - use crate::auto_update::{maintainers, MAINTAINERS}; + use crate::auto_update::maintainers; + + const PUBLIC_KEY: &str = r#"-----BEGIN PGP PUBLIC KEY BLOCK----- + +mQINBF6y/8YBEAC+9x9jq0q8sle/M8aYlp4b9cHJPb6sataUaMzOxx/hQ9WCrhU1 +GhJrDk+QPBMBtvT1oWMWa5KhMFNS1S0KTYbXensnF2tOdT6kSAWKXufW4hQ32p4B +NW6aqrOxKMLj7jI2hwlCgRvlK+51J/l7e1OvCpQFL3wH/VMPBG5TgIRmgLeFZWWB +WtD6VjOAJROBiESb5DW+ox3hyxFEKMmwdC+B8b346GJedGFZem9eaN3ApjYBz/Ev +YsQQk2zL/eK5HeSYfboFBCWQrpIFtaJwyhzRlW2s5jz79Jv6kULZ+SVmfRerqk9c +jCzp48R5SJxIulk/PThqZ7sE6vEvwoGGSUzhQ0z1LhhFXt/0qg0qNeIvGkO5HRIR +R5i73/WG1PlgmcjtZHV54M86sTwm3yMevlHI5+i8Y4PAcYulftX9fVf85SitnWS5 +oAg3xP0pIWWztk8Ng4hWMM7sGE7q7BpjxuuGjrb9SNOTQuK8I7hg81p08LSNioOm +RD2OTkgbzew4YIMy+SmkmrFWvoKCRrWxNsQl4osVhOcLOlVBYoIjnBxy7AmHZzZC +ftgH5n6ODoB0CqZrc+UcMX4CFVtI7vaZOp1mcHN8geMhq1TjMJoGGlYuonaO34wM +2o+n+HbFJBCzfz/Q4pnuGwPDlumFU08E++ch63joMtXM1qAD5rNJMHfebQARAQAB +tDBTdGFubGV5IEJvbmRpIDxzZGJvbmRpQHVzZXJzLm5vcmVwbHkuZ2l0aHViLmNv +bT6JAk4EEwEIADgWIQQze5HvxfECfYrt9j0YhbFJUEwKZAUCXrL/xgIbAwULCQgH +AgYVCgkICwIEFgIDAQIeAQIXgAAKCRAYhbFJUEwKZIvVEAC3uGPAduK06FWxERWj +qXDR/tj7rNh6MaYXTLDM79sXP9nOj9aZOmA6lKZDRZ8lQyoZykwlVriHkJLYFotj +mxBPfgy1j5a2I52sF1sZMxwCg1nChvDivvnXTORMMcTWtIFkKu3cdzmO1Jil1tFB +zb205DG6gJ4JtXPpXKdAPkaJ68pqGcsAUU0N1KXla6ob/QwNlvp5aQ7cdR7uNbuI +kRx/KpsFNpA4jeP0+hK6kSaJgBdIUWzUWkfz9ubBdCRN8oWG+aazq4Y3DvaSnmbr 
+VCdb78Ni+QP98VtQhdk0UEc+T7vdbS9c71t6qMqNlRUWoiBZORnWa2QTqxhFGsM0 +FZhGX4UIZsdqMkTn/egf5zy/UmgqvmX2ujgQVj4OzkXT022wKgnr4z09/jymUPXE +o4QU15kTmjwTkNk8E3Cj1HbppyEgPNJ2bO3wnJbt6XMKejIXJC8X7G5v4WomOe8j +HVhqpAeOuML4u7KYg73wgRnIIMXCLR2VeS4iSZ42x/L6lWS5NzaGMV6nZv8t5ehh +otZ3uaWlHa4rRK2wrwveN/JdoYXqmZIoOb5Ivt9PlbUZ6NgHXDyHC7rCShtyPK2j +tY6BkoFz4HAloxhFGjRxBfDFjx9nefJ418owI1tOP1rNCoblROT1ggLlQ9a6URIF +R5WvoQC843hWwspzi7ll1Vz5JbkCDQResv/GARAArIvngo2dj+bZgu9/edkrKKbq +JZQj9fqaZDJrHXOmg/3t29qvEnyFJnyl9VYhSmLCppuy0k4YY4DaaCebBPafyV8e +Q/JNF3Le1FO7LHmoHuXFvcOvOVJhANpFKmNX3jaEYT7zDTbJ705FGldaC3udn12n +nEFlAEJjYQA6bgQAXXS02JjeVfl82IEgYpR0yFJjbL690tQ87Emlk3zeRrd/Esuv +Au9jHDTILSkUxa2dHTOgbtPwkk0N1NeGYIvWLYtwVcQ7KF+1xv/WVjO0dyr2qoia +4guJejBkNXAfYbodg5f7KjUYOcmTotSFurens5SdS+KUuaQtbfxGOt6nthwEU/N5 +x2/M64Y4l4vXtrjV+6d6RtvlPHnMTMAdfE6f3F/+wEsVlBQFbV2kn0nbDIJSlwys +L/kR6R9fHPtjSmS1omZWqE7bOu288j/M7/aP4Jcflj1t0+0WGfliS+0IgrNphUUA +1tpC7PXzXKzMtdK5xzLIZWAnjoXpzjVhcFglQpQSk9y4V9lqZbawx+RfHW1U2RYp +rVfvm42wg0DPYanWXzgO4nZdwSzu9RQQUdhdJAxCVV9ODh6CAVj0G7q2XEerjAUE +ZTxf1WKCJTpCy1B6w2lf1PN2zKDVpha0/76u/QcZGg5dAqklpSAaRNj3uDnq1HEP +RQOm6ladgLXO46J+ao0AEQEAAYkCNgQYAQgAIBYhBDN7ke/F8QJ9iu32PRiFsUlQ +TApkBQJesv/GAhsMAAoJEBiFsUlQTApk6HsP/A/sNwdzhTKIWGpdyxXz2YdUSK++ +kaQdZwtDIVcSZQ0yIFf0fPLkeoSd7jZfANmu2O1vnocBjdMcNOvPNjxKpkExJLVs +ttMiqla0ood8LuA9wteRFKRgoJc3Y71bWsxavLTfA4jDK+CaJG+K+vRDU7gwAdF+ +5rKhUIyn7pph7eWGHOv4bzGLEjV4NlLSzZGBA0aMDaWMGgStNzCD25yU7zYEJIWn +8gq2Rq0by8H6NLg6tygh5w8s2NUhPI5V31kZhsC1Kn5kExn4rVxFusqwG63gkPz1 +avx7E5kfChTgjaDlf0gnC73/alMeO4vTJKeDJaq581dza9jwJqaDC1+/ozYdGt7u +3KUxjhiSnWe38/AGna9cB4mAD4reCczH51gthlyeYNaSw+L0rsSMKvth9EYAHknP +ZFT97SIDPF1/2bRgO05I+J4BaSMA+2Euv/O3RWk953l+eR8MoZlr5mnMRM4Guy7K +nfTh5LZFccJyvW+CsxKKfwe/RNQPZLBuScqAogjsd+I6sVlmgLSyKkR2B3voRQ0g +l6J2669tX0wMPM/XsVlZ/UDdfUe6spRO8PXBwe+zdAAejUotLk4aMyhxxZVKCEwO +CrdiSo3ds50gaF1BXP72gfZW0E8djcD9ATfONqxFfftUwPbnbAqKh8t+L+If5H5r +tQrYpH9CNXgX9dC9 +=7S7i +-----END PGP PUBLIC KEY BLOCK-----"#; const 
VALID_SIGNATURE: &str = r#"-----BEGIN PGP SIGNATURE----- iQIzBAEBCAAdFiEEM3uR78XxAn2K7fY9GIWxSVBMCmQFAmDYhicACgkQGIWxSVBM @@ -87,10 +140,11 @@ l9smp8LtJcXkw4cNgE4MB9VKdx+NhdbvWemt7ccldeL22hmyS24= #[test] fn it_verifies_signed_message() { let (sig, _) = pgp::StandaloneSignature::from_string(VALID_SIGNATURE.trim()).unwrap(); - let verifier = SignedMessageVerifier::new(maintainers().collect()); + let (key, _) = pgp::SignedPublicKey::from_string(PUBLIC_KEY).unwrap(); + let verifier = SignedMessageVerifier::new(vec![key]); let signer = verifier.verify_signature(&sig, MESSAGE).unwrap(); - let (maintainer, _) = pgp::SignedPublicKey::from_string(MAINTAINERS[3]).unwrap(); + let (maintainer, _) = pgp::SignedPublicKey::from_string(PUBLIC_KEY).unwrap(); assert_eq!(*signer, maintainer); } diff --git a/base_layer/p2p/src/initialization.rs b/base_layer/p2p/src/initialization.rs index ac9ab9b653..d4cf62ca97 100644 --- a/base_layer/p2p/src/initialization.rs +++ b/base_layer/p2p/src/initialization.rs @@ -51,8 +51,15 @@ use tari_comms::{ ProtocolId, }, tor, - tor::HiddenServiceControllerError, - transports::{predicate::FalsePredicate, MemoryTransport, SocksConfig, SocksTransport, TcpWithTorTransport}, + tor::{HiddenServiceControllerError, TorIdentity}, + transports::{ + predicate::FalsePredicate, + HiddenServiceTransport, + MemoryTransport, + SocksConfig, + SocksTransport, + TcpWithTorTransport, + }, utils::cidr::parse_cidrs, CommsBuilder, CommsBuilderError, @@ -209,9 +216,10 @@ pub async fn initialize_local_test_comms>( Ok((comms, dht, event_sender)) } -pub async fn spawn_comms_using_transport( +pub async fn spawn_comms_using_transport( comms: UnspawnedCommsNode, transport_config: TransportConfig, + after_comms: F, ) -> Result { let comms = match transport_config.transport_type { TransportType::Memory => { @@ -249,22 +257,16 @@ pub async fn spawn_comms_using_transport( let tor_config = transport_config.tor; debug!(target: LOG_TARGET, "Building TOR comms stack ({:?})", tor_config); 
let listener_address_override = tor_config.listener_address_override.clone(); - let mut hidden_service_ctl = initialize_hidden_service(tor_config)?; + let hidden_service_ctl = initialize_hidden_service(tor_config)?; // Set the listener address to be the address (usually local) to which tor will forward all traffic - let transport = hidden_service_ctl.initialize_transport().await?; - - info!( - target: LOG_TARGET, - "Tor hidden service initialized. proxied_address = '{:?}', listener_override_address = {:?}", - hidden_service_ctl.proxied_address(), - listener_address_override, - ); + let instant = Instant::now(); + let transport = HiddenServiceTransport::new(hidden_service_ctl, after_comms); + debug!(target: LOG_TARGET, "TOR transport initialized in {:.0?}", instant.elapsed()); comms .with_listener_address( listener_address_override.unwrap_or_else(|| multiaddr![Ip4([127, 0, 0, 1]), Tcp(0u16)]), ) - .with_hidden_service_controller(hidden_service_ctl) .spawn_with_transport(transport) .await? 
}, diff --git a/base_layer/p2p/tests/support/comms_and_services.rs b/base_layer/p2p/tests/support/comms_and_services.rs index 4bd2dca73f..a653cb4f7a 100644 --- a/base_layer/p2p/tests/support/comms_and_services.rs +++ b/base_layer/p2p/tests/support/comms_and_services.rs @@ -51,11 +51,14 @@ pub async fn setup_comms_services( .await .unwrap(); - let comms = comms.spawn_with_transport(MemoryTransport).await.unwrap(); + let mut comms = comms.spawn_with_transport(MemoryTransport).await.unwrap(); + let address = comms + .connection_manager_requester() + .wait_until_listening() + .await + .unwrap(); // Set the public address for tests - comms - .node_identity() - .add_public_address(comms.listening_address().clone()); + comms.node_identity().add_public_address(address.bind_address().clone()); (comms, dht, messaging_events) } diff --git a/base_layer/wallet/src/base_node_service/monitor.rs b/base_layer/wallet/src/base_node_service/monitor.rs index 6581ab4bf3..0acccd01e7 100644 --- a/base_layer/wallet/src/base_node_service/monitor.rs +++ b/base_layer/wallet/src/base_node_service/monitor.rs @@ -175,7 +175,7 @@ where self.db.set_chain_metadata(chain_metadata.clone())?; let is_synced = tip_info.is_synced; - let height_of_longest_chain = chain_metadata.height_of_longest_chain(); + let best_block_height = chain_metadata.best_block_height(); let new_block = self .update_state(BaseNodeState { @@ -191,7 +191,7 @@ where target: LOG_TARGET, "Base node {} Tip: {} ({}) Latency: {} ms", base_node_id, - height_of_longest_chain, + best_block_height, if is_synced { "Synced" } else { "Syncing..." 
}, latency.as_millis() ); @@ -221,11 +221,11 @@ where let mut lock = self.state.write().await; let (new_block_detected, height, hash) = match (new_state.chain_metadata.clone(), lock.chain_metadata.clone()) { (Some(new_metadata), Some(old_metadata)) => ( - new_metadata.best_block() != old_metadata.best_block(), - new_metadata.height_of_longest_chain(), - *new_metadata.best_block(), + new_metadata.best_block_hash() != old_metadata.best_block_hash(), + new_metadata.best_block_height(), + *new_metadata.best_block_hash(), ), - (Some(new_metadata), _) => (true, new_metadata.height_of_longest_chain(), *new_metadata.best_block()), + (Some(new_metadata), _) => (true, new_metadata.best_block_height(), *new_metadata.best_block_hash()), (None, _) => (false, 0, BlockHashType::default()), }; diff --git a/base_layer/wallet/src/output_manager_service/service.rs b/base_layer/wallet/src/output_manager_service/service.rs index ed734105d9..1bc1a46ac9 100644 --- a/base_layer/wallet/src/output_manager_service/service.rs +++ b/base_layer/wallet/src/output_manager_service/service.rs @@ -226,7 +226,7 @@ where .map(|_| OutputManagerResponse::OutputMetadataSignatureUpdated), OutputManagerRequest::GetBalance => { let current_tip_for_time_lock_calculation = match self.base_node_service.get_chain_metadata().await { - Ok(metadata) => metadata.map(|m| m.height_of_longest_chain()), + Ok(metadata) => metadata.map(|m| m.best_block_height()), Err(_) => None, }; self.get_balance(current_tip_for_time_lock_calculation) @@ -1284,7 +1284,7 @@ where target: LOG_TARGET, "select_utxos selection criteria: {}", selection_criteria ); - let tip_height = chain_metadata.as_ref().map(|m| m.height_of_longest_chain()); + let tip_height = chain_metadata.as_ref().map(|m| m.best_block_height()); let uo = self .resources .db @@ -1352,7 +1352,7 @@ where let enough_spendable = utxos_total_value > amount + fee_with_change; if !perfect_utxo_selection && !enough_spendable { - let current_tip_for_time_lock_calculation = 
chain_metadata.map(|cm| cm.height_of_longest_chain()); + let current_tip_for_time_lock_calculation = chain_metadata.map(|cm| cm.best_block_height()); let balance = self.get_balance(current_tip_for_time_lock_calculation)?; let pending_incoming = balance.pending_incoming_balance; if utxos_total_value + pending_incoming >= amount + fee_with_change { diff --git a/base_layer/wallet/src/transaction_service/handle.rs b/base_layer/wallet/src/transaction_service/handle.rs index 5a0a51ae77..b9c353c2e7 100644 --- a/base_layer/wallet/src/transaction_service/handle.rs +++ b/base_layer/wallet/src/transaction_service/handle.rs @@ -82,6 +82,7 @@ pub enum TransactionServiceRequest { GetCancelledCompletedTransactions, GetCompletedTransaction(TxId), GetAnyTransaction(TxId), + ImportTransaction(WalletTransaction), SendTransaction { destination: TariAddress, amount: MicroMinotari, @@ -166,6 +167,7 @@ impl fmt::Display for TransactionServiceRequest { Self::GetPendingInboundTransactions => write!(f, "GetPendingInboundTransactions"), Self::GetPendingOutboundTransactions => write!(f, "GetPendingOutboundTransactions"), Self::GetCompletedTransactions => write!(f, "GetCompletedTransactions"), + Self::ImportTransaction(tx) => write!(f, "ImportTransaction: {:?}", tx), Self::GetCancelledPendingInboundTransactions => write!(f, "GetCancelledPendingInboundTransactions"), Self::GetCancelledPendingOutboundTransactions => write!(f, "GetCancelledPendingOutboundTransactions"), Self::GetCancelledCompletedTransactions => write!(f, "GetCancelledCompletedTransactions"), @@ -244,6 +246,7 @@ impl fmt::Display for TransactionServiceRequest { #[derive(Debug)] pub enum TransactionServiceResponse { TransactionSent(TxId), + TransactionImported(TxId), BurntTransactionSent { tx_id: TxId, proof: Box, @@ -730,6 +733,17 @@ impl TransactionServiceHandle { } } + pub async fn import_transaction(&mut self, tx: WalletTransaction) -> Result { + match self + .handle + .call(TransactionServiceRequest::ImportTransaction(tx)) + 
.await?? + { + TransactionServiceResponse::TransactionImported(t) => Ok(t), + _ => Err(TransactionServiceError::UnexpectedApiResponse), + } + } + pub async fn import_utxo_with_status( &mut self, amount: MicroMinotari, diff --git a/base_layer/wallet/src/transaction_service/protocols/transaction_validation_protocol.rs b/base_layer/wallet/src/transaction_service/protocols/transaction_validation_protocol.rs index 06295138ae..80246318ee 100644 --- a/base_layer/wallet/src/transaction_service/protocols/transaction_validation_protocol.rs +++ b/base_layer/wallet/src/transaction_service/protocols/transaction_validation_protocol.rs @@ -286,13 +286,13 @@ where let sig = response.signature; if let Some(unconfirmed_tx) = batch_signatures.get(&sig) { if response.location == TxLocation::Mined && - response.block_hash.is_some() && + response.best_block_hash.is_some() && response.mined_timestamp.is_some() { mined.push(( (*unconfirmed_tx).clone(), - response.block_height, - response.block_hash.unwrap(), + response.best_block_height, + response.best_block_hash.unwrap(), response.confirmations, response.mined_timestamp.unwrap(), )); @@ -308,13 +308,13 @@ where } } - let tip = batch_response.tip_hash.try_into()?; + let tip = batch_response.best_block_hash.try_into()?; Ok(( mined, unmined, Some(( - batch_response.height_of_longest_chain, + batch_response.best_block_height, tip, batch_response.tip_mined_timestamp, )), diff --git a/base_layer/wallet/src/transaction_service/service.rs b/base_layer/wallet/src/transaction_service/service.rs index a8983805ef..884c4b0074 100644 --- a/base_layer/wallet/src/transaction_service/service.rs +++ b/base_layer/wallet/src/transaction_service/service.rs @@ -114,7 +114,11 @@ use crate::{ }, storage::{ database::{TransactionBackend, TransactionDatabase}, - models::{CompletedTransaction, TxCancellationReason}, + models::{ + CompletedTransaction, + TxCancellationReason, + WalletTransaction::{Completed, PendingInbound, PendingOutbound}, + }, }, tasks::{ 
check_faux_transaction_status::check_detected_transactions, @@ -776,6 +780,26 @@ where TransactionServiceRequest::GetAnyTransaction(tx_id) => Ok(TransactionServiceResponse::AnyTransaction( Box::new(self.db.get_any_transaction(tx_id)?), )), + TransactionServiceRequest::ImportTransaction(tx) => { + let tx_id = match tx { + PendingInbound(inbound_tx) => { + let tx_id = inbound_tx.tx_id; + self.db.insert_pending_inbound_transaction(tx_id, inbound_tx)?; + tx_id + }, + PendingOutbound(outbound_tx) => { + let tx_id = outbound_tx.tx_id; + self.db.insert_pending_outbound_transaction(tx_id, outbound_tx)?; + tx_id + }, + Completed(completed_tx) => { + let tx_id = completed_tx.tx_id; + self.db.insert_completed_transaction(tx_id, completed_tx)?; + tx_id + }, + }; + Ok(TransactionServiceResponse::TransactionImported(tx_id)) + }, TransactionServiceRequest::ImportUtxoWithStatus { amount, source_address, @@ -926,7 +950,7 @@ where Err(_) => None, }; let tip_height = match metadata { - Some(val) => val.height_of_longest_chain(), + Some(val) => val.best_block_height(), None => 0u64, }; let event_publisher = self.event_publisher.clone(); diff --git a/base_layer/wallet/src/transaction_service/storage/database.rs b/base_layer/wallet/src/transaction_service/storage/database.rs index 8622d6bc70..7e22f7133f 100644 --- a/base_layer/wallet/src/transaction_service/storage/database.rs +++ b/base_layer/wallet/src/transaction_service/storage/database.rs @@ -320,6 +320,30 @@ where T: TransactionBackend + 'static ))) } + pub fn insert_pending_inbound_transaction( + &self, + tx_id: TxId, + transaction: InboundTransaction, + ) -> Result, TransactionStorageError> { + self.db + .write(WriteOperation::Insert(DbKeyValuePair::PendingInboundTransaction( + tx_id, + Box::new(transaction), + ))) + } + + pub fn insert_pending_outbound_transaction( + &self, + tx_id: TxId, + transaction: OutboundTransaction, + ) -> Result, TransactionStorageError> { + self.db + 
.write(WriteOperation::Insert(DbKeyValuePair::PendingOutboundTransaction( + tx_id, + Box::new(transaction), + ))) + } + pub fn get_pending_outbound_transaction( &self, tx_id: TxId, diff --git a/base_layer/wallet/src/transaction_service/storage/models.rs b/base_layer/wallet/src/transaction_service/storage/models.rs index 1dbc29ab03..77208cdbd0 100644 --- a/base_layer/wallet/src/transaction_service/storage/models.rs +++ b/base_layer/wallet/src/transaction_service/storage/models.rs @@ -303,7 +303,7 @@ impl From for CompletedTransaction { } } -#[derive(Debug)] +#[derive(Debug, Serialize, Deserialize, Clone)] #[allow(clippy::large_enum_variant)] pub enum WalletTransaction { PendingInbound(InboundTransaction), diff --git a/base_layer/wallet/src/utxo_scanner_service/utxo_scanner_task.rs b/base_layer/wallet/src/utxo_scanner_service/utxo_scanner_task.rs index 550c3d0e66..ebd3e6f544 100644 --- a/base_layer/wallet/src/utxo_scanner_service/utxo_scanner_task.rs +++ b/base_layer/wallet/src/utxo_scanner_service/utxo_scanner_task.rs @@ -107,7 +107,8 @@ where Some(peer) => match self.attempt_sync(peer.clone()).await { Ok((num_outputs_recovered, final_height, final_amount, elapsed)) => { debug!(target: LOG_TARGET, "Scanned to height #{}", final_height); - self.finalize(num_outputs_recovered, final_height, final_amount, elapsed)?; + self.finalize(num_outputs_recovered, final_height, final_amount, elapsed) + .await?; return Ok(()); }, Err(e) => { @@ -146,13 +147,18 @@ where } } - fn finalize( - &self, + async fn finalize( + &mut self, num_outputs_recovered: u64, final_height: u64, total_value: MicroMinotari, elapsed: Duration, ) -> Result<(), UtxoScannerError> { + if num_outputs_recovered > 0 { + // this is a best effort, if this fails, its very likely that it's already busy with a validation. 
+ let _result = self.resources.output_manager_service.validate_txos().await; + let _result = self.resources.transaction_service.validate_transactions().await; + } self.publish_event(UtxoScannerEvent::Progress { current_height: final_height, tip_height: final_height, @@ -327,7 +333,7 @@ where client: &mut BaseNodeWalletRpcClient, ) -> Result { let tip_info = client.get_tip_info().await?; - let chain_height = tip_info.metadata.map(|m| m.height_of_longest_chain()).unwrap_or(0); + let chain_height = tip_info.metadata.map(|m| m.best_block_height()).unwrap_or(0); let end_header = client.get_header_by_height(chain_height).await?; let end_header = BlockHeader::try_from(end_header).map_err(UtxoScannerError::ConversionError)?; diff --git a/base_layer/wallet/src/wallet.rs b/base_layer/wallet/src/wallet.rs index ce2d79f9e1..28e2c3457f 100644 --- a/base_layer/wallet/src/wallet.rs +++ b/base_layer/wallet/src/wallet.rs @@ -20,10 +20,11 @@ // WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE // USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-use std::{cmp, marker::PhantomData, sync::Arc}; +use std::{cmp, marker::PhantomData, sync::Arc, thread}; use blake2::Blake2b; use digest::consts::U32; +use futures::executor::block_on; use log::*; use rand::rngs::OsRng; use tari_common::configuration::bootstrap::ApplicationType; @@ -33,9 +34,10 @@ use tari_common_types::{ types::{ComAndPubSignature, Commitment, PrivateKey, PublicKey, SignatureWithDomain}, }; use tari_comms::{ - multiaddr::Multiaddr, + multiaddr::{Error as MultiaddrError, Multiaddr}, net_address::{MultiaddressesWithStats, PeerAddressSource}, peer_manager::{NodeId, Peer, PeerFeatures, PeerFlags}, + tor::TorIdentity, types::{CommsPublicKey, CommsSecretKey}, CommsNode, NodeIdentity, @@ -72,6 +74,7 @@ use tari_p2p::{ initialization::P2pInitializer, services::liveness::{config::LivenessConfig, LivenessInitializer}, PeerSeedsConfig, + TransportType, }; use tari_script::{one_sided_payment_script, ExecutionStack, TariScript}; use tari_service_framework::StackBuilder; @@ -252,14 +255,52 @@ where let mut handles = stack.build().await?; + let transaction_service_handle = handles.expect_handle::(); let comms = handles .take_handle::() .expect("P2pInitializer was not added to the stack"); - let comms = initialization::spawn_comms_using_transport(comms, config.p2p.transport).await?; + let comms = if config.p2p.transport.transport_type == TransportType::Tor { + let wallet_db = wallet_database.clone(); + let node_id = comms.node_identity(); + let moved_ts_clone = transaction_service_handle.clone(); + let after_comms = move |identity: TorIdentity| { + // we do this so that we dont have to move in a mut ref and making the closure a FnMut. 
+ let mut ts = moved_ts_clone.clone(); + let address_string = format!("/onion3/{}:{}", identity.service_id, identity.onion_port); + if let Err(e) = wallet_db.set_tor_identity(identity) { + error!(target: LOG_TARGET, "Failed to set wallet db tor identity{:?}", e); + } + let result: Result = address_string.parse(); + if result.is_err() { + error!(target: LOG_TARGET, "Failed to parse tor identity as multiaddr{:?}", result); + return; + } + let address = result.unwrap(); + if !node_id.public_addresses().contains(&address) { + node_id.add_public_address(address.clone()); + } + // Persist the comms node address and features after it has been spawned to capture any modifications + // made during comms startup. In the case of a Tor Transport the public address could + // have been generated + let _result = wallet_db.set_node_address(address); + thread::spawn(move || { + let result = block_on(ts.restart_transaction_protocols()); + if result.is_err() { + warn!( + target: LOG_TARGET, + "Could not restart transaction negotiation protocols: {:?}", result + ); + } + }); + }; + initialization::spawn_comms_using_transport(comms, config.p2p.transport, after_comms).await? + } else { + let after_comms = |_identity| {}; + initialization::spawn_comms_using_transport(comms, config.p2p.transport, after_comms).await? + }; let mut output_manager_handle = handles.expect_handle::(); let key_manager_handle = handles.expect_handle::(); - let transaction_service_handle = handles.expect_handle::(); let contacts_handle = handles.expect_handle::(); let dht = handles.expect_handle::(); let store_and_forward_requester = dht.store_and_forward_requester(); @@ -280,14 +321,6 @@ where e })?; - // Persist the comms node address and features after it has been spawned to capture any modifications made - // during comms startup. 
In the case of a Tor Transport the public address could have been generated - wallet_database.set_node_address( - comms - .node_identity() - .first_public_address() - .ok_or(WalletError::PublicAddressNotSet)?, - )?; wallet_database.set_node_features(comms.node_identity().features())?; let identity_sig = comms.node_identity().identity_signature_read().as_ref().cloned(); if let Some(identity_sig) = identity_sig { diff --git a/base_layer/wallet/tests/support/comms_and_services.rs b/base_layer/wallet/tests/support/comms_and_services.rs index b6c7344f0e..cf53a469a3 100644 --- a/base_layer/wallet/tests/support/comms_and_services.rs +++ b/base_layer/wallet/tests/support/comms_and_services.rs @@ -58,11 +58,14 @@ pub async fn setup_comms_services( .await .unwrap(); - let comms = comms.spawn_with_transport(MemoryTransport).await.unwrap(); + let mut comms = comms.spawn_with_transport(MemoryTransport).await.unwrap(); + let address = comms + .connection_manager_requester() + .wait_until_listening() + .await + .unwrap(); // Set the public address for tests - comms - .node_identity() - .add_public_address(comms.listening_address().clone()); + comms.node_identity().add_public_address(address.bind_address().clone()); (comms, dht) } diff --git a/base_layer/wallet/tests/support/comms_rpc.rs b/base_layer/wallet/tests/support/comms_rpc.rs index 6d4a41e4b6..351c9c692b 100644 --- a/base_layer/wallet/tests/support/comms_rpc.rs +++ b/base_layer/wallet/tests/support/comms_rpc.rs @@ -134,23 +134,23 @@ impl BaseNodeWalletRpcMockState { })), transaction_query_response: Arc::new(Mutex::new(TxQueryResponse { location: TxLocation::InMempool, - block_hash: None, + best_block_hash: None, confirmations: 0, is_synced: true, - height_of_longest_chain: 0, + best_block_height: 0, mined_timestamp: None, })), transaction_query_batch_response: Arc::new(Mutex::new(TxQueryBatchResponsesProto { responses: vec![], - tip_hash: FixedHash::zero().to_vec(), + best_block_hash: FixedHash::zero().to_vec(), is_synced: 
true, - height_of_longest_chain: 0, + best_block_height: 0, tip_mined_timestamp: EpochTime::now().as_u64(), })), tip_info_response: Arc::new(Mutex::new(TipInfoResponse { metadata: Some(ChainMetadataProto { - height_of_longest_chain: std::i64::MAX as u64, - best_block: FixedHash::zero().to_vec(), + best_block_height: std::i64::MAX as u64, + best_block_hash: FixedHash::zero().to_vec(), accumulated_difficulty: Vec::new(), pruned_height: 0, timestamp: EpochTime::now().as_u64(), @@ -930,8 +930,8 @@ mod test { assert_eq!(calls.len(), 1); let chain_metadata = ChainMetadata { - height_of_longest_chain: 444, - best_block: vec![], + best_block_height: 444, + best_block_hash: vec![], accumulated_difficulty: vec![], pruned_height: 0, timestamp: EpochTime::now().as_u64(), @@ -943,6 +943,6 @@ mod test { let resp = client.get_tip_info().await.unwrap(); assert!(!resp.is_synced); - assert_eq!(resp.metadata.unwrap().height_of_longest_chain(), 444); + assert_eq!(resp.metadata.unwrap().best_block_height(), 444); } } diff --git a/base_layer/wallet/tests/support/output_manager_service_mock.rs b/base_layer/wallet/tests/support/output_manager_service_mock.rs index 87cc1b4b1c..9f9e6ea3fc 100644 --- a/base_layer/wallet/tests/support/output_manager_service_mock.rs +++ b/base_layer/wallet/tests/support/output_manager_service_mock.rs @@ -146,6 +146,7 @@ impl OutputManagerServiceMock { e }); }, + OutputManagerRequest::ValidateUtxos => {}, _ => panic!("Output Manager Service Mock does not support this call"), } } diff --git a/base_layer/wallet/tests/support/transaction_service_mock.rs b/base_layer/wallet/tests/support/transaction_service_mock.rs index 7365cf9935..59cdba9e91 100644 --- a/base_layer/wallet/tests/support/transaction_service_mock.rs +++ b/base_layer/wallet/tests/support/transaction_service_mock.rs @@ -110,6 +110,7 @@ impl TransactionServiceMock { e }); }, + TransactionServiceRequest::ValidateTransactions => {}, _ => panic!("Transaction Service Mock does not support this call"), } } 
diff --git a/base_layer/wallet/tests/transaction_service_tests/service.rs b/base_layer/wallet/tests/transaction_service_tests/service.rs index 0a7925ac27..f12b8d67be 100644 --- a/base_layer/wallet/tests/transaction_service_tests/service.rs +++ b/base_layer/wallet/tests/transaction_service_tests/service.rs @@ -2588,10 +2588,10 @@ async fn test_power_mode_updates() { .base_node_rpc_mock_state .set_transaction_query_response(TxQueryResponse { location: TxLocation::NotStored, - block_hash: None, + best_block_hash: None, confirmations: 0, is_synced: true, - height_of_longest_chain: 10, + best_block_height: 10, mined_timestamp: None, }); @@ -4995,10 +4995,10 @@ async fn transaction_service_tx_broadcast() { .base_node_rpc_mock_state .set_transaction_query_response(TxQueryResponse { location: TxLocation::Mined, - block_hash: None, + best_block_hash: None, confirmations: TransactionServiceConfig::default().num_confirmations_required, is_synced: true, - height_of_longest_chain: 0, + best_block_height: 0, mined_timestamp: None, }); @@ -5064,10 +5064,10 @@ async fn transaction_service_tx_broadcast() { .base_node_rpc_mock_state .set_transaction_query_response(TxQueryResponse { location: TxLocation::NotStored, - block_hash: None, + best_block_hash: None, confirmations: TransactionServiceConfig::default().num_confirmations_required, is_synced: true, - height_of_longest_chain: 0, + best_block_height: 0, mined_timestamp: None, }); @@ -5208,10 +5208,10 @@ async fn broadcast_all_completed_transactions_on_startup() { .base_node_rpc_mock_state .set_transaction_query_response(TxQueryResponse { location: TxLocation::Mined, - block_hash: None, + best_block_hash: None, confirmations: TransactionServiceConfig::default().num_confirmations_required, is_synced: true, - height_of_longest_chain: 0, + best_block_height: 0, mined_timestamp: None, }); diff --git a/base_layer/wallet/tests/transaction_service_tests/transaction_protocols.rs 
b/base_layer/wallet/tests/transaction_service_tests/transaction_protocols.rs index 9e4772b65e..95ed908ad6 100644 --- a/base_layer/wallet/tests/transaction_service_tests/transaction_protocols.rs +++ b/base_layer/wallet/tests/transaction_service_tests/transaction_protocols.rs @@ -417,10 +417,10 @@ async fn tx_broadcast_protocol_restart_protocol_as_query() { // Set Base Node query response to be not stored, as if the base node does not have the tx in its pool rpc_service_state.set_transaction_query_response(TxQueryResponse { location: TxLocation::NotStored, - block_hash: None, + best_block_hash: None, confirmations: 0, is_synced: true, - height_of_longest_chain: 0, + best_block_height: 0, mined_timestamp: None, }); @@ -447,10 +447,10 @@ async fn tx_broadcast_protocol_restart_protocol_as_query() { // Set Base Node query response to be InMempool as if the base node does not have the tx in its pool rpc_service_state.set_transaction_query_response(TxQueryResponse { location: TxLocation::InMempool, - block_hash: None, + best_block_hash: None, confirmations: 0, is_synced: true, - height_of_longest_chain: 0, + best_block_height: 0, mined_timestamp: None, }); @@ -469,10 +469,10 @@ async fn tx_broadcast_protocol_restart_protocol_as_query() { // Set base node response to mined and confirmed rpc_service_state.set_transaction_query_response(TxQueryResponse { location: TxLocation::Mined, - block_hash: None, + best_block_hash: None, confirmations: resources.config.num_confirmations_required, is_synced: true, - height_of_longest_chain: 0, + best_block_height: 0, mined_timestamp: None, }); @@ -526,10 +526,10 @@ async fn tx_broadcast_protocol_submit_success_followed_by_rejection() { // Accepted in the mempool on submit but not query rpc_service_state.set_transaction_query_response(TxQueryResponse { location: TxLocation::NotStored, - block_hash: None, + best_block_hash: None, confirmations: 0, is_synced: true, - height_of_longest_chain: 0, + best_block_height: 0, mined_timestamp: None, 
}); @@ -629,10 +629,10 @@ async fn tx_broadcast_protocol_submit_already_mined() { // Set base node response to mined and confirmed rpc_service_state.set_transaction_query_response(TxQueryResponse { location: TxLocation::Mined, - block_hash: None, + best_block_hash: None, confirmations: resources.config.num_confirmations_required, is_synced: true, - height_of_longest_chain: 10, + best_block_height: 10, mined_timestamp: None, }); @@ -667,10 +667,10 @@ async fn tx_broadcast_protocol_submit_and_base_node_gets_changed() { rpc_service_state.set_transaction_query_response(TxQueryResponse { location: TxLocation::NotStored, - block_hash: None, + best_block_hash: None, confirmations: 1, is_synced: true, - height_of_longest_chain: 0, + best_block_height: 0, mined_timestamp: None, }); @@ -711,10 +711,10 @@ async fn tx_broadcast_protocol_submit_and_base_node_gets_changed() { // Set new Base Node response to be accepted new_rpc_service_state.set_transaction_query_response(TxQueryResponse { location: TxLocation::InMempool, - block_hash: None, + best_block_hash: None, confirmations: resources.config.num_confirmations_required, is_synced: true, - height_of_longest_chain: 0, + best_block_height: 0, mined_timestamp: None, }); @@ -787,17 +787,17 @@ async fn tx_validation_protocol_tx_becomes_mined_unconfirmed_then_confirmed() { tx2.transaction.first_kernel_excess_sig().unwrap().clone(), )), location: TxLocationProto::from(TxLocation::Mined) as i32, - block_hash: [1u8; 32].to_vec(), + best_block_hash: [1u8; 32].to_vec(), confirmations: 0, - block_height: 1, + best_block_height: 1, mined_timestamp: timestamp, }]; let mut batch_query_response = TxQueryBatchResponsesProto { responses: transaction_query_batch_responses.clone(), is_synced: true, - tip_hash: [1u8; 32].to_vec(), - height_of_longest_chain: 1, + best_block_hash: [1u8; 32].to_vec(), + best_block_height: 1, tip_mined_timestamp: timestamp, }; @@ -859,17 +859,17 @@ async fn 
tx_validation_protocol_tx_becomes_mined_unconfirmed_then_confirmed() { tx2.transaction.first_kernel_excess_sig().unwrap().clone(), )), location: TxLocationProto::from(TxLocation::Mined) as i32, - block_hash: [5u8; 32].to_vec(), + best_block_hash: [5u8; 32].to_vec(), confirmations: 4, - block_height: 5, + best_block_height: 5, mined_timestamp: timestamp, }]; let batch_query_response = TxQueryBatchResponsesProto { responses: transaction_query_batch_responses.clone(), is_synced: true, - tip_hash: [5u8; 32].to_vec(), - height_of_longest_chain: 5, + best_block_hash: [5u8; 32].to_vec(), + best_block_height: 5, tip_mined_timestamp: timestamp, }; @@ -940,17 +940,17 @@ async fn tx_revalidation() { tx2.transaction.first_kernel_excess_sig().unwrap().clone(), )), location: TxLocationProto::from(TxLocation::Mined) as i32, - block_hash: [5u8; 32].to_vec(), + best_block_hash: [5u8; 32].to_vec(), confirmations: 4, - block_height: 5, + best_block_height: 5, mined_timestamp: timestamp, }]; let batch_query_response = TxQueryBatchResponsesProto { responses: transaction_query_batch_responses.clone(), is_synced: true, - tip_hash: [5u8; 32].to_vec(), - height_of_longest_chain: 5, + best_block_hash: [5u8; 32].to_vec(), + best_block_height: 5, tip_mined_timestamp: timestamp, }; @@ -981,17 +981,17 @@ async fn tx_revalidation() { tx2.transaction.first_kernel_excess_sig().unwrap().clone(), )), location: TxLocationProto::from(TxLocation::Mined) as i32, - block_hash: [5u8; 32].to_vec(), + best_block_hash: [5u8; 32].to_vec(), confirmations: 8, - block_height: 10, + best_block_height: 10, mined_timestamp: timestamp, }]; let batch_query_response = TxQueryBatchResponsesProto { responses: transaction_query_batch_responses.clone(), is_synced: true, - tip_hash: [5u8; 32].to_vec(), - height_of_longest_chain: 10, + best_block_hash: [5u8; 32].to_vec(), + best_block_height: 10, tip_mined_timestamp: timestamp, }; @@ -1101,9 +1101,9 @@ async fn tx_validation_protocol_reorg() { 
tx1.transaction.first_kernel_excess_sig().unwrap().clone(), )), location: TxLocationProto::from(TxLocation::Mined) as i32, - block_hash: block_headers.get(&5).unwrap().hash().to_vec(), + best_block_hash: block_headers.get(&5).unwrap().hash().to_vec(), confirmations: 5, - block_height: 5, + best_block_height: 5, mined_timestamp: timestamp, }, TxQueryBatchResponseProto { @@ -1111,9 +1111,9 @@ async fn tx_validation_protocol_reorg() { tx2.transaction.first_kernel_excess_sig().unwrap().clone(), )), location: TxLocationProto::from(TxLocation::Mined) as i32, - block_hash: block_headers.get(&6).unwrap().hash().to_vec(), + best_block_hash: block_headers.get(&6).unwrap().hash().to_vec(), confirmations: 4, - block_height: 6, + best_block_height: 6, mined_timestamp: timestamp, }, TxQueryBatchResponseProto { @@ -1121,9 +1121,9 @@ async fn tx_validation_protocol_reorg() { tx3.transaction.first_kernel_excess_sig().unwrap().clone(), )), location: TxLocationProto::from(TxLocation::Mined) as i32, - block_hash: block_headers.get(&7).unwrap().hash().to_vec(), + best_block_hash: block_headers.get(&7).unwrap().hash().to_vec(), confirmations: 3, - block_height: 7, + best_block_height: 7, mined_timestamp: timestamp, }, TxQueryBatchResponseProto { @@ -1131,9 +1131,9 @@ async fn tx_validation_protocol_reorg() { tx4.transaction.first_kernel_excess_sig().unwrap().clone(), )), location: TxLocationProto::from(TxLocation::Mined) as i32, - block_hash: block_headers.get(&8).unwrap().hash().to_vec(), + best_block_hash: block_headers.get(&8).unwrap().hash().to_vec(), confirmations: 2, - block_height: 8, + best_block_height: 8, mined_timestamp: timestamp, }, TxQueryBatchResponseProto { @@ -1141,9 +1141,9 @@ async fn tx_validation_protocol_reorg() { tx5.transaction.first_kernel_excess_sig().unwrap().clone(), )), location: TxLocationProto::from(TxLocation::Mined) as i32, - block_hash: block_headers.get(&9).unwrap().hash().to_vec(), + best_block_hash: block_headers.get(&9).unwrap().hash().to_vec(), 
confirmations: 1, - block_height: 9, + best_block_height: 9, mined_timestamp: timestamp, }, TxQueryBatchResponseProto { @@ -1151,9 +1151,9 @@ async fn tx_validation_protocol_reorg() { tx6.transaction.first_kernel_excess_sig().unwrap().clone(), )), location: TxLocationProto::from(TxLocation::Mined) as i32, - block_hash: block_headers.get(&8).unwrap().hash().to_vec(), + best_block_hash: block_headers.get(&8).unwrap().hash().to_vec(), confirmations: 2, - block_height: 8, + best_block_height: 8, mined_timestamp: timestamp, }, TxQueryBatchResponseProto { @@ -1161,9 +1161,9 @@ async fn tx_validation_protocol_reorg() { tx7.transaction.first_kernel_excess_sig().unwrap().clone(), )), location: TxLocationProto::from(TxLocation::Mined) as i32, - block_hash: block_headers.get(&9).unwrap().hash().to_vec(), + best_block_hash: block_headers.get(&9).unwrap().hash().to_vec(), confirmations: 1, - block_height: 9, + best_block_height: 9, mined_timestamp: timestamp, }, ]; @@ -1171,8 +1171,8 @@ async fn tx_validation_protocol_reorg() { let batch_query_response = TxQueryBatchResponsesProto { responses: transaction_query_batch_responses.clone(), is_synced: true, - tip_hash: block_headers.get(&10).unwrap().hash().to_vec(), - height_of_longest_chain: 10, + best_block_hash: block_headers.get(&10).unwrap().hash().to_vec(), + best_block_height: 10, tip_mined_timestamp: timestamp, }; @@ -1220,9 +1220,9 @@ async fn tx_validation_protocol_reorg() { tx1.transaction.first_kernel_excess_sig().unwrap().clone(), )), location: TxLocationProto::from(TxLocation::Mined) as i32, - block_hash: block_headers.get(&5).unwrap().hash().to_vec(), + best_block_hash: block_headers.get(&5).unwrap().hash().to_vec(), confirmations: 4, - block_height: 5, + best_block_height: 5, mined_timestamp: timestamp, }, TxQueryBatchResponseProto { @@ -1230,9 +1230,9 @@ async fn tx_validation_protocol_reorg() { tx2.transaction.first_kernel_excess_sig().unwrap().clone(), )), location: TxLocationProto::from(TxLocation::Mined) as 
i32, - block_hash: block_headers.get(&6).unwrap().hash().to_vec(), + best_block_hash: block_headers.get(&6).unwrap().hash().to_vec(), confirmations: 3, - block_height: 6, + best_block_height: 6, mined_timestamp: timestamp, }, TxQueryBatchResponseProto { @@ -1240,9 +1240,9 @@ async fn tx_validation_protocol_reorg() { tx3.transaction.first_kernel_excess_sig().unwrap().clone(), )), location: TxLocationProto::from(TxLocation::Mined) as i32, - block_hash: block_headers.get(&7).unwrap().hash().to_vec(), + best_block_hash: block_headers.get(&7).unwrap().hash().to_vec(), confirmations: 2, - block_height: 7, + best_block_height: 7, mined_timestamp: timestamp, }, TxQueryBatchResponseProto { @@ -1250,9 +1250,9 @@ async fn tx_validation_protocol_reorg() { tx5.transaction.first_kernel_excess_sig().unwrap().clone(), )), location: TxLocationProto::from(TxLocation::Mined) as i32, - block_hash: block_headers.get(&8).unwrap().hash().to_vec(), + best_block_hash: block_headers.get(&8).unwrap().hash().to_vec(), confirmations: 1, - block_height: 8, + best_block_height: 8, mined_timestamp: timestamp, }, TxQueryBatchResponseProto { @@ -1260,9 +1260,9 @@ async fn tx_validation_protocol_reorg() { tx6.transaction.first_kernel_excess_sig().unwrap().clone(), )), location: TxLocationProto::from(TxLocation::NotStored) as i32, - block_hash: vec![], + best_block_hash: vec![], confirmations: 0, - block_height: 0, + best_block_height: 0, mined_timestamp: 0, }, TxQueryBatchResponseProto { @@ -1270,9 +1270,9 @@ async fn tx_validation_protocol_reorg() { tx7.transaction.first_kernel_excess_sig().unwrap().clone(), )), location: TxLocationProto::from(TxLocation::NotStored) as i32, - block_hash: vec![], + best_block_hash: vec![], confirmations: 0, - block_height: 0, + best_block_height: 0, mined_timestamp: 0, }, ]; @@ -1280,8 +1280,8 @@ async fn tx_validation_protocol_reorg() { let batch_query_response = TxQueryBatchResponsesProto { responses: transaction_query_batch_responses.clone(), is_synced: true, - 
tip_hash: block_headers.get(&8).unwrap().hash().to_vec(), - height_of_longest_chain: 8, + best_block_hash: block_headers.get(&8).unwrap().hash().to_vec(), + best_block_height: 8, tip_mined_timestamp: timestamp, }; diff --git a/base_layer/wallet/tests/utxo_scanner/mod.rs b/base_layer/wallet/tests/utxo_scanner/mod.rs index 414a94335e..ba46a85ec7 100644 --- a/base_layer/wallet/tests/utxo_scanner/mod.rs +++ b/base_layer/wallet/tests/utxo_scanner/mod.rs @@ -313,8 +313,8 @@ async fn test_utxo_scanner_recovery() { test_interface.rpc_service_state.set_blocks(block_headers.clone()); let chain_metadata = ChainMetadata { - height_of_longest_chain: NUM_BLOCKS - 1, - best_block: block_headers.get(&(NUM_BLOCKS - 1)).unwrap().clone().hash().to_vec(), + best_block_height: NUM_BLOCKS - 1, + best_block_hash: block_headers.get(&(NUM_BLOCKS - 1)).unwrap().clone().hash().to_vec(), accumulated_difficulty: Vec::new(), pruned_height: 0, timestamp: 0, @@ -412,8 +412,8 @@ async fn test_utxo_scanner_recovery_with_restart() { test_interface.rpc_service_state.set_blocks(block_headers.clone()); let chain_metadata = ChainMetadata { - height_of_longest_chain: NUM_BLOCKS - 1, - best_block: block_headers.get(&(NUM_BLOCKS - 1)).unwrap().clone().hash().to_vec(), + best_block_height: NUM_BLOCKS - 1, + best_block_hash: block_headers.get(&(NUM_BLOCKS - 1)).unwrap().clone().hash().to_vec(), accumulated_difficulty: Vec::new(), pruned_height: 0, timestamp: 0, @@ -578,8 +578,8 @@ async fn test_utxo_scanner_recovery_with_restart_and_reorg() { test_interface.rpc_service_state.set_blocks(block_headers.clone()); let chain_metadata = ChainMetadata { - height_of_longest_chain: NUM_BLOCKS - 1, - best_block: block_headers.get(&(NUM_BLOCKS - 1)).unwrap().clone().hash().to_vec(), + best_block_height: NUM_BLOCKS - 1, + best_block_hash: block_headers.get(&(NUM_BLOCKS - 1)).unwrap().clone().hash().to_vec(), accumulated_difficulty: Vec::new(), pruned_height: 0, timestamp: 0, @@ -651,8 +651,8 @@ async fn 
test_utxo_scanner_recovery_with_restart_and_reorg() { .set_utxos_by_block(utxos_by_block.clone()); test_interface2.rpc_service_state.set_blocks(block_headers.clone()); let chain_metadata = ChainMetadata { - height_of_longest_chain: 9, - best_block: block_headers.get(&9).unwrap().clone().hash().to_vec(), + best_block_height: 9, + best_block_hash: block_headers.get(&9).unwrap().clone().hash().to_vec(), accumulated_difficulty: Vec::new(), pruned_height: 0, timestamp: 0, @@ -776,8 +776,8 @@ async fn test_utxo_scanner_scanned_block_cache_clearing() { test_interface.rpc_service_state.set_blocks(block_headers.clone()); let chain_metadata = ChainMetadata { - height_of_longest_chain: 800 + NUM_BLOCKS - 1, - best_block: block_headers + best_block_height: 800 + NUM_BLOCKS - 1, + best_block_hash: block_headers .get(&(800 + NUM_BLOCKS - 1)) .unwrap() .clone() @@ -878,8 +878,8 @@ async fn test_utxo_scanner_one_sided_payments() { test_interface.rpc_service_state.set_blocks(block_headers.clone()); let chain_metadata = ChainMetadata { - height_of_longest_chain: NUM_BLOCKS - 1, - best_block: block_headers.get(&(NUM_BLOCKS - 1)).unwrap().clone().hash().to_vec(), + best_block_height: NUM_BLOCKS - 1, + best_block_hash: block_headers.get(&(NUM_BLOCKS - 1)).unwrap().clone().hash().to_vec(), accumulated_difficulty: Vec::new(), pruned_height: 0, timestamp: 0, @@ -998,8 +998,8 @@ async fn test_utxo_scanner_one_sided_payments() { .set_one_sided_payment_message("new one-sided message".to_string()); let chain_metadata = ChainMetadata { - height_of_longest_chain: NUM_BLOCKS, - best_block: block_headers.get(&(NUM_BLOCKS)).unwrap().clone().hash().to_vec(), + best_block_height: NUM_BLOCKS, + best_block_hash: block_headers.get(&(NUM_BLOCKS)).unwrap().clone().hash().to_vec(), accumulated_difficulty: Vec::new(), pruned_height: 0, timestamp: 0, @@ -1014,7 +1014,7 @@ async fn test_utxo_scanner_one_sided_payments() { test_interface .base_node_service_event_publisher 
.send(Arc::new(BaseNodeEvent::NewBlockDetected( - chain_metadata.best_block.try_into().unwrap(), + chain_metadata.best_block_hash.try_into().unwrap(), 11, ))) .unwrap(); @@ -1085,8 +1085,8 @@ async fn test_birthday_timestamp_over_chain() { test_interface.rpc_service_state.set_blocks(block_headers.clone()); let chain_metadata = ChainMetadata { - height_of_longest_chain: NUM_BLOCKS - 1, - best_block: block_headers.get(&(NUM_BLOCKS - 1)).unwrap().clone().hash().to_vec(), + best_block_height: NUM_BLOCKS - 1, + best_block_hash: block_headers.get(&(NUM_BLOCKS - 1)).unwrap().clone().hash().to_vec(), accumulated_difficulty: Vec::new(), pruned_height: 0, timestamp: 0, diff --git a/base_layer/wallet_ffi/Cargo.toml b/base_layer/wallet_ffi/Cargo.toml index 26ff85f936..ddd092bd5c 100644 --- a/base_layer/wallet_ffi/Cargo.toml +++ b/base_layer/wallet_ffi/Cargo.toml @@ -26,7 +26,7 @@ chrono = { version = "0.4.19", default-features = false, features = ["serde"] } futures = { version = "^0.3.1", features =["compat", "std"]} libc = "0.2.65" log = "0.4.6" -log4rs = { git = "https://github.com/tari-project/log4rs.git", features = ["console_appender", "file_appender", "yaml_format"] } +log4rs = { version = "1.3.0", features = ["console_appender", "file_appender", "yaml_format"] } rand = "0.8" thiserror = "1.0.26" tokio = "1.23" diff --git a/base_layer/wallet_ffi/src/callback_handler.rs b/base_layer/wallet_ffi/src/callback_handler.rs index e6a170bcf1..cba4099123 100644 --- a/base_layer/wallet_ffi/src/callback_handler.rs +++ b/base_layer/wallet_ffi/src/callback_handler.rs @@ -660,8 +660,8 @@ where TBackend: TransactionBackend + 'static let state = match state.chain_metadata { None => TariBaseNodeState { node_id: state.node_id, - height_of_longest_chain: 0, - best_block: BlockHash::zero(), + best_block_height: 0, + best_block_hash: BlockHash::zero(), best_block_timestamp: 0, pruning_horizon: 0, pruned_height: 0, @@ -672,8 +672,8 @@ where TBackend: TransactionBackend + 'static 
Some(chain_metadata) => TariBaseNodeState { node_id: state.node_id, - height_of_longest_chain: chain_metadata.height_of_longest_chain(), - best_block: *chain_metadata.best_block(), + best_block_height: chain_metadata.best_block_height(), + best_block_hash: *chain_metadata.best_block_hash(), best_block_timestamp: chain_metadata.timestamp(), pruning_horizon: chain_metadata.pruning_horizon(), pruned_height: chain_metadata.pruned_height(), diff --git a/base_layer/wallet_ffi/src/ffi_basenode_state.rs b/base_layer/wallet_ffi/src/ffi_basenode_state.rs index 4d2880900b..d4c32188bc 100644 --- a/base_layer/wallet_ffi/src/ffi_basenode_state.rs +++ b/base_layer/wallet_ffi/src/ffi_basenode_state.rs @@ -41,10 +41,10 @@ pub struct TariBaseNodeState { pub node_id: Option, /// The current chain height, or the block number of the longest valid chain, or zero if there is no chain - pub height_of_longest_chain: u64, + pub best_block_height: u64, /// The block hash of the current tip of the longest valid chain - pub best_block: BlockHash, + pub best_block_hash: BlockHash, /// Timestamp of the tip block in the longest valid chain pub best_block_timestamp: u64, @@ -56,7 +56,7 @@ pub struct TariBaseNodeState { pub pruning_horizon: u64, /// The height of the pruning horizon. This indicates from what height a full block can be provided - /// (exclusive). If `pruned_height` is equal to the `height_of_longest_chain` no blocks can be + /// (exclusive). If `pruned_height` is equal to the `best_block_height` no blocks can be /// provided. Archival nodes wil always have an `pruned_height` of zero. 
pub pruned_height: u64, @@ -124,7 +124,7 @@ pub unsafe extern "C" fn basenode_state_get_height_of_the_longest_chain( return 0; } - (*ptr).height_of_longest_chain + (*ptr).best_block_height } /// Extracts a best block hash [`FixedHash`] represented as a vector of bytes wrapped into a `ByteVector` @@ -154,7 +154,7 @@ pub unsafe extern "C" fn basenode_state_get_best_block( return ptr::null_mut(); } - Box::into_raw(Box::new(ByteVector((*ptr).best_block.to_vec()))) + Box::into_raw(Box::new(ByteVector((*ptr).best_block_hash.to_vec()))) } /// Extracts a timestamp of the best block @@ -227,7 +227,7 @@ pub unsafe extern "C" fn basenode_state_get_pruning_horizon( /// /// ## Returns /// `c_ulonglong` - The height of the pruning horizon. This indicates from what height a full block can be provided -/// (exclusive). If `pruned_height` is equal to the `height_of_longest_chain` no blocks can be +/// (exclusive). If `pruned_height` is equal to the `best_block_height` no blocks can be /// provided. Archival nodes wil always have an `pruned_height` of zero. 
/// /// # Safety @@ -345,8 +345,8 @@ mod tests { let boxed_state = Box::into_raw(Box::new(TariBaseNodeState { node_id: Some(original_node_id.clone()), - height_of_longest_chain: 123, - best_block: original_best_block, + best_block_height: 123, + best_block_hash: original_best_block, best_block_timestamp: 12345, pruning_horizon: 456, pruned_height: 789, diff --git a/base_layer/wallet_ffi/src/lib.rs b/base_layer/wallet_ffi/src/lib.rs index 807c68ecf9..94e2dbd9ab 100644 --- a/base_layer/wallet_ffi/src/lib.rs +++ b/base_layer/wallet_ffi/src/lib.rs @@ -288,6 +288,7 @@ pub struct TariUtxo { pub value: u64, pub mined_height: u64, pub mined_timestamp: u64, + pub lock_height: u64, pub status: u8, } @@ -299,6 +300,7 @@ impl From for TariUtxo { .into_raw(), value: x.wallet_output.value.as_u64(), mined_height: x.mined_height.unwrap_or(0), + lock_height: x.wallet_output.features.maturity, mined_timestamp: x .mined_timestamp .map(|ts| ts.timestamp_millis() as u64) @@ -1237,6 +1239,38 @@ pub unsafe extern "C" fn tari_address_to_emoji_id( CString::into_raw(result) } +/// Creates a char array from a TariWalletAddress's network +/// +/// ## Arguments +/// `address` - The pointer to a TariWalletAddress +/// `error_out` - Pointer to an int which will be modified to an error code should one occur, may not be null. Functions +/// as an out parameter. +/// +/// ## Returns +/// `*mut c_char` - Returns a pointer to a char array. 
Note that it returns empty +/// if there was an error from TariWalletAddress +/// +/// # Safety +/// The ```string_destroy``` method must be called when finished with a string from rust to prevent a memory leak +#[no_mangle] +pub unsafe extern "C" fn tari_address_network(address: *mut TariWalletAddress, error_out: *mut c_int) -> *mut c_char { + let mut error = 0; + let mut result = CString::new("").expect("Blank CString will not fail."); + ptr::swap(error_out, &mut error as *mut c_int); + if address.is_null() { + error = LibWalletError::from(InterfaceError::NullError("address".to_string())).code; + ptr::swap(error_out, &mut error as *mut c_int); + return CString::into_raw(result); + } + let network_string = address + .as_ref() + .expect("Address should not be empty") + .network() + .to_string(); + result = CString::new(network_string).expect("string will not fail."); + CString::into_raw(result) +} + /// Creates a TariWalletAddress from a char array in emoji format /// /// ## Arguments @@ -5471,12 +5505,6 @@ pub unsafe extern "C" fn wallet_create( match w { Ok(w) => { - // lets ensure the wallet tor_id is saved, this could have been changed during wallet startup - if let Some(hs) = w.comms.hidden_service() { - if let Err(e) = w.db.set_tor_identity(hs.tor_identity().clone()) { - warn!(target: LOG_TARGET, "Could not save tor identity to db: {:?}", e); - } - } let wallet_address = TariAddress::new(w.comms.node_identity().public_key().clone(), w.network.as_network()); // Start Callback Handler @@ -5512,16 +5540,6 @@ pub unsafe extern "C" fn wallet_create( runtime.spawn(callback_handler.start()); - let mut ts = w.transaction_service.clone(); - runtime.spawn(async move { - if let Err(e) = ts.restart_transaction_protocols().await { - warn!( - target: LOG_TARGET, - "Could not restart transaction negotiation protocols: {:?}", e - ); - } - }); - let tari_wallet = TariWallet { wallet: w, runtime, diff --git a/base_layer/wallet_ffi/wallet.h b/base_layer/wallet_ffi/wallet.h 
index 329e4fa086..b48087bdc7 100644 --- a/base_layer/wallet_ffi/wallet.h +++ b/base_layer/wallet_ffi/wallet.h @@ -345,6 +345,7 @@ struct TariUtxo { uint64_t value; uint64_t mined_height; uint64_t mined_timestamp; + uint64_t lock_height; uint8_t status; }; @@ -768,6 +769,24 @@ TariWalletAddress *tari_address_from_hex(const char *address, char *tari_address_to_emoji_id(TariWalletAddress *address, int *error_out); +/** + * Creates a char array from a TariWalletAddress's network + * + * ## Arguments + * `address` - The pointer to a TariWalletAddress + * `error_out` - Pointer to an int which will be modified to an error code should one occur, may not be null. Functions + * as an out parameter. + * + * ## Returns + * `*mut c_char` - Returns a pointer to a char array. Note that it returns empty + * if there was an error from TariWalletAddress + * + * # Safety + * The ```string_destroy``` method must be called when finished with a string from rust to prevent a memory leak + */ +char *tari_address_network(TariWalletAddress *address, + int *error_out); + /** * Creates a TariWalletAddress from a char array in emoji format * @@ -4058,7 +4077,7 @@ unsigned long long basenode_state_get_pruning_horizon(struct TariBaseNodeState * * * ## Returns * `c_ulonglong` - The height of the pruning horizon. This indicates from what height a full block can be provided - * (exclusive). If `pruned_height` is equal to the `height_of_longest_chain` no blocks can be + * (exclusive). If `pruned_height` is equal to the `best_block_height` no blocks can be * provided. Archival nodes wil always have an `pruned_height` of zero. * * # Safety diff --git a/changelog-development.md b/changelog-development.md index fc64291b98..aea880fee3 100644 --- a/changelog-development.md +++ b/changelog-development.md @@ -2,6 +2,185 @@ All notable changes to this project will be documented in this file. See [standard-version](https://github.com/conventional-changelog/standard-version) for commit guidelines. 
+## [1.0.0-pre.8](https://github.com/tari-project/tari/compare/v1.0.0-pre.7...v1.0.0-pre.8) (2024-02-06) + + +### Bug Fixes + +* **comms:** correctly initialize hidden service ([#6124](https://github.com/tari-project/tari/issues/6124)) ([0584782](https://github.com/tari-project/tari/commit/058478255a93e7d50d95c8ac8c196069f76b994b)) +* **libtor:** prevent metrics port conflict ([#6125](https://github.com/tari-project/tari/issues/6125)) ([661af51](https://github.com/tari-project/tari/commit/661af5177863f37f0b01c9846dccc7d24f873fc5)) + +## [1.0.0-pre.7](https://github.com/tari-project/tari/compare/v1.0.0-pre.5...v1.0.0-pre.7) (2024-02-02) + + +### âš  BREAKING CHANGES + +* fix horizon sync after smt upgrade (#6006) + +### Features + +* add search kernels method to nodejs client ([#6082](https://github.com/tari-project/tari/issues/6082)) ([0190221](https://github.com/tari-project/tari/commit/019022149d94afb3c0ed3f75490dd777d60bad1c)) +* do validation after adding utxos and txs ([#6114](https://github.com/tari-project/tari/issues/6114)) ([7d886e6](https://github.com/tari-project/tari/commit/7d886e6c85e463a4f7f4dacc5115e625bb1f37f5)) +* export transaction ([#6111](https://github.com/tari-project/tari/issues/6111)) ([70d5ad3](https://github.com/tari-project/tari/commit/70d5ad3b4f8a1b8efb83a868102b7c846f2bd50c)) +* fix horizon sync after smt upgrade ([#6006](https://github.com/tari-project/tari/issues/6006)) ([b6b80f6](https://github.com/tari-project/tari/commit/b6b80f6ee9b91255815bd2a66f51425c3a628dcf)) +* initial horizon sync from prune node ([#6109](https://github.com/tari-project/tari/issues/6109)) ([2987621](https://github.com/tari-project/tari/commit/2987621b2cef6d3b852ed9a1f4215f19b9838e0f)) +* new release ([#6105](https://github.com/tari-project/tari/issues/6105)) ([554a3b2](https://github.com/tari-project/tari/commit/554a3b23d887eac81be288b2b8651019a6097458)) +* prevent runtime error with compact error input 
([#6096](https://github.com/tari-project/tari/issues/6096)) ([69421f5](https://github.com/tari-project/tari/commit/69421f5ef97f0ba4c194162bca0b367dc7714ffe)) +* smt verification ([#6115](https://github.com/tari-project/tari/issues/6115)) ([78a9348](https://github.com/tari-project/tari/commit/78a93480bc00235cbf221ff977f7d87f8008226a)) +* update api ([#6101](https://github.com/tari-project/tari/issues/6101)) ([47e73ac](https://github.com/tari-project/tari/commit/47e73ac2b692bbfc924a4329e29597e49f84af0f)) +* update codeowners ([#6088](https://github.com/tari-project/tari/issues/6088)) ([58a131d](https://github.com/tari-project/tari/commit/58a131d302fd7295134c708e75a0b788205d287e)) +* wallet add restart validation to start ([#6113](https://github.com/tari-project/tari/issues/6113)) ([5c236ce](https://github.com/tari-project/tari/commit/5c236ce9928acd3aa212adab716c93f05e8cac9d)) + + +### Bug Fixes + +* faster tor startup ([#6092](https://github.com/tari-project/tari/issues/6092)) ([a2872bb](https://github.com/tari-project/tari/commit/a2872bba188c456578ed5b5ad5eb2e37e26a46e6)) +* make monero extra data less strict ([#6117](https://github.com/tari-project/tari/issues/6117)) ([38b9113](https://github.com/tari-project/tari/commit/38b9113375bb90d667718f406e796f6a0e021861)) + +## [1.0.0-pre.6](https://github.com/tari-project/tari/compare/v1.0.0-pre.5...v1.0.0-pre.6) (2024-01-29) + + +### Features + +* add search kernels method to nodejs client ([#6082](https://github.com/tari-project/tari/issues/6082)) ([0190221](https://github.com/tari-project/tari/commit/019022149d94afb3c0ed3f75490dd777d60bad1c)) +* prevent runtime error with compact error input ([#6096](https://github.com/tari-project/tari/issues/6096)) ([69421f5](https://github.com/tari-project/tari/commit/69421f5ef97f0ba4c194162bca0b367dc7714ffe)) +* update api ([#6101](https://github.com/tari-project/tari/issues/6101)) ([47e73ac](https://github.com/tari-project/tari/commit/47e73ac2b692bbfc924a4329e29597e49f84af0f)) +* 
update codeowners ([#6088](https://github.com/tari-project/tari/issues/6088)) ([58a131d](https://github.com/tari-project/tari/commit/58a131d302fd7295134c708e75a0b788205d287e)) + + +### Bug Fixes + +* faster tor startup ([#6092](https://github.com/tari-project/tari/issues/6092)) ([a2872bb](https://github.com/tari-project/tari/commit/a2872bba188c456578ed5b5ad5eb2e37e26a46e6)) + +## [1.0.0-pre.5](https://github.com/tari-project/tari/compare/v1.0.0-pre.4...v1.0.0-pre.5) (2024-01-18) + + +### Features + +* add tari address as valid string for discovering a peer ([#6075](https://github.com/tari-project/tari/issues/6075)) ([a4c5bc2](https://github.com/tari-project/tari/commit/a4c5bc2c6c08a5d09b58f13ed9acf561e55478fc)) +* make all apps non interactive ([#6049](https://github.com/tari-project/tari/issues/6049)) ([bafd7e7](https://github.com/tari-project/tari/commit/bafd7e7baadd0f8b82ca8205ec3f18342d74e92a)) +* make libtor on by default for nix builds ([#6060](https://github.com/tari-project/tari/issues/6060)) ([b5e0d06](https://github.com/tari-project/tari/commit/b5e0d0639c540177373b7faa9c2fade64581e46d)) + + +### Bug Fixes + +* fix small error in config.toml ([#6052](https://github.com/tari-project/tari/issues/6052)) ([6518a60](https://github.com/tari-project/tari/commit/6518a60dce9a4b8ace6c5cc4b1ee79045e364e0e)) +* tms validation correctly updating ([#6079](https://github.com/tari-project/tari/issues/6079)) ([34222a8](https://github.com/tari-project/tari/commit/34222a88bd1746869e67ccde9c2f7529862f3b5d)) +* wallet coinbases not validated correctly ([#6074](https://github.com/tari-project/tari/issues/6074)) ([bb66df1](https://github.com/tari-project/tari/commit/bb66df13bcf3d00082e35f7305b1fde72d4ace2a)) + +## [1.0.0-pre.4](https://github.com/tari-project/tari/compare/v1.0.0-pre.2...v1.0.0-pre.4) (2023-12-14) + + +### Features + +* fix windows installer ([#6043](https://github.com/tari-project/tari/issues/6043)) 
([c37a0a8](https://github.com/tari-project/tari/commit/c37a0a89726eec765c9c10d3da0c990d339de9b9)) +* side load chat ([#6042](https://github.com/tari-project/tari/issues/6042)) ([d729c45](https://github.com/tari-project/tari/commit/d729c458b17406d9f5dbb8982a9bf5604f39c63c)) + + +### Bug Fixes + + +## [1.0.0-pre.3](https://github.com/tari-project/tari/compare/v1.0.0-pre.2...v1.0.0-pre.3) (2023-12-12) + + +### Features + +* console wallet use dns seeds ([#6034](https://github.com/tari-project/tari/issues/6034)) ([b194954](https://github.com/tari-project/tari/commit/b194954f489bd8ac234993e65463a24808dce8f2)) +* update tests and constants ([#6028](https://github.com/tari-project/tari/issues/6028)) ([d558206](https://github.com/tari-project/tari/commit/d558206ea62c12f3258ede8cfcbf9d44f139ccdd)) + + +### Bug Fixes + +* remove duplicate config settings ([#6029](https://github.com/tari-project/tari/issues/6029)) ([662af28](https://github.com/tari-project/tari/commit/662af28bf811c771cf0fdf9b583c1296a2283188)) + +## [1.0.0-pre.2](https://github.com/tari-project/tari/compare/v1.0.0-pre.1...v1.0.0-pre.2) (2023-12-08) + + +### Bug Fixes + +* chat build ([#6026](https://github.com/tari-project/tari/issues/6026)) ([15793b7](https://github.com/tari-project/tari/commit/15793b7e4dfdcaaad6ec90e357348daf42300eab)) + +## [1.0.0-pre.1](https://github.com/tari-project/tari/compare/v1.0.0-pre.0...v1.0.0-pre.1) (2023-12-08) + + +### âš  BREAKING CHANGES + +* update status (#6008) + +### Features + +* hazop findings ([#6020](https://github.com/tari-project/tari/issues/6020)) ([a68d0dd](https://github.com/tari-project/tari/commit/a68d0dd2fb7719ae99bcd2b62980b5f37d66284a)) +* add miner input processing ([#6016](https://github.com/tari-project/tari/issues/6016)) ([26f5b60](https://github.com/tari-project/tari/commit/26f5b6044832f737c7019dab0e00d2234aac442f)) +* add wallet ffi shutdown tests ([#6007](https://github.com/tari-project/tari/issues/6007)) 
([3129ce8](https://github.com/tari-project/tari/commit/3129ce8dd066ea16900ee8add4e608c1890c6545)) +* fix hazop findings ([#6017](https://github.com/tari-project/tari/issues/6017)) ([0bc62b4](https://github.com/tari-project/tari/commit/0bc62b4a5b78893a226700226bac01590a543bb8)) +* make base node support 1 click mining ([#6019](https://github.com/tari-project/tari/issues/6019)) ([d377269](https://github.com/tari-project/tari/commit/d3772690c36e0dcb6476090fc428e5745298e398)) +* update faucets ([#6024](https://github.com/tari-project/tari/issues/6024)) ([394976c](https://github.com/tari-project/tari/commit/394976cc591f9551e1542f2730a8ec299b524229)) +* update status ([#6008](https://github.com/tari-project/tari/issues/6008)) ([e19ce15](https://github.com/tari-project/tari/commit/e19ce15549b138d462060997d40147bad39a1871)) + + +### Bug Fixes + +* **chat:** chat client possible panics ([#6015](https://github.com/tari-project/tari/issues/6015)) ([cf66c51](https://github.com/tari-project/tari/commit/cf66c51483f4b2744221fb652f3b32340d2ee693)) + +## [1.0.0-pre.0](https://github.com/tari-project/tari/compare/v0.53.0-pre.0...v1.0.0-pre.0) (2023-12-01) + + +### âš  BREAKING CHANGES + +* new faucet for esmeralda (#6001) +* dont store entire monero coinbase transaction (#5991) +* ups the min difficulty (#5999) +* network specific domain hashers (#5980) +* add aux chain support for merge mining (#5976) +* disable console wallet grpc (#5988) +* add one-sided coinbase payments (#5967) +* fix opcode signatures (#5966) +* remove mutable mmr (#5954) +* move kernel MMR position to `u64` (#5956) +* standardize gRPC authentication and mitigate DoS (#5936) +* fix difficulty overflow (#5935) + +### Features + +* add aux chain support for merge mining ([#5976](https://github.com/tari-project/tari/issues/5976)) ([6723dc7](https://github.com/tari-project/tari/commit/6723dc7a88b2c1e40efe51259cb26e12638b9668)) +* add constant time comparison for grpc authentication 
([#5902](https://github.com/tari-project/tari/issues/5902)) ([2fe44db](https://github.com/tari-project/tari/commit/2fe44db773bbf8ee7c4e306e08973ba25e6af10e)) +* add getheaderbyhash method to grpc-js ([#5942](https://github.com/tari-project/tari/issues/5942)) ([ebc4539](https://github.com/tari-project/tari/commit/ebc45398ea7f9eda7f08830cec93f2bf8d4a0e38)) +* add one-sided coinbase payments ([#5967](https://github.com/tari-project/tari/issues/5967)) ([89b19f6](https://github.com/tari-project/tari/commit/89b19f6de8f2acf28557ca37feda03af2657cf30)) +* bans for bad incoming blocks ([#5934](https://github.com/tari-project/tari/issues/5934)) ([7acc44d](https://github.com/tari-project/tari/commit/7acc44d3dce5d8c9085ae5246a8a0a7487d19516)) +* block endless peer stream ([#5951](https://github.com/tari-project/tari/issues/5951)) ([16b325d](https://github.com/tari-project/tari/commit/16b325defc2f42b9b34d3e1fd05a4b6cd6bcf965)) +* block wallets from sending if BN connection stale ([#5949](https://github.com/tari-project/tari/issues/5949)) ([18d5f57](https://github.com/tari-project/tari/commit/18d5f57363fb085bfac080a7994cb5ced8c932ab)) +* compile out the metrics ([#5944](https://github.com/tari-project/tari/issues/5944)) ([fa2fb27](https://github.com/tari-project/tari/commit/fa2fb27a5834bd56fda62c82a825a7f6d8391fd3)) +* create min dust fee setting ([#5947](https://github.com/tari-project/tari/issues/5947)) ([8f5466c](https://github.com/tari-project/tari/commit/8f5466cb1d85518ba80190fa312281321aa721ff)) +* disable console wallet grpc ([#5988](https://github.com/tari-project/tari/issues/5988)) ([883de17](https://github.com/tari-project/tari/commit/883de175dadee58c4f49fff9a655cae1a2450b3d)) +* dont store entire monero coinbase transaction ([#5991](https://github.com/tari-project/tari/issues/5991)) ([23b10bf](https://github.com/tari-project/tari/commit/23b10bf2d3fdebd296a93eae0aaa5abcd4156de9)) +* enable revealed-value proofs ([#5983](https://github.com/tari-project/tari/issues/5983)) 
([f3f5879](https://github.com/tari-project/tari/commit/f3f5879903c619a9219c27ce4e77450f4a1b247b)) +* fix difficulty overflow ([#5935](https://github.com/tari-project/tari/issues/5935)) ([55bbdf2](https://github.com/tari-project/tari/commit/55bbdf2481bb7522ede5cc3e37ca8cdeb323b4f7)) +* grpc over tls ([#5990](https://github.com/tari-project/tari/issues/5990)) ([b80f7e3](https://github.com/tari-project/tari/commit/b80f7e366b14e10b3fb0e9835fb76dd5596d0cf8)) +* limit max number of addresses ([#5960](https://github.com/tari-project/tari/issues/5960)) ([40fc940](https://github.com/tari-project/tari/commit/40fc9408161e404a9f4062362fe495de3c2e374f)) +* move kernel MMR position to `u64` ([#5956](https://github.com/tari-project/tari/issues/5956)) ([cdd8a31](https://github.com/tari-project/tari/commit/cdd8a3135765c3b5a87027f9a5e0103e737c709a)) +* network specific domain hashers ([#5980](https://github.com/tari-project/tari/issues/5980)) ([d7ab283](https://github.com/tari-project/tari/commit/d7ab2838cc08a7c12ccf443697c1560b1ea40b03)) +* **node grpc:** add grpc authentication to the node ([#5928](https://github.com/tari-project/tari/issues/5928)) ([3d95e8c](https://github.com/tari-project/tari/commit/3d95e8cb0543f5bdb284f2ea0771e2f03748b71a)) +* remove panics from applications ([#5943](https://github.com/tari-project/tari/issues/5943)) ([18c3d0b](https://github.com/tari-project/tari/commit/18c3d0be8123cdc362fdeaed66c45ad17c3e7dfa)) +* sender and receiver protocols use bytes (not hex string) in wallet database ([#5950](https://github.com/tari-project/tari/issues/5950)) ([4cbdfec](https://github.com/tari-project/tari/commit/4cbdfec945857c5b7a334962e137d2c8dc4d4c4a)) +* warnings for untrusted urls ([#5955](https://github.com/tari-project/tari/issues/5955)) ([e2e278c](https://github.com/tari-project/tari/commit/e2e278c9a4d09f8e0136e9b3ae2f93afc3e9ac4a)) + + +### Bug Fixes + +* display ([#5982](https://github.com/tari-project/tari/issues/5982)) 
([8cce48c](https://github.com/tari-project/tari/commit/8cce48cd8bd9b6f780376030918972e993fc1ab7)) +* fix opcode signatures ([#5966](https://github.com/tari-project/tari/issues/5966)) ([dc26ca6](https://github.com/tari-project/tari/commit/dc26ca6aeeb4196d0496f2977027ac63a4324043)) +* fix the windows installer ([#5938](https://github.com/tari-project/tari/issues/5938)) ([3e65a28](https://github.com/tari-project/tari/commit/3e65a28c5e3729024d70e2b7f55910c8c808495c)) +* fix the windows installer auto build ([#5939](https://github.com/tari-project/tari/issues/5939)) ([a138b78](https://github.com/tari-project/tari/commit/a138b7892d4b41a460b8dd8b9466f34e90f65469)) +* **shutdown:** is_triggered returns up-to-date value without first polling ([#5997](https://github.com/tari-project/tari/issues/5997)) ([49f2053](https://github.com/tari-project/tari/commit/49f20534ec808427d059cde6892adc5597f33391)) +* standardize gRPC authentication and mitigate DoS ([#5936](https://github.com/tari-project/tari/issues/5936)) ([623f127](https://github.com/tari-project/tari/commit/623f12768daf8329731249cf7e4c644e338d9700)) +* **tariscript:** multisig ordered signatures and pubkeys ([#5961](https://github.com/tari-project/tari/issues/5961)) ([14e334a](https://github.com/tari-project/tari/commit/14e334aff346aae8a081599488135c905c2c1f84)) +* update `ToRistrettoPoint` handling ([#5973](https://github.com/tari-project/tari/issues/5973)) ([12e84f4](https://github.com/tari-project/tari/commit/12e84f42ee1842875f72716833e96d0b84460c78)) + + +* new faucet for esmeralda ([#6001](https://github.com/tari-project/tari/issues/6001)) ([4eccc39](https://github.com/tari-project/tari/commit/4eccc392394b03e974b36538096f640d2b98d25d)) +* remove mutable mmr ([#5954](https://github.com/tari-project/tari/issues/5954)) ([0855583](https://github.com/tari-project/tari/commit/0855583c9fb138f7d1633c1829a8cf3f23048c49)) +* ups the min difficulty ([#5999](https://github.com/tari-project/tari/issues/5999)) 
([fc1e555](https://github.com/tari-project/tari/commit/fc1e555edc56c9d01d7e9cb4d2c7cd0421616034)) + ## [1.0.0-pre.5](https://github.com/tari-project/tari/compare/v1.0.0-pre.4...v1.0.0-pre.5) (2024-01-18) diff --git a/changelog-nextnet.md b/changelog-nextnet.md index da107fa33d..9aa0ce15aa 100644 --- a/changelog-nextnet.md +++ b/changelog-nextnet.md @@ -2,6 +2,368 @@ All notable changes to this project will be documented in this file. See [standard-version](https://github.com/conventional-changelog/standard-version) for commit guidelines. +## [1.0.0-rc.5](https://github.com/tari-project/tari/compare/v1.0.0-rc.4...v1.0.0-rc.5) (2024-02-06) + + +### Bug Fixes + +* **comms:** correctly initialize hidden service ([#6124](https://github.com/tari-project/tari/issues/6124)) ([0584782](https://github.com/tari-project/tari/commit/058478255a93e7d50d95c8ac8c196069f76b994b)) +* **libtor:** prevent metrics port conflict ([#6125](https://github.com/tari-project/tari/issues/6125)) ([661af51](https://github.com/tari-project/tari/commit/661af5177863f37f0b01c9846dccc7d24f873fc5)) + + +## [1.0.0-rc.4](https://github.com/tari-project/tari/compare/v1.0.0-rc.3...v1.0.0-rc.4) (2024-02-02) + + +### âš  BREAKING CHANGES + +* fix horizon sync after smt upgrade (#6006) + +### Features + +* do validation after adding utxos and txs ([#6114](https://github.com/tari-project/tari/issues/6114)) ([7d886e6](https://github.com/tari-project/tari/commit/7d886e6c85e463a4f7f4dacc5115e625bb1f37f5)) +* export transaction ([#6111](https://github.com/tari-project/tari/issues/6111)) ([70d5ad3](https://github.com/tari-project/tari/commit/70d5ad3b4f8a1b8efb83a868102b7c846f2bd50c)) +* fix horizon sync after smt upgrade ([#6006](https://github.com/tari-project/tari/issues/6006)) ([b6b80f6](https://github.com/tari-project/tari/commit/b6b80f6ee9b91255815bd2a66f51425c3a628dcf)) +* initial horizon sync from prune node ([#6109](https://github.com/tari-project/tari/issues/6109)) 
([2987621](https://github.com/tari-project/tari/commit/2987621b2cef6d3b852ed9a1f4215f19b9838e0f)) +* smt verification ([#6115](https://github.com/tari-project/tari/issues/6115)) ([78a9348](https://github.com/tari-project/tari/commit/78a93480bc00235cbf221ff977f7d87f8008226a)) +* wallet add restart validation to start ([#6113](https://github.com/tari-project/tari/issues/6113)) ([5c236ce](https://github.com/tari-project/tari/commit/5c236ce9928acd3aa212adab716c93f05e8cac9d)) + + +### Bug Fixes + +* faster tor startup ([#6092](https://github.com/tari-project/tari/issues/6092)) ([a2872bb](https://github.com/tari-project/tari/commit/a2872bba188c456578ed5b5ad5eb2e37e26a46e6)) +* make monero extra data less strict ([#6117](https://github.com/tari-project/tari/issues/6117)) ([38b9113](https://github.com/tari-project/tari/commit/38b9113375bb90d667718f406e796f6a0e021861)) + +## [1.0.0-rc.3](https://github.com/tari-project/tari/compare/v1.0.0-rc.2...v1.0.0-rc.3) (2024-01-29) + + +### Features + +* add search kernels method to nodejs client ([#6082](https://github.com/tari-project/tari/issues/6082)) ([0190221](https://github.com/tari-project/tari/commit/019022149d94afb3c0ed3f75490dd777d60bad1c)) +* prevent runtime error with compact error input ([#6096](https://github.com/tari-project/tari/issues/6096)) ([69421f5](https://github.com/tari-project/tari/commit/69421f5ef97f0ba4c194162bca0b367dc7714ffe)) +* update api ([#6101](https://github.com/tari-project/tari/issues/6101)) ([47e73ac](https://github.com/tari-project/tari/commit/47e73ac2b692bbfc924a4329e29597e49f84af0f)) +* update codeowners ([#6088](https://github.com/tari-project/tari/issues/6088)) ([58a131d](https://github.com/tari-project/tari/commit/58a131d302fd7295134c708e75a0b788205d287e)) + + +### Bug Fixes + +* faster tor startup ([#6092](https://github.com/tari-project/tari/issues/6092)) ([a2872bb](https://github.com/tari-project/tari/commit/a2872bba188c456578ed5b5ad5eb2e37e26a46e6)) + +## 
[1.0.0-rc.2](https://github.com/tari-project/tari/compare/v1.0.0-rc.1...v1.0.0-rc.2) (2024-01-18) + + +### Features + +* add tari address as valid string for discovering a peer ([#6075](https://github.com/tari-project/tari/issues/6075)) ([a4c5bc2](https://github.com/tari-project/tari/commit/a4c5bc2c6c08a5d09b58f13ed9acf561e55478fc)) +* make all apps non interactive ([#6049](https://github.com/tari-project/tari/issues/6049)) ([bafd7e7](https://github.com/tari-project/tari/commit/bafd7e7baadd0f8b82ca8205ec3f18342d74e92a)) +* make libtor on by default for nix builds ([#6060](https://github.com/tari-project/tari/issues/6060)) ([b5e0d06](https://github.com/tari-project/tari/commit/b5e0d0639c540177373b7faa9c2fade64581e46d)) + + +### Bug Fixes + +* fix small error in config.toml ([#6052](https://github.com/tari-project/tari/issues/6052)) ([6518a60](https://github.com/tari-project/tari/commit/6518a60dce9a4b8ace6c5cc4b1ee79045e364e0e)) +* tms validation correctly updating ([#6079](https://github.com/tari-project/tari/issues/6079)) ([34222a8](https://github.com/tari-project/tari/commit/34222a88bd1746869e67ccde9c2f7529862f3b5d)) +* wallet coinbases not validated correctly ([#6074](https://github.com/tari-project/tari/issues/6074)) ([bb66df1](https://github.com/tari-project/tari/commit/bb66df13bcf3d00082e35f7305b1fde72d4ace2a)) + + +## [1.0.0-rc.1](https://github.com/tari-project/tari/compare/v1.0.0-rc.1...v1.0.0-rc.0) (2023-12-14) + + +### Features + +* fix windows installer ([#6043](https://github.com/tari-project/tari/issues/6043)) ([c37a0a8](https://github.com/tari-project/tari/commit/c37a0a89726eec765c9c10d3da0c990d339de9b9)) +* side load chat ([#6042](https://github.com/tari-project/tari/issues/6042)) ([d729c45](https://github.com/tari-project/tari/commit/d729c458b17406d9f5dbb8982a9bf5604f39c63c)) + +### Bug Fixes + + + +## [1.0.0-rc.0](https://github.com/tari-project/tari/compare/v1.0.0-rc0...v0.49.0-rc.0) (2023-12-12) + + +### âš  BREAKING CHANGES + +* add paging to 
utxo stream request (#5302) +* add optional range proof types (#5372) +* hash domain consistency (#5556) ([64443c6f](https://github.com/tari-project/tari/commit/64443c6f428fa84f8ab3e4b86949be6faef35aeb)) +* consistent output/kernel versions between sender and receiver (#5553) ([74f9c35f](https://github.com/tari-project/tari/commit/74f9c35f6a34c1cf731274b7febb245734ae7032)) +* New Gen block (#5633) +* Validator mr included in mining hash (#5615) +* Monero merkle proof change (#5602) +* Merge mining hash has changed +* remove timestamp from header in proto files (#5667) +* **comms/dht:** limit number of peer claims and addresses for all sources (#5702) +* **comms:** use noise XX handshake pattern for improved privacy (#5696) +* update faucet for genesis block (#5633) +* limit monero hashes and force coinbase to be tx 0 (#5602) +* add validator mr to mining hash (#5615) +* replace utxo MMR with SMT (#5854) +* update key parsing (#5900) +* **proto:** remove proto timestamp wrapper types (#5833) +* **proto:** remove proto bytes for std bytes (#5835) +* upgrade bitflags crate (#5831) +* improve block add where many orphan chain tips existed (#5763) +* lmdb flag set wrong on database (#5916) +* add validator mmr size (#5873) +* completed transaction use bytes for transaction protocol (not hex string) in wallet database (#5906) +* new faucet for esmeralda (#6001) +* dont store entire monero coinbase transaction (#5991) +* ups the min difficulty (#5999) +* network specific domain hashers (#5980) +* add aux chain support for merge mining (#5976) +* disable console wallet grpc (#5988) +* add one-sided coinbase payments (#5967) +* fix opcode signatures (#5966) +* remove mutable mmr (#5954) +* move kernel MMR position to `u64` (#5956) +* standardize gRPC authentication and mitigate DoS (#5936) +* fix difficulty overflow (#5935) +* update status (#6008) + +### Features + +* add miner timeout config option ([#5331](https://github.com/tari-project/tari/issues/5331)) 
([aea14f6](https://github.com/tari-project/tari/commit/aea14f6bf302801c85efa9f304a8f442aaf9a3ff)) +* chat ffi ([#5349](https://github.com/tari-project/tari/issues/5349)) ([f7cece2](https://github.com/tari-project/tari/commit/f7cece27c02ae3b668e1ffbd6629828d0432debf)) +* chat scaffold ([#5244](https://github.com/tari-project/tari/issues/5244)) ([5b09f8e](https://github.com/tari-project/tari/commit/5b09f8e2b630685d9ff748eae772b9798954f6ff)) +* improve message encryption ([#5288](https://github.com/tari-project/tari/issues/5288)) ([7a80716](https://github.com/tari-project/tari/commit/7a80716c71987bae14d83994d7402f96c190242d)) +* **p2p:** allow listener bind to differ from the tor forward address ([#5357](https://github.com/tari-project/tari/issues/5357)) ([857fb55](https://github.com/tari-project/tari/commit/857fb55520145ece48b4b5cca0aa5d7fd8f6c69e))* add extended mask recovery ([#5301](https://github.com/tari-project/tari/issues/5301)) ([23d882e](https://github.com/tari-project/tari/commit/23d882eb783f3d94efbfdd928b3d87b2907bf2d7)) +* add network name to data path and --network flag to the miners ([#5291](https://github.com/tari-project/tari/issues/5291)) ([1f04beb](https://github.com/tari-project/tari/commit/1f04bebd4f6d14432aab923baeab17d1d6cc39bf)) +* add other code template types ([#5242](https://github.com/tari-project/tari/issues/5242)) ([93e5e85](https://github.com/tari-project/tari/commit/93e5e85cbc13be33bea40c7b8289d0ff344df08c)) +* add paging to utxo stream request ([#5302](https://github.com/tari-project/tari/issues/5302)) ([3540309](https://github.com/tari-project/tari/commit/3540309e29d450fc8cb48bc714fb780c1c107b81)) +* add wallet daemon config ([#5311](https://github.com/tari-project/tari/issues/5311)) ([30419cf](https://github.com/tari-project/tari/commit/30419cfcf198fb923ef431316f2915cbc80f1e3b)) +* define different network defaults for bins ([#5307](https://github.com/tari-project/tari/issues/5307)) 
([2f5d498](https://github.com/tari-project/tari/commit/2f5d498d2130b5358fbf126c96a917ed98016955)) +* feature gates ([#5287](https://github.com/tari-project/tari/issues/5287)) ([72c19dc](https://github.com/tari-project/tari/commit/72c19dc130b0c7652cca422c9c4c2e08e5b8e555)) +* fix rpc transaction conversion ([#5304](https://github.com/tari-project/tari/issues/5304)) ([344040a](https://github.com/tari-project/tari/commit/344040ac7322bae5604aa9db48d4194c1b3779fa)) +* add metadata signature check ([#5411](https://github.com/tari-project/tari/issues/5411)) ([9c2bf41](https://github.com/tari-project/tari/commit/9c2bf41ec8f649ffac824878256c09598bf52269)) +* add optional range proof types ([#5372](https://github.com/tari-project/tari/issues/5372)) ([f24784f](https://github.com/tari-project/tari/commit/f24784f3a2f3f574cd2ac4e2d9fe963078e4c524)) +* added burn feature to the console wallet ([#5322](https://github.com/tari-project/tari/issues/5322)) ([45685b9](https://github.com/tari-project/tari/commit/45685b9f3acceba483ec30021e8d4894dbf2861c)) +* improved base node monitoring ([#5390](https://github.com/tari-project/tari/issues/5390)) ([c704890](https://github.com/tari-project/tari/commit/c704890ca949bcfcd608e299175694b81cef0165)) +* refactor configuration for chat so ffi can create and accept a config file (#5426) ([9d0d8b52](https://github.com/tari-project/tari/commit/9d0d8b5277bd26e79b7fe5506edcaf197ba63eb7), breaks [#](https://github.com/tari-project/tari/issues/), [#](https://github.com/tari-project/tari/issues/), [#](https://github.com/tari-project/tari/issues/)) +* ui for template registration in console wallet (#5444) ([701e3c23](https://github.com/tari-project/tari/commit/701e3c2341d1029c2711b81a66952f3bee7d8e42), breaks [#](https://github.com/tari-project/tari/issues/), [#](https://github.com/tari-project/tari/issues/), [#](https://github.com/tari-project/tari/issues/)) +* sparse merkle trees (#5457) 
([f536d219](https://github.com/tari-project/tari/commit/f536d21929e4eeb11cc185c013eef0b336def216)* proof of work audit part 2 (#5495) ([af32f96f](https://github.com/tari-project/tari/commit/af32f96f36a32235daf7e3b1d9694af7edcf5f8e) +* improve recovery speed (#5489) ([d128f850](https://github.com/tari-project/tari/commit/d128f850356ff18bfd394f6c3bfe78f5bd0607e1)) +* add consistent ban reason for sync ([#5729](https://github.com/brianp/tari/issues/5729)) ([9564281](https://github.com/brianp/tari/commit/95642811b9df592eb9bddd9b71d10ee30987e59d)) +* add mempool min fee ([#5606](https://github.com/brianp/tari/issues/5606)) ([15c7e8f](https://github.com/brianp/tari/commit/15c7e8f9ca3d656850d6f0041d2f7fc07b4af80b)) +* ban peer unexpected response ([#5608](https://github.com/brianp/tari/issues/5608)) ([02494ae](https://github.com/brianp/tari/commit/02494aee0f97469b9deb9c339b4075b14b69ff6f)) +* change default script to PushPubKey ([#5653](https://github.com/brianp/tari/issues/5653)) ([f5b89ad](https://github.com/brianp/tari/commit/f5b89add6a04b935b9ae8dda0f694eb826ef6d9a)) +* chat ffi status callback ([#5583](https://github.com/brianp/tari/issues/5583)) ([f68b85f](https://github.com/brianp/tari/commit/f68b85f404e524d61d8b6153c13e8b2e6ab2a20b)) +* chat message fetching pagination ([#5594](https://github.com/brianp/tari/issues/5594)) ([2024357](https://github.com/brianp/tari/commit/202435742ed78b0eac80efcd19b357df96a6bbb9)) +* chat-ffi logging ([#5591](https://github.com/brianp/tari/issues/5591)) ([159959c](https://github.com/brianp/tari/commit/159959cc32c341e111a626729fb1bd9a2851e8a7)) +* cleanup errors ([#5655](https://github.com/brianp/tari/issues/5655)) ([c1737b9](https://github.com/brianp/tari/commit/c1737b9d872dbaf858dd46e6350c6febd7f43690)) +* fix formatting block ([#5630](https://github.com/brianp/tari/issues/5630)) ([49732f6](https://github.com/brianp/tari/commit/49732f65339f4c120afb49e9edb72eda8d17b737)) +* improve block sync error handling 
([#5691](https://github.com/brianp/tari/issues/5691)) ([251f796](https://github.com/brianp/tari/commit/251f796dc023459338212a852d50059380399be2)) +* new message callback to chat-ffi ([#5592](https://github.com/brianp/tari/issues/5592)) ([bbd543e](https://github.com/brianp/tari/commit/bbd543ee35e4e5fc858d875cf30d6f24fa2e4d96)) +* peer sync limiter ([#5445](https://github.com/brianp/tari/issues/5445)) ([548643b](https://github.com/brianp/tari/commit/548643b723a548fea3e56f938a84db652d3ee630)) +* remove inherent iterator panic ([#5697](https://github.com/brianp/tari/issues/5697)) ([7f153e5](https://github.com/brianp/tari/commit/7f153e5dd613b3e38586b7f8f536035c6ac98dd8)) +* remove orphan validation and only validate on insertion ([#5601](https://github.com/brianp/tari/issues/5601)) ([41244a3](https://github.com/brianp/tari/commit/41244a3ea666f925648aa752c9ac476486702473)) +* remove unused wasm_key_manager ([#5622](https://github.com/brianp/tari/issues/5622)) ([508c971](https://github.com/brianp/tari/commit/508c97198617f116bb0ccd69c8e1eba1341b18ac)) +* update faucet for genesis block ([#5633](https://github.com/brianp/tari/issues/5633)) ([ffb987a](https://github.com/brianp/tari/commit/ffb987a757f2af721ca5772e28da31035fcf741f)) +* update genesis blocks ([#5698](https://github.com/brianp/tari/issues/5698)) ([b9145b3](https://github.com/brianp/tari/commit/b9145b3373319f0c2c25d0e5dd4d393115a4c0bd)) +* add (de)serialize to BalancedBinaryMerkleTree ([#5744](https://github.com/tari-project/tari/issues/5744)) ([c53ec06](https://github.com/tari-project/tari/commit/c53ec065b6f7893fe1a5d3a3ccde826fa09e438f)) +* add config for grpc server methods ([#5886](https://github.com/tari-project/tari/issues/5886)) ([a3d7cf7](https://github.com/tari-project/tari/commit/a3d7cf771663d2b3c3585796ef502ab00f569ba0)) +* add insert function to SMT ([#5776](https://github.com/tari-project/tari/issues/5776)) 
([5901b4a](https://github.com/tari-project/tari/commit/5901b4af9fe307cdc379979155961d34dcf8c098)) +* add overflow checks to change and fee calculations ([#5834](https://github.com/tari-project/tari/issues/5834)) ([9725fbd](https://github.com/tari-project/tari/commit/9725fbddf1ee7047d2e7698f4ee1975ce22aa605)) +* allow multiple initial sync peers ([#5890](https://github.com/tari-project/tari/issues/5890)) ([e1c504a](https://github.com/tari-project/tari/commit/e1c504a3d9b9affafb3221e46831d818cbdcc45a)) +* apply obscure_error_if_true consistently ([#5892](https://github.com/tari-project/tari/issues/5892)) ([1864203](https://github.com/tari-project/tari/commit/1864203c224611cdcac71adbae83e37161ce0a5c)) +* ban bad block-sync peers ([#5871](https://github.com/tari-project/tari/issues/5871)) ([5c2781e](https://github.com/tari-project/tari/commit/5c2781e86be8efacab52c93a0bc2ee662ca56ec8)) +* chat ffi verbose logging options ([#5789](https://github.com/tari-project/tari/issues/5789)) ([24b4324](https://github.com/tari-project/tari/commit/24b4324f3d5b4386a3df68952fb834d58fa5217d)) +* chatffi simpler callbacks and managed identity and db ([#5681](https://github.com/tari-project/tari/issues/5681)) ([79ab584](https://github.com/tari-project/tari/commit/79ab584100bc6899445fc3789d6e3312a06d21e8)) +* **chatffi:** better message metadata parsing ([#5820](https://github.com/tari-project/tari/issues/5820)) ([9a43eab](https://github.com/tari-project/tari/commit/9a43eab2e81aaaa0a5ad53b3dc5d9388b9d43452)) +* **chatffi:** get conversationalists ([#5849](https://github.com/tari-project/tari/issues/5849)) ([d9e8e22](https://github.com/tari-project/tari/commit/d9e8e22846cc0974abcfe19ab32b41299c0a500a)) +* **chatffi:** message metadata ([#5766](https://github.com/tari-project/tari/issues/5766)) ([a9b730a](https://github.com/tari-project/tari/commit/a9b730aaa2e44dbba7c546b0d78ad0fef4884d29)) +* **chatffi:** tor configuration ([#5752](https://github.com/tari-project/tari/issues/5752))
([1eeb4a9](https://github.com/tari-project/tari/commit/1eeb4a9abbc29ec16593b1c6bec675b928e7b177)) +* **chat:** read receipt feature ([#5824](https://github.com/tari-project/tari/issues/5824)) ([d81fe7d](https://github.com/tari-project/tari/commit/d81fe7d39fdc120665b90e18163151bdb938beee)) +* cli add list of vns for next epoch ([#5743](https://github.com/tari-project/tari/issues/5743)) ([d2a0c8c](https://github.com/tari-project/tari/commit/d2a0c8cc935bb648460f8095c5f2f7125e642169)) +* **comms:** allow multiple messaging protocol instances ([#5748](https://github.com/tari-project/tari/issues/5748)) ([3fba04e](https://github.com/tari-project/tari/commit/3fba04ec862bf405e96e09b5cc38a5d572b77244)) +* consistent handling of edge cases for header sync ([#5837](https://github.com/tari-project/tari/issues/5837)) ([3e1ec1f](https://github.com/tari-project/tari/commit/3e1ec1f1fe70b82ed0f7517d91eb9f3f352cbe97)) +* enable multiple coinbase utxos ([#5879](https://github.com/tari-project/tari/issues/5879)) ([49e5c9c](https://github.com/tari-project/tari/commit/49e5c9c2fec823f0958a28e5c110cc3f34ba48d6)) +* failure of min difficulty should not add block to list of bad blocks ([#5805](https://github.com/tari-project/tari/issues/5805)) ([38dc014](https://github.com/tari-project/tari/commit/38dc014405eb6887210861bd533f2b1dd17f48c2)) +* improve block add where many orphan chain tips existed ([#5763](https://github.com/tari-project/tari/issues/5763)) ([19b3f21](https://github.com/tari-project/tari/commit/19b3f217aee6818678ed45082d910f1a2335a9ec)) +* make rpc errors ban-able for sync ([#5884](https://github.com/tari-project/tari/issues/5884)) ([4ca664e](https://github.com/tari-project/tari/commit/4ca664e5933f2266f594ecccf545d0eec3b18b40)) +* prevent possible division by zero in difficulty calculation ([#5828](https://github.com/tari-project/tari/issues/5828)) ([f85a878](https://github.com/tari-project/tari/commit/f85a8785de49dda05b3dc54dfda4f5081424e06f)) +* print warning for wallets in
direct send only ([#5883](https://github.com/tari-project/tari/issues/5883)) ([6d8686d](https://github.com/tari-project/tari/commit/6d8686dc40ef701fe980698c30347da5b690de07)) +* reduce timeouts and increase bans ([#5882](https://github.com/tari-project/tari/issues/5882)) ([df9bc9a](https://github.com/tari-project/tari/commit/df9bc9a912fe6e7c750e34a3dd7bd6796c6d758f)) +* replace utxo MMR with SMT ([#5854](https://github.com/tari-project/tari/issues/5854)) ([ca74c29](https://github.com/tari-project/tari/commit/ca74c29db7264413dc3e6542b599db9760993170)) +* up the timeout for comms ([#5758](https://github.com/tari-project/tari/issues/5758)) ([1054868](https://github.com/tari-project/tari/commit/1054868248342d0a07077d441151dc48adbfddf3)) +* update key parsing ([#5900](https://github.com/tari-project/tari/issues/5900)) ([59d7ceb](https://github.com/tari-project/tari/commit/59d7cebd22cc86ab5d3691aa5dc3d73b37032442)) +* update randomx ([#5894](https://github.com/tari-project/tari/issues/5894)) ([e445244](https://github.com/tari-project/tari/commit/e4452440bd9269402f1a5352e9c93cbfa6c72425)) +* adaptable min difficulty check ([#5896](https://github.com/tari-project/tari/issues/5896)) ([76f323c](https://github.com/tari-project/tari/commit/76f323c67ee3f46d772b85c410a1c49376348195)) +* add robustness to monero block extra field handling ([#5826](https://github.com/tari-project/tari/issues/5826)) ([597b9ef](https://github.com/tari-project/tari/commit/597b9ef7698ef705d550f6d3ecb1c27dbea79636)) +* add validator mmr size ([#5873](https://github.com/tari-project/tari/issues/5873)) ([fd51045](https://github.com/tari-project/tari/commit/fd510452c0bf9eefcc4117f378c6434aea7b9fd1)) +* completed transaction use bytes for transaction protocol (not hex string) in wallet database ([#5906](https://github.com/tari-project/tari/issues/5906)) ([61256cd](https://github.com/tari-project/tari/commit/61256cde3630f8d81e5648b1f5038ed6e847b9c2)) +* add aux chain support for merge mining 
([#5976](https://github.com/tari-project/tari/issues/5976)) ([6723dc7](https://github.com/tari-project/tari/commit/6723dc7a88b2c1e40efe51259cb26e12638b9668)) +* add constant time comparison for grpc authentication ([#5902](https://github.com/tari-project/tari/issues/5902)) ([2fe44db](https://github.com/tari-project/tari/commit/2fe44db773bbf8ee7c4e306e08973ba25e6af10e)) +* add getheaderbyhash method to grpc-js ([#5942](https://github.com/tari-project/tari/issues/5942)) ([ebc4539](https://github.com/tari-project/tari/commit/ebc45398ea7f9eda7f08830cec93f2bf8d4a0e38)) +* add one-sided coinbase payments ([#5967](https://github.com/tari-project/tari/issues/5967)) ([89b19f6](https://github.com/tari-project/tari/commit/89b19f6de8f2acf28557ca37feda03af2657cf30)) +* bans for bad incoming blocks ([#5934](https://github.com/tari-project/tari/issues/5934)) ([7acc44d](https://github.com/tari-project/tari/commit/7acc44d3dce5d8c9085ae5246a8a0a7487d19516)) +* block endless peer stream ([#5951](https://github.com/tari-project/tari/issues/5951)) ([16b325d](https://github.com/tari-project/tari/commit/16b325defc2f42b9b34d3e1fd05a4b6cd6bcf965)) +* block wallets from sending if BN connection stale ([#5949](https://github.com/tari-project/tari/issues/5949)) ([18d5f57](https://github.com/tari-project/tari/commit/18d5f57363fb085bfac080a7994cb5ced8c932ab)) +* compile out the metrics ([#5944](https://github.com/tari-project/tari/issues/5944)) ([fa2fb27](https://github.com/tari-project/tari/commit/fa2fb27a5834bd56fda62c82a825a7f6d8391fd3)) +* create min dust fee setting ([#5947](https://github.com/tari-project/tari/issues/5947)) ([8f5466c](https://github.com/tari-project/tari/commit/8f5466cb1d85518ba80190fa312281321aa721ff)) +* disable console wallet grpc ([#5988](https://github.com/tari-project/tari/issues/5988)) ([883de17](https://github.com/tari-project/tari/commit/883de175dadee58c4f49fff9a655cae1a2450b3d)) +* dont store entire monero coinbase transaction 
([#5991](https://github.com/tari-project/tari/issues/5991)) ([23b10bf](https://github.com/tari-project/tari/commit/23b10bf2d3fdebd296a93eae0aaa5abcd4156de9)) +* enable revealed-value proofs ([#5983](https://github.com/tari-project/tari/issues/5983)) ([f3f5879](https://github.com/tari-project/tari/commit/f3f5879903c619a9219c27ce4e77450f4a1b247b)) +* fix difficulty overflow ([#5935](https://github.com/tari-project/tari/issues/5935)) ([55bbdf2](https://github.com/tari-project/tari/commit/55bbdf2481bb7522ede5cc3e37ca8cdeb323b4f7)) +* grpc over tls ([#5990](https://github.com/tari-project/tari/issues/5990)) ([b80f7e3](https://github.com/tari-project/tari/commit/b80f7e366b14e10b3fb0e9835fb76dd5596d0cf8)) +* limit max number of addresses ([#5960](https://github.com/tari-project/tari/issues/5960)) ([40fc940](https://github.com/tari-project/tari/commit/40fc9408161e404a9f4062362fe495de3c2e374f)) +* move kernel MMR position to `u64` ([#5956](https://github.com/tari-project/tari/issues/5956)) ([cdd8a31](https://github.com/tari-project/tari/commit/cdd8a3135765c3b5a87027f9a5e0103e737c709a)) +* network specific domain hashers ([#5980](https://github.com/tari-project/tari/issues/5980)) ([d7ab283](https://github.com/tari-project/tari/commit/d7ab2838cc08a7c12ccf443697c1560b1ea40b03)) +* **node grpc:** add grpc authentication to the node ([#5928](https://github.com/tari-project/tari/issues/5928)) ([3d95e8c](https://github.com/tari-project/tari/commit/3d95e8cb0543f5bdb284f2ea0771e2f03748b71a)) +* remove panics from applications ([#5943](https://github.com/tari-project/tari/issues/5943)) ([18c3d0b](https://github.com/tari-project/tari/commit/18c3d0be8123cdc362fdeaed66c45ad17c3e7dfa)) +* sender and receiver protocols use bytes (not hex string) in wallet database ([#5950](https://github.com/tari-project/tari/issues/5950)) ([4cbdfec](https://github.com/tari-project/tari/commit/4cbdfec945857c5b7a334962e137d2c8dc4d4c4a)) +* warnings for untrusted urls 
([#5955](https://github.com/tari-project/tari/issues/5955)) ([e2e278c](https://github.com/tari-project/tari/commit/e2e278c9a4d09f8e0136e9b3ae2f93afc3e9ac4a)) +* hazop findings ([#6020](https://github.com/tari-project/tari/issues/6020)) ([a68d0dd](https://github.com/tari-project/tari/commit/a68d0dd2fb7719ae99bcd2b62980b5f37d66284a)) +* add miner input processing ([#6016](https://github.com/tari-project/tari/issues/6016)) ([26f5b60](https://github.com/tari-project/tari/commit/26f5b6044832f737c7019dab0e00d2234aac442f)) +* add wallet ffi shutdown tests ([#6007](https://github.com/tari-project/tari/issues/6007)) ([3129ce8](https://github.com/tari-project/tari/commit/3129ce8dd066ea16900ee8add4e608c1890c6545)) +* fix hazop findings ([#6017](https://github.com/tari-project/tari/issues/6017)) ([0bc62b4](https://github.com/tari-project/tari/commit/0bc62b4a5b78893a226700226bac01590a543bb8)) +* make base node support 1 click mining ([#6019](https://github.com/tari-project/tari/issues/6019)) ([d377269](https://github.com/tari-project/tari/commit/d3772690c36e0dcb6476090fc428e5745298e398)) +* update faucets ([#6024](https://github.com/tari-project/tari/issues/6024)) ([394976c](https://github.com/tari-project/tari/commit/394976cc591f9551e1542f2730a8ec299b524229)) +* update status ([#6008](https://github.com/tari-project/tari/issues/6008)) ([e19ce15](https://github.com/tari-project/tari/commit/e19ce15549b138d462060997d40147bad39a1871)) +* console wallet use dns seeds ([#6034](https://github.com/tari-project/tari/issues/6034)) ([b194954](https://github.com/tari-project/tari/commit/b194954f489bd8ac234993e65463a24808dce8f2)) +* update tests and constants ([#6028](https://github.com/tari-project/tari/issues/6028)) ([d558206](https://github.com/tari-project/tari/commit/d558206ea62c12f3258ede8cfcbf9d44f139ccdd)) + + +### Bug Fixes + +* add SECURITY.md Vulnerability Disclosure Policy ([#5351](https://github.com/tari-project/tari/issues/5351)) 
([72daaf5](https://github.com/tari-project/tari/commit/72daaf5ef614ceb805f690db12c7fefc642d5453)) +* added missing log4rs features ([#5356](https://github.com/tari-project/tari/issues/5356)) ([b9031bb](https://github.com/tari-project/tari/commit/b9031bbbece1988c1de180cabbf4e3acfcb50836)) +* allow public addresses from command line ([#5303](https://github.com/tari-project/tari/issues/5303)) ([349ac89](https://github.com/tari-project/tari/commit/349ac8957bc513cd4110eaac69550ffa0816862b)) +* clippy issues with config ([#5334](https://github.com/tari-project/tari/issues/5334)) ([026f0d5](https://github.com/tari-project/tari/commit/026f0d5e33d524ad302e7edd0c82e108a17800b6)) +* default network selection ([#5333](https://github.com/tari-project/tari/issues/5333)) ([cf4b2c8](https://github.com/tari-project/tari/commit/cf4b2c8a4f5849ba51dab61595dfed1a9249c580)) +* make the first output optional in the wallet ([#5352](https://github.com/tari-project/tari/issues/5352)) ([bf16140](https://github.com/tari-project/tari/commit/bf16140ecd1ad0ae25f8a9b8cde9c3e4f1d12a02)) +* remove wallet panic ([#5338](https://github.com/tari-project/tari/issues/5338)) ([536d16d](https://github.com/tari-project/tari/commit/536d16d2feea283ac1b8f546f479b76465938c4b)) +* wallet .h file for lib wallets ([#5330](https://github.com/tari-project/tari/issues/5330)) ([22a3a17](https://github.com/tari-project/tari/commit/22a3a17db6ef8889cb3a73dfe2db081a0691a68c)) +* **comms:** only set final forward address if configured to port 0 ([#5406](https://github.com/tari-project/tari/issues/5406)) ([ff7fb6d](https://github.com/tari-project/tari/commit/ff7fb6d6b4ab4f77d108b2d9b7fd010c77e613c7)) +* deeplink to rfc spec ([#5342](https://github.com/tari-project/tari/issues/5342)) ([806d3b8](https://github.com/tari-project/tari/commit/806d3b8cc6668f23bb77ca7040833e080c173063)) +* don't use in memory datastores for chat client dht in integration tests ([#5399](https://github.com/tari-project/tari/issues/5399)) 
([cbdca6f](https://github.com/tari-project/tari/commit/cbdca6fcc8ae61ed2dbfacca9da1a59c78945045)) +* fix panic when no public addresses ([#5367](https://github.com/tari-project/tari/issues/5367)) ([49be2a2](https://github.com/tari-project/tari/commit/49be2a27a8aead96c180cb988614e3696c338530)) +* loop on mismatched passphrase entry ([#5396](https://github.com/tari-project/tari/issues/5396)) ([ed120b2](https://github.com/tari-project/tari/commit/ed120b277371be7b9bd61c825aa7d61b104d3ac6)) +* use domain separation for wallet message signing ([#5400](https://github.com/tari-project/tari/issues/5400)) ([7d71f8b](https://github.com/tari-project/tari/commit/7d71f8bef94fddf1ffa345e6b599cf02ee6ab935)) +* use mined at timestamp in fauxconfirmation (#5443) ([f3833c9f](https://github.com/tari-project/tari/commit/f3833c9fc46d77fddaa7a23ef1d53ba9d860182a), breaks [#](https://github.com/tari-project/tari/issues/), [#](https://github.com/tari-project/tari/issues/), [#](https://github.com/tari-project/tari/issues/)) +* fix custom wallet startup logic for console wallet (#5429) ([0c1e5765](https://github.com/tari-project/tari/commit/0c1e5765676a9281b45fd66c8846b78ea4c76125), breaks [#](https://github.com/tari-project/tari/issues/), [#](https://github.com/tari-project/tari/issues/), [#](https://github.com/tari-project/tari/issues/)) +* **balanced_mp:** removes some panics, adds some checks and new tests (#5432) ([602f416f](https://github.com/tari-project/tari/commit/602f416f674b5e1835a634f3c8ab123001af600e), breaks [#](https://github.com/tari-project/tari/issues/), [#](https://github.com/tari-project/tari/issues/), [#](https://github.com/tari-project/tari/issues/)) +* **comms:** validate onion3 checksum (#5440) ([0dfdb3a4](https://github.com/tari-project/tari/commit/0dfdb3a4bef51952f0cecf6f6fcb00f6b2bfe302), breaks [#](https://github.com/tari-project/tari/issues/), [#](https://github.com/tari-project/tari/issues/), [#](https://github.com/tari-project/tari/issues/)) +* **wallet-ffi:** 
don't block on start (#5437) ([27fe8d9d](https://github.com/tari-project/tari/commit/27fe8d9d2fc3ea6468605ef5edea56efdcc8248f), breaks [#](https://github.com/tari-project/tari/issues/), [#](https://github.com/tari-project/tari/issues/), [#](https://github.com/tari-project/tari/issues/)) +* **mmr:** support zero sized balanced merkle proof (#5474) ([ef984823](https://github.com/tari-project/tari/commit/ef98482313c9b9480ac663709162ae62e9c26978) +* **wallet:** use correct output features for send to self (#5472) ([ce1f0686](https://github.com/tari-project/tari/commit/ce1f0686f56367ff094bf28cfd0388b2ea94a8c9) +* covenant nit picking (#5506) ([301ca495](https://github.com/tari-project/tari/commit/301ca49513948e84bc972e5d75e16f6882d8fb8b) +* overflow of target difficulty (#5493) ([822dac60](https://github.com/tari-project/tari/commit/822dac609a4d148227c1bac61d9d81bc1a5925ac) +* coinbase recovery (#5487) ([48dd157a](https://github.com/tari-project/tari/commit/48dd157a82c4459021a1a02d14f7a3e95e24ebd3)) +* **core:** + * minor audit improvements (#5486) ([8756e0b3](https://github.com/tari-project/tari/commit/8756e0b3c0030700a2409e7d29c4822f8e75aacb) + * remove implicit change in protocol for partial/full signatures (#5488) ([fef701ef](https://github.com/tari-project/tari/commit/fef701efbd07eb769dbe11b5a0cb74c807d7d88c) + * compile error in wallet/FFI (#5497) ([49610736](https://github.com/tari-project/tari/commit/49610736b839c1067820ad841d4730ae8032eb2b) +* **core/base_node:** safe `mmr_position` cast in horizon sync (#5503) ([fb3ac60b](https://github.com/tari-project/tari/commit/fb3ac60b163184f89b2d69b0b9ce3d9b2cfdeeee) +* **core/consensus:** include `coinbase_extra` max size into coinbase weight calculation (#5501) ([4554cc5f](https://github.com/tari-project/tari/commit/4554cc5f075bf9392c75fedb7576753612b374ee) +* **core/keymanager:** use tokio rwlock for keymanagers (#5494) ([229aee02](https://github.com/tari-project/tari/commit/229aee029dbb8d401feb74be51caa4f26dd93be1) 
+* **core/transactions:** resolve or remove TODOs (#5500) ([4a9f73c7](https://github.com/tari-project/tari/commit/4a9f73c79b98298e61115744b3e467622dd4945b) +* **core/weighting:** remove optional and define correct rounding for usize::MAX (#5490) ([38c399a2](https://github.com/tari-project/tari/commit/38c399a2e5ee28878e0238e2b8e13c15f658ffbc) +* **mempool:** remove TODOs and other minor changes (#5498) ([a1f24417](https://github.com/tari-project/tari/commit/a1f244179390d9a4845bce96e3c6a506a59e4b16) +* mempool should use the correct version of the consensus constant (#5549) ([46ab3ef0](https://github.com/tari-project/tari/commit/46ab3ef07e41b091b869ef59376d0709a24e7437)) +* mempool fetch_highest_priority_txs (#5551) ([f7f749c4](https://github.com/tari-project/tari/commit/f7f749c4c476f489f9e30afb87461780d1996834) +* remove optional timestamp verification bypass (#5552) ([b5a5bed2](https://github.com/tari-project/tari/commit/b5a5bed2c23c273d3787afa1c845f62badec1a46)) +* update code coverage approach (#5540) ([7a9830ed](https://github.com/tari-project/tari/commit/7a9830edb66b6be3edc40b84ae8a1a9c3f4ef525) +* use correct TOML field for console wallet network address (#5531) ([70763dde](https://github.com/tari-project/tari/commit/70763dde25c1569013e489a0798540fd66dfa571) +* llvm-tools installed correctly (#5534) ([4ab4b965](https://github.com/tari-project/tari/commit/4ab4b965e5f0556d508ec071a152deb5ad8ea8cc)) +* push test coverage even if some tests fail (#5533) ([053c748d](https://github.com/tari-project/tari/commit/053c748d3d7aee674bada24609612bde9ba1420e) +* **console-wallet:** fix possible subtract underflow panic in list (#5535) ([8d5e8e6e](https://github.com/tari-project/tari/commit/8d5e8e6eac45b11867cee6104c207f6559851405) +* **core:** disable covenants for all networks except igor and localnet (#5505) ([308f5299](https://github.com/tari-project/tari/commit/308f5299007a67df8fb9fe73763809264005e35c) +* add a not before proof (#5560) 
([11f42fb0](https://github.com/tari-project/tari/commit/11f42fb0942da3bd64db8ad203b75c364dbe0926) +* borsh sized serialization should be fallible (#5537) ([53058ce2](https://github.com/tari-project/tari/commit/53058ce299cb89f118017ccec5e98a991a7fcbcc) +* add documentation to covenant crate (#5524) ([442d75b0](https://github.com/tari-project/tari/commit/442d75b09f439e4bc81919fc42eaf43846b2c8ca) +* covenants audit (#5526) ([dbb59758](https://github.com/tari-project/tari/commit/dbb59758a92cdf4483574dc6e7c719efa94eedfd) +* add validator mr to mining hash ([#5615](https://github.com/brianp/tari/issues/5615)) ([91db6fb](https://github.com/brianp/tari/commit/91db6fb3b9ee1998d186fba3bbb57c970d8e4c5c)) +* add-peer also dials the peer ([#5727](https://github.com/brianp/tari/issues/5727)) ([cc8573a](https://github.com/brianp/tari/commit/cc8573ae3ec69d748d3793f02136fd6772983850)) +* addition overflow when coinbase + fees is too high ([#5706](https://github.com/brianp/tari/issues/5706)) ([13993f1](https://github.com/brianp/tari/commit/13993f1763eee84f566d6aea83661eb868e47eff)) +* adds bans for horizon sync ([#5661](https://github.com/brianp/tari/issues/5661)) ([826473d](https://github.com/brianp/tari/commit/826473d2a96fc6c978e5ccdce38c052919514a37)) +* ban peers if they send a bad protobuf message ([#5693](https://github.com/brianp/tari/issues/5693)) ([58cbfe6](https://github.com/brianp/tari/commit/58cbfe677f7328d4c9f9c98b1ada1acb369a47ac)) +* better timeout for lagging ([#5705](https://github.com/brianp/tari/issues/5705)) ([5e8a3ec](https://github.com/brianp/tari/commit/5e8a3ecbc9a00cee823260d4a5e33b3e3a60bc9c)) +* check bytes remaining on monero blocks ([#5610](https://github.com/brianp/tari/issues/5610)) ([1087fa9](https://github.com/brianp/tari/commit/1087fa9d7846b1bd11431475cc8ca3fd9def8ec6)) +* **comms/dht:** limit number of peer claims and addresses for all sources ([#5702](https://github.com/brianp/tari/issues/5702)) 
([88ed293](https://github.com/brianp/tari/commit/88ed2935f5094e669470f2c015d055f9c3286941)) +* **comms:** check multiple addresses for inbound liveness check ([#5611](https://github.com/brianp/tari/issues/5611)) ([3937ae4](https://github.com/brianp/tari/commit/3937ae422f57f936ad3d2ead8b92ce4fa5adf855)) +* **comms:** dont overwrite ban-reason in add_peer ([#5720](https://github.com/brianp/tari/issues/5720)) ([3b9890b](https://github.com/brianp/tari/commit/3b9890ba5857cc8767be77a024d01bf4826e3956)) +* **comms:** greatly reduce timeouts for first byte and noise handshake ([#5728](https://github.com/brianp/tari/issues/5728)) ([47a3196](https://github.com/brianp/tari/commit/47a319616dde78c243b4558a51a7d81efc8393e1)) +* **comms:** only permit a single inbound messaging substream per peer ([#5731](https://github.com/brianp/tari/issues/5731)) ([c91a35f](https://github.com/brianp/tari/commit/c91a35f82557afd39c9b83f643876630bb4275c5)) +* **comms:** timeout and ban for bad behaviour in protocol negotiation ([#5679](https://github.com/brianp/tari/issues/5679)) ([d03d0b5](https://github.com/brianp/tari/commit/d03d0b5fc58d4e284b1f6ce4554830fdbbb78efe)) +* **comms:** use noise XX handshake pattern for improved privacy ([#5696](https://github.com/brianp/tari/issues/5696)) ([d0ea406](https://github.com/brianp/tari/commit/d0ea406e57b8bbb65196c2e880671da2e51f2b62)) +* **core:** always pass the correct timestamp window to header validator ([#5624](https://github.com/brianp/tari/issues/5624)) ([29700c3](https://github.com/brianp/tari/commit/29700c3d9aa4698742c0c9cd5e313fd3d0727626)) +* **dht:** add SAF bans ([#5711](https://github.com/brianp/tari/issues/5711)) ([594e03e](https://github.com/brianp/tari/commit/594e03eada389c1a131d5877f42f8c43b85a9fbe)) +* **dht:** limit peer sync and ban on server-caused errors ([#5714](https://github.com/brianp/tari/issues/5714)) ([b3f2dca](https://github.com/brianp/tari/commit/b3f2dcae88740abd1bd4c64f64d89010a13a214b)) +* duplicate tari header in
monero coinbase ([#5604](https://github.com/brianp/tari/issues/5604)) ([f466840](https://github.com/brianp/tari/commit/f466840a24cd678aac82ae4eaa2661dca2567675)) +* error out the stx protocol if the sender sends unsupported data ([#5572](https://github.com/brianp/tari/issues/5572)) ([8a085cd](https://github.com/brianp/tari/commit/8a085cded40b95fb5d3136743a97e50874ee2903)) +* handle out of sync errors when returning mempool transactions ([#5701](https://github.com/brianp/tari/issues/5701)) ([b0337cf](https://github.com/brianp/tari/commit/b0337cfaac92939db968231cc368b56836c2cf7e)) +* handle target difficulty conversion failure ([#5710](https://github.com/brianp/tari/issues/5710)) ([431c35a](https://github.com/brianp/tari/commit/431c35ac5006d5cd265484e98a224b7f7e75703f)) +* header sync ([#5647](https://github.com/brianp/tari/issues/5647)) ([4583eef](https://github.com/brianp/tari/commit/4583eef444f4f71d6d702a9997566dad42a9fce4)) +* horizon sync ([#5724](https://github.com/brianp/tari/issues/5724)) ([660a5c1](https://github.com/brianp/tari/commit/660a5c1119f76ce30386860b27ed21316d9ace55)) +* **horizon_sync:** check for leftover unpruned outputs ([#5704](https://github.com/brianp/tari/issues/5704)) ([dc5cfce](https://github.com/brianp/tari/commit/dc5cfced6b81b8c7c036db920f7cbbf36d601789)) +* **horizon_sync:** check max number of kernels/utxos from peer ([#5703](https://github.com/brianp/tari/issues/5703)) ([5e4f3c2](https://github.com/brianp/tari/commit/5e4f3c20f0de1d0d7c525cdcfbe86e56b9e909f3)) +* **horizon_sync:** try sync with next next peer if current one fails ([#5699](https://github.com/brianp/tari/issues/5699)) ([a58ec1f](https://github.com/brianp/tari/commit/a58ec1f40fbc57e147e6fb5c21c6b2b5151150df)) +* limit monero hashes and force coinbase to be tx 0 ([#5602](https://github.com/brianp/tari/issues/5602)) ([2af1198](https://github.com/brianp/tari/commit/2af119824e3b21294c4545b18b2fb6a86bb96ea4)) +* make sure all needed libs are required for chatffi 
([#5659](https://github.com/brianp/tari/issues/5659)) ([241ca67](https://github.com/brianp/tari/commit/241ca673ee5b3503198f3e662383ad0f6387313c)) +* memory overflow panic ([#5658](https://github.com/brianp/tari/issues/5658)) ([304e40f](https://github.com/brianp/tari/commit/304e40fb44a3dd9765c10147e1ee85344769c55a)) +* miner delay attack ([#5582](https://github.com/brianp/tari/issues/5582)) ([bece2d0](https://github.com/brianp/tari/commit/bece2d0bf82c757808723dba6ec3456bb8e23b2e)) +* minor fixes for multiple address support ([#5617](https://github.com/brianp/tari/issues/5617)) ([efa36eb](https://github.com/brianp/tari/commit/efa36eb7dc92905cc085359c35255678136a15b1)) +* monero fork attack ([#5603](https://github.com/brianp/tari/issues/5603)) ([9c81b4d](https://github.com/brianp/tari/commit/9c81b4d875aa7794226a97a4a90c9c0b3d6d4585)) +* only allow a monero header if it serializes back to the same data ([#5716](https://github.com/brianp/tari/issues/5716)) ([e70c752](https://github.com/brianp/tari/commit/e70c752d6014f0dd9d1a7aeda9a39bbd6dabc21b)) +* peer connection to stale nodes ([#5579](https://github.com/brianp/tari/issues/5579)) ([eebda00](https://github.com/brianp/tari/commit/eebda00bd28aae70813c644ff2b63925cc934ced)) +* potential u64 overflow panic ([#5688](https://github.com/brianp/tari/issues/5688)) ([f261b79](https://github.com/brianp/tari/commit/f261b7900f879ad991de42073094f8cb4443b8d2)) +* prevent access violation when running multiple vms at the same time ([#5734](https://github.com/brianp/tari/issues/5734)) ([18aead2](https://github.com/brianp/tari/commit/18aead232c2da7f6ec4eda152f8ce53e2601a92d)) +* remove potential u64 overflow panic ([#5686](https://github.com/brianp/tari/issues/5686)) ([90a8a21](https://github.com/brianp/tari/commit/90a8a21765f2c1a6930775ed4cd95fe8766b02d8)) +* remove tari prefix and only allow one mergemining tag ([#5722](https://github.com/brianp/tari/issues/5722)) 
([3a7c227](https://github.com/brianp/tari/commit/3a7c227002f8bfacde2ab8081c79bfac435484ce)) +* remove timestamp from header in proto files ([#5667](https://github.com/brianp/tari/issues/5667)) ([403b0c6](https://github.com/brianp/tari/commit/403b0c62af9ed2f2eefc48e0feb5025d8c853ecc)) +* save dial result on error ([#5717](https://github.com/brianp/tari/issues/5717)) ([c66af69](https://github.com/brianp/tari/commit/c66af69e5ccb31d2fcaf9a8fa29d2e0b5470eeba)) +* sorted edge case ([#5590](https://github.com/brianp/tari/issues/5590)) ([f7b2193](https://github.com/brianp/tari/commit/f7b21930c7841e7a8801f4c37d1ee0e8111162bb)) +* sparse Merkle tree key querying ([#5566](https://github.com/brianp/tari/issues/5566)) ([623839f](https://github.com/brianp/tari/commit/623839f58116c0828bc5406adbd1dd1b68e7bb3d)) +* syncing from prune node ([#5733](https://github.com/brianp/tari/issues/5733)) ([166f469](https://github.com/brianp/tari/commit/166f469cd1122676ec95b88163ee97058cc28fdf)) +* **sync:** remove mem::take in all syncs ([#5721](https://github.com/brianp/tari/issues/5721)) ([a48e430](https://github.com/brianp/tari/commit/a48e430b6b5bc21c5998009738be1436e479f7ec)) +* **sync:** unify ban logic in all sync processes ([#5713](https://github.com/brianp/tari/issues/5713)) ([4b2b28b](https://github.com/brianp/tari/commit/4b2b28bf2390c400d547cdaa801ff967eb92ac38)) +* update peers seed for esme ([#5573](https://github.com/brianp/tari/issues/5573)) ([0f6b750](https://github.com/brianp/tari/commit/0f6b7504bbfc902ffab89f1904dee237270c690b)) +* add lock height and kernel features checks on default transactions ([#5836](https://github.com/tari-project/tari/issues/5836)) ([1f87226](https://github.com/tari-project/tari/commit/1f87226722b12750424ab2f4861fe0475a67dfd6)) +* ban peer if it sends bad liveness data ([#5844](https://github.com/tari-project/tari/issues/5844)) ([eb40fc4](https://github.com/tari-project/tari/commit/eb40fc44cfc0605545ba9e831c8d27209a4db51f)) +* change truncate_from_bits 
to from_bits ([#5773](https://github.com/tari-project/tari/issues/5773)) ([fb18078](https://github.com/tari-project/tari/commit/fb18078d888b7c65601e8261d66fca366ffff28b)) +* chat ffi seed peers ([#5786](https://github.com/tari-project/tari/issues/5786)) ([c04996f](https://github.com/tari-project/tari/commit/c04996f01f3e5627acc376a27e7abcb61d7dda5c)) +* **chatffi:** return and read from ptrs ([#5827](https://github.com/tari-project/tari/issues/5827)) ([dd2eddb](https://github.com/tari-project/tari/commit/dd2eddbe9280870485974edd611e224ae585b76a)) +* **comms+dht:** mark peers as online inbound connection,join ([#5741](https://github.com/tari-project/tari/issues/5741)) ([e8413ea](https://github.com/tari-project/tari/commit/e8413ea364c0a17785b475ac57d74244b62a7375)) +* **diagrams:** missing quotes for messaging diagram ([#5750](https://github.com/tari-project/tari/issues/5750)) ([a8f6eb5](https://github.com/tari-project/tari/commit/a8f6eb5e48e6e823b96919bec87843300311caae)) +* **diagrams:** missing quotes for protocol negotiation diagram ([#5751](https://github.com/tari-project/tari/issues/5751)) ([45c20a3](https://github.com/tari-project/tari/commit/45c20a30b849b92e1f6fe402d7e7e657ccf9f663)) +* don't ban a peer for sending a banned peer ([#5843](https://github.com/tari-project/tari/issues/5843)) ([12f8a75](https://github.com/tari-project/tari/commit/12f8a75060e1d15fbeac589c568f7ee9e04eb900)) +* fix erroneous warning message ([#5846](https://github.com/tari-project/tari/issues/5846)) ([8afcd8b](https://github.com/tari-project/tari/commit/8afcd8b5545a433c92d3a47b4f85b4e89a5408b8)) +* get rid of possible 'expect' ([#5794](https://github.com/tari-project/tari/issues/5794)) ([467a8d4](https://github.com/tari-project/tari/commit/467a8d4f4493814f1102d6863fc844896e94a8ec)) +* grpc request overflows ([#5812](https://github.com/tari-project/tari/issues/5812)) ([36d72e8](https://github.com/tari-project/tari/commit/36d72e8b2239870550060fc9e0c183131ee3c2fa)) +* handle possible 
underflow in smt ([#5769](https://github.com/tari-project/tari/issues/5769)) ([558e6f2](https://github.com/tari-project/tari/commit/558e6f2bf7d00fb2c7c506b7000237aba928238c)) +* listing mode is synced ([#5830](https://github.com/tari-project/tari/issues/5830)) ([ff5a5d8](https://github.com/tari-project/tari/commit/ff5a5d82e3ddbe191bda8b8132590c2afb3282f2)) +* mempool panic ([#5814](https://github.com/tari-project/tari/issues/5814)) ([754fb16](https://github.com/tari-project/tari/commit/754fb16e4ae79bb8d712419f0f6bf59efbaf0ce1)) +* **p2p:** enable auto join when online ([#5738](https://github.com/tari-project/tari/issues/5738)) ([eb74bbb](https://github.com/tari-project/tari/commit/eb74bbba3746b78c3fd8e0ee5066f1d4d987af3e)) +* panic overflow ([#5819](https://github.com/tari-project/tari/issues/5819)) ([af31ba1](https://github.com/tari-project/tari/commit/af31ba1e6deb64a68ec74eac090fdcfc9e8a52ca)) +* possible exception in request_context ([#5784](https://github.com/tari-project/tari/issues/5784)) ([6c8e2d3](https://github.com/tari-project/tari/commit/6c8e2d395799757e5a946fe01226f739d0706741)) +* potential index out of bounds ([#5775](https://github.com/tari-project/tari/issues/5775)) ([f17ac6b](https://github.com/tari-project/tari/commit/f17ac6b61edfe47dacf091969382c6b17e7bf214)) +* potential overflow ([#5759](https://github.com/tari-project/tari/issues/5759)) ([5c93e35](https://github.com/tari-project/tari/commit/5c93e35c785a7a19f8e6c762e3f1df8f8207877e)) +* potential overflow ([#5778](https://github.com/tari-project/tari/issues/5778)) ([1d1332d](https://github.com/tari-project/tari/commit/1d1332d21ba0db18e9f3a3c253963fc1735b8193)) +* potential sync stuck ([#5760](https://github.com/tari-project/tari/issues/5760)) ([c5ed816](https://github.com/tari-project/tari/commit/c5ed816c80eae43348593e636e4b56da98d8af6b)) +* recovery passphrase flow ([#5877](https://github.com/tari-project/tari/issues/5877)) 
([4159b76](https://github.com/tari-project/tari/commit/4159b766669e682bb9593c4e7cd3ddb298a56e0b)) +* remove peer ([#5757](https://github.com/tari-project/tari/issues/5757)) ([4c48a26](https://github.com/tari-project/tari/commit/4c48a26f20d800b2098c18b723dfb83cb878f0ad)) +* remove statement from sparse Merkle tree proofs ([#5768](https://github.com/tari-project/tari/issues/5768)) ([d630d11](https://github.com/tari-project/tari/commit/d630d114f1866f24e729cda0f8cf19f298e7bd50)) +* stuck on sync ([#5739](https://github.com/tari-project/tari/issues/5739)) ([33b37a8](https://github.com/tari-project/tari/commit/33b37a8c37f3e1883ef3ebf27a8e18d4dd63fc92)) +* unwraps in rpc client ([#5770](https://github.com/tari-project/tari/issues/5770)) ([6f0d20a](https://github.com/tari-project/tari/commit/6f0d20aa30d3dcc23630d3a9650802f8c1ce3a61)) +* **proto:** remove proto bytes for std bytes ([#5835](https://github.com/tari-project/tari/issues/5835)) ([491ed83](https://github.com/tari-project/tari/commit/491ed83aaea166a6e60d40e76b8574625b56cf98)) +* **proto:** remove proto timestamp wrapper types ([#5833](https://github.com/tari-project/tari/issues/5833)) ([43b994e](https://github.com/tari-project/tari/commit/43b994e62378a9ed241842fc18f01d69231f089f)) +* upgrade bitflags crate ([#5831](https://github.com/tari-project/tari/issues/5831)) ([dae7dd9](https://github.com/tari-project/tari/commit/dae7dd9d1f2277b6192dc0ed7bea26b7d2d946ac)) +* lmdb flag set wrong on database ([#5916](https://github.com/tari-project/tari/issues/5916)) ([60efd35](https://github.com/tari-project/tari/commit/60efd353973a87b1e0cebc7246649a38b5731051)) +* **tariscript:** protect compare and check height from underflows ([#5872](https://github.com/tari-project/tari/issues/5872)) ([aa2ae10](https://github.com/tari-project/tari/commit/aa2ae1066818c1776bd268932fbd3be09f21bf0e)) +* display ([#5982](https://github.com/tari-project/tari/issues/5982)) 
([8cce48c](https://github.com/tari-project/tari/commit/8cce48cd8bd9b6f780376030918972e993fc1ab7)) +* fix opcode signatures ([#5966](https://github.com/tari-project/tari/issues/5966)) ([dc26ca6](https://github.com/tari-project/tari/commit/dc26ca6aeeb4196d0496f2977027ac63a4324043)) +* fix the windows installer ([#5938](https://github.com/tari-project/tari/issues/5938)) ([3e65a28](https://github.com/tari-project/tari/commit/3e65a28c5e3729024d70e2b7f55910c8c808495c)) +* fix the windows installer auto build ([#5939](https://github.com/tari-project/tari/issues/5939)) ([a138b78](https://github.com/tari-project/tari/commit/a138b7892d4b41a460b8dd8b9466f34e90f65469)) +* **shutdown:** is_triggered returns up-to-date value without first polling ([#5997](https://github.com/tari-project/tari/issues/5997)) ([49f2053](https://github.com/tari-project/tari/commit/49f20534ec808427d059cde6892adc5597f33391)) +* standardize gRPC authentication and mitigate DoS ([#5936](https://github.com/tari-project/tari/issues/5936)) ([623f127](https://github.com/tari-project/tari/commit/623f12768daf8329731249cf7e4c644e338d9700)) +* **tariscript:** multisig ordered signatures and pubkeys ([#5961](https://github.com/tari-project/tari/issues/5961)) ([14e334a](https://github.com/tari-project/tari/commit/14e334aff346aae8a081599488135c905c2c1f84)) +* update `ToRistrettoPoint` handling ([#5973](https://github.com/tari-project/tari/issues/5973)) ([12e84f4](https://github.com/tari-project/tari/commit/12e84f42ee1842875f72716833e96d0b84460c78)) +* new faucet for esmeralda ([#6001](https://github.com/tari-project/tari/issues/6001)) ([4eccc39](https://github.com/tari-project/tari/commit/4eccc392394b03e974b36538096f640d2b98d25d)) +* remove mutable mmr ([#5954](https://github.com/tari-project/tari/issues/5954)) ([0855583](https://github.com/tari-project/tari/commit/0855583c9fb138f7d1633c1829a8cf3f23048c49)) +* ups the min difficulty ([#5999](https://github.com/tari-project/tari/issues/5999)) 
([fc1e555](https://github.com/tari-project/tari/commit/fc1e555edc56c9d01d7e9cb4d2c7cd0421616034)) +* **chat:** chat client possible panics ([#6015](https://github.com/tari-project/tari/issues/6015)) ([cf66c51](https://github.com/tari-project/tari/commit/cf66c51483f4b2744221fb652f3b32340d2ee693)) +* chat build ([#6026](https://github.com/tari-project/tari/issues/6026)) ([15793b7](https://github.com/tari-project/tari/commit/15793b7e4dfdcaaad6ec90e357348daf42300eab)) +* remove duplicate config settings ([#6029](https://github.com/tari-project/tari/issues/6029)) ([662af28](https://github.com/tari-project/tari/commit/662af28bf811c771cf0fdf9b583c1296a2283188)) + ## [1.0.0-rc.2](https://github.com/tari-project/tari/compare/v1.0.0-rc.1...v1.0.0-rc.2) (2024-01-18) diff --git a/clients/nodejs/base_node_grpc_client/src/index.test.js b/clients/nodejs/base_node_grpc_client/src/index.test.js index 632d2bb7d3..36c82fadf3 100644 --- a/clients/nodejs/base_node_grpc_client/src/index.test.js +++ b/clients/nodejs/base_node_grpc_client/src/index.test.js @@ -20,5 +20,5 @@ test("getTipInfo", async () => { const response = await baseNode.getTipInfo(); expect(response.metadata).toBeDefined(); const metadata = response.metadata; - expect(metadata.height_of_longest_chain).toMatch(/\d+/); + expect(metadata.best_block_height).toMatch(/\d+/); }); diff --git a/common/Cargo.toml b/common/Cargo.toml index 4a7adc2e56..3e1c773387 100644 --- a/common/Cargo.toml +++ b/common/Cargo.toml @@ -22,7 +22,7 @@ config = { version = "0.13.0", default_features = false, features = ["toml"] } dirs-next = "1.0.2" git2 = { version = "0.18", default_features = false, optional = true } log = "0.4.8" -log4rs = { git = "https://github.com/tari-project/log4rs.git", default_features = false, features = ["config_parsing", "threshold_filter", "yaml_format"] } +log4rs = { version = "1.3.0", default_features = false, features = ["config_parsing", "threshold_filter", "yaml_format"] } multiaddr = { version = "0.14.0" } 
path-clean = "0.1.0" prost-build = { version = "0.11.9", optional = true } diff --git a/comms/core/Cargo.toml b/comms/core/Cargo.toml index 47b20ed625..d82bdf3bfb 100644 --- a/comms/core/Cargo.toml +++ b/comms/core/Cargo.toml @@ -39,7 +39,7 @@ rand = "0.8" serde = "1.0.119" serde_derive = "1.0.119" sha3 = "0.10" -snow = { version = "0.9.4", features = ["default-resolver"] } +snow = { version = "0.9.5", features = ["default-resolver"] } thiserror = "1.0.26" tokio = { version = "1.23", features = ["rt-multi-thread", "time", "sync", "signal", "net", "macros", "io-util"] } tokio-stream = { version = "0.1.9", features = ["sync"] } diff --git a/comms/core/examples/stress/service.rs b/comms/core/examples/stress/service.rs index 7880c07519..2199638f4b 100644 --- a/comms/core/examples/stress/service.rs +++ b/comms/core/examples/stress/service.rs @@ -63,10 +63,9 @@ pub fn start_service( let (request_tx, request_rx) = mpsc::channel(1); println!( - "Node credentials are {}::{:?} (local_listening_addr='{}')", + "Node credentials are {}::{:?})", node_identity.public_key().to_hex(), node_identity.public_addresses(), - comms_node.listening_address(), ); let service = StressTestService::new( diff --git a/comms/core/examples/stress_test.rs b/comms/core/examples/stress_test.rs index a101198b9e..b39cc07d1a 100644 --- a/comms/core/examples/stress_test.rs +++ b/comms/core/examples/stress_test.rs @@ -95,7 +95,7 @@ async fn run() -> Result<(), Error> { temp_dir.as_ref(), public_ip, port, - tor_identity, + tor_identity.clone(), is_tcp, shutdown.to_signal(), ) @@ -105,7 +105,7 @@ async fn run() -> Result<(), Error> { } if !is_tcp { if let Some(tor_identity_path) = tor_identity_path.as_ref() { - save_json(comms_node.hidden_service().unwrap().tor_identity(), tor_identity_path)?; + save_json(&tor_identity.unwrap(), tor_identity_path)?; } } diff --git a/comms/core/examples/tor.rs b/comms/core/examples/tor.rs index ac33ee50c7..cf3b6ef1d9 100644 --- a/comms/core/examples/tor.rs +++ 
b/comms/core/examples/tor.rs @@ -87,16 +87,14 @@ async fn run() -> Result<(), Error> { println!("Comms nodes started!"); println!( - "Node 1 is '{}' with address '{:?}' (local_listening_addr='{}')", + "Node 1 is '{}' with address '{:?}')", node_identity1.node_id().short_str(), node_identity1.public_addresses(), - comms_node1.listening_address(), ); println!( - "Node 2 is '{}' with address '{:?}' (local_listening_addr='{}')", + "Node 2 is '{}' with address '{:?}')", node_identity2.node_id().short_str(), node_identity2.public_addresses(), - comms_node2.listening_address(), ); // Let's add node 2 as a peer to node 1 diff --git a/comms/core/src/builder/comms_node.rs b/comms/core/src/builder/comms_node.rs index 649497c2c7..b9bd002a98 100644 --- a/comms/core/src/builder/comms_node.rs +++ b/comms/core/src/builder/comms_node.rs @@ -23,7 +23,6 @@ use std::{iter, sync::Arc, time::Duration}; use log::*; -use multiaddr::{multiaddr, Protocol}; use tari_shutdown::ShutdownSignal; use tokio::{ io::{AsyncRead, AsyncWrite}, @@ -37,7 +36,6 @@ use crate::{ ConnectionManagerEvent, ConnectionManagerRequest, ConnectionManagerRequester, - ListenerInfo, LivenessCheck, LivenessStatus, }, @@ -143,7 +141,7 @@ impl UnspawnedCommsNode { let UnspawnedCommsNode { builder, connection_manager_request_rx, - mut connection_manager_requester, + connection_manager_requester, connectivity_requester, connectivity_rx, node_identity, @@ -155,7 +153,6 @@ impl UnspawnedCommsNode { let CommsBuilder { dial_backoff, - hidden_service_ctl, connection_manager_config, connectivity_config, .. @@ -217,29 +214,6 @@ impl UnspawnedCommsNode { "Your node's network ID is '{}'", node_identity.node_id() ); - - let listening_info = connection_manager_requester.wait_until_listening().await?; - - // Final setup of the hidden service. 
- let mut hidden_service = None; - if let Some(mut ctl) = hidden_service_ctl { - // Only set the address to the bind address it is set to TCP port 0 - let mut proxied_addr = ctl.proxied_address(); - if proxied_addr.ends_with(&multiaddr!(Tcp(0u16))) { - // Remove the TCP port 0 address and replace it with the actual listener port - if let Some(Protocol::Tcp(port)) = listening_info.bind_address().iter().last() { - proxied_addr.pop(); - proxied_addr.push(Protocol::Tcp(port)); - ctl.set_proxied_addr(&proxied_addr); - } - } - let hs = ctl.create_hidden_service().await?; - let onion_addr = hs.get_onion_address(); - if !node_identity.public_addresses().contains(&onion_addr) { - node_identity.add_public_address(onion_addr); - } - hidden_service = Some(hs); - } info!( target: LOG_TARGET, "Your node's public addresses are '{}'", @@ -266,11 +240,9 @@ impl UnspawnedCommsNode { shutdown_signal, connection_manager_requester, connectivity_requester, - listening_info, node_identity, peer_manager, liveness_watch, - hidden_service, complete_signals: ext_context.drain_complete_signals(), }) } @@ -312,12 +284,8 @@ pub struct CommsNode { node_identity: Arc, /// Shared PeerManager instance peer_manager: Arc, - /// The bind addresses of the listener(s) - listening_info: ListenerInfo, /// Current liveness status liveness_watch: watch::Receiver, - /// `Some` if the comms node is configured to run via a hidden service, otherwise `None` - hidden_service: Option, /// The 'reciprocal' shutdown signals for each comms service complete_signals: Vec, } @@ -328,6 +296,10 @@ impl CommsNode { self.connection_manager_requester.get_event_subscription() } + pub fn connection_manager_requester(&mut self) -> &mut ConnectionManagerRequester { + &mut self.connection_manager_requester + } + /// Get a subscription to `ConnectivityEvent`s pub fn subscribe_connectivity_events(&self) -> ConnectivityEventRx { self.connectivity_requester.get_event_subscription() @@ -348,26 +320,11 @@ impl CommsNode { 
&self.node_identity } - /// Return the Ip/Tcp address that this node is listening on - pub fn listening_address(&self) -> &Multiaddr { - self.listening_info.bind_address() - } - - /// Return [ListenerInfo] - pub fn listening_info(&self) -> &ListenerInfo { - &self.listening_info - } - /// Returns the current liveness status pub fn liveness_status(&self) -> LivenessStatus { *self.liveness_watch.borrow() } - /// Return the Ip/Tcp address that this node is listening on - pub fn hidden_service(&self) -> Option<&tor::HiddenService> { - self.hidden_service.as_ref() - } - /// Return a handle that is used to call the connectivity service. pub fn connectivity(&self) -> ConnectivityRequester { self.connectivity_requester.clone() diff --git a/comms/core/src/builder/tests.rs b/comms/core/src/builder/tests.rs index a4d8a0ae9c..02626c75e7 100644 --- a/comms/core/src/builder/tests.rs +++ b/comms/core/src/builder/tests.rs @@ -88,7 +88,7 @@ async fn spawn_node( .unwrap(); let (messaging_events_sender, _) = broadcast::channel(100); - let comms_node = comms_node + let mut comms_node = comms_node .add_protocol_extensions(protocols.into()) .add_protocol_extension( MessagingProtocolExtension::new( @@ -107,8 +107,12 @@ async fn spawn_node( .spawn_with_transport(MemoryTransport) .await .unwrap(); - - unpack_enum!(Protocol::Memory(_port) = comms_node.listening_address().iter().next().unwrap()); + let address = comms_node + .connection_manager_requester() + .wait_until_listening() + .await + .unwrap(); + unpack_enum!(Protocol::Memory(_port) = address.bind_address().iter().next().unwrap()); (comms_node, inbound_rx, outbound_tx, messaging_events_sender) } diff --git a/comms/core/src/connection_manager/dialer.rs b/comms/core/src/connection_manager/dialer.rs index b5bc59565c..695b381215 100644 --- a/comms/core/src/connection_manager/dialer.rs +++ b/comms/core/src/connection_manager/dialer.rs @@ -519,7 +519,7 @@ where // Inflight dial was cancelled (state, 
Err(ConnectionManagerError::DialCancelled)) => break (state, Err(ConnectionManagerError::DialCancelled)), (state, Err(err)) => { - warn!(target: LOG_TARGET, "Failed to dial peer {} | Attempt {} | Error: {}", state.peer().node_id.short_str(), state.num_attempts(), err); + debug!(target: LOG_TARGET, "Failed to dial peer {} | Attempt {} | Error: {}", state.peer().node_id.short_str(), state.num_attempts(), err); if state.num_attempts() >= config.max_dial_attempts { break (state, Err(ConnectionManagerError::ConnectFailedMaximumAttemptsReached)); } diff --git a/comms/core/src/tor/control_client/client.rs b/comms/core/src/tor/control_client/client.rs index 7c4fc63d9a..f9ce4f0e29 100644 --- a/comms/core/src/tor/control_client/client.rs +++ b/comms/core/src/tor/control_client/client.rs @@ -125,9 +125,6 @@ impl TorControlPortClient { pub async fn get_info(&mut self, key_name: &'static str) -> Result>, TorClientError> { let command = commands::get_info(key_name); let response = self.request_response(command).await?; - if response.is_empty() { - return Err(TorClientError::ServerNoResponse); - } Ok(response) } @@ -202,7 +199,6 @@ impl TorControlPortClient { let cmd_str = command.to_command_string().map_err(Into::into)?; self.send_line(cmd_str).await?; let responses = self.recv_next_responses().await?; - trace!(target: LOG_TARGET, "Response from tor: {:?}", responses); if responses.is_empty() { return Err(TorClientError::ServerNoResponse); } diff --git a/comms/core/src/tor/control_client/monitor.rs b/comms/core/src/tor/control_client/monitor.rs index 91cf700545..4185f85824 100644 --- a/comms/core/src/tor/control_client/monitor.rs +++ b/comms/core/src/tor/control_client/monitor.rs @@ -53,7 +53,7 @@ where match either { // Received a command to send to the control server Either::Left(Some(line)) => { - trace!(target: LOG_TARGET, "Writing command of length '{}'", line.len()); + trace!(target: LOG_TARGET, "Tor send: {}", line); if let Err(err) = sink.send(line).await { error!( 
target: LOG_TARGET, @@ -64,7 +64,7 @@ where }, // Command stream ended Either::Left(None) => { - debug!( + warn!( target: LOG_TARGET, "Tor control server command receiver closed. Monitor is exiting." ); @@ -73,7 +73,7 @@ where // Received a line from the control server Either::Right(Some(Ok(line))) => { - trace!(target: LOG_TARGET, "Read line of length '{}'", line.len()); + trace!(target: LOG_TARGET, "Tor recv: {}", line); match parsers::response_line(&line) { Ok(mut line) => { if line.is_multiline { @@ -116,7 +116,7 @@ where // The control server disconnected Either::Right(None) => { cmd_rx.close(); - debug!( + warn!( target: LOG_TARGET, "Connection to tor control port closed. Monitor is exiting." ); diff --git a/comms/core/src/tor/hidden_service/controller.rs b/comms/core/src/tor/hidden_service/controller.rs index a706da54df..1171f14b02 100644 --- a/comms/core/src/tor/hidden_service/controller.rs +++ b/comms/core/src/tor/hidden_service/controller.rs @@ -83,7 +83,7 @@ pub struct HiddenServiceController { proxied_port_mapping: PortMapping, socks_address_override: Option, socks_auth: socks::Authentication, - identity: Option, + pub identity: Option, hs_flags: HsFlags, is_authenticated: bool, proxy_opts: TorProxyOpts, @@ -125,6 +125,7 @@ impl HiddenServiceController { pub async fn initialize_transport(&mut self) -> Result { self.connect_and_auth().await?; + let socks_addr = self.get_socks_address().await?; Ok(SocksTransport::new(SocksConfig { proxy_address: socks_addr, diff --git a/comms/core/src/transports/hidden_service_transport.rs b/comms/core/src/transports/hidden_service_transport.rs new file mode 100644 index 0000000000..04ae542a7e --- /dev/null +++ b/comms/core/src/transports/hidden_service_transport.rs @@ -0,0 +1,141 @@ +// Copyright 2022. The Tari Project +// +// Redistribution and use in source and binary forms, with or without modification, are permitted provided that the +// following conditions are met: +// +// 1. 
Redistributions of source code must retain the above copyright notice, this list of conditions and the following +// disclaimer. +// +// 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the +// following disclaimer in the documentation and/or other materials provided with the distribution. +// +// 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote +// products derived from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, +// INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, +// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +// USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +use std::{io, io::ErrorKind, sync::Arc}; + +use log::*; +use multiaddr::{multiaddr, Multiaddr, Protocol}; +use tokio::sync::RwLock; + +use crate::{ + tor::{HiddenServiceController, TorIdentity}, + transports::{tcp::TcpInbound, SocksTransport, Transport}, +}; + +const LOG_TARGET: &str = "comms::transports::hidden_service_transport"; + +#[derive(thiserror::Error, Debug)] +pub enum HiddenServiceTransportError { + #[error("Tor hidden service transport error: `{0}`")] + HiddenServiceControllerError(#[from] crate::tor::HiddenServiceControllerError), + #[error("Tor hidden service socks error: `{0}`")] + SocksTransportError(#[from] io::Error), +} + +struct HiddenServiceTransportInner { + socks_transport: Option, + hidden_service_ctl: Option, +} + +#[derive(Clone)] +pub struct HiddenServiceTransport { + inner: Arc>, + after_init: F, +} + +impl HiddenServiceTransport { + pub fn new(hidden_service_ctl: HiddenServiceController, after_init: F) -> Self { + Self { + inner: Arc::new(RwLock::new(HiddenServiceTransportInner { + socks_transport: None, + hidden_service_ctl: Some(hidden_service_ctl), + })), + after_init, + } + } + + async fn is_initialized(&self) -> bool { + self.inner.read().await.socks_transport.is_some() + } + + async fn initialize(&self, listen_addr: &Multiaddr) -> Result<(TcpInbound, Multiaddr), io::Error> { + let mut inner_mut = self.inner.write().await; + let mut hs_ctl = inner_mut.hidden_service_ctl.take().ok_or(io::Error::new( + ErrorKind::Other, + "BUG: Hidden service controller not set in transport".to_string(), + ))?; + + let transport = hs_ctl.initialize_transport().await.map_err(|e| { + error!( + target: LOG_TARGET, + "Error initializing hidden transport service stack{}", + e + ); + io::Error::new(ErrorKind::Other, e.to_string()) + })?; + let (inbound, listen_addr) = transport.listen(listen_addr).await?; + inner_mut.socks_transport = Some(transport); + + // Set the proxied address to the port we just listened on + let mut proxied_addr = 
hs_ctl.proxied_address(); + if proxied_addr.ends_with(&multiaddr!(Tcp(0u16))) { + if let Some(Protocol::Tcp(port)) = listen_addr.iter().last() { + proxied_addr.pop(); + proxied_addr.push(Protocol::Tcp(port)); + } + hs_ctl.set_proxied_addr(&proxied_addr); + } + + let hidden_service = hs_ctl.create_hidden_service().await.map_err(|err| { + error!( + target: LOG_TARGET, + "Error creating hidden service: {}", + err + ); + io::Error::new(ErrorKind::Other, err.to_string()) + })?; + + (self.after_init)(hidden_service.tor_identity().clone()); + Ok((inbound, listen_addr)) + } +} +#[crate::async_trait] +impl Transport for HiddenServiceTransport { + type Error = ::Error; + type Listener = ::Listener; + type Output = ::Output; + + async fn listen(&self, addr: &Multiaddr) -> Result<(Self::Listener, Multiaddr), Self::Error> { + if self.is_initialized().await { + // For now, we only can listen on a single Tor hidden service. This behaviour is not technically correct as + // per the Transport trait, but we only ever call listen once in practice. The fix for this is to + // improve the tor client implementation to allow for multiple hidden services. 
+ return Err(io::Error::new( + ErrorKind::Other, + "BUG: Hidden service transport already initialized".to_string(), + )); + } + let (listener, addr) = self.initialize(addr).await?; + Ok((listener, addr)) + } + + async fn dial(&self, addr: &Multiaddr) -> Result { + let inner = self.inner.read().await; + let transport = inner.socks_transport.as_ref().ok_or_else(|| { + io::Error::new( + ErrorKind::Other, + "BUG: Hidden service transport not initialized before dialling".to_string(), + ) + })?; + transport.dial(addr).await + } +} diff --git a/comms/core/src/transports/mod.rs b/comms/core/src/transports/mod.rs index 45050f540d..1c4d40dd1b 100644 --- a/comms/core/src/transports/mod.rs +++ b/comms/core/src/transports/mod.rs @@ -47,7 +47,9 @@ pub use socks::{SocksConfig, SocksTransport}; mod tcp; pub use tcp::TcpTransport; +mod hidden_service_transport; mod tcp_with_tor; +pub use hidden_service_transport::HiddenServiceTransport; pub use tcp_with_tor::TcpWithTorTransport; /// Defines an abstraction for implementations that can dial and listen for connections over a provided address. 
diff --git a/comms/core/tests/tests/rpc.rs b/comms/core/tests/tests/rpc.rs index d97a0596d4..d4845d226f 100644 --- a/comms/core/tests/tests/rpc.rs +++ b/comms/core/tests/tests/rpc.rs @@ -44,15 +44,20 @@ async fn spawn_node(signal: ShutdownSignal) -> (CommsNode, RpcServerHandle) { .add_service(GreetingServer::new(GreetingService::default())); let rpc_server_hnd = rpc_server.get_handle(); - let comms = create_comms(signal) + let mut comms = create_comms(signal) .add_rpc_server(rpc_server) .spawn_with_transport(TcpTransport::new()) .await .unwrap(); + let address = comms + .connection_manager_requester() + .wait_until_listening() + .await + .unwrap(); comms .node_identity() - .set_public_addresses(vec![comms.listening_address().clone()]); + .set_public_addresses(vec![address.bind_address().clone()]); (comms, rpc_server_hnd) } diff --git a/comms/core/tests/tests/rpc_stress.rs b/comms/core/tests/tests/rpc_stress.rs index 0e27fa38f9..9a445e8f14 100644 --- a/comms/core/tests/tests/rpc_stress.rs +++ b/comms/core/tests/tests/rpc_stress.rs @@ -46,15 +46,20 @@ async fn spawn_node(signal: ShutdownSignal) -> CommsNode { .finish() .add_service(GreetingServer::new(GreetingService::default())); - let comms = create_comms(signal) + let mut comms = create_comms(signal) .add_rpc_server(rpc_server) .spawn_with_transport(TcpTransport::new()) .await .unwrap(); + let address = comms + .connection_manager_requester() + .wait_until_listening() + .await + .unwrap(); comms .node_identity() - .set_public_addresses(vec![comms.listening_address().clone()]); + .set_public_addresses(vec![address.bind_address().clone()]); comms } diff --git a/comms/core/tests/tests/substream_stress.rs b/comms/core/tests/tests/substream_stress.rs index d36a26d673..488ec9064c 100644 --- a/comms/core/tests/tests/substream_stress.rs +++ b/comms/core/tests/tests/substream_stress.rs @@ -41,15 +41,20 @@ const PROTOCOL_NAME: &[u8] = b"test/dummy/protocol"; pub async fn spawn_node(signal: ShutdownSignal) -> (CommsNode, 
ProtocolNotificationRx) { let (notif_tx, notif_rx) = mpsc::channel(1); - let comms = create_comms(signal) + let mut comms = create_comms(signal) .add_protocol(&[ProtocolId::from_static(PROTOCOL_NAME)], ¬if_tx) .spawn_with_transport(TcpTransport::new()) .await .unwrap(); + let address = comms + .connection_manager_requester() + .wait_until_listening() + .await + .unwrap(); comms .node_identity() - .set_public_addresses(vec![comms.listening_address().clone()]); + .set_public_addresses(vec![address.bind_address().clone()]); (comms, notif_rx) } diff --git a/hash_domains/Cargo.toml b/hash_domains/Cargo.toml index 28b7a1f3d8..0439960998 100644 --- a/hash_domains/Cargo.toml +++ b/hash_domains/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "tari_hash_domains" -version = "0.1.0" +version = "1.0.0-dan.5" edition = "2021" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html diff --git a/infrastructure/libtor/Cargo.toml b/infrastructure/libtor/Cargo.toml index 0828186d30..ce10f58618 100644 --- a/infrastructure/libtor/Cargo.toml +++ b/infrastructure/libtor/Cargo.toml @@ -1,13 +1,12 @@ [package] name = "tari_libtor" -version = "0.24.0" +version = "1.0.0-pre.8" edition = "2021" license = "BSD-3-Clause" [dependencies] tari_common = { path = "../../common" } tari_p2p = { path = "../../base_layer/p2p" } -tari_shutdown = { path = "../shutdown"} derivative = "2.2.0" log = "0.4.8" @@ -16,7 +15,7 @@ tempfile = "3.1.0" tor-hash-passwd = "1.0.1" [target.'cfg(unix)'.dependencies] -libtor = { version="46.9.0"} +libtor = { version = "46.9.0" } openssl = { version = "0.10.61", features = ["vendored"] } [package.metadata.cargo-machete] diff --git a/infrastructure/libtor/src/tor.rs b/infrastructure/libtor/src/tor.rs index e9b6387194..218e6f1928 100644 --- a/infrastructure/libtor/src/tor.rs +++ b/infrastructure/libtor/src/tor.rs @@ -20,7 +20,7 @@ // WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT 
OF THE // USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -use std::{fmt, io, net::TcpListener}; +use std::{fmt, io, net::TcpListener, path::PathBuf, thread}; use derivative::Derivative; use libtor::{LogDestination, LogLevel, TorFlag}; @@ -28,7 +28,6 @@ use log::*; use rand::{distributions::Alphanumeric, thread_rng, Rng}; use tari_common::exit_codes::{ExitCode, ExitError}; use tari_p2p::{TorControlAuthentication, TransportConfig, TransportType}; -use tari_shutdown::ShutdownSignal; use tempfile::{tempdir, NamedTempFile, TempDir, TempPath}; use tor_hash_passwd::EncryptedKey; @@ -46,7 +45,7 @@ impl fmt::Debug for TorPassword { #[derivative(Debug)] pub struct Tor { control_port: u16, - data_dir: String, + data_dir: PathBuf, log_destination: String, log_level: LogLevel, #[derivative(Debug = "ignore")] @@ -59,12 +58,12 @@ pub struct Tor { impl Default for Tor { fn default() -> Tor { Tor { - control_port: 19_051, + control_port: 0, data_dir: "/tmp/tor-data".into(), log_destination: "/tmp/tor.log".into(), log_level: LogLevel::Err, passphrase: TorPassword(None), - socks_port: 19_050, + socks_port: 0, temp_dir: None, temp_file: None, } @@ -83,6 +82,7 @@ impl Tor { // check for unused ports to assign let (socks_port, control_port) = get_available_ports()?; + debug!(target: LOG_TARGET, "Using socks port {socks_port} and control_port {control_port}"); instance.socks_port = socks_port; instance.control_port = control_port; @@ -96,9 +96,8 @@ impl Tor { // data dir let temp = tempdir()?; - let dir = temp.path().to_string_lossy().to_string(); + instance.data_dir = temp.path().to_path_buf(); instance.temp_dir = Some(temp); - instance.data_dir = dir; // log destination let temp = NamedTempFile::new()?.into_temp_path(); @@ -128,8 +127,8 @@ impl Tor { } } - /// Run the Tor instance until the shutdown signal is received - pub async fn run(self, mut shutdown_signal: ShutdownSignal) -> Result<(), ExitError> { + /// Run the Tor instance in the background and return 
a handle to the thread. + pub fn run_background(self) -> thread::JoinHandle<Result<u8, libtor::Error>> { info!(target: LOG_TARGET, "Starting Tor instance"); let Tor { @@ -144,23 +143,35 @@ impl Tor { let mut tor = libtor::Tor::new(); - tor.flag(TorFlag::DataDirectory(data_dir.clone())) - .flag(TorFlag::SocksPort(socks_port)) - .flag(TorFlag::ControlPort(control_port)) + tor.flag(TorFlag::DataDirectory(data_dir.to_string_lossy().to_string())) + // Disable signal handlers so that ctrl+c can be handled by our application + // https://github.com/torproject/torspec/blob/8961bb4d83fccb2b987f9899ca83aa430f84ab0c/control-spec.txt#L3946 + .flag(TorFlag::Custom("__DisableSignalHandlers 1".to_string())) + // Prevent conflicts with multiple instances using the same listener port for Prometheus metrics + .flag(TorFlag::Custom("MetricsPort 0".to_string())) + // Write the final control port to a file. This could be used to configure the node to use this port when auto is set. + .flag(TorFlag::ControlPortWriteToFile(data_dir.join("control_port").to_string_lossy().to_string())) .flag(TorFlag::Hush()) .flag(TorFlag::LogTo(log_level, LogDestination::File(log_destination))); + if socks_port == 0 { + tor.flag(TorFlag::SocksPortAuto); + } else { + tor.flag(TorFlag::SocksPort(socks_port)); + } + + if control_port == 0 { + tor.flag(TorFlag::ControlPortAuto); + } else { + tor.flag(TorFlag::ControlPort(control_port)); + } + if let Some(secret) = passphrase.0 { let hash = EncryptedKey::hash_password(&secret).to_string(); tor.flag(TorFlag::HashedControlPassword(hash)); } - tor.start_background(); - - shutdown_signal.wait().await; - info!(target: LOG_TARGET, "Shutting down Tor instance"); - - Ok(()) + tor.start_background() } } diff --git a/infrastructure/metrics/Cargo.toml b/infrastructure/metrics/Cargo.toml index 2ccbf6959f..43815e4f4d 100644 --- a/infrastructure/metrics/Cargo.toml +++ b/infrastructure/metrics/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "tari_metrics" description = "Tari metrics" -version = "0.1.0" 
+version = "1.0.0-pre.8" edition = "2021" authors = ["The Tari Development Community"] repository = "https://github.com/tari-project/tari" diff --git a/infrastructure/tari_script/Cargo.toml b/infrastructure/tari_script/Cargo.toml index 31f1777dae..ef883ebcb0 100644 --- a/infrastructure/tari_script/Cargo.toml +++ b/infrastructure/tari_script/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "tari_script" -version = "0.12.0" +version = "1.0.0-pre.8" edition = "2021" description = "Tari script library" authors = ["The Tari Development Community"] diff --git a/integration_tests/Cargo.toml b/integration_tests/Cargo.toml index c9ce21a052..57bdf585de 100644 --- a/integration_tests/Cargo.toml +++ b/integration_tests/Cargo.toml @@ -9,7 +9,7 @@ edition = "2018" [dependencies] minotari_app_grpc = { path = "../applications/minotari_app_grpc" } minotari_app_utilities = { path = "../applications/minotari_app_utilities" } -minotari_node = { path = "../applications/minotari_node" } +minotari_node = { path = "../applications/minotari_node", features = ["metrics"] } minotari_node_grpc_client = { path = "../clients/rust/base_node_grpc_client" } tari_chat_client = { path = "../base_layer/contacts/src/chat_client" } minotari_chat_ffi = { path = "../base_layer/chat_ffi" } diff --git a/integration_tests/tests/steps/node_steps.rs b/integration_tests/tests/steps/node_steps.rs index c86016fbb9..e9fea08a80 100644 --- a/integration_tests/tests/steps/node_steps.rs +++ b/integration_tests/tests/steps/node_steps.rs @@ -144,7 +144,7 @@ async fn all_nodes_on_same_chain_at_height(world: &mut TariWorld, height: u64) { let chain_tip = client.get_tip_info(Empty {}).await.unwrap().into_inner(); let metadata = chain_tip.metadata.unwrap(); - nodes_at_height.insert(name, (metadata.height_of_longest_chain, metadata.best_block)); + nodes_at_height.insert(name, (metadata.best_block_height, metadata.best_block_hash)); } if nodes_at_height @@ -182,7 +182,7 @@ async fn all_nodes_are_at_height(world: &mut TariWorld, 
height: u64) { let mut client = world.get_node_client(name).await.unwrap(); let chain_tip = client.get_tip_info(Empty {}).await.unwrap().into_inner(); - let chain_hgt = chain_tip.metadata.unwrap().height_of_longest_chain; + let chain_hgt = chain_tip.metadata.unwrap().best_block_height; nodes_at_height.insert(name, chain_hgt); } @@ -208,7 +208,7 @@ async fn node_is_at_height(world: &mut TariWorld, base_node: String, height: u64 for _ in 0..=(TWO_MINUTES_WITH_HALF_SECOND_SLEEP) { let chain_tip = client.get_tip_info(Empty {}).await.unwrap().into_inner(); - chain_hgt = chain_tip.metadata.unwrap().height_of_longest_chain; + chain_hgt = chain_tip.metadata.unwrap().best_block_height; if chain_hgt >= height { return; @@ -506,7 +506,7 @@ async fn base_node_is_at_same_height_as_node(world: &mut TariWorld, base_node: S .into_inner() .metadata .unwrap() - .height_of_longest_chain; + .best_block_height; let mut base_node_client = world.get_node_client(&base_node).await.unwrap(); let mut current_height = 0; @@ -521,7 +521,7 @@ async fn base_node_is_at_same_height_as_node(world: &mut TariWorld, base_node: S .into_inner() .metadata .unwrap() - .height_of_longest_chain; + .best_block_height; if current_height >= expected_height { break 'inner; } @@ -536,7 +536,7 @@ async fn base_node_is_at_same_height_as_node(world: &mut TariWorld, base_node: S .into_inner() .metadata .unwrap() - .height_of_longest_chain; + .best_block_height; current_height = base_node_client .get_tip_info(req.clone()) @@ -545,7 +545,7 @@ async fn base_node_is_at_same_height_as_node(world: &mut TariWorld, base_node: S .into_inner() .metadata .unwrap() - .height_of_longest_chain; + .best_block_height; if current_height == expected_height { break 'outer; @@ -644,7 +644,7 @@ async fn no_meddling_with_data(world: &mut TariWorld, node: String) { // No meddling let chain_tip = client.get_tip_info(Empty {}).await.unwrap().into_inner(); - let current_height = chain_tip.metadata.unwrap().height_of_longest_chain; + let 
current_height = chain_tip.metadata.unwrap().best_block_height; let script_key_id = &world.script_key_id().await; let block = mine_block_before_submit( &mut client, @@ -658,7 +658,7 @@ async fn no_meddling_with_data(world: &mut TariWorld, node: String) { let _sumbmit_res = client.submit_block(block).await.unwrap(); let chain_tip = client.get_tip_info(Empty {}).await.unwrap().into_inner(); - let new_height = chain_tip.metadata.unwrap().height_of_longest_chain; + let new_height = chain_tip.metadata.unwrap().best_block_height; assert_eq!( current_height + 1, new_height, @@ -736,7 +736,7 @@ async fn node_reached_sync(world: &mut TariWorld, node: String) { for _ in 0..(TWO_MINUTES_WITH_HALF_SECOND_SLEEP * 11) { let tip_info = client.get_tip_info(Empty {}).await.unwrap().into_inner(); let metadata = tip_info.metadata.unwrap(); - longest_chain = metadata.height_of_longest_chain; + longest_chain = metadata.best_block_height; if tip_info.initial_sync_achieved { return; diff --git a/integration_tests/tests/steps/wallet_steps.rs b/integration_tests/tests/steps/wallet_steps.rs index 7dff333a31..f83610b7fb 100644 --- a/integration_tests/tests/steps/wallet_steps.rs +++ b/integration_tests/tests/steps/wallet_steps.rs @@ -1545,7 +1545,7 @@ async fn wallet_with_tari_connected_to_base_node( let mut base_node_client = world.get_node_client(&base_node).await.unwrap(); let tip_info_res = base_node_client.get_tip_info(Empty {}).await.unwrap().into_inner(); - let mut current_height = tip_info_res.metadata.unwrap().height_of_longest_chain; + let mut current_height = tip_info_res.metadata.unwrap().best_block_height; let mut num_blocks = 0; let mut reward = 0; diff --git a/meta/crates.io/update_owners.sh b/meta/crates.io/update_owners.sh new file mode 100755 index 0000000000..b0e68c50a3 --- /dev/null +++ b/meta/crates.io/update_owners.sh @@ -0,0 +1,170 @@ +#!/bin/bash + +CHECK_ONLY=0 +# Check if the first command-line argument is '-c' +if [[ $1 == "-c" ]]; then + CHECK_ONLY=1 +fi + +# 
Declare associative arrays +declare -A package_group_map +declare -A group_user_map + +# Populate group_user_map +group_user_map["ignore"]="CjS77 stringhandler SWvheerden" +group_user_map["leads"]="CjS77 stringhandler SWvheerden" +group_user_map["dan"]="CjS77 stringhandler sdbondi" + +# Minotari crates and libraries + +package_group_map["minotari_app_grpc"]="leads" +package_group_map["minotari_app_utilities"]="leads" +package_group_map["minotari_chat_ffi"]="leads" +package_group_map["minotari_console_wallet"]="leads" +package_group_map["minotari_merge_mining_proxy"]="leads" +package_group_map["minotari_miner"]="leads" +package_group_map["minotari_mining_helper_ffi"]="leads" +package_group_map["minotari_wallet"]="leads" +package_group_map["minotari_wallet_ffi"]="leads" +package_group_map["minotari_node"]="leads" +package_group_map["tari_crypto"]="leads" +package_group_map["tari_common"]="leads" +package_group_map["tari_utilities"]="leads" +package_group_map["tari_bulletproofs_plus"]="leads" +package_group_map["tari_comms_dht"]="leads" +package_group_map["tari_core"]="leads" +package_group_map["tari_common_types"]="leads" +package_group_map["tari_comms"]="leads" +package_group_map["tari_key_manager"]="leads" +package_group_map["tari_p2p"]="leads" +package_group_map["tari_protobuf_build"]="leads" +package_group_map["tari_script"]="leads" +package_group_map["tari_features"]="leads" +package_group_map["tari_comms_rpc_macros"]="leads" +package_group_map["tari_contacts"]="leads" +package_group_map["tari_service_framework"]="leads" + +# Tari/DAN crates and libraries +package_group_map["tari_template_lib"]="dan" +package_group_map["tari_dan_app_utilities"]="dan" +package_group_map["tari_dan_common_types"]="dan" +package_group_map["tari_dan_engine"]="dan" +package_group_map["tari_dan_p2p"]="dan" +package_group_map["tari_dan_storage"]="dan" +package_group_map["tari_dan_storage_lmdb"]="dan" +package_group_map["tari_dan_storage_sqlite"]="dan" 
+package_group_map["tari_dan_wallet_cli"]="dan" +package_group_map["tari_dan_wallet_daemon"]="dan" +package_group_map["tari_engine_types"]="dan" +package_group_map["tari_epoch_manager"]="dan" +package_group_map["tari_state_store_sqlite"]="dan" +package_group_map["tari_template_abi"]="dan" +package_group_map["tari_template_builtin"]="dan" +package_group_map["tari_template_macros"]="dan" +package_group_map["tari_template_test_tooling"]="dan" +package_group_map["tari_transaction"]="dan" +package_group_map["tari_transaction_manifest"]="dan" +package_group_map["tari_indexer"]="dan" +package_group_map["tari_indexer_client"]="dan" +package_group_map["tari_indexer_lib"]="dan" + +# Deprecated, unused, or unclassified packages. +package_group_map["tari_signaling_server"]="ignore" +package_group_map["tari_bor"]="ignore" +package_group_map["tari_comms_logging"]="ignore" +package_group_map["tari_comms_rpc_state_sync"]="ignore" +package_group_map["tari_consensus"]="ignore" +package_group_map["tari_wallet_ffi"]="ignore" +package_group_map["tari_storage"]="ignore" +package_group_map["tari_wallet"]="ignore" +package_group_map["tari_comms_middleware"]="ignore" +package_group_map["tari_infra_derive"]="ignore" +package_group_map["tari-curve25519-dalek"]="ignore" +package_group_map["tari_shutdown"]="ignore" +package_group_map["tari_mmr"]="ignore" +package_group_map["tari_base_node"]="ignore" +package_group_map["tari_base_node_client"]="ignore" +package_group_map["tari_broadcast_channel"]="ignore" +package_group_map["tari_bulletproofs"]="ignore" +package_group_map["tari_validator_node"]="ignore" +package_group_map["tari_validator_node_cli"]="ignore" +package_group_map["tari_validator_node_client"]="ignore" +package_group_map["tari_validator_node_rpc"]="ignore" +package_group_map["tari_wallet_daemon_client"]="ignore" +package_group_map["tari_transactions"]="ignore" +package_group_map["tari_mining"]="ignore" +package_group_map["tari_mmr_integration_tests"]="ignore" 
+package_group_map["tari_pubsub"]="ignore" +package_group_map["tari_test_utils"]="ignore" +package_group_map["tari_libtor"]="ignore" +package_group_map["tari_metrics"]="ignore" +package_group_map["tari_scaffolder"]="ignore" + +########################## Owner management functions ########################## +remove_owner() { + echo "Removing $1 as owner of $package" + cargo owner -q --remove $1 $package + sleep 3 +} + +verify_owner() { + # No-op + : +} + +add_owner() { + echo "Adding $1 to $package" + cargo owner -q --add $1 $package + sleep 3 +} + +################################## Main script ################################## + +# Iterate over packages +for package in "${!package_group_map[@]}"; do + echo "" + echo "Processing $package..." + # Get the expected owners + group=${package_group_map[$package]} + # If group is 'ignore', skip this iteration + if [[ $group == "ignore" ]]; then + echo "Ignoring $package" + continue + fi + expected_owners=(${group_user_map[$group]}) + + # Get the current owners + current_owners=($(cargo owner -q --list $package | awk '{print $1}')) + + # Convert the arrays to space-separated strings for comparison + current_owners_str=" ${current_owners[*]} " + expected_owners_str=" ${expected_owners[*]} " + + echo "Current owners vs: $current_owners_str" + echo "Expected owners : $expected_owners_str" + + if [[ $CHECK_ONLY == 1 ]]; then + continue + fi + + # Iterate over the current owners + for user in "${current_owners[@]}"; do + if [[ $expected_owners_str == *" $user "* ]]; then + # User is in both current and expected owners + verify_owner $user + else + # User is in current owners but not in expected owners + remove_owner $user + fi + done + + # Iterate over the expected owners + for user in "${expected_owners[@]}"; do + if [[ $current_owners_str != *" $user "* ]]; then + # User is in expected owners but not in current owners + add_owner $user + fi + done + echo "... 
Done processing $package" + echo "" +done diff --git a/meta/gpg_keys/cifko.asc b/meta/gpg_keys/cifko.asc new file mode 100644 index 0000000000..5a57f84f73 --- /dev/null +++ b/meta/gpg_keys/cifko.asc @@ -0,0 +1,41 @@ +-----BEGIN PGP PUBLIC KEY BLOCK----- + +mQGNBGCwydUBDADDjVNeEwq9FKYuuydsd/ImHs6jDcu0k4Bq6dgcM3wDc2SVAg+a +7SpnLnSi/pl1jNYHSxBB3hK/pfJE/6Wf9vsnkzHYaFiVA5ikpWGhgeIU9tD1Z+4K +R/lCUyl4m3SLhVbHtW95+XD0KyLFqurkt18lcV1O9QZL7fCX43ENojUQynlJL4L0 +XAT+bfS9Ydpzu1UBQeUloFQxxNJ5YoH4tzjuw/rO67bTEOW/ktP59KK5M/VkaDjF +Mq2OmoWAzl7bAoMCF1LumJe9A3GFhFZ9m8NR32TDIJQFr1+9h3x6kmbTgu65IEoo +gUJNRn7zAUyblDqMBw7UvDN9WUr/duqp6ix2G8R7B3McLXWApBL+XU2igONklAlZ +FViL1aR3Bq6qOiG9PfrwA2Oc1LsL+N8tbqzZBx7cbICgqHwMHTxZsiCk3rs3MGyO +L4TUvmN/ApuFn6yLTaKab26ZadXsAZysFFJOP26f3QVCdpGdtHOkwlBwnPbWzLfG +O3Ae7i37FNQJz+8AEQEAAbQYQ2lma28gPGdjaWZrb0BnbWFpbC5jb20+iQHOBBMB +CAA4FiEEoeEccxHuEUSgd5h31x5mpTvWu7oFAmCwydUCGwMFCwkIBwIGFQoJCAsC +BBYCAwECHgECF4AACgkQ1x5mpTvWu7pNpwv9EchQJ2R9yDvxwLLWlWwUOh994JwZ +hZiJs44mRM7+sFVfq1OruOg846m8FdkSMlXeLznzbodtdKc4ssfitulx8vgcy49d +pyIvWbbLxjz+39LEK9rYQJXd8TLLRmMUu/eagYNRhEAAE/ecHc/7wtOEWE+a2ydb +crNqwJwUG6XOeoh+duSPjms+xLRhnoh13oHfktubU/pkE7DqMSWTCBMdZ1U0g+ju +8icOWi2DzQtsndgLJvRw7yaMozQsg4rYk8tCEBxrgLd8T3nJuiruYtCPYgGgkMva +nMcdO6pbaAL8MuAxVyaylW+IDhXGBZhZA/Mjy6rfX6eOZiftM0wioNKCt/YDxOAr +PD76eGB8ZVlQYZpCGa/ryGBh/kpkcRAidPZ8qxWVtikZQWOcQA/wnJRrKA2+jtEw +nhb3Lw+jjUyM8FO1fSWqJWlW2Hond883FTA7/JvW4MqO86jnzBdV3Ca5tLeNaH5m +T8x1a4uudkCahFWcZ1ps9Levqdbc2Kie36gVuQGNBGCwydUBDACuCFU2YXSHEJsP +wt5YTvRq1s79IwDyJrqvUSKC0MUeTO1I9pzZ+c+SCvSzlV15ruDVXrC8mCFzMhyS +rrKvDxGf+LoeykRS2r5TGnXVqIpZaWzn+ZJo3ML63URySS4kBQPMJ4iykeU0Ze9V +mxtSAx4+fVNJ+IyXzKfut48w2NLVSpxGW9QC43tMf5TCPWBOa6JmG1CVvX7yzP/y +EH0MH9oUx8KObUdkcRygUJfoUYFaP+16Bfllf2zWWzs5Njk4Rfjl06sweMCHRCdQ +WmKP7JB/P8e+YwFvamD4EWePjz2TLDG4Voej9IU+Iex4rBWRBbbAVjstCW8oGPuD +2EoTAGzgXVdOGXSvqm1Xb4gVlnllqD7pxouchoiRzEx2blp+d6id0PeQ26vOPZSN +GvDRseB9/zoD+HjoNbMUinu0wEqDIeeqOKd+HPKq4CAFtMoVa6F47gl1g88L9YWu 
+ATzQgucnB2G9ZsL6jyM1JjKo/Ij4i7U328JlXwG/zXAjiaMywnMAEQEAAYkBtgQY +AQgAIBYhBKHhHHMR7hFEoHeYd9ceZqU71ru6BQJgsMnVAhsMAAoJENceZqU71ru6 +Z/UMAIn6jq+K5M+NoLUDDYBfMyyFnovRe+ZDQbsnuM7uDdaCHeTpcRs6HR9IM5Qr +4R+PeJRTyMQBsBBa536qzFVGceoYRtA1jXh/vB86C2m0Bjvqc5mq+S0K0ywxEVfI +pjgm4xtibJZsRkvx6dVueBQzs7PcnKRyONSaxcOyzfzijOF68wXjZlxkJugAgmHb +iNuvY7X3lerpCk66KRCDjf+lr5DmcQ6er9SUVnrVvhb6VIQMMLlpr9MS19FiWehe +p4mCckhZgXUPXzgKnkrqEO4q3dmd0mRnIZxQHzkrDPyPpEhk74m5HMV+9fZWuNL0 +d2GwMshpfKVGwubBlr+XONtRO6a65O6Rgh3oXVIVjLGsL7a8sMQgintiiYVUNZUA +0B/kB1G4B4/ovPEAFV6uexe9gv+mfxiEHmLQnwIM9iJI7FW50rIrYzDkBhq5QVQk +3ikt7Gj6rzSwZB2dhMwTFWfLc1rt+jzr9RSYmayJwhRPfOukfomaMEMOZ67fcwI1 +oboBkg== +=ErbU +-----END PGP PUBLIC KEY BLOCK----- diff --git a/meta/gpg_keys/delta1.asc b/meta/gpg_keys/delta1.asc deleted file mode 100644 index 1c8d4b7a7b..0000000000 --- a/meta/gpg_keys/delta1.asc +++ /dev/null @@ -1,51 +0,0 @@ ------BEGIN PGP PUBLIC KEY BLOCK----- - -mQINBF8yonQBEAC6l4ZPIBi7wTi1Id+eCKVRK4fhwgXzQUXfpGCF1lvGnHlQZbr9 -vhi/v/2uiCdzcqjNpPaSAKwTqjvthOlFIcmyk2ZtkfRR9n9T/3rbhgWb2sSBr9da -jgGKK0QdaMYj7o3754VknCCVZtZfqn7WdM8/c6++m32K3gpYTjKhz3ZRD5O0qrpf -2RMIiy8vdQLfKYx8Ro+ihpC1dnOnQCg/BJEcXQ+rjnKsIlZmZ04wX4nCdMzgu0BR -lQvvvkppyf2qeMRN2KITregpOygxVdoqsImeMfv0b0CamrlT6Hg47MYXvdYuwJgG -ex3iU0jCYOJOFtiFrc7bTRWKPT/+X9BM1X60wjFMldXJ5gElw3bfh6zkGA3cnSMz -UpTtqd3mGe6EZ2YTzU5mttEW2VwpeqC89pXljdA7K4aCQ1S6CZYpy3/nrvuFMr1r -dDPJaE4fVslFGKFSGfKdo7ngZXsxS2Lbd6iHfPg1GnIvzC9zzC/yB3mK9gdmUZTU -vs0B4xxi5ixo5aBqnf1LPowruO+e6/56FX3uFl7OemfewrQjUAHtVeu67y5JS65T -g7s/DT6yWKo7fQbuMz4Y4BxOeCVNIPya+4Zgov5QkcPKXig7ZqEsDNu/j75e7B8A -Tpv8t46cw0AMYKEedxGWjp2eWn1C0IHPq8OsUSakPoc25wvpvxjw+UHWxQARAQAB -tB5CeXJvbiBIYW1ibHkgPGJpenpsZUB0YXJpLmNvbT6JAk4EEwEIADgWIQR6OYEg -67Z9l4onCwt6Zpt0lpUiXgUCXzKidAIbAwULCQgHAgYVCgkICwIEFgIDAQIeAQIX -gAAKCRB6Zpt0lpUiXtfPD/98oD8vKl9E51wDt/GorEK6Ze/sx6HG4RDERk+cKN0k -RnyuOVm24eSkMwlgAs+eFU5k0KMs9MwszQh/zIH98XAjzQsSW+WQCmMNHLtVCn7u -wWG5gYi3OirvluG8tOQLbqs2yPa7edrN1ynA+7L1uXgvVQbcOosFskDftoYTqWCf 
-xJOlmHZfe7W+Y/4PhS8cATwijywFPjeyuWTjYTny3aT8e4Js2vQHB3cGEwJbBUos -kCY0F5AZ4YoZZ9V6s+Xs7fXDCiw1SERwmSbcDyriEjGcukt6xwXKSo3SjDQUBeWi -6H/3nhHCoKF26yw3cZzDTQDXBIey+RPcZMMxC5PK6kUNAKT/bo3YZl7kOad5OqIe -3pl0O9h0hWSEa0O4frhpmgaNpGBTjl0JXzkdXSzC4SgaLREN9sCM2ugy3Z5K2vKs -txqkicLX+zZpNUecmKi2ILQpmUom3ZfQHLS2ycWlgRvY5tEIdXE5CxvqnKaogYFv -H4t6HDgTLmryNRBStLpVNki9Ezxe4YAGengeLSjXP6FYGzDSeAIvFICSMVr8GZUa -rNEfpj2rLiE8DPhArqbhXr9+h9DqmYHf2xATQRhVwGmlH9WZy0EpfXCWAbLsHecV -qlI1hxwbkb61e8W94mUtoC/EpszeGl6YMi0GJSw6Pcl0isNEIiTh+zsHYnOWQPsZ -77kCDQRfMqJ0ARAA47fAmlcD2U0UnwS0ZXnggsMWOMMPeG+x7KvVvKWnirwnrE2A -Gw2xHn02pZLtKRnKlf9KU2s0wyFk/nf5NoPBepgNWkX9c2yLlfAFVbT+WJUss/D1 -K3d5CDPbc06LvC5c0js1YrnEj4/8jQe0YxZu6hodxkcgZyeEkCjBN+g7E/ejCAjp -Wfp+Bfls8+yI/nVQJgH1q2FYhqFqN5kBp1hHhA5s0m0oBKofO1xECnhPobuPfVxA -+NY1Jr+tvP9DIVaMeyCMu0EzmghouVZfcvhxEOC/roTGK0zyWqCEjQ3xgCU5OUXQ -RXmUFm2uaCVO5sAtpX3KmkS5QbtDBb24N0CbfS8fwExt93zxwnL9ZbyxCMKVoCsb -P31fOvNtQyt/cD34gjnRemjSC6HInrHft3fod73I1NfvM2ovF5cm1JBU5rankImh -iQ04KA9OJlT3BSLf9CerN2D79u/egOSCHk29V8JvMnZA5zBE/2j7ZiYR/k2n55xQ -fSVqqvxoqSadNCs57SKiSTVT+3o4DKptZvDny8SrZPlMnGUuzobrrDL7K/3c9I9l -RC/cRtNHEVE+d7FUWDXG1N1DS+5e6Oh+nX2GvmHRCQptTEVnOA1ikt3thSPTYIwu -OIvJeQ3Ox/6W7LLLFWlYLoXrHR5/LUqo/+tKT/NHLhKigZ+Hrjk7KmHs2rkAEQEA -AYkCNgQYAQgAIBYhBHo5gSDrtn2XiicLC3pmm3SWlSJeBQJfMqJ0AhsMAAoJEHpm -m3SWlSJeTFgP/iQwdijzHWq1HzPDWRB2g5VaAVWmn2bVA5s3eRq9POYeDecr1aXK -AwnbTkp2470iOPokjY6dPuWIJreDSsPnW0Sv4OdM5W/3GS8h5RYEkteS5/JQ/KeT -ZFcApfvYI8530esAx0W3qxDMkK3crwXI/6p+PW0836pXdoFCVzKaO00eEIXZwLvi -LArYb6BmI+d1zXgBFluZQJOD2XU6lNMD4f2vsn/NUhnukvMygcTOWQ/wVgHpFGEl -/5pBjgOhZwIHzu97FfGl6pQ/hREZ9TQL+NCwyd/Sbp5rWJIa8CCJf+boev1iBgzo -VaNHNtgCB9wUkSm3Xiyt9qVMt/R4cG4BMb9g5x+ukxFJDiK20O18exqDcsZrAqus -lPzWdrziBpLMj4SHIIIpzZ0oArq60zPFu0MIm56QsQ2gERJaBFr8+COSDr1wzmIX -60rYlmzF9DIKOWMtRtKOYoZ3es7Mof03+ROEOGZUqRYBZPWkNuDji8E/60CNGNFX -cJU3TgRUDB1X+8zKftQ40d7uHHD957bQRGVp7o2qsJO25U7PI8Ohf5lJTAjDl8xM -HEokWCXSWnH5raxSrguWf6JY262+4Mpj5/2jQjrP/sTjHjb18JSGzAMYJbQbW5/o 
-xFHHHnirHQnSjr7+WDcO0SW+X2FWYNcunU26u9i/3oAIiOrtF7K0P1jJ -=KZWW ------END PGP PUBLIC KEY BLOCK----- \ No newline at end of file diff --git a/meta/gpg_keys/hansieodendaal.asc b/meta/gpg_keys/hansieodendaal.asc index baad1345f6..15522c394c 100644 --- a/meta/gpg_keys/hansieodendaal.asc +++ b/meta/gpg_keys/hansieodendaal.asc @@ -2,12 +2,50 @@ mDMEXFAOHRYJKwYBBAHaRw8BAQdA5Pr1oWCXuk4KOLww8nzozn1R/TvL5AvIdScU 9R7W2D+0JEhhbnNpZSBPZGVuZGFhbCBFQ0MgPHBsdXRvQHRhcmkuY29tPoiWBBMW -CAA+FiEEiTBA/hklxU/poczD00Haf8YJhicFAlxQDh0CGyMFCQlnaQMFCwkIBwIG -FQoJCAsCBBYCAwECHgECF4AACgkQ00Haf8YJhid3AQD/elT+/dn+IdfEGf4Veu4L -VdE0uAmg+3JKcxSFAzuJD1sA/23QkxDAqDnBVxX1vqLzMx4WmB30w8Qqjd6NSuqF -WD0AuDgEXFAOHRIKKwYBBAGXVQEFAQEHQJDBWycKABkLFhl+/3wq9jXFQNKu2fhl -bG8o6qkFgdIHAwEIB4h+BBgWCAAmFiEEiTBA/hklxU/poczD00Haf8YJhicFAlxQ -Dh0CGwwFCQlnaQMACgkQ00Haf8YJhifHDQEA8wC6rEJ0q3hzi7gPhwv/J1waspjd -ic5qhFtth9VSH64A/3nwUCTxOfSN4O2BJbEyIbTvNKrqfU0xfIPb5fjLz40H -=mR5Q +CAA+AhsjBQsJCAcCBhUKCQgLAgQWAgMBAh4BAheAFiEEiTBA/hklxU/poczD00Ha +f8YJhicFAmW4lxAFCR5fHEEACgkQ00Haf8YJhicyVQD/RrWwkz5z8e5HgrinBVFH +hfAoCmPBQkI5+zBndtnnYo8A/jcj/AIH4h3lsxPuC2DG3mMoAsr2/MVxP1f+WHtg +XvkCiQIzBBABCAAdFiEEriHzWWFX1DYW7Df67QUeLkD7PZ0FAlxaoR4ACgkQ7QUe +LkD7PZ1GVhAAiQR/aoYL13Sj0Xh1CgVYMoYCGm+ulpi/s5U42WxiMw4SOFVqdI7q +0tHCS0IyxomBXQrW7UsNne4ErfmL6D2j2CS5pkBaG+6/gzCFk18zXdRUC87no8rE ++DIjh6XIPNvxxJmxzPJBN1JLiI9xsQGcFGOJE/MSL1PxpXZ/n/zYoxQqvz1eqNy0 +nHiuHSTwuTc+/4Quu48U2115ttqcqe8QZJ224oDjQRtCNIzzQUwSmw05OySsq2Zx +8q2PwWBeElKDIVT8sIdl/VSKU41j+Tt8Bvqi3Av/Egu94d+VhdXDMlBctl3ana23 +WYyjxm8U0onu5Z2K6CD3SSZoXsTOgY2eYR5pWigT3JoceI5nHhRxXxpN5yCKZ4ap +tfjzEuUjLGtL30b9QsK7cwQK9lC1nmvyvzcu/BkkewAh+fmYnckuhNtTv/XRtgto +TIaESKQ+vOxBa2qxbvDKKSkEbchPLp/zqjcB7APtl3bGei5fMDb3LNJvKlmUCySi +lghBOJHjXCJGr3d/oGWz+Lr9Dk57KRRkUxpspN2hMmISDib1J2Pmwh/ZbBsy7gpU +mISAkq+yIShBsddAxUVZ2JIwd2Lqe+K0dXmvgFvSPTRtRZaPiWWyqKkDGw/fmN7I +oX2ZqgmSLZBPkmcyJHwp2OYpkgwZvOdV9RNEafG/p5RD/LDQKoDOp7+JAjMEEAEI +AB0WIQSaNRgoWBvbb8MsADdrUDZi9rVc4gUCXFqsFwAKCRBrUDZi9rVc4lcQD/4p 
+YST4cg8ZaurMMqPd4zbWJsj5inW/2MaiWt6fgm0P+OCSy84i3WF2cgWGAq9z6aM+ +IMKLNJh+diu6lqbDTvANqQPCOB+GIQHsQMVU00mcpAbgZATTanZjp6DaK/6s7qRs +3sBrcBdOphMZl4wfF09LAAFuWDEVNNY/DRCywMBEkKXBBOpYFPaah40i6cSogegs +ZmclthXqfTfRs1zLBCPGLqEbiJ1nOgAdWM4vsNZw7I4VlTAN5w+1RaaOOMP9Sy83 +H5ZgJ5i3Jfjx7qb7nrIRwLZe7d3unkt6FGboRODJkoUc0Q4M9QNTEhTJgJEHub2L +UKemJfPwLFp++MCsXgC9pI1SsAc1+KA5HFMh1PKbNIh6JCZX2BN2ZjAY3GSaczwM +BXLJ4ouDX/iDztEV16RjpEVWCF/s+gUdHTT9dzSuf5svYdJSSdBRH0jMqCXhhQbS +Xh/9ZAh1iqjoJf8dKSpVQphbE0B2iZIyZXJghFoZb/CF/y/idvXCFQ95lVuSXy/v +jBd3RjWGZcBATpCl9ie8OZP4jyCQQt4YzJy1oyWMVu6fWk3v6dK2g86N21Ci2Tkw +EspYV2Q4RMfh0LThulLhNgZYAlMwCRPq1Pyl63eYOwb5U4ClTIiRJNyamDiUMZkP +IAfLZmuX8dZKey3xRTN6XY5KkPFSdkUyql0pgWk7Q4h1BBAWCAAdFiEEXEnXYm0J +2S2WpzH4BPPYqOZYQswFAlxapDkACgkQBPPYqOZYQsy33AD6AzO52ZpS2pz89sV2 +A2ZE7Lmh7qbj7OZl+r36KvnS9gUA/A/nOown4pzNfMnh6lLBIpNkLp752D5LVCM7 +zgpRGN4GiQIzBBABCAAdFiEE6dgmhm4UIrfqKEWccigxfjhEf0oFAlxiudQACgkQ +cigxfjhEf0rV4g//blrFre5rDJCmlTepzBcZ59Bj3840j1gnbI7IRaQOlIu4l1G0 +QFiamzRZcg2jovCPMWIVoDbg8ejH3kB+PPHx635FlLKGqlBL0uvE2lxyj2FBzVH1 ++VnHlxFObMdg6CmOixLrEjYV64/bmqOQ4scM+MQfwsJgBIjtrazpndoKDNActP/q +S77UnNA85OdSUa16odz/miLj34ov4I1wBFv4xWEJGvyvtMgN+qjJfUGB5nHW7Ya7 +Ythzr0Y244tCbnkRsiNqJCw6N6PgG3HBJqL7W9P5zI7o/JL6XSPPajIdc/qz4B9K +EObiNJyBujGPHiYOgsQESwx7qi8131bdxXCLDYTRJ+8SsyjCcVghSZ4fVJTJzHCK +wJ0J4MqJQvqOxpYg3xbgjkIOUoqtjFQSjL42RTgv8siafsYTFJMaoA0hjbWYnlza +I8noZlryxlzkEpKMDCwqjVVRoR17Gr2cXKCWvk9XN3sUsY7Y2AD5GC5W+w92dOk2 ++1duFfwIcSUmuWDeDNDTSPlALbqC1C+L2rxIzqQk0JJ6WAolMA60TmwUOGWus1Ot +3KRWoOjSK9bCKDnRXoRjwYlVkJqm6ksCisUOVbOZVJSSoIuvK9f1ePAbE/UryZDA +rTnmt5vUuNT/y/4vLUc0hACk5oh40x1+AtWd4uCzXA/wOtPVglqif53sXcO4OARc +UA4dEgorBgEEAZdVAQUBAQdAkMFbJwoAGQsWGX7/fCr2NcVA0q7Z+GVsbyjqqQWB +0gcDAQgHiH4EGBYIACYWIQSJMED+GSXFT+mhzMPTQdp/xgmGJwUCXFAOHQIbDAUJ +CWdpAwAKCRDTQdp/xgmGJ8cNAQDzALqsQnSreHOLuA+HC/8nXBqymN2JzmqEW22H +1VIfrgD/efBQJPE59I3g7YElsTIhtO80qup9TTF8g9vl+MvPjQc= +=UPcF -----END PGP PUBLIC KEY BLOCK----- diff --git a/meta/gpg_keys/neonknight64.asc 
b/meta/gpg_keys/neonknight64.asc deleted file mode 100644 index 1868b833d0..0000000000 --- a/meta/gpg_keys/neonknight64.asc +++ /dev/null @@ -1,86 +0,0 @@ ------BEGIN PGP PUBLIC KEY BLOCK----- - -mQINBFyHmAwBEACWiQp6Qpz7berVB54ozWC9Vp1Ax8K4V0LOeynVLb67ukVTjJcG -Xoz1c/wWBF68hmOiePZbY1vYZt15D27LpiUJa7KGK+vLILvcbxDPLxrbIo2Gq+cj -aV8HICYSigczg6qEfrsjNrYo9yth93XhERugyHjXwiawW4s8vepAsqIXsl0B05Uy -AEstfd43SFt2Jl9kbJiH/X5yi0bySvIZMjYixYCnMvyunXOmvEwDPrDqyiRcvFIC -d5BnSnSAs3zoywVPWY56tKAYNFS7/+1HCNffGUhe/dWme06SMWcB8OL0jeisIqfJ -Lr1ZQ2JKBdyfGZOfZ4GCDLkCz2gjNb+8HkZ5DPo+jQZl6yHMXvz3rtaa8+gOSr7D -5JYTx0u5pPYVumTS5L/9jjdY3a+arRgwRBNWD+8er5y8YA7arLi4u6q27uSCgXpb -Mf2dw1qjoKmO8Ry2zjcZoCVdYs58E+lEilVbtnQ60OXcPKSa81v3Rf72E4PBXKuG -53bSdSnsAPGKKzFWRrgMEgxSoEvY32+xPe5mgpdhRIrhSEAg8j3up0IA1Yxm+aGI -zfCpxr4QQH+RSki8zQjwthEnFqUEdyOf5zqjp0FbG4dCLgiIuE7lGoHEltb5nGjn -y73OGkUDXyPWs+pXjrajrU1WNcqLBoMcbREH/Qt10QYhvnXYlz/djiueAwARAQAB -tCBZdWtvIFJvb2R0IDxuZW9ua25pZ2h0QHRhcmkuY29tPokCVAQTAQgAPhYhBJZH -lqe7PuQHyD91iMTrHaDUniKrBQJch5gMAhsDBQkHhh+ABQsJCAcCBhUKCQgLAgQW -AgMBAh4BAheAAAoJEMTrHaDUniKrxikP/AqPmuDhXrACjJLDV2pwvfLg83AKmrui -sg1sWw44FpknXjdQGBf4llqyGTe1/yUs8NKgA+NSAAIm4qZPRjZk+3ErMUMXjbDI -Pucb6krZs80S5KDNjzq1BCb30PutYKWFpGKAZG3/kevlsTsOK/ryJkwIN3MB27XS -H5dTsdfyfjoAX4OYeuziHqcGF5WI//Gi2pn3PyvoxBmy5U2b5egm7vuio4Ej3GSK -Cu6ASxF0PW6+pY5BK9/ZNzuRple9m8syGSEaGDpv+ckSUq1zArt5qvwIO68sMPh4 -XOy7f0vO8PPLNmegFJukZS0IOjhlnbQLhYFpNfSpBFKj7SPTrxfjtiEWXh34YFO9 -ULElXAsOo8Zv7WD+0Vw42WF/C7N6FQ6MvnkTvAoFl8Xd8j5RKl3fo3DgqXLMl+Ow -ojht+C4qe8L7CqO406DYwwYtdyxQeBkEYwZf8X3x3m0WquAAouje3Oz+62rjaku0 -naD4Z1MCS5Kz2/y4+L7zoL0mpR8pZUqKvpHU+/pJXv9oKvbpqkd1gpadq1eCCxcX -XV79TQtcBaW9x/s+QAy0DbvO7ABIw/cvrm4gDJzM97p/TrZLnvHgAoZAURukNTlq -PyLnAtU7aB3wCdid0SZtjvtb0XK7Bru+ntH/ZPLSqRdl0IKU4VWa9IvacWAdA6V2 -O7YqHtQyOOsCiQIzBBABCAAdFiEEOiCYC0os/2DIx9KmA1npIchdiNoFAlyIkHQA -CgkQA1npIchdiNpjYA//SE4ry9VwHkhKEA653sX8iIBRI61WqN0g2Q553YyXgN4f -e4yUqLcmmk/dPlJIg/NAks9ut0dTixB/V3+wla0Le5PfuziqxDmC6SUW0QkpVVpd 
-hy4ueDVdsxuyOsJPYW6Rf8Q/9SsZqjKKhzTZEX2hbVKCrPJKuRIkACbgWJMwzbGT -nF4CrRJSW4b+Bv+02tVGXuWQrb7Tln6hIz/+Kk4HwU4EZL+kPevJhv+TI9hSnfVz -eFmL7kuKCOzcdlH/eSAz1cZLUEJQF2HPrlJmkYiql/6YDrm1fZ3JIpuLAWducG92 -p9Apk1BtfJSCqusolvpZt6ytTC7NjhkS+xT6GBAEvCAF83vCQJXtp3+gmkCJruxD -cJs/c7TXJ14c82AkEkJ6isATgowORO3YU2A2slDNtO4J9iB6km/U0HjVh+oF0Eu4 -XOVyyrQ/XaXBk3miLouu86pSSKrje+v5QT6f/23FXPAB8tNv8L8o9kS5CmV/Lpli -CoELOOCk3N4TMvBZ3S7CVNjbVkv3cvy1mc8iSx1/XqJEUCiUcWMFCyaqmAl7PEpt -nHJT+SyE7HmRsbBB3tvN6TIaCqLemt73OyUyzSeqIxYuUWyUBlm34ECb+LeH7puR -uzOPzeLJ0nwmKziRBlnM5XXVs+NXgEC4/yAFih5+9l7Zahn534B69RpptPltPLG5 -Ag0EXIeYDAEQANRAcNUrMaq+Eos2NbM3robwfjDDThFuvYgc/AZ7iOYi30rsHGn9 -OILDfVS61CFHe+uYdTfk1XCyPIU6M+lK9E5Ifkxwkg/r4tTtQEplfy6IMUs2tyzp -S+HgxjFLqdCAboc5wKWPRiispq77t3hIkjDv8eTKtuDVjhBFMls823/uAdAXwYcv -prlF4ktPs7dJORHEUf0AUu3TfXFcyeE7utYL+jROTOyX4xeIlgYFKlt5qRBEywWT -72ZqK+0hPjE6eKKQnUpWHLlw0vmbUTptg1uhaKfnacX4iyBAopXCR3g+TxOfZ947 -VMNrPMEx4j4Ig8nBl2ejtWLtNBU3tW8zOlc9FiJ7+HvCyLVjaiZTGRZ+ah/gxl3v -BIeOU36162E0rb/62b4tdHYeTzRRG2Nsxr6MVFLu1Etss3kJnurEX1OmRcGem1ZX -iM6gLTksd6O8d1f6uQe6I+THHJPcI2tBGzvjwQ2ajO/oswwSP8HxXBnwCv7Z9/zg -yCBoEtwo7TG6w5F+2AATrVIWSTLCGkw2KQDHqdjHDZ7Zfw3Li+7Pk7KgAQwRMpLu -LX0gFQmR+PJp6vmsKIzDpZbv1T950TQ2jWZCMrauIY4wz61crzv+xDloVXSO/2eD -i7G8p1+PzAgiPwFCVlD98J1BzTBlagVKiPK7ZbU177nTZ5JeBRCnrmmFABEBAAGJ -AjwEGAEIACYWIQSWR5anuz7kB8g/dYjE6x2g1J4iqwUCXIeYDAIbDAUJB4YfgAAK -CRDE6x2g1J4iq15kD/9SpMyv/UiP5qZSTsiPj+P2g1JCBpjTPudMi+g2uP4Uc8mu -EAtMAjsN2M6r3Qp3PCmZVTbe1trjrlcCj4rC4pXjPdvpUJG3q08DO/Tc8NkexyYr -bivhzkt/CjvnL8QtpQNcTLbr9k8toFUp5poW1MfBeQzs/bVP+ga58hNkaeVW++ZX -OR9+s8igLQzX6n7kztwzcOIP0EAUOTgAiB6Fo7fLrIZnjycOU2ucVcz6s+u/MIpW -qtYNKXu7iTEoJMIANgQOA6wOHM5yI2rwaoDm8mxm805O8FYEWmKzentm0Nh575qU -hlwlyoItr/2YPOTaOzRIj4xO420yeMlQ7MpU7F6W33mhiMPhYMgNDEAbUNrckMng -izu1+KZS/HfpSwz7Z131S7qvuZHV1PsbzWUsG443U5Mm/nRidol4+eFexiKAvKAT -0N1UY6HdCIsezcLwDqW963QJl9L91C4UyDnEHvhx/SZjGAHziJnr6Pnba4PPYWp/ -PlWMAFkxxuZHDbZjaWNe5fvGJePaFmKB8VOLIrlfX9TWiBdJfDjB9QfA1/tx0wLo 
-aPtEYxm6er7o5ANk7w6HdDUrXnZ2KO+ShMAaQrdY6BzGT5kehU1039InJjtZQpP2 -nRng/vJw3Od7UG/wexfCbu44TsQK+ilvbfWx/QdtPoF49Qwa9wW034Xt0sgNrrkC -DQRch5y9ARAAu3UADX+IvLbeueCQ7iWxslY/m4fcnTO/yvMHxXpJ6KL/FTPMsG9a -4z7bYGD4mHePBNPuozGlQlbL20J+GW9+AHEab+LsApNl1eDWjpf6E1IN0jGtkBEd -SqoAiyPlgP5D/JSANUhOspzgJlOR/HmG7UxlFN3Ne0iKwOPZfX2Gt2wPBdhzeRiY -o2UPEdkKjIFyOgNolG/Q6+K0xf6A6QGdGpGd4zdm1t1wsNKFJ27G7jjCTvBJqQY0 -yV1FbySSA19jo3m/z7FGecTiVSEjIwLtektD/MmsFwkn8J4r/gBSlFS61NCRfNP2 -0M07slAMuvO7MbvDd5mpNWl7c3+6ybQzrY/7RtVK0iCR9ianmzCYmc2aJ0e0aFBc -2uotdMwCA0vrguFvgRTb31bYxttVnVcpT3b24pIFJWes521lHydeaTrF8Qy8+lJu -3kqNIF5JYan3FgRf11YySHb2VZWg8GFKw2gSyNOOtWqajxGYYjIUvKl4t1w/WoRh -EDorBon2lx2djgoAXOXYFezhsMlbbo1sExIGYRYf+LqO9yqpvm6EXLdUHhwDTzY3 -ZkS/wV95jDGsU4NyRBREok/1DSQ+ydHHEqcbrVXlZFTXKNkC8ZDvXocyOWvT7Z8B -qKeNAKCc/QZOY3NQG64CcAp6nX6R1ihcguYvjLpLOWiVIUIK7RDfyWMAEQEAAYkC -PAQYAQgAJhYhBJZHlqe7PuQHyD91iMTrHaDUniKrBQJch5y9AhsMBQkHhh+AAAoJ -EMTrHaDUniKrlWAP/3I4BiQBi9BjeJpvyMuHz/sHYQXT/pccD3nzTuEftsVQo/Y6 -Z0xUarsJiNMWNz0N2UKQdz8r5QQ0mEo0LB5X3Gi7TI15ITX+zKF7qEcMumPmhJLm -Oup97j3f9awT4YINr9RCsXKZnfAKz99Iizn5H5Dn42TJtaqPReECOiGtZBSooy1h -y+Ht37u10jH8NhYGtuJvjgEH80Ziy4CbdkUJd4NPyXbN9D0yGDz/7J7T3M0YWOAc -uJHQwMI2hOwKOuWIBgmDvqO7kKSCDjQor+9Fiqu15ASKLn2ZGU9hP3jNoTwzSfMg -zal3HR9LvmibxNbEOeIWGwcX64Dpl5Sk5TyVViZTJJaZWT+7MZOhZEjgxWjBCUMq -GJxn4hsBpjoAEldepXFabvd7FT6xX89nxwOTjow5D48LaLev1XxM/PfJORrbM6Zq -oDrzby6UA3Y+u4p21S7NI5DHqR6v84bhZdN0Deqxlc53eNgvM62MOlXVv3itCFI+ -zDZntvrHZbUgmpH3SLxSZUq35DN7Uk1GQTBK1YQvUd50l2JXIrxwFhEkXNr7Pp0q -xJo9WrnRc/xgEoghQPZNoMCalY/mRk53XVDob4Y5F1URVfOkiQ7/dT5r/G12iSam -qfFa2tG0zKYecoPdASyOB1uGhJaXgYgk5hbNKVA0OoTDqwgRb/ih1K/Lu0KO -=HEiN ------END PGP PUBLIC KEY BLOCK----- \ No newline at end of file diff --git a/meta/gpg_keys/philipr-za.asc b/meta/gpg_keys/philipr-za.asc deleted file mode 100644 index 90bb54b8e3..0000000000 --- a/meta/gpg_keys/philipr-za.asc +++ /dev/null @@ -1,52 +0,0 @@ ------BEGIN PGP PUBLIC KEY BLOCK----- - 
-mQINBFxZfxgBEAC93PEokxzD2Shr/QaacwC82FXux1XApxJ2AdFongmYGhM/IYVW -qQL7r5TkTeSAwHrYu8cTbIcWt9GuYVcPf/Mk4II7ILXkuEoQYencnlYiBx4JnVGt -/64tt3+hjoRUnanesYB5T2eFUHsQFeS6821TMDwTOTm02AsRJY09rZIUtkTjlpx8 -yPou6GiCnDrOs455BRq9wGN1RvVCWWLV13ATlWQAydY1ltY8uC3MpyOmZvgohg7v -Y5semvIMKKBgTJoYriBFAnvH807NEUU9F9stXoNtvipwmA0xOESgNAQCJXlhbU4j -cDHdo0Dq01rfo0eP8Btxr925hwVYQ4ccnAzH42At016cJQmL7zav7+mkrM0966ZS -8zc7bEH28x/0wOh+rL5hqS8/5oIad2o9xhmOxn+9mit5jS9rHy6prY9px/zgGqeU -BrSm/q1l5YSrhhAy6XQ2BRQ7PTf/nCben2RGCSccbj0TVF7Fl9lospAJaIhMk/V5 -jVMcWx2aFUZhGqWtfW1e2N5QXqHWLJEKqYvvmsgorKObx1XzTc8Ij7DDRbtNkEA+ -o7mawLR8SpN4uR0ffUsChEC1yg3uTXdA125xp8pxnG2LdGdgYw3/dlEBWo0Ram0X -M5BaRDDSeRA8B/oW31g4Xqv2TImC4qmxZkjaofpH3zp8RrreYouv346hkwARAQAB -tCFQaGlsaXAgUm9iaW5zb24gPHNpbWlhbkB0YXJpLmNvbT6JAlQEEwEIAD4WIQSu -IfNZYVfUNhbsN/rtBR4uQPs9nQUCXFl/GAIbAwUJB4YfgAULCQgHAgYVCgkICwIE -FgIDAQIeAQIXgAAKCRDtBR4uQPs9nYhLD/95XwGo5rJHFl7LefSzvZpUeSVTjYEG -tjX3dnc84aOExoOeFQ5qX+kNZQxEPdPPAHSm62zgOfG1hl6NkO5N+6it+2fzDd0F -6P7EAWeI090fse3Mf+fKiLFkRVQ/f+eQ40BlT2op53gla5NwdQMxOOicQeKPEZp+ -3h4VkVHwU55yqg7uVLwAqRELq4RPA36btk30GUsG3USoJ3y4/7Sz6D5aloMMe4Gf -HgKH8kynVjaQGvYlh+/DtK/i9Qv+5vsrfDoemNl2ZC1Inaaxm6YHv9t9VtK1xUEj -ZZvzRL/S13+LC1dzRLcwK8ubkcrdFf9sgCo7YBJ8Hodz8bRrsDzkMS+UdSxaK5oA -jGbRqgXRjn4IQyGtO1tWpcinzLPIxwD9rh349RlkqpY6Ii6GWgNNyLftHMeVEJ/e -hUCn+fBvM5jB6n6iSH3nMEFyjKF9e3XosqCs9o1zMVEf8O3usz/7GfVEeTmOFJYC -M5zakmBYVOdIwtlTsh8SctyMMOq4VkKMrM46qBcUH9x64BQb2b1igQ3fV1lfr0jX -wnHJP2iXAc5tWcxNqfS7iiTnRd6oMgop7JTt7X3zWGBRLKcGmxj4C901hVOfjFky -4q+7zvlaJdMV2HgMUMWNIMaTsUSs3n+eUuDz0WipdEQ3aX47qzVF5APZLyqkiexW -glD31H3iUwE+2LkCDQRcWX8YARAA6GK9AaK4Qx0sToPqAEoDdTn1RI8NFAGP9h3A -MhDHMOQ7hiO0wDj+KZxoqtaLSgUp+5Kfi1A6IYQgJ4u13mlxF2fA2EE/GWAOLskn -4+TNd/wksEfahoglllMt1OibtFs1SUCw4yBJj9TeEF9bY4UTEKgG50QS0q0bxKhC -Woc9W5m8mkTCQDni6N2Z+yw1Kl/ZXnwN5PDoWXGe8k4gv/3Ppuz58ePKjpDvwpPJ -Ic7EWAhjslhCeqL5rraKRzd/W9bWDAS2QEtHLHWzdGLFajas3qzP/ViR+twvIjRM -voKGv91tZf7vQlw7Y4xofUwZTuJ3NbNqKdQwK0xZpTNwBjiMj0b55de6uPeOl2ri 
-0mXWv+mjoPYC5TyavrjTo4zl0RJnXsBCmjNwNTAsafbp/1nTaeuVTeS4GkbKGrgM -8L71Go5cN44LFaflExNHJxeRYFFdf1cy8b+B+4qyxVn6/g3FW0mu8j6bpXNnsfXw -o5TUqzYgSb0DsZA1f/kO3+ZHEZEqyTb2T9PfEzUXOCMtvqwzv0on1XHXzBRhFD4X -r1qz+oZKgvKrxYOI2UNZVrRDImE4C7I6x4pLfoadhMY6ksOnIlijZgoFdGAa8OkK -Lyw6Lar8HJbcTFJJUaP+einLj5iNxvRBPkcGSUcjdJwLC0cxoqKwEUW5SEUmlqyL -hL1/DBsAEQEAAYkCPAQYAQgAJhYhBK4h81lhV9Q2Fuw3+u0FHi5A+z2dBQJcWX8Y -AhsMBQkHhh+AAAoJEO0FHi5A+z2d5/YQAJFvXY6b3+Nop9hEhQ45nT7IfX4DPmEs -X+DLzf2JAe/q5Jxeu9KWWrgAVZwsWnyCYcWaFS6/dLHIvyes2cf9R1xRFRofp7Dd -0m9mp4VlJpOiLeSPn/Ixg6PXMiEKrvHNntZD01MlZdZFvcmQ340wrzFX6pp2FSoB -67+FQibbLOnzE8Hy+AH9GhI/ofEcASNQakNmDW2+F781OJk+vg5PgvDFf0RTyGi4 -uTpxQl6MHCpWLrb6PEvkM9HWPIIQNZD2Ddckyg/t0ssdS6qt2DFFFXRfHqxyHbfz -4TwYmBeQhbyiI3YQwbYOIwMEc6Dai5vERS3VLMrCTH9oyhgPhtTUQfJahs+uL5u+ -5IPpKjLrDcna6pKrmYDCzy/oBdEUKokEDoU9Q1nelpIuKsSpCqiO1+6vRzt7B+RZ -a5T3n2FL0jOSq4aY4Y7s3XRl7qvjd7wcQnrlD0Yd/CPwUfN1z5M+m/tQxJddQg0x -OhtPG1YgTW0IcVhYt0DjKjTzwpN9lw4GSkRfWpC4/am/7ihfD4DutcXi6U7RoIcS -OyfgZZIKZoAHm0CMumQU4Z9+GeF69f7fLysWPQkXn1qrmkRXP7KrxHVwEhAovp/z -wovYyD8kG3ogjml3X6nVCME4h9PH9ZDKTP9mA7i5furidVaLNsLzPDqSX+eGbBHT -oZ7V88g+VRzc -=dQo1 ------END PGP PUBLIC KEY BLOCK----- diff --git a/meta/gpg_keys/stringhandler.asc b/meta/gpg_keys/stringhandler.asc new file mode 100644 index 0000000000..5e6f1d65c3 --- /dev/null +++ b/meta/gpg_keys/stringhandler.asc @@ -0,0 +1,52 @@ +-----BEGIN PGP PUBLIC KEY BLOCK----- + +mQINBGXDUhsBEACXgDQhpNt7RdWc4TbEOKJ7Ns80l/Q5GNGRHID5X7T+rLMME6Uo +wJor6K4WoQh6oiRNQBr2b8eJ4G5sDKk6UbbTpaIyv5mP/JWvij+Kb7DT+xtEegEm +kvor1ysKwSv4Fx64tiK/7RQPR1zjtKlTPBSr5VYFixCfN60WIvG9SKcZ4+KIc31d +Usy5wJAOsEhnj0yKSjo/Xsex+1YLQUT156Cy7rJjk3sLEv97zESNcT2+JpX4e367 +nahw3oGEvXiXI6ir+TaAeyyWArKBqJv564XpZ9Mef2i0IG/wgjDF5VJlHBEidmEW +2p8vMGIM4SNKD4giRSQRNc/jAffSNIrmi3Ri3UlUYTlblf5Hlcq3JB2nSGjrDXVB +i3x5lOjKIy6Bp/F+TF/JCsd8TC+2HhoKqttY5tBKFbGKYDr5TkOPzHjNRHuQ1EAB +HkYTn8P8pr3Y3x4cwXSIE5qA2CpA9hEvaetDVz0NAgieRyqgmCPJG38m3xsKhxWL 
+Ihw/BfbzVffTyfUX2u2/PvFaoRt/Dx8o+ZZAG6BUp+G4vGhtGeq/VvUjoyJ5bd2C +2ksMKE5U0jA5leeX16AoEli0gh+LeEjGBETBKz8LAWaPlJnituerLEXSfOrnhvMi +wFfPq6amIZ/XkphwyvkX0ne9cOoNjjSzHOFNM21x2lQsJYR85KR6AxEPtwARAQAB +tCxzdHJpbmdoYW5kbGVyIDxzdHJpbmdoYW5kbGVyQHByb3Rvbm1haWwuY29tPokC +VAQTAQgAPhYhBIGxvgyXTm0ZLw6H4Cm72HKl+aguBQJlw1IbAhsDBQkFo5qABQsJ +CAcCBhUKCQgLAgQWAgMBAh4BAheAAAoJECm72HKl+agufnUQAJMPaOO5S7J8UNNN +D7AeCv2IJ5GBYV1QCQ5GqGW11WKcrperm4dVw66J686oAFb6xM+xOFrsbCW5XRpi +x+Icr3vmGn/iXTpDE15gTJfevywH0hTt6Dzx4cWmbi1whwpDPal0i7iqpU6DxREu +G5xFwHZXswfM6z6vpqcDlJG/+P7uqF/Mo+/A7F6yn9+tX96o+TFAA+h3KO88V2x6 +e4OFsdQFewqYIjTcGw9t60jSFoiw9h4z9mNaHUfme14B++Ng7/Mlq21R7NiQmXNH +CGsPqfowaAsATuSORakg5PdoicxXPo/ykbhr7ztDNAqq8ieduyBTrB7i9QhOISAj +jpjvCFK49TBv/XUV6+uFDQoiAzKSjxbbbYCleHJNazMGZfhbXmB+YVKFzV/DDevq +HV6Gs/5Ae13Pp+N7zxEFM3EgRlxgPbAS8wacH8ASqQ1Uwrk2EgeZKKC00BvQzRm3 +duM5Fp4iLEUAHA0g2MxveQkGY5SmfpF/0hE2gmW4pHFNItw19nsITiT0cFyfwzEr +JRCsXmk4SMEN+oKGjg59qBAc/qd3QNpSQ5BPgxh/zULbivcEI+5uPHIohafyg3cc +W05mNSBhdYp1U7cpLQCcAx+Bz3p0nh78eWoK65749bArN8qqle8y+hwdpaImh6RD +bjKaEItN5bLb8OxO8cPNAB+IXaXluQINBGXDUhsBEADDpiDP3qTe/oGruheMRCru +qoktaefBFvtmKq//uIAsW+7qjsHRoLs0gGXogknF8GSVpQJbCqJTQJfQjtDY2mVu +CpUkL+ytrL287wKvVMj2C5U8qi/FXfaTkYNGC6eYMIFd5urmLetwT2S4bC7qHnlo +MxEOKJI4ECja9i1xk8PSpcfN+Du2FOs5pHwhRMfUC7NBA8MQyx/hsQ1RgWY8pAzq +j71sIo4lc7xi6eT9HpAxnLUFfWYVIP1bN1FowAaiQEdfNtELc+OKMJfmv8VRW+hN +ojQ7m5sNQr3PG81Vrxn6nVyeDsMAt+/qZUCsnBHTBxaHBHJZlP1tSLZ9XVxY/3L7 +vu/KlqQv5fuMPBKYudwvTfgmtqNTtU9ZcDbrs3P0KOXr/yNXiWYlYRuJDdxhZqdT +ReCKiG79c9ADsttm6VK0di8RUjDmKdUDP+XnjLc337Slx1QhVLHHKuxAnC1LmaPQ +TalS3evcpi58NjB3RiFKGoHmecoKp53AnT4l/xSdXCDm3DjBaUEuejVVFnRfdWxL +m5DyGwtYiFotmmbg09ctv1eZapusbCU4riDsAhlKOj+y8TZmxNUJkRpvYhlWwpGD +m8RkVbot2D8HDArtaKDAvul0MqFrtLNE6ldXflXf6vgw1SI9LZf3uJb6m0Opo4Lt +eqcnUGoCLaXbY5V/PidpdwARAQABiQI8BBgBCAAmFiEEgbG+DJdObRkvDofgKbvY +cqX5qC4FAmXDUhsCGwwFCQWjmoAACgkQKbvYcqX5qC70lg//XTWyltA/Qa7orP2N +OqrywJQx17iWMc6t5EwI8bW5ST74hW79AomzXyjhWC15LNrK/KjoVUh0/1WLdVC4 
+jv+L0CJkeUgulV63qdPiHwTsRLhnObGAes5g033N2I2yXruDwYiOPlI9nR+YSMyu +9nQwcNfdvgSTipqP1hdCjT1RvxLzBctkHPnclKUPfQvYDdDNg2c6NY6/Eq+R4Sc+ +dlGNENpyGbDnqYDU0iVHnSgkG1gSNr6VQeNhKrgtbihDQSP1+ppCeROC5AzeoGUP +pNvPuMVuqdIDt9u4T7unhr9JOb5LDqpplAH67j2+s4+D9ggrpteTBNV+7jGZglur +sPAlHDnyP+AlPHoHwrxCG98s2WOWGdHW3Zx+3LkP/ijB2tx69fO7J1W7600qiHXx +J1WNGJAvwQHlsuTG4klRzUr6dOe+SAapaCDiwduIiNy6aO3jAyi000WjM7kWncgJ +gXn/FKdbjTMUea+liDe6WYuntPp+ewz0jYZdDXY4tCwVoii0bJ7l5FxPiNCvPm8m +5t3P0JTpGwNpqT0YuEPiebFXwSmSyX3O1GZDXbWgYwG6DxC3xa1mebjKPQdTss5i +p5hGhKFTBlk0KFwCCEDtZjmWzrAtceqcDZ95jAeQc1PJMb3jWw4snbCjirTx4gug +V0f2fATBiu2NWfgql40ixuzveL0= +=uH/a +-----END PGP PUBLIC KEY BLOCK----- diff --git a/meta/gpg_keys/swvheerden.asc b/meta/gpg_keys/swvheerden.asc index 47e2a7324f..42eef171bb 100644 --- a/meta/gpg_keys/swvheerden.asc +++ b/meta/gpg_keys/swvheerden.asc @@ -1,40 +1,53 @@ -----BEGIN PGP PUBLIC KEY BLOCK----- -mDMEXFmM+BYJKwYBBAHaRw8BAQdAKKpMVnKMXJEcs2ECTbKorqRTEkClTr9sl3rv -lTq0cwK0KXNjaGFsayB2YW4gaGVlcmRlbiA8c3d2aGVlcmRlbkBnbWFpbC5jb20+ -iJYEExYIAD4WIQRcSddibQnZLZanMfgE89io5lhCzAUCXFmM+AIbAwUJCWYBgAUL -CQgHAgYVCgkICwIEFgIDAQIeAQIXgAAKCRAE89io5lhCzFOCAP4ujof1jX8sfVot -O6SoIi+rTc3mSD7CyhMtXsZIs1wO4AD+N7MqzQ5pmvfSBI64S3AKEHV2Ie+mk43v -5vnSU4KGrwOJAjMEEAEIAB0WIQSuIfNZYVfUNhbsN/rtBR4uQPs9nQUCXFqgiQAK -CRDtBR4uQPs9naPcD/4wKeAv8HwYGaNbrVgRKGALfWZdcrM6JxKxvXLvudlm4jYi -72mAGPgTW2FFWhhdzJBQYlTxueBBb4nStEGsD1MAMgPsUNGBiyIlF6Qo/1fvBmHW -Bsev+8MRY3FETlpqGKUF+iPVpfhWrpWnsatDcN7rnZecF1Fwj0AoEiPol41L1TzC -2rFtkkB3SpgCU7C5amI9PNAE5bhrk3D0TXgl9cu4rx81aTzJEreu2Qp2Ld1Av3eN -49ZcyFFYQp5luZu6hxuYz3llLT56fre/OdZVhfHCusbA/5TQTHaIsZY5BO1rQh6C -fdMWnl9KQe2V+Am4Ccwinj9d5+CWem05hF0xzY7ihUtHBQfr1a/6vVnUFPtjA7Wv -hp0aKVwniwR+BSOg1fbowZ1YU8o+rhKgJlIg61QOS6Mbm9nw0tncRseKRdFuT6iu -/kCY/ny9aHjRWvQ45Y/Ld5lpjQhoZlcuHDREdTPFpdBnYiFPTEAAhT+HZdnA2525 -lAQg7Bdwor4BJH4YW342bjyXWe40Ij5XzKweQ69paL6LcvLuQOzphvgqpDGLSaTp -ZqdEidNVGGYhu2aL8gC3CUhFrHSAtWdLwTnj4dTTXDgsObSGZIzxStsBRIrEpdDv 
-LK1TDV6r+w5BKZOH7ecMh2jqA1RqRxALnFmg3sgAtf/KPy5Lv6ZpW5GbZfiQgIh1 -BBAWCAAdFiEEiTBA/hklxU/poczD00Haf8YJhicFAlxapNEACgkQ00Haf8YJhif7 -igEA/18aC75pBp2TbnGk6iMSOHu54jC5nYVkCtoqPrWneM8BAInbp1jURAz6yqjC -cihhfSnel98M1PdcP32kJuEEijoHiQIzBBABCAAdFiEEOiCYC0os/2DIx9KmA1np -IchdiNoFAlxaqloACgkQA1npIchdiNo/OxAAoHREAxc9s9SwSJfTfDUlbZNw342K -Jg2tOFmd7OEHXFTcSBS4X2MftQsX/X5KedqHdyxIVhyP3XyhHUDpqBpHvdvnRtjZ -lQlEzXyvwicy92FE+Co6lPG4XH/WdtWTIaurhlqm9sKaRGP4qvn+qVHbUfIWENbk -nDh8qKy90ENQjqdDEf7S0JnkCHsQhFUyStLXbfsah27SHuIc4zD6ZyJmTkIDDeyr -MPt3aNZ2/LamkLto6XEnTWHilEbyifO5/JwY2nl46Bu77ht5ulkqns2CJ8OiuCWH -O8+/QkWfy7bB0DBj3rQ4QYQzdTUHZPUi5ydoUIpzmavtlwxRHxw5VZv55YnigA4q -AFqsmCfBvY2sza0wo7r7gDKExlgtLcBuG3jbqMWsZ082Ga3144JCe2+2HDPw+aHe -nZJ3O2vrNeBXdyjxPLaj+0IdZHJHaK2hTNk1g2pLsRiwuMPhXeEK/VbBbwmKFXiG -FenoIG8eIrGShnvrO2n1nLur8v13pBCMga9LL3i2DuqYZER7GKOcGZYPnFS1jjR0 -rJqAumfqxSWvh7C2vW07uD7gpG1iVFTJv9kLwVdfaPJALn5YbI/gqc7LHnor2wRC -rk7fUe23J56/DJqEn4jrBXfdMjwYxqE65kabeMqIecjX+wwBZQQ3QZHmDZ5YXy2M -A6YS80hZx6gDpDS4OARcWYz4EgorBgEEAZdVAQUBAQdAzMLBLnQ6fLP6yF4FFxVL -8SP8nmdjF8TuCmy/xWjFMhYDAQgHiH4EGBYIACYWIQRcSddibQnZLZanMfgE89io -5lhCzAUCXFmM+AIbDAUJCWYBgAAKCRAE89io5lhCzKp7AQDPS3kW2suf/kN/KQ8p -DA69jdJnTIZVSOLA4ivyJk8csQEArTOnWJuNyZboSin5/e8XtuMqt6JylT94/D1G -ZzY69Aw= -=Ob4p ------END PGP PUBLIC KEY BLOCK----- +mQSuBGXB9V8RDACNREHtA4e53celi1DFDMQwFVBsKg2eN6S1IqINuZhgDquRL92O +Jtcja18GTzaVdC80+TuvL19XWdi4B869XFClZpLlbM1p6HP11haOtG30CRNbRWAQ +FOgDTO+Z/HkktsZlY1Zv0LLOwr/U2EbZAAKp/uKndpwST7rsmBJZyepbPKMTv0yj +IEJSC1NJ2r7iStdEEfpCzzBgDlWesg0+3f710+Z4kQm6EvPq8Z3tFCpcOMKcfbbh +d4cDYJSikEPelrkh6a51gFkcCVAO4KUbmROu95ijHA2INpXtf4L5kiY6iS2OEaej +9J6Whh/spD8bV5wUbV/dpgl21esLrdFpS/0sXlPnez1gDH1d2nIonBA94LxrdL/K +rNfScyLSCI9HfWaXjs8UkrAEHlvbStDI1pvPla7Qjtx+8H9D/UF+1kqfCAgAvuzH +Mwn00HybRLnjIx3RO4fmv8HdmPJHlAt2apT5M+KlxzE2aKu59OqXDaN1cMcmenox +DaXEWx30MB6eFB8BAJAnyT0w9C6NMAml4ecnaI+AsGzwuTXXTFppXm7mhRZ5C/9o +O7YWlyVBGxOoihwlNQU7wu1xt82NRE0PhynNWc1wQrFzPt9Xj2sy7FoEqYjFLkAg 
+9V8qJ8Gv1neu0DcIqt7tPBJ7ZLcvKtngLbXV1jSGiZsMhHSNYThCZfZu2AitjZgP +BJ7zwsWuAG/x491ellGnE3qnYd7eR628TiHueT2J0CmneovEF65anSPswdCY93/u +YvKYVXHt4Zwtdekc7PmZeVUu2atVuG+ELxYWBBSI01/YeQXi2cJ16rxJf1Eq8nkd +9sli7pK1r+c5tbjgDB8JVp5CEiwkkakVogNBXxYdSmB0GxIIQ38qbWTJ2ZDmeJH4 +aWQEs3EDtplaTKdLUIIl84Lgx7r7EMYlQKQC8Ah3HNmneihasZa3xvhsarBa5uYw +dmz23u73AXn3aWnX+d8RdToQ//znnP06LL2KwQocAPyM5yuo5N1P7kAThqSC17em +XlMu/6Y7emVq7H0Xi4ElQRrJQnOuZOOxcsUOIfu4VTDE6PNSwc6OnROVE+bPyssL +/irHxwOkSQEwYkuj3jAL3R15GnRiMHKy206WLYpkDlLm1HKvFM60xbSdXn8C7Z2c +56RgVBs2JNzo9dsjBv58RT4q/cK+NuKlvRFlgmAc8gOmJ5woh5zw/pLow8i0ljwz +NW2GD9TlcNUrzFjU4sQ4siFw9RY3Oc8TTZbL3YP6Ci+NqtRXRIbrXYECNijPHfAz +d6rtFTI2uQl7ii6ZkQJ9fzAMa7riigPtjNhZW6WiKl43TAEaimZ/YFSPU1zTrwMG +AVBauTzxU+R2rPjoxoZddvok2548Mzt0eFVvF5Qfx11mt+bL2MfMT3ryUxgAEHOF +bvLE0aQd49u+hChAItfh9Bq5d2mtQ1YXFvu8n82EjGd0SXUuN6AVs1HLF51v1cxZ +vxGjE3oTKgfNZavrlvRD46kbWYtor3qYGuokGwzB5BLOlWtxOzYXMDMligj7WqXz +/V8oEgUmwnwt58V8Z5HR28jQH9+yyGFbE1L7bliwG2rYgT2w+7x6hl6rRaE1Th6/ +5LQpU2NoYWxrIHZhbiBIZWVyZGVuIDxzd3ZoZWVyZGVuQGdtYWlsLmNvbT6IlgQT +EQgAPhYhBGrrA9URsApsptQzcQGqGYFT/ofiBQJlwfVfAhsDBQkHhh8JBQsJCAcC +BhUKCQgLAgQWAgMBAh4BAheAAAoJEAGqGYFT/ofi/+gA/3vDPjDCMUwGcupnBnYD +W1Bi8yhu/pMXRnTdZvXerCvJAQCGVjvOssfx0Nb9S92nb6QyreCg1W5snNZSVRjl +8uywo7kDDQRlwfVfEAwAhpp8f6B2xNk0M/+YZNOskzwbj+XSzkgiSXp8xLSCchrM +poiM1cvRgkDw/TA3lo5kd1S9q4NHdP/t/4xFpZ6o+oPNbW2MqwyqH0CtHprN6HtX +x3G71s1JbAHq40EtXGFVLx29yTpQY3pBBQD6kdH/T5xi2IR+Xi1RaMfcsZL4ilzc +6eq818AsFFuOfMgicxmUJKXd3vDywjpmY6VbqZ68UULPngYQkNfYVafx4LOD1y5O +8qPfxR0MCcpqHDX/P9Zo2OXr0PJFf+lgV45/nrdA4/SxbuT4E++Xgm7ZTHwktpI2 +giyNjZ08Mqy/j8VlxxHd231AmL11tSGy501C5NyYIgxZLq/lRe/M8uNWy1eiAIYD +Qos9lnvZTRJxDxwkQp7jqqNfnWMW9wtqp/I2Y5S1LlRrq9T+PC/t5N0RzE9WaWpJ +8voThtdOps0nB+IA3o45dNz4b3yhVEt8AjQwp0LAncXnp3VyqwLDvSf5ua/YQPZx +u3DxgWyckPSOpHomZ1VnAAMFC/95PVeQHX6lGjIXL1eK5EfyxBgDuc6TVc5zfHor +aJTwDpGD5dzriaRRZUQzdFSyUjSRG796foT9T7CQk3uuhzfdJTRFGuszGmUgWGTQ +Rry3OJni3ZDRmOkbWzlcYlwVsekd5BpbSTqPCiq55OsycT4EwMY3rnCMtQ+8TcFJ 
+Sn27GM4trN/ForclTHoLDQTvY6qT559rWox8zrUmH23AA4CQofb2MPlIM/iYosMy +XTl9MQfNhESe6gS7EHbQalGbQjbXA6Q3o5wsktdRMLXAhYL4aOU9kS3YDaH5d8lk +68xVYe7mb6rj6tv0UKUp68NwnPhm3TGItnsBlUhCVcWzw2Fttt9xF2I99fDl8FJE +5MTMiiGET51w9sB51BA+K1Lj57T4YmIgAzgRqw6zHstGqXU5US13RvpGAX/RzgRP +icmzkQiBGXm66IqUzRu0SelomW2oTfXjo7VVs5U8zpc2KWHb8wqN7rDWDxfJZsj9 +RyEJgc6ekmgC2vlbFqqAQ/C44+uIfgQYEQgAJhYhBGrrA9URsApsptQzcQGqGYFT +/ofiBQJlwfVfAhsMBQkHhh8JAAoJEAGqGYFT/ofi768A/jBr3sFU97M5Hf0nGfYA +Bjw6yQmDOU02magWtM4aJmpBAP9YLTeYB02+GYo3wM7MQ8xyJaS8Ed+PzAeNtQMM +xDOnOQ== +=VupM +-----END PGP PUBLIC KEY BLOCK----- \ No newline at end of file diff --git a/package-lock.json b/package-lock.json index 8cf6ea2b68..2cf3264ddc 100644 --- a/package-lock.json +++ b/package-lock.json @@ -1,6 +1,6 @@ { "name": "tari", - "version": "1.0.0-pre.5", + "version": "1.0.0-pre.8", "lockfileVersion": 2, "requires": true, "packages": {}