diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml new file mode 100644 index 0000000..603a913 --- /dev/null +++ b/.github/workflows/main.yml @@ -0,0 +1,44 @@ +name: CI + +on: [push, pull_request] + +jobs: + tests: + strategy: + matrix: + toolchain: + - 1.43 + - nightly + os: + - ubuntu-latest + - macOS-latest + runs-on: ${{ matrix.os }} + steps: + - name: Checkout source code + uses: actions/checkout@v2 + - name: Install Rust ${{ matrix.toolchain }} toolchain + uses: actions-rs/toolchain@v1 + with: + toolchain: ${{ matrix.toolchain }} + override: true + profile: minimal + - name: Build on Rust ${{ matrix.toolchain }} + run: cargo build --verbose --color always + - name: Test on Rust ${{ matrix.toolchain }} + run: cargo test --verbose --color always --all-features + - name: Functional tests + if: matrix.os == 'ubuntu-latest' && matrix.toolchain == 'nightly' + run: ./contrib/ci-functional-tests.sh + env: + RUST_BACKTRACE: 1 + + rustfmt_check: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v1 + - uses: actions-rs/toolchain@v1 + with: + toolchain: stable + components: rustfmt + override: true + - run: cargo fmt -- --check diff --git a/.gitignore b/.gitignore index ea8c4bf..e762de7 100644 --- a/.gitignore +++ b/.gitignore @@ -1 +1,2 @@ -/target +target/ +__pycache__ diff --git a/Cargo.lock b/Cargo.lock index 6def57a..5a12256 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1,12 +1,10 @@ # This file is automatically @generated by Cargo. # It is not intended for manual editing. 
-version = 3 - [[package]] name = "addr2line" -version = "0.15.2" +version = "0.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e7a2e47a1fbe209ee101dd6d61285226744c6c8d3c21c8dc878ba6cb9f467f3a" +checksum = "3e61f2b7f93d2c7d2b08263acaa4a363b3e276806c68af6134c44f523bf1aacd" dependencies = [ "gimli", ] @@ -36,9 +34,9 @@ checksum = "cdb031dd78e28731d87d56cc8ffef4a8f36ca26c38fe2de700543e627f8a464a" [[package]] name = "backtrace" -version = "0.3.60" +version = "0.3.61" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b7815ea54e4d821e791162e078acbebfd6d8c8939cd559c9335dceb1c8ca7282" +checksum = "e7a905d892734eea339e896738c14b9afce22b5318f64b951e70bf3844419b01" dependencies = [ "addr2line", "cc", @@ -115,9 +113,9 @@ checksum = "14c189c53d098945499cdfa7ecc63567cf3886b3332b312a5b4585d8d3a6a610" [[package]] name = "cc" -version = "1.0.68" +version = "1.0.70" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4a72c244c1ff497a746a7e1fb3d14bd08420ecda70c8f25c7112f2781652d787" +checksum = "d26a6ce4b6a484fa3edb70f7efa6fc430fd2b87285fe8b84304fd0936faa0dc0" [[package]] name = "cfg-if" @@ -147,9 +145,9 @@ dependencies = [ [[package]] name = "ed25519" -version = "1.1.1" +version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8d0860415b12243916284c67a9be413e044ee6668247b99ba26d94b2bc06c8f6" +checksum = "4620d40f6d2601794401d6dd95a5cf69b6c157852539470eeda433a99b3c0efc" dependencies = [ "signature", ] @@ -166,6 +164,15 @@ version = "0.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7360491ce676a36bf9bb3c56c1aa791658183a54d2744120f27285738d90465a" +[[package]] +name = "fastrand" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b394ed3d285a429378d3b384b9eb1285267e7df4b166df24b7a6939a04dc392e" +dependencies = [ + "instant", +] + [[package]] name = "fern" version = "0.6.0" @@ 
-188,9 +195,9 @@ dependencies = [ [[package]] name = "gimli" -version = "0.24.0" +version = "0.25.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0e4075386626662786ddb0ec9081e7c7eeb1ba31951f447ca780ef9f5d568189" +checksum = "f0a01e0497841a3b2db4f8afa483cce65f7e96a3498bd6c541734792aeac8fe7" [[package]] name = "hashbrown" @@ -210,11 +217,20 @@ dependencies = [ "hashbrown", ] +[[package]] +name = "instant" +version = "0.1.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "716d3d89f35ac6a34fd0eed635395f4c3b76fa889338a4632e5231a8684216bd" +dependencies = [ + "cfg-if", +] + [[package]] name = "itoa" -version = "0.4.7" +version = "0.4.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dd25036021b0de88a0aff6b850051563c6516d0bf53f8638938edbb9de732736" +checksum = "b71991ff56294aa922b450139ee08b3bfc70982c6b2c7562771375cf73542dd4" [[package]] name = "jsonrpc" @@ -230,9 +246,9 @@ dependencies = [ [[package]] name = "libc" -version = "0.2.97" +version = "0.2.103" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "12b8adadd720df158f4d70dfe7ccc6adb0472d7c55ca83445f6a5ab3e36f8fb6" +checksum = "dd8f7255a17a627354f321ef0055d63b898c6fb27eff628af4d1b66b7331edf6" [[package]] name = "libsodium-sys" @@ -268,9 +284,9 @@ dependencies = [ [[package]] name = "memchr" -version = "2.4.0" +version = "2.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b16bd47d9e329435e309c58469fe0791c2d0d1ba96ec0954152a5ae2b04387dc" +checksum = "308cc39be01b73d0d18f82a0e7b2a3df85245f84af96fdddc5d202d27e47b86a" [[package]] name = "miniscript" @@ -297,6 +313,7 @@ version = "0.0.1" dependencies = [ "backtrace", "dirs", + "fastrand", "fern", "jsonrpc", "libc", @@ -311,9 +328,9 @@ dependencies = [ [[package]] name = "object" -version = "0.25.3" +version = "0.26.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"a38f2be3697a57b4060074ff41b44c16870d916ad7877c17696e063257482bc7" +checksum = "39f37e50073ccad23b6d09bcb5b263f4e76d3bb6038e4a3c08e52162ffa8abc2" dependencies = [ "memchr", ] @@ -326,15 +343,15 @@ checksum = "692fcb63b64b1758029e0a96ee63e049ce8c5948587f2f7208df04625e5f6b56" [[package]] name = "pkg-config" -version = "0.3.19" +version = "0.3.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3831453b3449ceb48b6d9c7ad7c96d5ea673e9b470a1dc578c2ce6521230884c" +checksum = "7c9b1041b4387893b91ee6746cddfc28516aff326a3519fb2adf820932c5e6cb" [[package]] name = "proc-macro2" -version = "1.0.27" +version = "1.0.29" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f0d8caf72986c1a598726adc988bb5984792ef84f5ee5aa50209145ee8077038" +checksum = "b9f5105d4fdaab20335ca9565e106a5d9b82b6219b5ba735731124ac6711d23d" dependencies = [ "unicode-xid", ] @@ -356,9 +373,9 @@ checksum = "90bde5296fc891b0cef12a6d03ddccc162ce7b2aff54160af9338f8d40df6d19" [[package]] name = "redox_syscall" -version = "0.2.9" +version = "0.2.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ab49abadf3f9e1c4bc499e8845e152ad87d2ad2d30371841171169e9d75feee" +checksum = "8383f39639269cde97d255a32bdb68c047337295414940c68bdd30c2e13203ff" dependencies = [ "bitflags", ] @@ -417,9 +434,9 @@ dependencies = [ [[package]] name = "rustc-demangle" -version = "0.1.20" +version = "0.1.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dead70b0b5e03e9c814bcb6b01e03e68f7c57a80aa48c72ec92152ab3e818d49" +checksum = "7ef03e0a2b150c7a90d01faf6254c9c48a41e95fb2a8c2ac1c6f0d2b9aefc342" [[package]] name = "rustc_version" @@ -481,18 +498,18 @@ checksum = "388a1df253eca08550bef6c72392cfe7c30914bf41df5269b68cbd6ff8f570a3" [[package]] name = "serde" -version = "1.0.126" +version = "1.0.130" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"ec7505abeacaec74ae4778d9d9328fe5a5d04253220a85c4ee022239fc996d03" +checksum = "f12d06de37cf59146fbdecab66aa99f9fe4f78722e3607577a5375d66bd0c913" dependencies = [ "serde_derive", ] [[package]] name = "serde_derive" -version = "1.0.126" +version = "1.0.130" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "963a7dbc9895aeac7ac90e74f34a5d5261828f79df35cbed41e10189d3804d43" +checksum = "d7bc1a1ab1961464eae040d96713baa5a724a8152c1222492465b54322ec508b" dependencies = [ "proc-macro2", "quote", @@ -501,9 +518,9 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.64" +version = "1.0.68" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "799e97dc9fdae36a5c8b8f2cae9ce2ee9fdce2058c57a93e6099d919fd982f79" +checksum = "0f690853975602e1bfe1ccbf50504d67174e3bcf340f23b5ea9992e0587a52d8" dependencies = [ "itoa", "ryu", @@ -549,15 +566,15 @@ dependencies = [ [[package]] name = "subtle" -version = "2.4.0" +version = "2.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e81da0851ada1f3e9d4312c704aa4f8806f0f9d69faaf8df2f3464b4a9437c2" +checksum = "6bdef32e8150c2a081110b42772ffe7d7c9032b606bc226c8260fd97e0976601" [[package]] name = "syn" -version = "1.0.73" +version = "1.0.77" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f71489ff30030d2ae598524f61326b902466f72a0fb1a8564c001cc63425bcc7" +checksum = "5239bc68e0fef57495900cfea4e8dc75596d9a319d7e16b1e0a440d24e6fe0a0" dependencies = [ "proc-macro2", "quote", diff --git a/Cargo.toml b/Cargo.toml index 222f63c..1d96993 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -6,7 +6,7 @@ edition = "2018" [dependencies] revault_tx = { version = "0.3.0", features = ["use-serde"] } -revault_net = { version = "0.1.0", branch = "update_tx" } +revault_net = "0.1.0" # Don't reinvent the wheel dirs = "3.0" @@ -31,4 +31,7 @@ rusqlite = { version = "0.25.3", features = ["bundled", "unlock_notify"] } # We want to have a backtrace on 
panic, and the stdlib doesn't have a programmatic # interface yet to work with our custom panic hook. -backtrace = "0.3.60" +backtrace = "0.3" + +[dev-dependencies] +fastrand = "1.4.0" diff --git a/contrib/ci-functional-tests.sh b/contrib/ci-functional-tests.sh new file mode 100755 index 0000000..d0fed89 --- /dev/null +++ b/contrib/ci-functional-tests.sh @@ -0,0 +1,22 @@ +set -xe + +# Do the linter check early for quicker feedback +pip install black +black --check tests/ + +# Build the miradord binary +cargo build --release + +# Download the bitcoind binary +BITCOIND_VERSION="22.0" +DIR_NAME="bitcoin-$BITCOIND_VERSION" +ARCHIVE_NAME="$DIR_NAME.tar.gz" +curl https://bitcoincore.org/bin/bitcoin-core-$BITCOIND_VERSION/bitcoin-$BITCOIND_VERSION-x86_64-linux-gnu.tar.gz -o $ARCHIVE_NAME +tar -xzf $ARCHIVE_NAME +sudo mv $DIR_NAME/bin/bitcoind /usr/local/bin/ + +# Run the functional tests +python3 -m venv venv +. venv/bin/activate +pip install -r tests/requirements.txt +VERBOSE=1 LOG_LEVEL=debug TIMEOUT=60 pytest -n2 -vvv --log-cli-level=DEBUG tests/ diff --git a/contrib/coverage.sh b/contrib/coverage.sh new file mode 100755 index 0000000..a02b097 --- /dev/null +++ b/contrib/coverage.sh @@ -0,0 +1,23 @@ +# Assumes: +# - you have a postgres DB set up with credentials revault:revault +# - you have functional tests dependencies installed (likely you are in a venv) + +set -ex + +if [ -z "$JOBS" ]; then JOBS=1; fi + +if ! 
command -v grcov &>/dev/null; then + cargo install grcov +fi + +cargo clean + +rm -f "revaultd_coverage_*.profraw" +LLVM_PROFILE_FILE="miradord_coverage_%m.profraw" RUSTFLAGS="-Zinstrument-coverage" RUSTDOCFLAGS="$RUSTFLAGS -Z unstable-options --persist-doctests target/debug/doctestbins" cargo +nightly build --all-features +LLVM_PROFILE_FILE="miradord_coverage_%m.profraw" RUSTFLAGS="-Zinstrument-coverage" RUSTDOCFLAGS="$RUSTFLAGS -Z unstable-options --persist-doctests target/debug/doctestbins" cargo +nightly test --all-features --jobs $JOBS +pytest -n $JOBS + +grcov . --binary-path ./target/debug/ -t html --branch --ignore-not-existing --llvm -o ./target/grcov/ +firefox target/grcov/index.html + +set +ex diff --git a/contrib/tools/mscompiler/Cargo.lock b/contrib/tools/mscompiler/Cargo.lock new file mode 100644 index 0000000..6e193a7 --- /dev/null +++ b/contrib/tools/mscompiler/Cargo.lock @@ -0,0 +1,127 @@ +# This file is automatically @generated by Cargo. +# It is not intended for manual editing. 
+[[package]] +name = "base64" +version = "0.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "904dfeac50f3cdaba28fc6f57fdcddb75f49ed61346676a78c4ffe55877802fd" + +[[package]] +name = "bech32" +version = "0.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2dabbe35f96fb9507f7330793dc490461b2962659ac5d427181e451a623751d1" + +[[package]] +name = "bitcoin" +version = "0.26.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1ec5f88a446d66e7474a3b8fa2e348320b574463fb78d799d90ba68f79f48e0e" +dependencies = [ + "bech32", + "bitcoin_hashes", + "secp256k1", +] + +[[package]] +name = "bitcoin_hashes" +version = "0.9.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0aaf87b776808e26ae93289bc7d025092b6d909c193f0cdee0b3a86e7bd3c776" + +[[package]] +name = "bitcoinconsensus" +version = "0.19.0-3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1a8aa43b5cd02f856cb126a9af819e77b8910fdd74dd1407be649f2f5fe3a1b5" +dependencies = [ + "cc", + "libc", +] + +[[package]] +name = "cc" +version = "1.0.67" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e3c69b077ad434294d3ce9f1f6143a2a4b89a8a2d54ef813d85003a4fd1137fd" + +[[package]] +name = "itoa" +version = "0.4.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dd25036021b0de88a0aff6b850051563c6516d0bf53f8638938edbb9de732736" + +[[package]] +name = "libc" +version = "0.2.94" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "18794a8ad5b29321f790b55d93dfba91e125cb1a9edbd4f8e3150acc771c1a5e" + +[[package]] +name = "miniscript" +version = "5.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "71f455be59a359d50370c4f587afbc5739c862e684c5afecae80ab93e7474b4e" +dependencies = [ + "bitcoin", +] + +[[package]] +name = "mscompiler" +version = "0.0.1" +dependencies = [ + 
"revault_tx", + "serde_json", +] + +[[package]] +name = "revault_tx" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "281109ba2c34a241b24e30e22225c4fd283a5dc69f86c33de9cc339abb870c38" +dependencies = [ + "base64", + "bitcoinconsensus", + "miniscript", +] + +[[package]] +name = "ryu" +version = "1.0.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "71d301d4193d031abdd79ff7e3dd721168a9572ef3fe51a1517aba235bd8f86e" + +[[package]] +name = "secp256k1" +version = "0.20.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ee5070fdc6f26ca5be6dcfc3d07c76fdb974a63a8b246b459854274145f5a258" +dependencies = [ + "secp256k1-sys", +] + +[[package]] +name = "secp256k1-sys" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "67e4b6455ee49f5901c8985b88f98fb0a0e1d90a6661f5a03f4888bd987dad29" +dependencies = [ + "cc", +] + +[[package]] +name = "serde" +version = "1.0.125" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "558dc50e1a5a5fa7112ca2ce4effcb321b0300c0d4ccf0776a9f60cd89031171" + +[[package]] +name = "serde_json" +version = "1.0.64" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "799e97dc9fdae36a5c8b8f2cae9ce2ee9fdce2058c57a93e6099d919fd982f79" +dependencies = [ + "itoa", + "ryu", + "serde", +] diff --git a/contrib/tools/mscompiler/Cargo.toml b/contrib/tools/mscompiler/Cargo.toml new file mode 100644 index 0000000..4d72ff6 --- /dev/null +++ b/contrib/tools/mscompiler/Cargo.toml @@ -0,0 +1,9 @@ +[package] +name = "mscompiler" +version = "0.0.1" +authors = ["Antoine Poinsot "] +edition = "2018" + +[dependencies] +revault_tx = "0.2" +serde_json = "1" diff --git a/contrib/tools/mscompiler/README.md b/contrib/tools/mscompiler/README.md new file mode 100644 index 0000000..4b9ce6a --- /dev/null +++ b/contrib/tools/mscompiler/README.md @@ -0,0 +1,14 @@ +A small tool for 
generating the descriptors needed by `revaultd` out of keys generated during The +Ceremony (:tm:). + +### Example + +``` +$ cargo build +$ ./target/debug/mscompiler '["xpub6AHA9hZDN11k2ijHMeS5QqHx2KP9aMBRhTDqANMnwVtdyw2TDYRmF8PjpvwUFcL1Et8Hj59S3gTSMcUQ5gAqTz3Wd8EsMTmF3DChhqPQBnU","xpub6AaffFGfH6WXfm6pwWzmUMuECQnoLeB3agMKaLyEBZ5ZVfwtnS5VJKqXBt8o5ooCWVy2H87GsZshp7DeKE25eWLyd1Ccuh2ZubQUkgpiVux"]' '["03b506a1dbe57b4bf48c95e0c7d417b87dd3b4349d290d2e7e9ba72c912652d80a", "0295e7f5d12a2061f1fd2286cefec592dff656a19f55f4f01305d6aa56630880ce"]' '["xpub6BaZSKgpaVvibu2k78QsqeDWXp92xLHZxiu1WoqLB9hKhsBf3miBUDX7PJLgSPvkj66ThVHTqdnbXpeu8crXFmDUd4HeM4s4miQS2xsv3Qb"]' 1 '["xpub6BaZSKgpaVvibu2k78QsqeDWXp92xLHZxiu1WoqLB9hKhsBf3miBUDX7PJLgSPvkj66ThVHTqdnbXpeu8crXFmDUd4HeM4s4miQS2xsv3Qb"]' 6 +{ + "cpfp_descriptor": "wsh(thresh(1,pk(xpub6BaZSKgpaVvibu2k78QsqeDWXp92xLHZxiu1WoqLB9hKhsBf3miBUDX7PJLgSPvkj66ThVHTqdnbXpeu8crXFmDUd4HeM4s4miQS2xsv3Qb/*)))#cwycq5xu", + "deposit_descriptor": "wsh(multi(2,xpub6AHA9hZDN11k2ijHMeS5QqHx2KP9aMBRhTDqANMnwVtdyw2TDYRmF8PjpvwUFcL1Et8Hj59S3gTSMcUQ5gAqTz3Wd8EsMTmF3DChhqPQBnU/*,xpub6AaffFGfH6WXfm6pwWzmUMuECQnoLeB3agMKaLyEBZ5ZVfwtnS5VJKqXBt8o5ooCWVy2H87GsZshp7DeKE25eWLyd1Ccuh2ZubQUkgpiVux/*))#n3cj9mhy", + "unvault_descriptor": "wsh(andor(thresh(1,pk(xpub6BaZSKgpaVvibu2k78QsqeDWXp92xLHZxiu1WoqLB9hKhsBf3miBUDX7PJLgSPvkj66ThVHTqdnbXpeu8crXFmDUd4HeM4s4miQS2xsv3Qb/*)),and_v(v:multi(2,03b506a1dbe57b4bf48c95e0c7d417b87dd3b4349d290d2e7e9ba72c912652d80a,0295e7f5d12a2061f1fd2286cefec592dff656a19f55f4f01305d6aa56630880ce),older(6)),thresh(2,pkh(xpub6AHA9hZDN11k2ijHMeS5QqHx2KP9aMBRhTDqANMnwVtdyw2TDYRmF8PjpvwUFcL1Et8Hj59S3gTSMcUQ5gAqTz3Wd8EsMTmF3DChhqPQBnU/*),a:pkh(xpub6AaffFGfH6WXfm6pwWzmUMuECQnoLeB3agMKaLyEBZ5ZVfwtnS5VJKqXBt8o5ooCWVy2H87GsZshp7DeKE25eWLyd1Ccuh2ZubQUkgpiVux/*))))#76jsyzdg" +} +``` diff --git a/contrib/tools/mscompiler/src/main.rs b/contrib/tools/mscompiler/src/main.rs new file mode 100644 index 0000000..2c4f4e1 --- /dev/null +++ 
b/contrib/tools/mscompiler/src/main.rs @@ -0,0 +1,152 @@ +use std::{env, process, str::FromStr}; + +use revault_tx::{ + bitcoin::{secp256k1, util::bip32}, + miniscript::{ + descriptor::{Descriptor, DescriptorPublicKey, DescriptorTrait, DescriptorXKey, Wildcard}, + MiniscriptKey, + }, + scripts::{CpfpDescriptor, DepositDescriptor, UnvaultDescriptor}, +}; + +macro_rules! from_json { + ($str:expr) => { + serde_json::from_str($str).unwrap_or_else(|e| { + eprintln!("Failed to deserialize '{}' as JSON: '{}'", $str, e); + process::exit(1); + }); + }; +} + +fn xpubs_from_json(json_array: &str) -> Vec { + let keys: Vec = from_json!(json_array); + keys.into_iter() + .map(|key_str| { + let xpub = bip32::ExtendedPubKey::from_str(&key_str).unwrap_or_else(|e| { + eprintln!("Failed to parse xpub '{}': '{}'", &key_str, e); + process::exit(1); + }); + DescriptorPublicKey::XPub(DescriptorXKey { + origin: None, + xkey: xpub, + derivation_path: vec![].into(), + wildcard: Wildcard::Unhardened, + }) + }) + .collect() +} + +fn keys_from_json(json_array: &str) -> Vec { + let keys: Vec = from_json!(json_array); + keys.into_iter() + .map(|key_str| { + DescriptorPublicKey::from_str(&key_str).unwrap_or_else(|e| { + eprintln!("Failed to parse xpub '{}': '{}'", &key_str, e); + process::exit(1); + }) + }) + .collect() +} + +fn desc_san_check( + desc: &Descriptor

, +) -> Result<(), revault_tx::miniscript::Error> { + match desc { + Descriptor::Wsh(wsh) => wsh.sanity_check(), + _ => unreachable!(), + } +} + +fn sanity_checks( + dep_desc: &DepositDescriptor, + unv_desc: &UnvaultDescriptor, + cpfp_desc: &CpfpDescriptor, +) { + desc_san_check(dep_desc.clone().inner()).unwrap_or_else(|e| { + eprintln!("Error sanity checking xpub Deposit descriptor: '{:?}'", e); + process::exit(1); + }); + desc_san_check(unv_desc.clone().inner()).unwrap_or_else(|e| { + eprintln!("Error sanity checking xpub Unvault descriptor: '{:?}'", e); + process::exit(1); + }); + desc_san_check(cpfp_desc.clone().inner()).unwrap_or_else(|e| { + eprintln!("Error sanity checking xpub CPFP descriptor: '{:?}'", e); + process::exit(1); + }); + + let secp = secp256k1::Secp256k1::verification_only(); + for i in &[0, 5, 10, 100, 1000] { + desc_san_check(dep_desc.derive((*i).into(), &secp).inner()).unwrap_or_else(|e| { + eprintln!( + "Error sanity checking derived Deposit descriptor: '{:?}'", + e + ); + process::exit(1); + }); + desc_san_check(unv_desc.derive((*i).into(), &secp).inner()).unwrap_or_else(|e| { + eprintln!( + "Error sanity checking derived Unvault descriptor: '{:?}'", + e + ); + process::exit(1); + }); + desc_san_check(cpfp_desc.derive((*i).into(), &secp).inner()).unwrap_or_else(|e| { + eprintln!("Error sanity checking derived CPFP descriptor: '{:?}'", e); + process::exit(1); + }); + } +} + +fn main() { + let args: Vec = env::args().collect(); + if args.len() != 7 { + eprintln!( + "Usage: '{} \ + '\n \ + All values are as JSON.", + args[0] + ); + process::exit(1); + } + + let stk_keys = xpubs_from_json(&args[1]); + let cosigs_keys = keys_from_json(&args[2]); + let man_keys = xpubs_from_json(&args[3]); + let man_thresh: u32 = from_json!(&args[4]); + let cpfp_xpubs = xpubs_from_json(&args[5]); + let unvault_csv: u32 = from_json!(&args[6]); + + let deposit_desc = DepositDescriptor::new(stk_keys.clone()).unwrap_or_else(|e| { + eprintln!("Compiling Deposit 
descriptor: '{}'", e); + process::exit(1); + }); + let unvault_desc = UnvaultDescriptor::new( + stk_keys, + man_keys, + man_thresh as usize, + cosigs_keys, + unvault_csv, + ) + .unwrap_or_else(|e| { + eprintln!("Compiling Unvault descriptor: '{}'", e); + process::exit(1); + }); + let cpfp_desc = CpfpDescriptor::new(cpfp_xpubs).unwrap_or_else(|e| { + eprintln!("Compiling CPFP descriptor: '{}'", e); + process::exit(1); + }); + sanity_checks(&deposit_desc, &unvault_desc, &cpfp_desc); + + let dep_str: serde_json::Value = deposit_desc.to_string().into(); + let unv_str: serde_json::Value = unvault_desc.to_string().into(); + let cpfp_str: serde_json::Value = cpfp_desc.to_string().into(); + println!( + "{:#}", + serde_json::json!({ + "deposit_descriptor": dep_str, + "unvault_descriptor": unv_str, + "cpfp_descriptor": cpfp_str, + }) + ); +} diff --git a/contrib/tools/txbuilder/Cargo.lock b/contrib/tools/txbuilder/Cargo.lock new file mode 100644 index 0000000..df3e05c --- /dev/null +++ b/contrib/tools/txbuilder/Cargo.lock @@ -0,0 +1,128 @@ +# This file is automatically @generated by Cargo. +# It is not intended for manual editing. 
+version = 3 + +[[package]] +name = "base64" +version = "0.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "904dfeac50f3cdaba28fc6f57fdcddb75f49ed61346676a78c4ffe55877802fd" + +[[package]] +name = "bech32" +version = "0.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2dabbe35f96fb9507f7330793dc490461b2962659ac5d427181e451a623751d1" + +[[package]] +name = "bitcoin" +version = "0.26.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6742ec672d3f12506f4ac5c0d853926ff1f94e675f60ffd3224039972bf663f1" +dependencies = [ + "bech32", + "bitcoin_hashes", + "secp256k1", +] + +[[package]] +name = "bitcoin_hashes" +version = "0.9.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7ce18265ec2324ad075345d5814fbeed4f41f0a660055dc78840b74d19b874b1" + +[[package]] +name = "bitcoinconsensus" +version = "0.19.0-3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1a8aa43b5cd02f856cb126a9af819e77b8910fdd74dd1407be649f2f5fe3a1b5" +dependencies = [ + "cc", + "libc", +] + +[[package]] +name = "cc" +version = "1.0.69" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e70cc2f62c6ce1868963827bd677764c62d07c3d9a3e1fb1177ee1a9ab199eb2" + +[[package]] +name = "itoa" +version = "0.4.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dd25036021b0de88a0aff6b850051563c6516d0bf53f8638938edbb9de732736" + +[[package]] +name = "libc" +version = "0.2.98" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "320cfe77175da3a483efed4bc0adc1968ca050b098ce4f2f1c13a56626128790" + +[[package]] +name = "miniscript" +version = "5.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "71f455be59a359d50370c4f587afbc5739c862e684c5afecae80ab93e7474b4e" +dependencies = [ + "bitcoin", +] + +[[package]] +name = "revault_tx" +version = "0.2.1" 
+source = "git+https://github.com/revault/revault_tx#695415edc410e4420884b3a7a497139b7fece7c1" +dependencies = [ + "base64", + "bitcoinconsensus", + "miniscript", +] + +[[package]] +name = "ryu" +version = "1.0.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "71d301d4193d031abdd79ff7e3dd721168a9572ef3fe51a1517aba235bd8f86e" + +[[package]] +name = "secp256k1" +version = "0.20.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "97d03ceae636d0fed5bae6a7f4f664354c5f4fcedf6eef053fef17e49f837d0a" +dependencies = [ + "secp256k1-sys", +] + +[[package]] +name = "secp256k1-sys" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "827cb7cce42533829c792fc51b82fbf18b125b45a702ef2c8be77fce65463a7b" +dependencies = [ + "cc", +] + +[[package]] +name = "serde" +version = "1.0.126" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ec7505abeacaec74ae4778d9d9328fe5a5d04253220a85c4ee022239fc996d03" + +[[package]] +name = "serde_json" +version = "1.0.64" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "799e97dc9fdae36a5c8b8f2cae9ce2ee9fdce2058c57a93e6099d919fd982f79" +dependencies = [ + "itoa", + "ryu", + "serde", +] + +[[package]] +name = "txbuilder" +version = "0.0.1" +dependencies = [ + "revault_tx", + "serde_json", +] diff --git a/contrib/tools/txbuilder/Cargo.toml b/contrib/tools/txbuilder/Cargo.toml new file mode 100644 index 0000000..f686596 --- /dev/null +++ b/contrib/tools/txbuilder/Cargo.toml @@ -0,0 +1,9 @@ +[package] +name = "txbuilder" +version = "0.0.1" +authors = ["Antoine Poinsot "] +edition = "2018" + +[dependencies] +revault_tx = { git = "https://github.com/revault/revault_tx" } +serde_json = "1" diff --git a/contrib/tools/txbuilder/src/main.rs b/contrib/tools/txbuilder/src/main.rs new file mode 100644 index 0000000..4389704 --- /dev/null +++ b/contrib/tools/txbuilder/src/main.rs @@ -0,0 +1,245 @@ +use 
std::{collections::BTreeMap, env, process, str::FromStr}; + +use revault_tx::{ + bitcoin::{ + consensus::encode::serialize_hex, secp256k1, util::bip32, Address, Amount, OutPoint, + SigHashType, + }, + miniscript::{ + descriptor::{Descriptor, DescriptorTrait}, + MiniscriptKey, + }, + scripts::{CpfpDescriptor, DepositDescriptor, EmergencyAddress, UnvaultDescriptor}, + transactions::RevaultTransaction, +}; + +macro_rules! from_json { + ($str:expr) => { + serde_json::from_str($str).unwrap_or_else(|e| { + eprintln!("Failed to deserialize '{}' as JSON: '{}'", $str, e); + process::exit(1); + }); + }; +} + +fn xprivs_from_json(json_array: &str) -> Vec { + let keys: Vec = from_json!(json_array); + keys.into_iter() + .map(|key_str| { + bip32::ExtendedPrivKey::from_str(&key_str).unwrap_or_else(|e| { + eprintln!("Failed to parse xpriv '{}': '{}'", &key_str, e); + process::exit(1); + }) + }) + .collect() +} + +fn emer_address_from_arg(arg: &str) -> EmergencyAddress { + let address = Address::from_str(&arg).unwrap_or_else(|e| { + eprintln!("Failed to parse Emergency address '{}': '{}'", &arg, e); + process::exit(1); + }); + EmergencyAddress::from(address).unwrap_or_else(|e| { + eprintln!("Failed to parse Emergency address '{}': '{}'", &arg, e); + process::exit(1); + }) +} + +fn desc_san_check( + desc: &Descriptor

, +) -> Result<(), revault_tx::miniscript::Error> { + match desc { + Descriptor::Wsh(wsh) => wsh.sanity_check(), + _ => unreachable!(), + } +} + +fn sanity_checks( + dep_desc: &DepositDescriptor, + unv_desc: &UnvaultDescriptor, + cpfp_desc: &CpfpDescriptor, +) { + desc_san_check(dep_desc.clone().inner()).unwrap_or_else(|e| { + eprintln!("Error sanity checking xpub Deposit descriptor: '{:?}'", e); + process::exit(1); + }); + desc_san_check(unv_desc.clone().inner()).unwrap_or_else(|e| { + eprintln!("Error sanity checking xpub Unvault descriptor: '{:?}'", e); + process::exit(1); + }); + desc_san_check(cpfp_desc.clone().inner()).unwrap_or_else(|e| { + eprintln!("Error sanity checking xpub CPFP descriptor: '{:?}'", e); + process::exit(1); + }); + + let secp = secp256k1::Secp256k1::verification_only(); + for i in &[0, 5, 10, 100, 1000] { + desc_san_check(dep_desc.derive((*i).into(), &secp).inner()).unwrap_or_else(|e| { + eprintln!( + "Error sanity checking derived Deposit descriptor: '{:?}'", + e + ); + process::exit(1); + }); + desc_san_check(unv_desc.derive((*i).into(), &secp).inner()).unwrap_or_else(|e| { + eprintln!( + "Error sanity checking derived Unvault descriptor: '{:?}'", + e + ); + process::exit(1); + }); + desc_san_check(cpfp_desc.derive((*i).into(), &secp).inner()).unwrap_or_else(|e| { + eprintln!("Error sanity checking derived CPFP descriptor: '{:?}'", e); + process::exit(1); + }); + } +} + +fn sign( + psbt: &mut impl RevaultTransaction, + sigtype: SigHashType, + stk_xprivs: &[bip32::ExtendedPrivKey], + derivation_index: u32, + secp: &secp256k1::Secp256k1, +) -> BTreeMap { + let mut sigs = BTreeMap::new(); + + for xpriv in stk_xprivs.iter() { + let privkey = xpriv + .derive_priv(secp, &[derivation_index.into()]) + .unwrap() + .private_key + .key; + let sighash = psbt.signature_hash(0, sigtype).unwrap(); + let sighash = secp256k1::Message::from_slice(&sighash).unwrap(); + let pubkey = secp256k1::PublicKey::from_secret_key(&secp, &privkey); + let sig = 
secp.sign(&sighash, &privkey); + psbt.add_signature(0, pubkey, sig, &secp) + .unwrap_or_else(|e| { + eprintln!("Failed to add signature to '{:?}': '{}'", &psbt, e); + process::exit(1); + }); + sigs.insert(pubkey.to_string(), sig.to_string()); + } + + psbt.finalize(secp).unwrap_or_else(|e| { + eprintln!("Failed to finalize psbt '{:?}': '{}'", &psbt, e); + process::exit(1); + }); + + sigs +} + +fn main() { + let args: Vec = env::args().collect(); + if args.len() != 9 { + eprintln!( + "Usage: '{} \ + \ + '", + args[0] + ); + process::exit(1); + } + + let secp = revault_tx::bitcoin::secp256k1::Secp256k1::new(); + + let stk_xprivs = xprivs_from_json(&args[1]); + let deposit_desc = DepositDescriptor::from_str(&args[2]).unwrap_or_else(|e| { + eprintln!("Failed to parse deposit descriptor '{}': '{}'", args[2], e); + process::exit(1); + }); + let unvault_desc = UnvaultDescriptor::from_str(&args[3]).unwrap_or_else(|e| { + eprintln!("Failed to parse unvault descriptor '{}': '{}'", args[3], e); + process::exit(1); + }); + let cpfp_desc = CpfpDescriptor::from_str(&args[4]).unwrap_or_else(|e| { + eprintln!("Failed to parse CPFP descriptor '{}': '{}'", args[1], e); + process::exit(1); + }); + let emer_address = emer_address_from_arg(&args[5]); + let deposit_outpoint = OutPoint::from_str(&args[6]).unwrap_or_else(|e| { + eprintln!("Failed to parse deposit outpoint '{}': '{}'", &args[6], e); + process::exit(1); + }); + let deposit_value: u64 = from_json!(&args[7]); + let derivation_index: u32 = from_json!(&args[8]); + sanity_checks(&deposit_desc, &unvault_desc, &cpfp_desc); + + eprintln!( + "{}", + deposit_desc + .derive(derivation_index.into(), &secp) + .into_inner() + .address(revault_tx::bitcoin::Network::Regtest) + .unwrap() + ); + + let (mut unvault_tx, mut cancel_tx, mut emer_tx, mut unemer_tx) = + revault_tx::transactions::transaction_chain( + deposit_outpoint, + Amount::from_sat(deposit_value), + &deposit_desc, + &unvault_desc, + &cpfp_desc, + derivation_index.into(), + 
emer_address, + 0, + &secp, + ) + .unwrap_or_else(|e| { + eprintln!("Failed to derive transaction chain: '{}'", e); + process::exit(1); + }); + + let unvault_sigs = sign( + &mut unvault_tx, + SigHashType::All, + &stk_xprivs, + derivation_index, + &secp, + ); + let cancel_sigs = sign( + &mut cancel_tx, + SigHashType::AllPlusAnyoneCanPay, + &stk_xprivs, + derivation_index, + &secp, + ); + let emer_sigs = sign( + &mut emer_tx, + SigHashType::AllPlusAnyoneCanPay, + &stk_xprivs, + derivation_index, + &secp, + ); + let unemer_sigs = sign( + &mut unemer_tx, + SigHashType::AllPlusAnyoneCanPay, + &stk_xprivs, + derivation_index, + &secp, + ); + + println!( + "{:#}", + serde_json::json!({ + "unvault": serde_json::json!({ + "tx": serialize_hex(&unvault_tx.into_tx()), + "sigs": unvault_sigs, + }), + "cancel": serde_json::json!({ + "tx": serialize_hex(&cancel_tx.into_tx()), + "sigs": cancel_sigs, + }), + "emer": serde_json::json!({ + "tx": serialize_hex(&emer_tx.into_tx()), + "sigs": emer_sigs, + }), + "unemer": serde_json::json!({ + "tx": serialize_hex(&unemer_tx.into_tx()), + "sigs": unemer_sigs, + }), + }) + ); +} diff --git a/src/bitcoind/interface.rs b/src/bitcoind/interface.rs index 9b13b95..24fa2b9 100644 --- a/src/bitcoind/interface.rs +++ b/src/bitcoind/interface.rs @@ -1,6 +1,6 @@ use crate::{bitcoind::BitcoindError, config::BitcoindConfig}; use revault_tx::bitcoin::{ - consensus::encode, BlockHash, OutPoint, Transaction as BitcoinTransaction, + consensus::encode, Amount, BlockHash, OutPoint, Transaction as BitcoinTransaction, }; use std::{ @@ -165,10 +165,6 @@ impl BitcoinD { thread::sleep(Duration::from_secs(1)); } jsonrpc::Error::Json(ref err) => { - log::error!( - "JSON serialization error when talking to bitcoind: '{}'", - err - ); // Weird. A JSON serialization error? Just try again but // fail fast anyways as it should not happen. 
log::error!( @@ -411,9 +407,15 @@ impl BitcoinD { .and_then(|bb| bb.as_str()) .and_then(|bb_str| BlockHash::from_str(bb_str).ok()) .expect("'gettxout' didn't return a valid 'bestblock' value"); + let value = res + .get("value") + .and_then(|v| v.as_f64()) + .and_then(|v| Amount::from_btc(v).ok()) + .expect("'gettxout' didn't return a valid 'value' entry"); Some(UtxoInfo { confirmations, bestblock, + value, }) } @@ -444,4 +446,5 @@ pub struct ChainTip { pub struct UtxoInfo { pub confirmations: i64, pub bestblock: BlockHash, + pub value: Amount, } diff --git a/src/config.rs b/src/config.rs index f1c35cb..ef9ab61 100644 --- a/src/config.rs +++ b/src/config.rs @@ -3,7 +3,7 @@ use std::{io, net::SocketAddr, path::PathBuf, str::FromStr, time::Duration}; use revault_net::noise::PublicKey as NoisePubkey; use revault_tx::{ bitcoin::{hashes::hex::FromHex, Network}, - scripts::{CpfpDescriptor, DepositDescriptor, UnvaultDescriptor}, + scripts::{CpfpDescriptor, DepositDescriptor, EmergencyAddress, UnvaultDescriptor}, }; use serde::{de, Deserialize, Deserializer}; @@ -57,6 +57,10 @@ fn daemon_default() -> bool { false } +fn listen_default() -> SocketAddr { + SocketAddr::from(([127, 0, 0, 1], 8383)) +} + /// Everything we need to know for talking to bitcoind serenely #[derive(Debug, Clone, Deserialize)] pub struct BitcoindConfig { @@ -74,7 +78,7 @@ pub struct BitcoindConfig { pub poll_interval_secs: Duration, } -#[derive(Debug, Deserialize)] +#[derive(Debug, Clone, Deserialize)] pub struct ScriptsConfig { #[serde(deserialize_with = "deserialize_fromstr")] pub deposit_descriptor: DepositDescriptor, @@ -82,14 +86,16 @@ pub struct ScriptsConfig { pub unvault_descriptor: UnvaultDescriptor, #[serde(deserialize_with = "deserialize_fromstr")] pub cpfp_descriptor: CpfpDescriptor, + // TODO: make it optional + pub emergency_address: EmergencyAddress, } /// Static informations we require to operate -#[derive(Debug, Deserialize)] +#[derive(Debug, Clone, Deserialize)] pub struct Config { /// 
Everything we need to know to talk to bitcoind pub bitcoind_config: BitcoindConfig, - /// Bitcoin Miniscript descriptors to be able to derive the transaction chain + /// Bitcoin Scripts to be able to derive the transaction chain pub scripts_config: ScriptsConfig, /// The Noise static public keys of "our" stakeholder #[serde(deserialize_with = "deserialize_noisepubkey")] @@ -112,8 +118,8 @@ pub struct Config { )] pub log_level: log::LevelFilter, /// to bind to - // TODO: have a default implem as in cosignerd - pub listen: Option, + #[serde(default = "listen_default")] + pub listen: SocketAddr, } #[derive(Debug)] @@ -211,6 +217,7 @@ mod tests { cpfp_descriptor = "wsh(thresh(1,pk(xpub6BaZSKgpaVvibu2k78QsqeDWXp92xLHZxiu1WoqLB9hKhsBf3miBUDX7PJLgSPvkj66ThVHTqdnbXpeu8crXFmDUd4HeM4s4miQS2xsv3Qb/*)))#cwycq5xu" deposit_descriptor = "wsh(multi(2,xpub6AHA9hZDN11k2ijHMeS5QqHx2KP9aMBRhTDqANMnwVtdyw2TDYRmF8PjpvwUFcL1Et8Hj59S3gTSMcUQ5gAqTz3Wd8EsMTmF3DChhqPQBnU/*,xpub6AaffFGfH6WXfm6pwWzmUMuECQnoLeB3agMKaLyEBZ5ZVfwtnS5VJKqXBt8o5ooCWVy2H87GsZshp7DeKE25eWLyd1Ccuh2ZubQUkgpiVux/*))#n3cj9mhy" unvault_descriptor = "wsh(andor(thresh(1,pk(xpub6BaZSKgpaVvibu2k78QsqeDWXp92xLHZxiu1WoqLB9hKhsBf3miBUDX7PJLgSPvkj66ThVHTqdnbXpeu8crXFmDUd4HeM4s4miQS2xsv3Qb/*)),and_v(v:multi(2,03b506a1dbe57b4bf48c95e0c7d417b87dd3b4349d290d2e7e9ba72c912652d80a,0295e7f5d12a2061f1fd2286cefec592dff656a19f55f4f01305d6aa56630880ce),older(4)),thresh(2,pkh(xpub6AHA9hZDN11k2ijHMeS5QqHx2KP9aMBRhTDqANMnwVtdyw2TDYRmF8PjpvwUFcL1Et8Hj59S3gTSMcUQ5gAqTz3Wd8EsMTmF3DChhqPQBnU/*),a:pkh(xpub6AaffFGfH6WXfm6pwWzmUMuECQnoLeB3agMKaLyEBZ5ZVfwtnS5VJKqXBt8o5ooCWVy2H87GsZshp7DeKE25eWLyd1Ccuh2ZubQUkgpiVux/*))))#532k8uvf" + emergency_address = "bc1q906h8q49vu20cyffqklnzcda20k7c3m83fltey344kz3lctlx9xqhf2v56" [bitcoind_config] network = "bitcoin" @@ -236,6 +243,7 @@ mod tests { deposit_descriptor = 
"wsh(multi(2,xpub6AHA9hZDN11k2ijHMeS5QqHx2KP9aMBRhTDqANMnwVtdyw2TDYRmF8PjpvwUFcL1Et8Hj59S3gTSMcUQ5gAqTz3Wd8EsMTmF3DChhqPQBnU/*,xpub6AaffFGfH6WXfm6pwWzmUMuECQnoLeB3agMKaLyEBZ5ZVfwtnS5VJKqXBt8o5ooCWVy2H87GsZshp7DeKE25eWLyd1Ccuh2ZubQUkgpiVux/*))#n3cj9mhy" # The checksum is for older(4) but it was replaced by older(42) unvault_descriptor = "wsh(andor(thresh(1,pk(xpub6BaZSKgpaVvibu2k78QsqeDWXp92xLHZxiu1WoqLB9hKhsBf3miBUDX7PJLgSPvkj66ThVHTqdnbXpeu8crXFmDUd4HeM4s4miQS2xsv3Qb/*)),and_v(v:multi(2,03b506a1dbe57b4bf48c95e0c7d417b87dd3b4349d290d2e7e9ba72c912652d80a,0295e7f5d12a2061f1fd2286cefec592dff656a19f55f4f01305d6aa56630880ce),older(42)),thresh(2,pkh(xpub6AHA9hZDN11k2ijHMeS5QqHx2KP9aMBRhTDqANMnwVtdyw2TDYRmF8PjpvwUFcL1Et8Hj59S3gTSMcUQ5gAqTz3Wd8EsMTmF3DChhqPQBnU/*),a:pkh(xpub6AaffFGfH6WXfm6pwWzmUMuECQnoLeB3agMKaLyEBZ5ZVfwtnS5VJKqXBt8o5ooCWVy2H87GsZshp7DeKE25eWLyd1Ccuh2ZubQUkgpiVux/*))))#532k8uvf" + emergency_address = "bc1q906h8q49vu20cyffqklnzcda20k7c3m83fltey344kz3lctlx9xqhf2v56" [bitcoind_config] network = "bitcoin" diff --git a/src/database/mod.rs b/src/database/mod.rs index 5af9ac2..8f0d3f2 100644 --- a/src/database/mod.rs +++ b/src/database/mod.rs @@ -4,9 +4,9 @@ use revault_tx::{ bitcoin::{secp256k1, util::bip32, Amount, Network, OutPoint}, scripts::{CpfpDescriptor, DepositDescriptor, UnvaultDescriptor}, }; -use schema::{DbInstance, DbSignature, DbVault, SigTxType, SCHEMA}; +use schema::{DbInstance, DbSignature, DbVault, SigTxType, DbAllocation, SCHEMA}; -use std::{convert::TryInto, fs, io, os::unix::fs::OpenOptionsExt, path, time}; +use std::{collections, convert::TryInto, fs, io, os::unix::fs::OpenOptionsExt, path, time}; use rusqlite::params; @@ -135,12 +135,12 @@ pub fn db_instance(db_path: &path::Path) -> Result { } /// Register a new vault to be watched. Atomically inserts the vault and the Emergency signatures. 
-fn db_new_vault( +pub fn db_new_vault( db_path: &path::Path, deposit_outpoint: &OutPoint, derivation_index: bip32::ChildNumber, amount: Amount, - emer_sigs: &[(secp256k1::PublicKey, secp256k1::Signature)], + emer_sigs: &collections::BTreeMap, ) -> Result<(), DatabaseError> { let instance_id = db_instance(db_path)?.id; let deposit_txid = deposit_outpoint.txid.to_vec(); @@ -177,48 +177,70 @@ fn db_new_vault( }) } -/// Mark a vault as being delegated, storing the signatures of its second-stage transactions -fn db_delegate_vault( +/// Mark a vault as being delegated. +pub fn db_delegate_vault( db_path: &path::Path, deposit_outpoint: &OutPoint, - unemer_sigs: &[(secp256k1::PublicKey, secp256k1::Signature)], - cancel_sigs: &[(secp256k1::PublicKey, secp256k1::Signature)], ) -> Result<(), DatabaseError> { let db_vault = db_vault(db_path, deposit_outpoint)? .ok_or_else(|| DatabaseError::UnknownVault(Box::new(*deposit_outpoint)))?; - assert!( - unemer_sigs.len() > 0, - "Registering a vault without UnvaultEmergency signature" - ); - assert!( - cancel_sigs.len() > 0, - "Registering a vault without Cancel signature" - ); - db_exec(db_path, |db_tx| { db_tx.execute( "UPDATE vaults SET delegated = 1 WHERE id = (?1)", params![db_vault.id], )?; - for (key, sig) in unemer_sigs { + Ok(()) + }) +} + +/// Store signatures for a Cancel transaction. +pub fn db_store_cancel_sigs( + db_path: &path::Path, + deposit_outpoint: &OutPoint, + sigs: &collections::BTreeMap, +) -> Result<(), DatabaseError> { + let db_vault = db_vault(db_path, deposit_outpoint)? 
+ .ok_or_else(|| DatabaseError::UnknownVault(Box::new(*deposit_outpoint)))?; + + assert!(sigs.len() > 0, "Storing no signature"); + + db_exec(db_path, |db_tx| { + for (key, sig) in sigs { db_tx.execute( "INSERT INTO signatures (vault_id, tx_type, pubkey, signature) VALUES (?1, ?2, ?3, ?4)", params![ db_vault.id, - SigTxType::UnvaultEmergency as i64, + SigTxType::Cancel as i64, key.serialize().to_vec(), sig.serialize_der().to_vec() ], )?; } - for (key, sig) in cancel_sigs { + + Ok(()) + }) +} + +/// Store signatures for an UnvaultEmergency transaction. +pub fn db_store_unemer_sigs( + db_path: &path::Path, + deposit_outpoint: &OutPoint, + sigs: &collections::BTreeMap, +) -> Result<(), DatabaseError> { + let db_vault = db_vault(db_path, deposit_outpoint)? + .ok_or_else(|| DatabaseError::UnknownVault(Box::new(*deposit_outpoint)))?; + + assert!(sigs.len() > 0, "Storing no signature"); + + db_exec(db_path, |db_tx| { + for (key, sig) in sigs { db_tx.execute( "INSERT INTO signatures (vault_id, tx_type, pubkey, signature) VALUES (?1, ?2, ?3, ?4)", params![ db_vault.id, - SigTxType::Cancel as i64, + SigTxType::UnvaultEmergency as i64, key.serialize().to_vec(), sig.serialize_der().to_vec() ], @@ -293,7 +315,7 @@ fn db_vaults(db_path: &path::Path) -> Result, DatabaseError> { } /// Get a vault in the database by its deposit outpoint -fn db_vault( +pub fn db_vault( db_path: &path::Path, deposit_outpoint: &OutPoint, ) -> Result, DatabaseError> { @@ -345,7 +367,7 @@ fn db_sigs_by_type( } /// Get all the Emergency signatures of this vault -fn db_emergency_signatures( +pub fn db_emergency_signatures( db_path: &path::Path, vault_id: i64, ) -> Result, DatabaseError> { @@ -353,7 +375,7 @@ fn db_emergency_signatures( } /// Get all the UnvaultEmergency signatures of this vault -fn db_unvault_emergency_signatures( +pub fn db_unvault_emergency_signatures( db_path: &path::Path, vault_id: i64, ) -> Result, DatabaseError> { @@ -368,6 +390,76 @@ pub fn db_cancel_signatures( 
db_sigs_by_type(db_path, vault_id, SigTxType::Cancel) } +/// Get the allocation of a given outpoint if it is in the database +pub fn db_allocation( + db_path: &path::Path, + wallet_outpoint: &OutPoint, +) -> Result, DatabaseError> { + let txid = wallet_outpoint.txid.to_vec(); + let vout = wallet_outpoint.vout; + + db_query( + db_path, + "SELECT * FROM allocation WHERE txid = (?1) AND vout = (?2)", + params![txid, vout], + |row| row.try_into(), + ) + .map(|mut rows| rows.pop()) +} + + +/// Update the allocation table with a collection of new wallet outpoints for a vault +pub fn db_new_allocation( + db_path: &path::Path, + wallet_outpoints: &[OutPoint], + vault_id: i64, +) -> Result<(), DatabaseError> { + db_exec(db_path, |db_tx| { + for wallet_outpoint in wallet_outpoints { + let txid = wallet_outpoint.txid.to_vec(); + let vout = wallet_outpoint.vout; + db_tx.execute( + "INSERT INTO allocation (txid, vout, vault_id) + VALUES (?1, ?2, ?3)", + params![txid, vout, vault_id], + )?; + } + + Ok(()) + }) +} + +// Delete specific allocation row associated with the given wallet outpoint +pub fn db_del_allocation( + db_path: &path::Path, + wallet_outpoint: &OutPoint, +) -> Result<(), DatabaseError> { + let txid = wallet_outpoint.txid.to_vec(); + let vout = wallet_outpoint.vout; + + db_exec(db_path, |db_tx| { + db_tx.execute( + "DELETE FROM allocation WHERE txid = (?1) AND vout = (?2)", + params![txid, vout])?; + + Ok(()) + }) +} + +// Clear all allocation rows associated with the given vault id +pub fn db_clear_vault_allocation( + db_path: &path::Path, + vault_id: i64, +) -> Result<(), DatabaseError> { + db_exec(db_path, |db_tx| { + db_tx.execute( + "DELETE FROM allocation WHERE vault_id = (?1)", + params![vault_id])?; + + Ok(()) + }) +} + // Create the db file with RW permissions only for the user fn create_db_file(db_path: &path::Path) -> Result<(), DatabaseError> { let mut options = fs::OpenOptions::new(); @@ -661,36 +753,36 @@ mod tests { .unwrap(); let deriv_a = 
bip32::ChildNumber::from(32); let amount_a = Amount::from_sat(i64::MAX as u64 - 100_000); - let emer_sigs_a = [ + let emer_sigs_a: collections::BTreeMap = [ dummy_sig!(secp256k1::Signature::from_str("304402200b4025e855ac108cf4f5114c3a8af9f8122023ffa971c5de8a8bc3f67d18749902202cc9b7d36f57dbe70f8826fac13838c6757fe18fb4572328c76dd5b55e452528").unwrap()), dummy_sig!(secp256k1::Signature::from_str("3045022100cc110b2dc66b9a116f50c61548d33f589d00ef57fb2fa784100ffb84e1577faf02206eec4e600f76f347b2014752a3619df8b2406fa61a34f0ec01ce4900f0b22083").unwrap()) - ]; - let unemer_sigs_a = [ + ].iter().copied().collect(); + let unemer_sigs_a: collections::BTreeMap = [ dummy_sig!(secp256k1::Signature::from_str("30450221008f4abfaa7c22adbf621e46f520fea81779b4fce81c22889354f8044336a542ff02205b5bf7c7a677414fdf20f5192c51f0fd34a8447b709a5d0f7df6e6c8d5dfbeff").unwrap()), dummy_sig!(secp256k1::Signature::from_str("3045022100a1da27080b26a6a328a26dfe0c076931ea5e22ad06e31b867a2ccd11d57e912102203ccb9388e104e13a81bc02c700d214278541ff8da67f27359b7bbb0e6eea6a41").unwrap()) - ]; - let cancel_sigs_a = [ + ].iter().copied().collect(); + let cancel_sigs_a: collections::BTreeMap = [ dummy_sig!(secp256k1::Signature::from_str("304502210089a1b4a09cafb8f26d6355c5ad51c686d8796d3a833945de35687085b1cd048e022068f6ac3fd4d3909f5d3cf93b0cf6538edfbafdd0b36d858c073e4b9b4137a027").unwrap()), dummy_sig!(secp256k1::Signature::from_str("3044022009334cec178a66aef6a473fc9d7608cc2b53495d433920262ba50e8a2947bba202207a0eb002ebe2fc0774adbe9b28885d00758d3043497aae414884bbc8cf7c84dc").unwrap()), - ]; + ].iter().copied().collect(); let outpoint_b = OutPoint::from_str( "69a747cd1ea7ce4904e6173b06a4a83e0df173661046e70f5128b3c9bef8241d:18", ) .unwrap(); let deriv_b = bip32::ChildNumber::from((1 << 31) - 1); let amount_b = Amount::from_sat(1298766); - let emer_sigs_b = [ + let emer_sigs_b: collections::BTreeMap = [ 
dummy_sig!(secp256k1::Signature::from_str("304402207d1d99b6164597cee75baa0de60d4988f298fbc1857ca67102996287d8ccc76402207d9a2997a79c895d34d9bc450219b988d40cc2054f25a9a4e582666b96dc2444").unwrap()), dummy_sig!(secp256k1::Signature::from_str("3044022031c4547c4f3688b02ff749c6830579318d4ba24bb832dffff5156b2bb751480c022060f6745664612b70e8acb3db3e00af60952bda853891edc6d98a83825e92aeb6").unwrap()) - ]; - let unemer_sigs_b = [ + ].iter().copied().collect(); + let unemer_sigs_b: collections::BTreeMap = [ dummy_sig!(secp256k1::Signature::from_str("30450221009c93c095d2d8cb7f7918bc6b43de451f146eec07d8569a77eed2d14d25fafee50220656328e7e74953c82c4af62fd809bea903de1d9b92de8f4d02450f5d9a2d02ab").unwrap()), dummy_sig!(secp256k1::Signature::from_str("3045022100ca96469270b45e4be24c70115de4545b975c27b60c007b4668cc6edb97944ee302203a078a1cd7d36c6293635dc9604bb7ced31d5a98c8a01a2f7fb2da533245d074").unwrap()) - ]; - let cancel_sigs_b = [ + ].iter().copied().collect(); + let cancel_sigs_b: collections::BTreeMap = [ dummy_sig!(secp256k1::Signature::from_str("304402207e17f075edacc44be94263caa38e0b94dcffd65f2e76159def578d61dd82cbbe02202f300241721dfa8334cc8835d422e8928a7a87301be094e8c296ecdf945c9d71").unwrap()), dummy_sig!(secp256k1::Signature::from_str("30440220398b5d0a75911f69c37c71e929727d16bf48a6b6cc46b1db0d6097f91eb7ecfa0220379c43fc3db9b70b2d3d5d945f8d51ae2660bdedd94b8468abb92c7f2c1989a8").unwrap()), - ]; + ].iter().copied().collect(); // We can insert and query no-yet-delegated vaults db_new_vault(&db_path, &outpoint_a, deriv_a, amount_a, &emer_sigs_a).unwrap(); @@ -740,16 +832,16 @@ mod tests { .unwrap() .into_iter() .map(|db_sig| (db_sig.pubkey, db_sig.signature)) - .collect::>(), - emer_sigs_a.to_vec() + .collect::>(), + emer_sigs_a ); assert_eq!( db_emergency_signatures(&db_path, 2) .unwrap() .into_iter() .map(|db_sig| (db_sig.pubkey, db_sig.signature)) - .collect::>(), - emer_sigs_b.to_vec() + .collect::>(), + emer_sigs_b ); // We can't insert a vault twice @@ -763,7 +855,7 @@ mod 
tests { assert!(db_vault(&db_path, &uk_outpoint).unwrap().is_none()); // We can delegate the vaults, they'll be marked as such - db_delegate_vault(&db_path, &outpoint_a, &unemer_sigs_a, &cancel_sigs_a).unwrap(); + db_delegate_vault(&db_path, &outpoint_a).unwrap(); assert_eq!( db_vault(&db_path, &outpoint_a).unwrap().unwrap(), DbVault { @@ -777,7 +869,7 @@ mod tests { revoc_height: None, } ); - db_delegate_vault(&db_path, &outpoint_b, &unemer_sigs_b, &cancel_sigs_b).unwrap(); + db_delegate_vault(&db_path, &outpoint_b).unwrap(); assert_eq!( db_vault(&db_path, &outpoint_b).unwrap().unwrap(), DbVault { @@ -796,38 +888,42 @@ mod tests { db_vaults(&db_path).unwrap() ); - // We can get the signatures of the second-stage transactions for these vaults now + // We can store and get the signatures of the second-stage transactions for these vaults + db_store_unemer_sigs(&db_path, &outpoint_a, &unemer_sigs_a).unwrap(); assert_eq!( db_unvault_emergency_signatures(&db_path, 1) .unwrap() .into_iter() .map(|db_sig| (db_sig.pubkey, db_sig.signature)) - .collect::>(), - unemer_sigs_a.to_vec() + .collect::>(), + unemer_sigs_a ); + db_store_cancel_sigs(&db_path, &outpoint_a, &cancel_sigs_a).unwrap(); assert_eq!( db_cancel_signatures(&db_path, 1) .unwrap() .into_iter() .map(|db_sig| (db_sig.pubkey, db_sig.signature)) - .collect::>(), - cancel_sigs_a.to_vec() + .collect::>(), + cancel_sigs_a ); + db_store_unemer_sigs(&db_path, &outpoint_b, &unemer_sigs_b).unwrap(); assert_eq!( db_unvault_emergency_signatures(&db_path, 2) .unwrap() .into_iter() .map(|db_sig| (db_sig.pubkey, db_sig.signature)) - .collect::>(), - unemer_sigs_b.to_vec() + .collect::>(), + unemer_sigs_b ); + db_store_cancel_sigs(&db_path, &outpoint_b, &cancel_sigs_b).unwrap(); assert_eq!( db_cancel_signatures(&db_path, 2) .unwrap() .into_iter() .map(|db_sig| (db_sig.pubkey, db_sig.signature)) - .collect::>(), - cancel_sigs_b.to_vec() + .collect::>(), + cancel_sigs_b ); // And if we mark them as either 'to cancel' or 'to let 
go through', they won't be @@ -903,4 +999,68 @@ mod tests { // Cleanup fs::remove_file(&db_path).unwrap(); } -} + + // Sanity check we can create new allocations and delete them individually by outpoint + // or collectively by vault + #[test] + fn db_allocate() { + let db_path = get_db(); + let outpoint_a = OutPoint::from_str( + "5bebdb97b54e2268b3fccd4aeea99419d87a92f88f27e906ceea5e863946a731:0", + ).unwrap(); + let outpoint_b = OutPoint::from_str( + "69a747cd1ea7ce4904e6173b06a4a83e0df173661046e70f5128b3c9bef8241d:0", + ) + .unwrap(); + let outpoint_c = OutPoint::from_str( + "5bebdb97b54e2268b3fccd4aeea99419d87a92f88f27e906ceea5e863946a731:1", + ) + .unwrap(); + let vault_outpoint = OutPoint::from_str( + "69a747cd1ea7ce4904e6173b06a4a83e0df173661046e70f5128b3c9bef8241d:1", + ) + .unwrap(); + let vault_id: i64 = 1; + // We'll fail to insert if no vault exists with the given id + db_new_allocation(&db_path, &[outpoint_a], vault_id).unwrap_err(); + + // Create the vault + let deriv = bip32::ChildNumber::from(32); + let amount = Amount::from_sat(i64::MAX as u64 - 100_000); + let emer_sigs: collections::BTreeMap = [ + dummy_sig!(secp256k1::Signature::from_str("304402200b4025e855ac108cf4f5114c3a8af9f8122023ffa971c5de8a8bc3f67d18749902202cc9b7d36f57dbe70f8826fac13838c6757fe18fb4572328c76dd5b55e452528").unwrap()), + dummy_sig!(secp256k1::Signature::from_str("3045022100cc110b2dc66b9a116f50c61548d33f589d00ef57fb2fa784100ffb84e1577faf02206eec4e600f76f347b2014752a3619df8b2406fa61a34f0ec01ce4900f0b22083").unwrap()) + ].iter().copied().collect(); + db_new_vault(&db_path, &vault_outpoint, deriv, amount, &emer_sigs).unwrap(); + + // Get a valid vault_id + let vault_id = db_vault(&db_path, &vault_outpoint).unwrap().unwrap().id; + + // Now we can insert allocations and query an allocation + db_new_allocation(&db_path, &[outpoint_a, outpoint_b, outpoint_c], vault_id).unwrap(); + assert_eq!( + db_allocation(&db_path, &outpoint_a).unwrap().unwrap(), + DbAllocation { + id: 1, + 
wallet_outpoint: outpoint_a, + vault_id: vault_id, + } + ); + + // We can't insert the same allocation twice + db_new_allocation(&db_path, &[outpoint_a], vault_id).unwrap_err(); + + // We can delete the allocation given the outpoint + db_del_allocation(&db_path, &outpoint_a).unwrap(); + assert!(db_allocation(&db_path, &outpoint_a).unwrap().is_none()); + + // We can clear all allocations for the given vault + assert!(db_allocation(&db_path, &outpoint_b).unwrap().is_some()); + assert!(db_allocation(&db_path, &outpoint_c).unwrap().is_some()); + db_clear_vault_allocation(&db_path, vault_id).unwrap(); + + // Now there's no remaining allocations + assert!(db_allocation(&db_path, &outpoint_b).unwrap().is_none()); + assert!(db_allocation(&db_path, &outpoint_c).unwrap().is_none()); + } +} \ No newline at end of file diff --git a/src/database/schema.rs b/src/database/schema.rs index fb0069d..ac2db71 100644 --- a/src/database/schema.rs +++ b/src/database/schema.rs @@ -72,6 +72,18 @@ CREATE TABLE signatures ( ON UPDATE RESTRICT ON DELETE RESTRICT ); + +/* Allocation map linking unspent outputs from the WT wallet to vaults it is watching + */ +CREATE TABLE allocation ( + id INTEGER PRIMARY KEY NOT NULL, + txid BLOB NOT NULL, + vout INTEGER NOT NULL, + vault_id INTEGER NOT NULL, + FOREIGN KEY (vault_id) REFERENCES vaults (id) + CONSTRAINT unique_alloc UNIQUE (txid, vout, vault_id) +); + "; /// A row in the "instances" table @@ -243,3 +255,36 @@ impl TryFrom<&rusqlite::Row<'_>> for DbSignature { }) } } + + +/// A row in the "allocation" table +#[derive(Clone, Debug, PartialEq)] +pub struct DbAllocation { + pub id: i64, + pub wallet_outpoint: OutPoint, + pub vault_id: i64, +} + +impl TryFrom<&rusqlite::Row<'_>> for DbAllocation { + type Error = rusqlite::Error; + + fn try_from(row: &rusqlite::Row) -> Result { + let id: i64 = row.get(0)?; + let txid: Vec = row.get(1)?; + let txid: Txid = encode::deserialize(&txid) + .expect("Insane db: invalid txid in allocation row"); + let vout: 
u32 = row.get(2)?; + let wallet_outpoint = OutPoint { + txid, + vout, + }; + + let vault_id = row.get(3)?; + + Ok(DbAllocation { + id, + wallet_outpoint, + vault_id, + }) + } +} \ No newline at end of file diff --git a/src/listener.rs b/src/listener.rs new file mode 100644 index 0000000..0d9aab7 --- /dev/null +++ b/src/listener.rs @@ -0,0 +1,953 @@ +use crate::{ + bitcoind::{interface::BitcoinD, BitcoindError}, + config::{Config, ScriptsConfig}, + database::{ + db_delegate_vault, db_new_vault, db_store_cancel_sigs, db_store_unemer_sigs, + db_unvault_emergency_signatures, db_vault, DatabaseError, + }, +}; + +use revault_net::{ + message::{ + watchtower::{Sig, SigResult}, + RequestParams, ResponseResult, + }, + noise::SecretKey as NoisePrivkey, +}; +use revault_tx::{ + bitcoin::{secp256k1, OutPoint, Txid}, + transactions::{transaction_chain, RevaultTransaction}, +}; + +use std::{io, net::TcpListener, path, sync}; + +#[derive(Debug)] +pub enum ListenerError { + Io(io::Error), + Db(DatabaseError), + Tx(revault_tx::Error), + BitcoinD(BitcoindError), + UnknownTxid(Txid), + UnknownOutpoint(OutPoint), + UnexpectedEmerSig(OutPoint), + UnexpectedUnEmerSig(OutPoint), + UnexpectedCancelSig(OutPoint), +} + +impl std::fmt::Display for ListenerError { + fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { + match self { + Self::Io(ref e) => write!(f, "io error: '{}'", e), + Self::Db(ref e) => write!(f, "database error: '{}'", e), + Self::Tx(ref e) => write!(f, "transaction handling error: '{}'", e), + Self::BitcoinD(ref e) => write!(f, "bitcoind communication error: '{}'", e), + Self::UnknownOutpoint(ref o) => write!(f, "unknown outpoint: '{}'", o), + Self::UnknownTxid(ref t) => write!(f, "unknown txid: '{}'", t), + Self::UnexpectedEmerSig(ref o) => write!(f, "received an Emergency signature for an \ + existing vault ('{}')", o), + Self::UnexpectedUnEmerSig(ref o) => write!(f, "received an UnvaultEmergency signature for a \ + delegated vault or before receiving 
Emergency \ + signatures for '{}'", o), + Self::UnexpectedCancelSig(ref o) => write!(f, "received a Cancel signature for a \ + delegated vault or before receiving both Emergency \ + and UnEmer signatures for '{}'", o), + } + } +} + +impl std::error::Error for ListenerError {} + +impl From for ListenerError { + fn from(e: io::Error) -> Self { + Self::Io(e) + } +} + +impl From for ListenerError { + fn from(e: DatabaseError) -> Self { + Self::Db(e) + } +} + +impl From for ListenerError { + fn from(e: revault_tx::Error) -> Self { + Self::Tx(e) + } +} + +impl From for ListenerError { + fn from(e: revault_tx::error::InputSatisfactionError) -> Self { + Self::Tx(e.into()) + } +} + +impl From for ListenerError { + fn from(e: BitcoindError) -> Self { + Self::BitcoinD(e) + } +} + +// TODO: make Emergency sharing optional +fn process_sig_message( + db_path: &path::Path, + scripts_config: &ScriptsConfig, + bitcoind: &sync::Arc, + msg: Sig, + secp: &secp256k1::Secp256k1, +) -> Result { + let deposit_utxo = bitcoind + .utxoinfo(&msg.deposit_outpoint) + .ok_or(ListenerError::UnknownOutpoint(msg.deposit_outpoint))?; + let (_, mut cancel_tx, mut emer_tx, mut unemer_tx) = transaction_chain( + msg.deposit_outpoint, + deposit_utxo.value, + &scripts_config.deposit_descriptor, + &scripts_config.unvault_descriptor, + &scripts_config.cpfp_descriptor, + msg.derivation_index, + scripts_config.emergency_address.clone(), + 0, /* FIXME: remove from API */ + secp, + )?; + + if msg.txid == emer_tx.txid() { + // Receiving the signatures of an Emergency tx means they just signed the revocation + // transactions. The vault must not exist yet. + if db_vault(db_path, &msg.deposit_outpoint)?.is_some() { + return Err(ListenerError::UnexpectedEmerSig(msg.deposit_outpoint)); + } + + // Check that the sig they gave us are valid, and enough to make the transaction valid. + for (key, sig) in msg.signatures.iter() { + // Note this checks for ALL|ACP. 
+ emer_tx.add_emer_sig(*key, *sig, secp)?; + } + emer_tx.finalize(secp)?; + + // Ok, we have enough info to be able to broadcast it. Store it as a not-yet-delegated + // vault. + db_new_vault( + db_path, + &msg.deposit_outpoint, + msg.derivation_index, + deposit_utxo.value, + &msg.signatures, + )?; + log::debug!("Registered a new vault at '{}'", &msg.deposit_outpoint); + + Ok(SigResult { + ack: true, + txid: msg.txid, + }) + } else if msg.txid == unemer_tx.txid() { + // If we are receiving the signatures of an UnEmer tx they must have already sent the sigs + // for the Emergency one. + let db_vault = db_vault(db_path, &msg.deposit_outpoint)? + .ok_or(ListenerError::UnexpectedUnEmerSig(msg.deposit_outpoint))?; + if db_vault.delegated || !db_unvault_emergency_signatures(db_path, db_vault.id)?.is_empty() + { + return Err(ListenerError::UnexpectedUnEmerSig(msg.deposit_outpoint)); + } + + // Check that the sig they gave us are valid, and enough to make the transaction valid + // before storing it. + for (key, sig) in msg.signatures.iter() { + unemer_tx.add_emer_sig(*key, *sig, secp)?; + } + unemer_tx.finalize(secp)?; + db_store_unemer_sigs(db_path, &msg.deposit_outpoint, &msg.signatures)?; + log::debug!( + "Got UnEmer transaction signatures for vault at '{}'", + &msg.deposit_outpoint + ); + + Ok(SigResult { + ack: true, + txid: msg.txid, + }) + } else if msg.txid == cancel_tx.txid() { + // Receiving the signatures of a Cancel tx means they just delegated this vault and we need + // to start watching for Unvault broadcasts. + let db_vault = db_vault(db_path, &msg.deposit_outpoint)? + .ok_or(ListenerError::UnexpectedCancelSig(msg.deposit_outpoint))?; + if db_vault.delegated { + return Err(ListenerError::UnexpectedCancelSig(msg.deposit_outpoint)); + } + // We check their validity before storing them hence it's enough to just check if they are + // present. 
+ if db_unvault_emergency_signatures(db_path, db_vault.id)?.is_empty() { + return Err(ListenerError::UnexpectedCancelSig(msg.deposit_outpoint)); + } + + // Check that the sig they gave us are valid, and enough to make the transaction valid. + for (key, sig) in msg.signatures.iter() { + cancel_tx.add_cancel_sig(*key, *sig, secp)?; + } + cancel_tx.finalize(secp)?; + + // Ok, store those signatures and mark the vault as being delegated if it's not already + db_store_cancel_sigs(db_path, &msg.deposit_outpoint, &msg.signatures)?; + if !db_vault.delegated { + db_delegate_vault(db_path, &msg.deposit_outpoint)?; + } + log::debug!( + "Got Cancel transaction signatures for vault at '{}'. Now watching for Unvault broadcast.", + &msg.deposit_outpoint + ); + + Ok(SigResult { + ack: true, + txid: msg.txid, + }) + } else { + Err(ListenerError::UnknownTxid(msg.txid)) + } +} + +/// Wait for connections from the stakeholder on the configured interface and process `sig` messages. +pub fn listener_main( + db_path: &path::Path, + config: &Config, + bitcoind: sync::Arc, + noise_privkey: &NoisePrivkey, +) -> Result<(), ListenerError> { + let host = config.listen; + let listener = TcpListener::bind(host)?; + let secp_ctx = secp256k1::Secp256k1::verification_only(); + + log::info!("Listener thread started."); + + // There is only going to be a small amount of ephemeral connections, there is no need for + // complexity so just sequentially process each message. + loop { + let mut kk_stream = match revault_net::transport::KKTransport::accept( + &listener, + noise_privkey, + &[config.stakeholder_noise_key], + ) { + Ok(s) => s, + Err(e) => { + log::error!("Listener: Error during handshake: '{}'", e); + continue; + } + }; + log::trace!("New connection"); + + // Handle all messages from this connection. 
+ loop { + match kk_stream.read_req(|req_params| match req_params { + RequestParams::WtSig(sig_msg) => { + log::debug!("Decoded request: {:#?}", sig_msg); + + let txid = sig_msg.txid; + match process_sig_message( + db_path, + &config.scripts_config, + &bitcoind, + sig_msg, + &secp_ctx, + ) { + Ok(res) => { + log::debug!("Decoded response: {:#?}", res); + Some(ResponseResult::WtSig(res)) + } + Err(e) => { + log::error!("Error when processing 'sig' message: '{}'.", e); + Some(ResponseResult::WtSig(SigResult { ack: false, txid })) + } + } + } + _ => { + log::error!("Unexpected message: '{:?}'", req_params); + None + } + }) { + Ok(buf) => buf, + Err(revault_net::Error::Transport(e)) => { + log::error!( + "Transport error trying to read request: '{}'. Dropping connection.", + e + ); + break; + } + Err(e) => { + log::error!("Error handling request: '{}'", e); + continue; + } + } + } + } +} + +#[cfg(test)] +mod tests { + use std::{ + collections::BTreeMap, + fs, + io::{BufRead, BufReader, Write}, + iter::repeat_with, + net, path, + str::FromStr, + thread, time, + }; + + use super::*; + + use crate::{ + config::BitcoindConfig, + database::{db_cancel_signatures, db_emergency_signatures, setup_db}, + }; + use revault_tx::{ + bitcoin::{util::bip32, Address, Amount, Network, SigHashType, Txid}, + miniscript::descriptor::{DescriptorPublicKey, DescriptorXKey, Wildcard}, + scripts::{CpfpDescriptor, DepositDescriptor, EmergencyAddress, UnvaultDescriptor}, + }; + + fn get_random_privkey(rng: &mut fastrand::Rng) -> bip32::ExtendedPrivKey { + let rand_bytes: Vec = repeat_with(|| rng.u8(..)).take(64).collect(); + + bip32::ExtendedPrivKey::new_master(Network::Bitcoin, &rand_bytes) + .unwrap_or_else(|_| get_random_privkey(rng)) + } + + fn get_participants_sets( + n_stk: usize, + n_man: usize, + secp: &secp256k1::Secp256k1, + ) -> ( + (Vec, Vec), + (Vec, Vec), + (Vec, Vec), + ) { + let mut rng = fastrand::Rng::new(); + + let managers_priv = (0..n_man) + .map(|_| get_random_privkey(&mut 
rng)) + .collect::>(); + let managers = managers_priv + .iter() + .map(|xpriv| { + DescriptorPublicKey::XPub(DescriptorXKey { + origin: None, + xkey: bip32::ExtendedPubKey::from_private(&secp, &xpriv), + derivation_path: bip32::DerivationPath::from(vec![]), + wildcard: Wildcard::Unhardened, + }) + }) + .collect::>(); + + let stakeholders_priv = (0..n_stk) + .map(|_| get_random_privkey(&mut rng)) + .collect::>(); + let stakeholders = stakeholders_priv + .iter() + .map(|xpriv| { + DescriptorPublicKey::XPub(DescriptorXKey { + origin: None, + xkey: bip32::ExtendedPubKey::from_private(&secp, &xpriv), + derivation_path: bip32::DerivationPath::from(vec![]), + wildcard: Wildcard::Unhardened, + }) + }) + .collect::>(); + + let cosigners_priv = (0..n_stk) + .map(|_| get_random_privkey(&mut rng)) + .collect::>(); + let cosigners = cosigners_priv + .iter() + .map(|xpriv| { + DescriptorPublicKey::XPub(DescriptorXKey { + origin: None, + xkey: bip32::ExtendedPubKey::from_private(&secp, &xpriv), + derivation_path: bip32::DerivationPath::from(vec![]), + wildcard: Wildcard::Unhardened, + }) + }) + .collect::>(); + + ( + (managers_priv, managers), + (stakeholders_priv, stakeholders), + (cosigners_priv, cosigners), + ) + } + + // Read all bytes from the socket until the end of a JSON object, good enough approximation. 
+ fn read_til_json_end(stream: &mut net::TcpStream) { + stream + .set_read_timeout(Some(time::Duration::from_secs(5))) + .unwrap(); + let mut reader = BufReader::new(stream); + loop { + let mut line = String::new(); + reader.read_line(&mut line).unwrap(); + + if line.starts_with("Authorization") { + let mut buf = vec![0; 256]; + reader.read_until(b'}', &mut buf).unwrap(); + return; + } + } + } + + // Respond to the two "echo" sent at startup to sanity check the connection + fn complete_bitcoind_sanity_check(server: &net::TcpListener) { + let echo_resp = + "HTTP/1.1 200\n\r\n{\"jsonrpc\":\"2.0\",\"id\":1,\"result\":[]}\n".as_bytes(); + + // Read the first echo, respond to it + let (mut stream, _) = server.accept().unwrap(); + read_til_json_end(&mut stream); + stream.write_all(echo_resp).unwrap(); + stream.flush().unwrap(); + + // Read the second echo, respond to it + let (mut stream, _) = server.accept().unwrap(); + read_til_json_end(&mut stream); + stream.write_all(echo_resp).unwrap(); + stream.flush().unwrap(); + } + + fn dummy_bitcoind(deposit_amount: Amount) -> BitcoinD { + let network = Network::Bitcoin; + let cookie: path::PathBuf = + format!("scratch_test_{:?}.cookie", thread::current().id()).into(); + // Will overwrite should it exist already + fs::write(&cookie, &[0; 32]).unwrap(); + let addr: net::SocketAddr = + net::SocketAddrV4::new(net::Ipv4Addr::new(127, 0, 0, 1), 0).into(); + let server = net::TcpListener::bind(&addr).unwrap(); + let addr = server.local_addr().unwrap(); + let bitcoind_config = BitcoindConfig { + network, + addr, + cookie_path: cookie.clone(), + poll_interval_secs: time::Duration::from_secs(2), + }; + + thread::spawn({ + move || { + complete_bitcoind_sanity_check(&server); + + // The listener is always only going to poll for gettxout (and only care about the + // value of the utxo). So, just listen and answer a dummy gettxout with the right + // deposit value forever. 
+ loop { + let gettxout_resp = + format!("HTTP/1.1 200\n\r\n{{\"jsonrpc\":\"2.0\",\"id\":1,\"result\":{{\"bestblock\":\"000000000000000000143e36c08e4686f2c7e317bce11c192106ca5ae44207cd\",\"confirmations\":1,\"value\":{}}}}}\n", deposit_amount.as_btc()); + let gettxout_resp = gettxout_resp.as_bytes(); + + let (mut stream, _) = server.accept().unwrap(); + read_til_json_end(&mut stream); + stream.write_all(gettxout_resp).unwrap(); + stream.flush().unwrap(); + } + } + }); + let bitcoind_client = + BitcoinD::new(&bitcoind_config, "dummy_filename".to_string()).unwrap(); + // We don't need it anymore + fs::remove_file(&cookie).unwrap_or_else(|_| ()); + + bitcoind_client + } + + // Sanity check `sig` message processing + #[test] + fn sig_message() { + let secp_ctx = secp256k1::Secp256k1::new(); + let db_path: path::PathBuf = + format!("scratch_test_{:?}.sqlite3", thread::current().id()).into(); + + let ((_, managers), (stakeholders_priv, stakeholders), (_, cosigners)) = + get_participants_sets(3, 2, &secp_ctx); + + let deposit_descriptor = DepositDescriptor::new(stakeholders.clone()).unwrap(); + let cpfp_descriptor = CpfpDescriptor::new(managers.clone()).unwrap(); + let unvault_descriptor = + UnvaultDescriptor::new(stakeholders.clone(), managers, 2, cosigners, 2021).unwrap(); + let emergency_address = EmergencyAddress::from( + Address::from_str("bc1q906h8q49vu20cyffqklnzcda20k7c3m83fltey344kz3lctlx9xqhf2v56") + .unwrap(), + ) + .unwrap(); + let scripts_config = ScriptsConfig { + deposit_descriptor, + unvault_descriptor, + cpfp_descriptor, + emergency_address, + }; + + // Remove any potential leftover from a previous crashed session and create the database + fs::remove_file(&db_path).unwrap_or_else(|_| ()); + setup_db( + &db_path, + &scripts_config.deposit_descriptor, + &scripts_config.unvault_descriptor, + &scripts_config.cpfp_descriptor, + Network::Bitcoin, + ) + .unwrap(); + + // Given a new vault deposit at this outpoint try different scenarii + let deposit_outpoint = 
OutPoint::from_str( + "f21885abfb5a0706d0d56542fbff2483e455a788075c85f567c07df775f3742b:0", + ) + .unwrap(); + let deposit_value = Amount::from_sat(8765432); + let derivation_index = bip32::ChildNumber::from(45678); + let (_, cancel, emer, unemer) = transaction_chain( + deposit_outpoint, + deposit_value, + &scripts_config.deposit_descriptor, + &scripts_config.unvault_descriptor, + &scripts_config.cpfp_descriptor, + derivation_index, + scripts_config.emergency_address.clone(), + 0, + &secp_ctx, + ) + .unwrap(); + + // This starts a dummy server to answer our gettxout requests + let bitcoind = sync::Arc::from(dummy_bitcoind(deposit_value)); + + // Invalid txid, no sigs + let msg = Sig { + signatures: BTreeMap::new(), + txid: Txid::default(), + deposit_outpoint, + derivation_index, + }; + assert!( + process_sig_message(&db_path, &scripts_config, &bitcoind, msg, &secp_ctx) + .unwrap_err() + .to_string() + .contains("unknown txid"), + ); + + // An UnEmer before an Emer + let msg = Sig { + signatures: BTreeMap::new(), + txid: unemer.txid(), + deposit_outpoint, + derivation_index, + }; + assert!( + process_sig_message(&db_path, &scripts_config, &bitcoind, msg, &secp_ctx) + .unwrap_err() + .to_string() + .contains("received an UnvaultEmergency") + ); + + // A Cancel before an Emer + let msg = Sig { + signatures: BTreeMap::new(), + txid: cancel.txid(), + deposit_outpoint, + derivation_index, + }; + assert!( + process_sig_message(&db_path, &scripts_config, &bitcoind, msg, &secp_ctx) + .unwrap_err() + .to_string() + .contains("received a Cancel") + ); + + // Not enough signatures + let sighash = emer + .signature_hash(0, SigHashType::AllPlusAnyoneCanPay) + .unwrap(); + let sighash = secp256k1::Message::from_slice(&sighash).unwrap(); + let signatures: BTreeMap = stakeholders_priv + [..2] + .iter() + .map(|xpriv| { + let privkey = xpriv.derive_priv(&secp_ctx, &[derivation_index]).unwrap(); + ( + privkey.private_key.public_key(&secp_ctx).key, + secp_ctx.sign(&sighash, 
&privkey.private_key.key), + ) + }) + .collect(); + let msg = Sig { + signatures, + txid: emer.txid(), + deposit_outpoint, + derivation_index, + }; + assert!( + process_sig_message(&db_path, &scripts_config, &bitcoind, msg, &secp_ctx) + .unwrap_err() + .to_string() + .contains("Revault transaction finalisation error") + ); + + // Enough sigs, but invalid signature type + let bad_sighash = emer.signature_hash(0, SigHashType::All).unwrap(); + let bad_sighash = secp256k1::Message::from_slice(&bad_sighash).unwrap(); + let signatures: BTreeMap = stakeholders_priv + .iter() + .map(|xpriv| { + let privkey = xpriv.derive_priv(&secp_ctx, &[derivation_index]).unwrap(); + ( + privkey.private_key.public_key(&secp_ctx).key, + secp_ctx.sign(&bad_sighash, &privkey.private_key.key), + ) + }) + .collect(); + let msg = Sig { + signatures, + txid: emer.txid(), + deposit_outpoint, + derivation_index, + }; + assert!( + process_sig_message(&db_path, &scripts_config, &bitcoind, msg, &secp_ctx) + .unwrap_err() + .to_string() + .contains("Invalid signature") + ); + + // Enough invalid sigs + let signatures: BTreeMap = stakeholders_priv + .iter() + .map(|xpriv| { + let privkey = xpriv + .derive_priv(&secp_ctx, &[derivation_index.increment().unwrap()]) + .unwrap(); + ( + privkey.private_key.public_key(&secp_ctx).key, + secp_ctx.sign(&sighash, &privkey.private_key.key), + ) + }) + .collect(); + let msg = Sig { + signatures, + txid: emer.txid(), + deposit_outpoint, + derivation_index, + }; + assert!( + process_sig_message(&db_path, &scripts_config, &bitcoind, msg, &secp_ctx) + .unwrap_err() + .to_string() + .contains("Miniscript Error: could not satisfy") + ); + + // Enough *valid* sigs, vault must now be registered and Emer sigs present + let signatures: BTreeMap = stakeholders_priv + .iter() + .map(|xpriv| { + let privkey = xpriv.derive_priv(&secp_ctx, &[derivation_index]).unwrap(); + ( + privkey.private_key.public_key(&secp_ctx).key, + secp_ctx.sign(&sighash, &privkey.private_key.key), + ) 
+ }) + .collect(); + let msg = Sig { + signatures, + txid: emer.txid(), + deposit_outpoint, + derivation_index, + }; + assert_eq!( + process_sig_message(&db_path, &scripts_config, &bitcoind, msg, &secp_ctx).unwrap(), + SigResult { + ack: true, + txid: emer.txid() + } + ); + let vault = db_vault(&db_path, &deposit_outpoint).unwrap().unwrap(); + assert_eq!(vault.delegated, false); + assert_eq!( + db_emergency_signatures(&db_path, vault.id).unwrap().len(), + stakeholders.len() + ); + + // We won't accept to process an Emergency for this vault anymore + let msg = Sig { + signatures: BTreeMap::new(), + txid: emer.txid(), + deposit_outpoint, + derivation_index, + }; + assert!( + process_sig_message(&db_path, &scripts_config, &bitcoind, msg, &secp_ctx) + .unwrap_err() + .to_string() + .contains("received an Emergency") + ); + + // We won't accept to process a Cancel just yet + let msg = Sig { + signatures: BTreeMap::new(), + txid: cancel.txid(), + deposit_outpoint, + derivation_index, + }; + assert!( + process_sig_message(&db_path, &scripts_config, &bitcoind, msg, &secp_ctx) + .unwrap_err() + .to_string() + .contains("received a Cancel") + ); + + let sighash = unemer + .signature_hash(0, SigHashType::AllPlusAnyoneCanPay) + .unwrap(); + let sighash = secp256k1::Message::from_slice(&sighash).unwrap(); + let signatures: BTreeMap = stakeholders_priv + [..2] + .iter() + .map(|xpriv| { + let privkey = xpriv.derive_priv(&secp_ctx, &[derivation_index]).unwrap(); + ( + privkey.private_key.public_key(&secp_ctx).key, + secp_ctx.sign(&sighash, &privkey.private_key.key), + ) + }) + .collect(); + let msg = Sig { + signatures, + txid: unemer.txid(), + deposit_outpoint, + derivation_index, + }; + assert!( + process_sig_message(&db_path, &scripts_config, &bitcoind, msg, &secp_ctx) + .unwrap_err() + .to_string() + .contains("Revault transaction finalisation error") + ); + + // Enough sigs, but invalid signature type + let bad_sighash = unemer.signature_hash(0, SigHashType::All).unwrap(); + 
let bad_sighash = secp256k1::Message::from_slice(&bad_sighash).unwrap(); + let signatures: BTreeMap = stakeholders_priv + .iter() + .map(|xpriv| { + let privkey = xpriv.derive_priv(&secp_ctx, &[derivation_index]).unwrap(); + ( + privkey.private_key.public_key(&secp_ctx).key, + secp_ctx.sign(&bad_sighash, &privkey.private_key.key), + ) + }) + .collect(); + let msg = Sig { + signatures, + txid: unemer.txid(), + deposit_outpoint, + derivation_index, + }; + assert!( + process_sig_message(&db_path, &scripts_config, &bitcoind, msg, &secp_ctx) + .unwrap_err() + .to_string() + .contains("Invalid signature") + ); + + // Enough invalid sigs + let signatures: BTreeMap = stakeholders_priv + .iter() + .map(|xpriv| { + let privkey = xpriv + .derive_priv(&secp_ctx, &[derivation_index.increment().unwrap()]) + .unwrap(); + ( + privkey.private_key.public_key(&secp_ctx).key, + secp_ctx.sign(&sighash, &privkey.private_key.key), + ) + }) + .collect(); + let msg = Sig { + signatures, + txid: unemer.txid(), + deposit_outpoint, + derivation_index, + }; + assert!( + process_sig_message(&db_path, &scripts_config, &bitcoind, msg, &secp_ctx) + .unwrap_err() + .to_string() + .contains("Miniscript Error: could not satisfy") + ); + + // Enough *valid* sigs, UnvaultEmer sigs are now stored. 
+ let signatures: BTreeMap = stakeholders_priv + .iter() + .map(|xpriv| { + let privkey = xpriv.derive_priv(&secp_ctx, &[derivation_index]).unwrap(); + ( + privkey.private_key.public_key(&secp_ctx).key, + secp_ctx.sign(&sighash, &privkey.private_key.key), + ) + }) + .collect(); + let msg = Sig { + signatures, + txid: unemer.txid(), + deposit_outpoint, + derivation_index, + }; + assert_eq!( + process_sig_message(&db_path, &scripts_config, &bitcoind, msg, &secp_ctx).unwrap(), + SigResult { + ack: true, + txid: unemer.txid() + } + ); + let vault = db_vault(&db_path, &deposit_outpoint).unwrap().unwrap(); + assert_eq!(vault.delegated, false); + assert_eq!( + db_unvault_emergency_signatures(&db_path, vault.id) + .unwrap() + .len(), + stakeholders.len() + ); + + // We won't accept it twice + let msg = Sig { + signatures: BTreeMap::new(), + txid: unemer.txid(), + deposit_outpoint, + derivation_index, + }; + assert!( + process_sig_message(&db_path, &scripts_config, &bitcoind, msg, &secp_ctx) + .unwrap_err() + .to_string() + .contains("received an UnvaultEmergency") + ); + + // We won't accept to process an Emergency either + let msg = Sig { + signatures: BTreeMap::new(), + txid: emer.txid(), + deposit_outpoint, + derivation_index, + }; + assert!( + process_sig_message(&db_path, &scripts_config, &bitcoind, msg, &secp_ctx) + .unwrap_err() + .to_string() + .contains("received an Emergency") + ); + + let sighash = cancel + .signature_hash(0, SigHashType::AllPlusAnyoneCanPay) + .unwrap(); + let sighash = secp256k1::Message::from_slice(&sighash).unwrap(); + let signatures: BTreeMap = stakeholders_priv + [..2] + .iter() + .map(|xpriv| { + let privkey = xpriv.derive_priv(&secp_ctx, &[derivation_index]).unwrap(); + ( + privkey.private_key.public_key(&secp_ctx).key, + secp_ctx.sign(&sighash, &privkey.private_key.key), + ) + }) + .collect(); + let msg = Sig { + signatures, + txid: cancel.txid(), + deposit_outpoint, + derivation_index, + }; + assert!( + process_sig_message(&db_path, 
&scripts_config, &bitcoind, msg, &secp_ctx) + .unwrap_err() + .to_string() + .contains("Revault transaction finalisation error") + ); + + // Enough sigs, but invalid signature type + let bad_sighash = cancel.signature_hash(0, SigHashType::All).unwrap(); + let bad_sighash = secp256k1::Message::from_slice(&bad_sighash).unwrap(); + let signatures: BTreeMap = stakeholders_priv + .iter() + .map(|xpriv| { + let privkey = xpriv.derive_priv(&secp_ctx, &[derivation_index]).unwrap(); + ( + privkey.private_key.public_key(&secp_ctx).key, + secp_ctx.sign(&bad_sighash, &privkey.private_key.key), + ) + }) + .collect(); + let msg = Sig { + signatures, + txid: cancel.txid(), + deposit_outpoint, + derivation_index, + }; + assert!( + process_sig_message(&db_path, &scripts_config, &bitcoind, msg, &secp_ctx) + .unwrap_err() + .to_string() + .contains("Invalid signature") + ); + + // Enough invalid sigs + let signatures: BTreeMap = stakeholders_priv + .iter() + .map(|xpriv| { + let privkey = xpriv + .derive_priv(&secp_ctx, &[derivation_index.increment().unwrap()]) + .unwrap(); + ( + privkey.private_key.public_key(&secp_ctx).key, + secp_ctx.sign(&sighash, &privkey.private_key.key), + ) + }) + .collect(); + let msg = Sig { + signatures, + txid: cancel.txid(), + deposit_outpoint, + derivation_index, + }; + assert!( + process_sig_message(&db_path, &scripts_config, &bitcoind, msg, &secp_ctx) + .unwrap_err() + .to_string() + .contains("Miniscript Error: could not satisfy") + ); + + // Enough *valid* sigs, Cancel sigs are now stored. 
+ let signatures: BTreeMap = stakeholders_priv + .iter() + .map(|xpriv| { + let privkey = xpriv.derive_priv(&secp_ctx, &[derivation_index]).unwrap(); + ( + privkey.private_key.public_key(&secp_ctx).key, + secp_ctx.sign(&sighash, &privkey.private_key.key), + ) + }) + .collect(); + let msg = Sig { + signatures, + txid: cancel.txid(), + deposit_outpoint, + derivation_index, + }; + assert_eq!( + process_sig_message(&db_path, &scripts_config, &bitcoind, msg, &secp_ctx).unwrap(), + SigResult { + ack: true, + txid: cancel.txid() + } + ); + let vault = db_vault(&db_path, &deposit_outpoint).unwrap().unwrap(); + assert!(vault.delegated); + assert_eq!( + db_cancel_signatures(&db_path, vault.id).unwrap().len(), + stakeholders.len() + ); + + // Done: remove the db + fs::remove_file(&db_path).unwrap_or_else(|_| ()); + } +} diff --git a/src/main.rs b/src/main.rs index 6931e6a..b160d60 100644 --- a/src/main.rs +++ b/src/main.rs @@ -3,6 +3,7 @@ mod config; mod daemonize; mod database; mod keys; +mod listener; mod poller; use bitcoind::{load_watchonly_wallet, start_bitcoind, wait_bitcoind_synced}; @@ -10,13 +11,14 @@ use config::{config_folder_path, Config}; use daemonize::daemonize; use database::setup_db; use keys::read_or_create_noise_key; +use listener::listener_main; use revault_net::{ noise::PublicKey as NoisePubKey, sodiumoxide::{self, crypto::scalarmult::curve25519}, }; use revault_tx::bitcoin::{hashes::hex::ToHex, secp256k1}; -use std::{env, fs, os::unix::fs::DirBuilderExt, panic, path, process, time}; +use std::{env, fs, os::unix::fs::DirBuilderExt, panic, path, process, sync, thread, time}; const DATABASE_FILENAME: &str = "mirarod.sqlite3"; const VAULT_WATCHONLY_FILENAME: &str = "vault_watchonly"; @@ -169,9 +171,11 @@ fn main() { log::error!("Error setting up bitcoind RPC connection: '{}'", e); process::exit(1); }); + let bitcoind = sync::Arc::new(bitcoind); log::info!("Checking if bitcoind is synced"); wait_bitcoind_synced(&bitcoind); + log::info!("bitcoind now synced"); 
load_watchonly_wallet(&bitcoind, vault_watchonly_path).unwrap_or_else(|e| { log::error!("Error loading vault watchonly wallet: '{}'", e); @@ -190,10 +194,9 @@ fn main() { process::exit(1); }); log::info!( - "Using Noise key '{}'.", - NoisePubKey(curve25519::scalarmult_base(&curve25519::Scalar(noise_secret.0)).0) - .0 - .to_hex() + "Using Noise key '{}'. Stakehodler Noise key '{}'", + noise_secret.public_key().0.to_hex(), + &config.stakeholder_noise_key.0.to_hex() ); if config.daemon { @@ -219,6 +222,20 @@ fn main() { } } + thread::spawn({ + let _db_path = db_path.clone(); + let _config = config.clone(); + let _bitcoind = bitcoind.clone(); + move || { + listener_main(&_db_path, &_config, _bitcoind, &noise_secret).unwrap_or_else(|e| { + log::error!("Error in listener loop: '{}'", e); + process::exit(1); + }) + } + }); + + log::info!("Started miradord.",); + let secp_ctx = secp256k1::Secp256k1::verification_only(); poller::main_loop(&db_path, &secp_ctx, &config, &bitcoind).unwrap_or_else(|e| { log::error!("Error in main loop: '{}'", e); diff --git a/src/poller.rs b/src/poller.rs index 75721d1..2f5928b 100644 --- a/src/poller.rs +++ b/src/poller.rs @@ -134,6 +134,10 @@ fn manage_cancel_attempts( .expect("Impossible, the confirmation height is always <= tip"); if n_confs > REORG_WATCH_LIMIT { db_del_vault(db_path, db_vault.id)?; + log::info!( + "Forgetting about consumed vault at '{}' after its Cancel transaction had '{}' confirmations.", + &db_vault.deposit_outpoint, n_confs + ); } } else { let cancel_outpoint = cancel_tx.deposit_txin(&deposit_desc).outpoint(); @@ -163,13 +167,16 @@ fn manage_cancel_attempts( ); } else { // If the chain didn't change, and there is no Cancel UTXO at the best block there - // are only 2 possibilities: either the Cancel transaction is still unconfirmed - // (and therefore the Unvault UTXO is still present) or it was spent. 
+ // are only 2 possibilities before the expiration of the CSV: either the Cancel + // transaction is still unconfirmed (and therefore the Unvault UTXO is still present) + // or it was spent. if bitcoind.utxoinfo(&unvault_outpoint).is_none() { if bitcoind.chain_tip().hash != current_tip.hash { // TODO } + // FIXME: if timelock matured, do additional checks (is this Cancel still + // broadcastable?) db_revoc_confirmed(db_path, db_vault.id, current_tip.height)?; log::debug!( "Noticed at height '{}' that Cancel transaction '{}' was confirmed for vault at '{}'", @@ -241,7 +248,7 @@ fn revault( e ); } else { - log::trace!( + log::debug!( "Broadcasted Cancel transaction '{}'", encode::serialize_hex(&cancel_tx) ); @@ -277,7 +284,11 @@ fn check_for_unvault( let unvault_txin = unvault_tx.revault_unvault_txin(&unvault_desc); if let Some(utxoinfo) = bitcoind.utxoinfo(&unvault_txin.outpoint()) { - log::debug!("Got a confirmed Unvault UTXO: '{:?}'", utxoinfo); + log::debug!( + "Got a confirmed Unvault UTXO at '{}': '{:?}'", + &unvault_txin.outpoint(), + utxoinfo + ); if current_tip.hash != utxoinfo.bestblock { // TODO diff --git a/tests/fixtures.py b/tests/fixtures.py new file mode 100644 index 0000000..fe416ef --- /dev/null +++ b/tests/fixtures.py @@ -0,0 +1,162 @@ +from concurrent import futures +from ephemeral_port_reserve import reserve +from test_framework.bitcoind import Bitcoind +from test_framework.miradord import Miradord +from test_framework.utils import ( + get_descriptors, + EXECUTOR_WORKERS, + STKS_XPRIVS, + STKS_XPUBS, +) + +import bip32 +import logging +import os +import pytest +import shutil +import tempfile +import time + +__attempts = {} + + +@pytest.fixture(scope="session") +def test_base_dir(): + d = os.getenv("TEST_DIR", "/tmp") + + directory = tempfile.mkdtemp(prefix="miradord-tests-", dir=d) + print("Running tests in {}".format(directory)) + + yield directory + + content = os.listdir(directory) + if content == []: + shutil.rmtree(directory) + else: + 
print(f"Leaving base dir '{directory}' as it still contains {content}") + + +# Taken from https://docs.pytest.org/en/latest/example/simple.html#making-test-result-information-available-in-fixtures +@pytest.hookimpl(tryfirst=True, hookwrapper=True) +def pytest_runtest_makereport(item, call): + # execute all other hooks to obtain the report object + outcome = yield + rep = outcome.get_result() + + # set a report attribute for each phase of a call, which can + # be "setup", "call", "teardown" + + setattr(item, "rep_" + rep.when, rep) + + +@pytest.fixture +def directory(request, test_base_dir, test_name): + """Return a per-test specific directory. + + This makes a unique test-directory even if a test is rerun multiple times. + + """ + global __attempts + # Auto set value if it isn't in the dict yet + __attempts[test_name] = __attempts.get(test_name, 0) + 1 + directory = os.path.join( + test_base_dir, "{}_{}".format(test_name, __attempts[test_name]) + ) + + if not os.path.exists(directory): + os.makedirs(directory) + + yield directory + + # test_base_dir is at the session scope, so we can't use request.node as mentioned in + # the doc linked in the hook above. 
+ if request.session.testsfailed == 0: + try: + shutil.rmtree(directory) + except Exception: + files = [ + os.path.join(dp, f) for dp, _, fn in os.walk(directory) for f in fn + ] + print("Directory still contains files:", files) + raise + else: + print(f"Test failed, leaving directory '{directory}' intact") + + +@pytest.fixture +def test_name(request): + yield request.function.__name__ + + +@pytest.fixture +def executor(test_name): + ex = futures.ThreadPoolExecutor( + max_workers=EXECUTOR_WORKERS, thread_name_prefix=test_name + ) + yield ex + ex.shutdown(wait=False) + + +@pytest.fixture +def bitcoind(directory): + bitcoind = Bitcoind(bitcoin_dir=directory) + bitcoind.startup() + + bitcoind.rpc.createwallet(bitcoind.rpc.wallet_name, False, False, "", False, True) + + while bitcoind.rpc.getbalance() < 50: + bitcoind.rpc.generatetoaddress(1, bitcoind.rpc.getnewaddress()) + + while bitcoind.rpc.getblockcount() <= 1: + time.sleep(0.1) + + yield bitcoind + + bitcoind.cleanup() + + +@pytest.fixture +def miradord(bitcoind, directory): + datadir = os.path.join(directory, "miradord") + os.makedirs(datadir, exist_ok=True) + + # We only care about the stakeholders, so we use dummy keys for the others. 
+ cpfp_xpubs = [ + "xpub6FD2XRGE3DAJzb69LXMEAiHfj3U4xVqLExMSV4DJXs5zCntHmtdvpkErLwAMGMnKJN2m3LGgaaAMvBELwNNJDAwWvidNMxVgSqLyoC2y2Kc" + ] + cosigs_keys = [ + "03170fcc522ee69d743c15e40379fcabb6c607ff3dbeb68cbdd6da5da9c9d048a5", + "03b8789ff36bf55a77a20af0a0b1668d8dd3df2e7b7f81da058b5236f0120aba38", + "028d5f3bb2bcf819f785086e0b04833361f773a328aeff41ea5dd248fe03d18b25", + "03c977891e952393a742f9f2ef5cd4cefb7cbe58d9b3acfdc750b38f6931764ba8", + ] + mans_xpubs = [ + "xpub6CFH8m3bnUFXvS78XZyCQ9mCbp7XmKXbS67YHGUS3NxHSLhAMCGHGaEPojcoYt5PYnocyuScAM5xuDzf4BqFQt3fhmKEaRgmVzDcAR46Byh", + "xpub6ECZqYNQzHkveSWmsGh6XSL8wMGXRtoZ5hkbWXwRSVEyEsKADe34dbdnMob1ZjUpd4TD7no1isnnvpQq9DchFes5DnHJ7JupSntZsKr7VbQ", + ] + (dep_desc, unv_desc, cpfp_desc) = get_descriptors( + STKS_XPUBS, cosigs_keys, mans_xpubs, len(mans_xpubs), cpfp_xpubs, 232 + ) + emer_addr = "bcrt1qewc2348370pgw8kjz8gy09z8xyh0d9fxde6nzamd3txc9gkmjqmq8m4cdq" + + coordinator_noise_key = ( + "d91563973102454a7830137e92d0548bc83b4ea2799f1df04622ca1307381402" + ) + miradord = Miradord( + datadir, + dep_desc, + unv_desc, + cpfp_desc, + emer_addr, + reserve(), + os.urandom(32), + os.urandom(32), + coordinator_noise_key, # Unused yet + reserve(), # Unused yet + bitcoind, + ) + miradord.start() + + yield miradord + + miradord.cleanup() diff --git a/tests/requirements.txt b/tests/requirements.txt new file mode 100644 index 0000000..1a20e26 --- /dev/null +++ b/tests/requirements.txt @@ -0,0 +1,8 @@ +pytest==6.2 +pytest-xdist==1.31.0 +pytest-timeout==1.3.4 +ephemeral_port_reserve==1.1.1 +bip32~=2.0 +psycopg2==2.8 +pynacl==1.4 +noiseprotocol==0.3.1 diff --git a/tests/test_chain.py b/tests/test_chain.py new file mode 100644 index 0000000..cf6c3ea --- /dev/null +++ b/tests/test_chain.py @@ -0,0 +1,76 @@ +from fixtures import * +from test_framework.utils import COIN, DEPOSIT_ADDRESS, DERIV_INDEX + + +def test_simple_unvault_broadcast(miradord, bitcoind): + """ + Sanity check we detect the broadcast of the Unvault transaction for a + vault 
we registered. + """ + deposit_value = 12 + deposit_txid, deposit_outpoint = bitcoind.create_utxo( + DEPOSIT_ADDRESS, deposit_value + ) + bitcoind.generate_block(1, deposit_txid) + + # Register this vault on the WT + txs = miradord.watch_vault(deposit_outpoint, deposit_value * COIN, DERIV_INDEX) + + # Broadcast the Unvault + unvault_txid = bitcoind.rpc.decoderawtransaction(txs["unvault"]["tx"])["txid"] + bitcoind.rpc.sendrawtransaction(txs["unvault"]["tx"]) + bitcoind.generate_block(1, unvault_txid) + + cancel_txid = bitcoind.rpc.decoderawtransaction(txs["cancel"]["tx"])["txid"] + miradord.wait_for_logs( + [ + f"Got a confirmed Unvault UTXO at '{unvault_txid}:0'", + f"Broadcasted Cancel transaction '{txs['cancel']['tx']}'", + f"Cancel transaction '{cancel_txid}' for vault at '{deposit_outpoint}' is still unconfirmed", + ] + ) + + bitcoind.generate_block(1, wait_for_mempool=cancel_txid) + miradord.wait_for_log( + f"Vault at '{deposit_outpoint}' Cancel transaction .* confirmed" + ) + + # Generate two days worth of blocks, the WT should + bitcoind.generate_block(288) + miradord.wait_for_log(f"Forgetting about consumed vault at '{deposit_outpoint}'") + + +def test_spent_cancel_detection(miradord, bitcoind): + """ + Sanity check we detect a Cancel is confirmed even after it was spent. 
+ """ + deposit_value = 12 + deposit_txid, deposit_outpoint = bitcoind.create_utxo( + DEPOSIT_ADDRESS, deposit_value + ) + bitcoind.generate_block(1, deposit_txid) + + # Register this vault on the WT, and make it broadcast the Spend + txs = miradord.watch_vault(deposit_outpoint, deposit_value * COIN, DERIV_INDEX) + unvault_txid = bitcoind.rpc.decoderawtransaction(txs["unvault"]["tx"])["txid"] + bitcoind.rpc.sendrawtransaction(txs["unvault"]["tx"]) + bitcoind.generate_block(1, unvault_txid) + cancel_tx = bitcoind.rpc.decoderawtransaction(txs["cancel"]["tx"]) + miradord.wait_for_logs( + [ + f"Got a confirmed Unvault UTXO at '{unvault_txid}:0'", + f"Broadcasted Cancel transaction '{txs['cancel']['tx']}'", + f"Cancel transaction '{cancel_tx['txid']}' for vault at '{deposit_outpoint}' is still unconfirmed", + ] + ) + + # Now, the unconfirmed Cancel is a new deposit: get the Unvault tx for this one and broadcast it + txs = miradord.get_signed_txs( + f"{cancel_tx['txid']}:0", cancel_tx["vout"][0]["value"] * COIN + ) + unvault_txid = bitcoind.rpc.sendrawtransaction(txs["unvault"]["tx"]) + + bitcoind.generate_block(1, wait_for_mempool=[cancel_tx["txid"], unvault_txid]) + miradord.wait_for_log( + f"Noticed at height .* that Cancel transaction '{cancel_tx['txid']}' was confirmed for vault at '{deposit_outpoint}'" + ) diff --git a/tests/test_conn.py b/tests/test_conn.py new file mode 100644 index 0000000..84fab97 --- /dev/null +++ b/tests/test_conn.py @@ -0,0 +1,50 @@ +from fixtures import * +from test_framework.utils import COIN, DEPOSIT_ADDRESS, DERIV_INDEX + + +def test_simple_client_server(miradord, bitcoind): + """ + Sanity check we can connect to the WT and register a vault to be watched for + """ + # Sending for a non-existing outpoint they will NACK + assert not miradord.send_sigs( + { + "030fac04165b606dea3b8f81ada5eb66ca181d5215c873fcf46623ea7cf8e98b1b": 
"304402205870a4bd7bca8147f3b8ca97e0f42166d223b5c1921c2843530e290a3712cd23022039dc778788b35caf0724028c2c9dec855118cc416dc4f2ed6213f0f6e3a681a6" + }, + "9c05f3169986caf69cebf2ec82a027e7d3f77c37731de72849bd3d1c6abd0543", + "150131e9c18da0c46bc5ed6e37f4f75bb077df68ace9da9a12f084ba7171c3d8:0", + DERIV_INDEX, + ) + + # Now actually create a deposit + deposit_value = 0.5 + deposit_txid, deposit_outpoint = bitcoind.create_utxo( + DEPOSIT_ADDRESS, deposit_value + ) + bitcoind.generate_block(6, deposit_txid) + + # If we don't send any thing it won't ACK + txs = miradord.get_signed_txs(deposit_outpoint, deposit_value * COIN) + emer_txid = bitcoind.rpc.decoderawtransaction(txs["emer"]["tx"])["txid"] + assert not miradord.send_sigs({}, emer_txid, deposit_outpoint, DERIV_INDEX) + + # We can send many messages through the same connection + noise_conn = miradord.get_noise_conn() + + # Now if we send all the Emergency transaction it will ACK + assert miradord.send_sigs( + txs["emer"]["sigs"], emer_txid, deposit_outpoint, DERIV_INDEX, noise_conn + ) + + # It will refuse that we send it twice but will happily accept the other sigs + assert not miradord.send_sigs( + txs["emer"]["sigs"], emer_txid, deposit_outpoint, DERIV_INDEX, noise_conn + ) + unemer_txid = bitcoind.rpc.decoderawtransaction(txs["unemer"]["tx"])["txid"] + assert miradord.send_sigs( + txs["unemer"]["sigs"], unemer_txid, deposit_outpoint, DERIV_INDEX, noise_conn + ) + cancel_txid = bitcoind.rpc.decoderawtransaction(txs["cancel"]["tx"])["txid"] + assert miradord.send_sigs( + txs["cancel"]["sigs"], cancel_txid, deposit_outpoint, DERIV_INDEX, noise_conn + ) diff --git a/tests/test_framework/__init__.py b/tests/test_framework/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/tests/test_framework/authproxy.py b/tests/test_framework/authproxy.py new file mode 100644 index 0000000..52abe27 --- /dev/null +++ b/tests/test_framework/authproxy.py @@ -0,0 +1,280 @@ +# This file was taken from the Bitcoin Core 
project in September 2021. +# Copyright (c) 2021 The Bitcoin Core developers +# +# Copyright (c) 2011 Jeff Garzik +# +# Previous copyright, from python-jsonrpc/jsonrpc/proxy.py: +# +# Copyright (c) 2007 Jan-Klaas Kollhof +# +# This file is part of jsonrpc. +# +# jsonrpc is free software; you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License as published by +# the Free Software Foundation; either version 2.1 of the License, or +# (at your option) any later version. +# +# This software is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with this software; if not, write to the Free Software +# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +"""HTTP proxy for opening RPC connection to bitcoind. 
+ +AuthServiceProxy has the following improvements over python-jsonrpc's +ServiceProxy class: + +- HTTP connections persist for the life of the AuthServiceProxy object + (if server supports HTTP/1.1) +- sends protocol 'version', per JSON-RPC 1.1 +- sends proper, incrementing 'id' +- sends Basic HTTP authentication headers +- parses all JSON numbers that look like floats as Decimal +- uses standard Python json lib +""" + +import base64 +import decimal +from http import HTTPStatus +import http.client +import json +import logging +import os +import socket +import time +import urllib.parse + +HTTP_TIMEOUT = 30 +USER_AGENT = "AuthServiceProxy/0.1" + +log = logging.getLogger("BitcoinRPC") + + +class JSONRPCException(Exception): + def __init__(self, rpc_error, http_status=None): + try: + errmsg = "%(message)s (%(code)i)" % rpc_error + except (KeyError, TypeError): + errmsg = "" + super().__init__(errmsg) + self.error = rpc_error + self.http_status = http_status + + +def EncodeDecimal(o): + if isinstance(o, decimal.Decimal): + return str(o) + raise TypeError(repr(o) + " is not JSON serializable") + + +class AuthServiceProxy: + __id_count = 0 + + # ensure_ascii: escape unicode as \uXXXX, passed to json.dumps + def __init__( + self, + service_url, + service_name=None, + timeout=HTTP_TIMEOUT, + connection=None, + ensure_ascii=True, + ): + self.__service_url = service_url + self._service_name = service_name + self.ensure_ascii = ensure_ascii # can be toggled on the fly by tests + self.__url = urllib.parse.urlparse(service_url) + user = ( + b"" if self.__url.username is None else self.__url.username.encode("utf8") + ) + passwd = ( + b"" if self.__url.password is None else self.__url.password.encode("utf8") + ) + authpair = user + b":" + passwd + self.__auth_header = b"Basic " + base64.b64encode(authpair) + self.timeout = timeout + self._set_conn(connection) + + def __getattr__(self, name): + if name.startswith("__") and name.endswith("__"): + # Python internal stuff + raise 
AttributeError + if self._service_name is not None: + name = "%s.%s" % (self._service_name, name) + return AuthServiceProxy(self.__service_url, name, connection=self.__conn) + + def _request(self, method, path, postdata): + """ + Do a HTTP request, with retry if we get disconnected (e.g. due to a timeout). + This is a workaround for https://bugs.python.org/issue3566 which is fixed in Python 3.5. + """ + headers = { + "Host": self.__url.hostname, + "User-Agent": USER_AGENT, + "Authorization": self.__auth_header, + "Content-type": "application/json", + } + if os.name == "nt": + # Windows somehow does not like to re-use connections + # TODO: Find out why the connection would disconnect occasionally and make it reusable on Windows + # Avoid "ConnectionAbortedError: [WinError 10053] An established connection was aborted by the software in your host machine" + self._set_conn() + try: + self.__conn.request(method, path, postdata, headers) + return self._get_response() + except (BrokenPipeError, ConnectionResetError): + # Python 3.5+ raises BrokenPipeError when the connection was reset + # ConnectionResetError happens on FreeBSD + self.__conn.close() + self.__conn.request(method, path, postdata, headers) + return self._get_response() + except OSError as e: + retry = ( + "[WinError 10053] An established connection was aborted by the software in your host machine" + in str(e) + ) + # Workaround for a bug on macOS. 
See https://bugs.python.org/issue33450 + retry = retry or ("[Errno 41] Protocol wrong type for socket" in str(e)) + if retry: + self.__conn.close() + self.__conn.request(method, path, postdata, headers) + return self._get_response() + else: + raise + + def get_request(self, *args, **argsn): + AuthServiceProxy.__id_count += 1 + + log.debug( + "-{}-> {} {}".format( + AuthServiceProxy.__id_count, + self._service_name, + json.dumps( + args or argsn, default=EncodeDecimal, ensure_ascii=self.ensure_ascii + ), + ) + ) + if args and argsn: + raise ValueError("Cannot handle both named and positional arguments") + return { + "version": "1.1", + "method": self._service_name, + "params": args or argsn, + "id": AuthServiceProxy.__id_count, + } + + def __call__(self, *args, **argsn): + postdata = json.dumps( + self.get_request(*args, **argsn), + default=EncodeDecimal, + ensure_ascii=self.ensure_ascii, + ) + response, status = self._request( + "POST", self.__url.path, postdata.encode("utf-8") + ) + if response["error"] is not None: + raise JSONRPCException(response["error"], status) + elif "result" not in response: + raise JSONRPCException( + {"code": -343, "message": "missing JSON-RPC result"}, status + ) + elif status != HTTPStatus.OK: + raise JSONRPCException( + { + "code": -342, + "message": "non-200 HTTP status code but no JSON-RPC error", + }, + status, + ) + else: + return response["result"] + + def batch(self, rpc_call_list): + postdata = json.dumps( + list(rpc_call_list), default=EncodeDecimal, ensure_ascii=self.ensure_ascii + ) + log.debug("--> " + postdata) + response, status = self._request( + "POST", self.__url.path, postdata.encode("utf-8") + ) + if status != HTTPStatus.OK: + raise JSONRPCException( + { + "code": -342, + "message": "non-200 HTTP status code but no JSON-RPC error", + }, + status, + ) + return response + + def _get_response(self): + req_start_time = time.time() + try: + http_response = self.__conn.getresponse() + except socket.timeout: + raise 
JSONRPCException( + { + "code": -344, + "message": "%r RPC took longer than %f seconds. Consider " + "using larger timeout for calls that take " + "longer to return." % (self._service_name, self.__conn.timeout), + } + ) + if http_response is None: + raise JSONRPCException( + {"code": -342, "message": "missing HTTP response from server"} + ) + + content_type = http_response.getheader("Content-Type") + if content_type != "application/json": + raise JSONRPCException( + { + "code": -342, + "message": "non-JSON HTTP response with '%i %s' from server" + % (http_response.status, http_response.reason), + }, + http_response.status, + ) + + responsedata = http_response.read().decode("utf8") + response = json.loads(responsedata, parse_float=decimal.Decimal) + elapsed = time.time() - req_start_time + if "error" in response and response["error"] is None: + log.debug( + "<-%s- [%.6f] %s" + % ( + response["id"], + elapsed, + json.dumps( + response["result"], + default=EncodeDecimal, + ensure_ascii=self.ensure_ascii, + ), + ) + ) + else: + log.debug("<-- [%.6f] %s" % (elapsed, responsedata)) + return response, http_response.status + + def __truediv__(self, relative_uri): + return AuthServiceProxy( + "{}/{}".format(self.__service_url, relative_uri), + self._service_name, + connection=self.__conn, + ) + + def _set_conn(self, connection=None): + port = 80 if self.__url.port is None else self.__url.port + if connection: + self.__conn = connection + self.timeout = connection.timeout + elif self.__url.scheme == "https": + self.__conn = http.client.HTTPSConnection( + self.__url.hostname, port, timeout=self.timeout + ) + else: + self.__conn = http.client.HTTPConnection( + self.__url.hostname, port, timeout=self.timeout + ) diff --git a/tests/test_framework/bitcoind.py b/tests/test_framework/bitcoind.py new file mode 100644 index 0000000..2647820 --- /dev/null +++ b/tests/test_framework/bitcoind.py @@ -0,0 +1,200 @@ +import base64 +import logging +import os + +from decimal import Decimal 
+from ephemeral_port_reserve import reserve +from test_framework.authproxy import AuthServiceProxy +from test_framework.utils import TailableProc, wait_for, COIN, TIMEOUT + + +class BitcoinDProxy: + def __init__(self, data_dir, network, rpc_port): + self.cookie_path = os.path.join(data_dir, network, ".cookie") + self.rpc_port = rpc_port + self.wallet_name = "miradord-tests" + + def __getattr__(self, name): + assert not (name.startswith("__") and name.endswith("__")), "Python internals" + + with open(self.cookie_path) as fd: + authpair = fd.read() + service_url = ( + f"http://{authpair}@localhost:{self.rpc_port}/wallet/{self.wallet_name}" + ) + proxy = AuthServiceProxy(service_url, name) + + def f(*args): + return proxy.__call__(*args) + + # Make debuggers show rather than > + f.__name__ = name + return f + + +class Bitcoind(TailableProc): + def __init__(self, bitcoin_dir, rpcport=None): + TailableProc.__init__(self, bitcoin_dir, verbose=False) + + if rpcport is None: + rpcport = reserve() + + self.bitcoin_dir = bitcoin_dir + self.rpcport = rpcport + self.p2pport = reserve() + self.prefix = "bitcoind" + + regtestdir = os.path.join(bitcoin_dir, "regtest") + if not os.path.exists(regtestdir): + os.makedirs(regtestdir) + + self.cmd_line = [ + "bitcoind", + "-datadir={}".format(bitcoin_dir), + "-printtoconsole", + "-server", + "-logtimestamps", + "-rpcthreads=16", + ] + bitcoind_conf = { + "port": self.p2pport, + "rpcport": rpcport, + "debug": 1, + "fallbackfee": Decimal(1000) / COIN, + } + self.conf_file = os.path.join(bitcoin_dir, "bitcoin.conf") + with open(self.conf_file, "w") as f: + f.write("chain=regtest\n") + f.write("[regtest]\n") + for k, v in bitcoind_conf.items(): + f.write(f"{k}={v}\n") + + self.rpc = BitcoinDProxy(bitcoin_dir, "regtest", rpcport) + + def start(self): + TailableProc.start(self) + self.wait_for_log("Done loading", timeout=TIMEOUT) + + logging.info("BitcoinD started") + + def stop(self): + self.rpc.stop() + return TailableProc.stop(self) + + 
# wait_for_mempool can be used to wait for the mempool before generating + # blocks: + # True := wait for at least 1 transation + # int > 0 := wait for at least N transactions + # 'tx_id' := wait for one transaction id given as a string + # ['tx_id1', 'tx_id2'] := wait until all of the specified transaction IDs + def generate_block(self, numblocks=1, wait_for_mempool=0): + if wait_for_mempool: + if isinstance(wait_for_mempool, str): + wait_for_mempool = [wait_for_mempool] + if isinstance(wait_for_mempool, list): + wait_for( + lambda: all( + txid in self.rpc.getrawmempool() for txid in wait_for_mempool + ) + ) + else: + wait_for(lambda: len(self.rpc.getrawmempool()) >= wait_for_mempool) + + old_blockcount = self.rpc.getblockcount() + addr = self.rpc.getnewaddress() + self.rpc.generatetoaddress(numblocks, addr) + wait_for(lambda: self.rpc.getblockcount() == old_blockcount + numblocks) + + def create_utxo(self, address, amount): + """Send {amount} coins to this {address}, return the outpoint of the + created UTXO.""" + txid = self.rpc.sendtoaddress(address, amount) + vout = 0 + while True: + txoinfo = self.rpc.gettxout(txid, vout) + if txoinfo["scriptPubKey"]["address"] == address: + return txid, f"{txid}:{vout}" + vout += 1 + + def get_coins(self, amount_btc): + # subsidy halving is every 150 blocks on regtest, it's a rough estimate + # to avoid looping in most cases + numblocks = amount_btc // 25 + 1 + while self.rpc.getbalance() < amount_btc: + self.generate_block(numblocks) + + def generate_blocks_censor(self, n, txids): + """Generate {n} blocks ignoring {txids}""" + fee_delta = 1000000 + for txid in txids: + self.rpc.prioritisetransaction(txid, None, -fee_delta) + self.generate_block(n) + for txid in txids: + self.rpc.prioritisetransaction(txid, None, fee_delta) + + def simple_reorg(self, height, shift=0): + """ + Reorganize chain by creating a fork at height={height} and: + - If shift >=0: + - re-mine all mempool transactions into {height} + shift + (with shift 
floored at 1) + - Else: + - don't re-mine the mempool transactions + + Note that tx's that become invalid at {height} (because coin maturity, + locktime etc.) are removed from mempool. The length of the new chain + will be original + 1 OR original + {shift}, whichever is larger. + + For example: to push tx's backward from height h1 to h2 < h1, + use {height}=h2. + + Or to change the txindex of tx's at height h1: + 1. A block at height h2 < h1 should contain a non-coinbase tx that can + be pulled forward to h1. + 2. Set {height}=h2 and {shift}= h1-h2 + """ + orig_len = self.rpc.getblockcount() + old_hash = self.rpc.getblockhash(height) + if height + shift > orig_len: + final_len = height + shift + else: + final_len = 1 + orig_len + + self.rpc.invalidateblock(old_hash) + self.wait_for_log( + r"InvalidChainFound: invalid block=.* height={}".format(height) + ) + memp = self.rpc.getrawmempool() + + if shift < 0: + self.generate_blocks_censor(1 + final_len - height, memp) + elif shift == 0: + self.generate_block(1 + final_len - height, memp) + else: + self.generate_blocks_censor(shift, memp) + self.generate_block(1 + final_len - (height + shift), memp) + self.wait_for_log(r"UpdateTip: new best=.* height={}".format(final_len)) + + def startup(self): + try: + self.start() + except Exception: + self.stop() + raise + + info = self.rpc.getnetworkinfo() + + if info["version"] < 210000: + self.rpc.stop() + raise ValueError( + "bitcoind is too old. 
At least version 210000"
+                " (v0.21.0) is needed, current version is {}".format(info["version"])
+            )
+
+    def cleanup(self):
+        """Stop bitcoind, force-killing the process if a clean stop fails."""
+        try:
+            self.stop()
+        except Exception:
+            self.proc.kill()
+            self.proc.wait()
diff --git a/tests/test_framework/miradord.py b/tests/test_framework/miradord.py
new file mode 100644
index 0000000..f838b0c
--- /dev/null
+++ b/tests/test_framework/miradord.py
@@ -0,0 +1,231 @@
+import json
+import logging
+import os
+import random
+import socket
+
+from test_framework.utils import (
+    TailableProc,
+    VERBOSE,
+    LOG_LEVEL,
+    get_signed_txs,
+    STKS_XPRIVS,
+    DERIV_INDEX,
+    TIMEOUT,
+)
+from nacl.public import PrivateKey as Curve25519Private
+from noise.connection import NoiseConnection, Keypair
+
+
+# FIXME: it's a bit clumsy. Miradord should stick to be the `miradord` process object
+# and we should have another class (PartialRevaultNetwork?) to stuff helpers and all
+# info not strictly necessary to running the process.
+class Miradord(TailableProc):
+    def __init__(
+        self,
+        datadir,
+        deposit_desc,
+        unvault_desc,
+        cpfp_desc,
+        emer_addr,
+        listen_port,
+        noise_priv,
+        stk_noise_secret,
+        coordinator_noise_key,
+        coordinator_port,
+        bitcoind,
+    ):
+        TailableProc.__init__(self, datadir, verbose=VERBOSE)
+
+        self.prefix = os.path.split(datadir)[-1]
+        self.stk_noise_secret = stk_noise_secret
+        self.noise_secret = noise_priv
+        self.listen_port = listen_port
+        self.deposit_desc = deposit_desc
+        self.unvault_desc = unvault_desc
+        self.cpfp_desc = cpfp_desc
+        self.emer_addr = emer_addr
+        self.bitcoind = bitcoind
+
+        # The data is stored in a per-network directory. 
We need to create it + # in order to write the Noise private key + self.datadir_with_network = os.path.join(datadir, "regtest") + os.makedirs(self.datadir_with_network, exist_ok=True) + + bin = os.path.join( + os.path.dirname(__file__), "..", "..", "target/debug/miradord" + ) + self.conf_file = os.path.join(datadir, "config.toml") + self.cmd_line = [bin, "--conf", f"{self.conf_file}"] + + self.noise_secret_file = os.path.join(self.datadir_with_network, "noise_secret") + with open(self.noise_secret_file, "wb") as f: + f.write(noise_priv) + wt_noise_key = bytes(Curve25519Private(noise_priv).public_key) + stk_noise_key = bytes(Curve25519Private(self.stk_noise_secret).public_key) + logging.debug( + f"Watchtower Noise key: {wt_noise_key.hex()}, Stakeholder Noise key: {stk_noise_key.hex()}" + ) + + bitcoind_cookie = os.path.join(bitcoind.bitcoin_dir, "regtest", ".cookie") + with open(self.conf_file, "w") as f: + f.write(f"data_dir = '{datadir}'\n") + f.write("daemon = false\n") + f.write(f"log_level = '{LOG_LEVEL}'\n") + + f.write(f'stakeholder_noise_key = "{stk_noise_key.hex()}"\n') + + f.write(f'coordinator_host = "127.0.0.1:{coordinator_port}"\n') + f.write(f'coordinator_noise_key = "{coordinator_noise_key}"\n') + f.write("coordinator_poll_seconds = 5\n") + + f.write(f'listen = "127.0.0.1:{listen_port}"\n') + + f.write("[scripts_config]\n") + f.write(f'deposit_descriptor = "{deposit_desc}"\n') + f.write(f'unvault_descriptor = "{unvault_desc}"\n') + f.write(f'cpfp_descriptor = "{cpfp_desc}"\n') + f.write(f'emergency_address = "{emer_addr}"\n') + + f.write("[bitcoind_config]\n") + f.write('network = "regtest"\n') + f.write(f"cookie_path = '{bitcoind_cookie}'\n") + f.write(f"addr = '127.0.0.1:{bitcoind.rpcport}'\n") + f.write("poll_interval_secs = 5\n") + + def start(self): + TailableProc.start(self) + self.wait_for_logs( + ["bitcoind now synced", "Listener thread started", "Started miradord."] + ) + + def stop(self, timeout=10): + return TailableProc.stop(self) + + def 
cleanup(self): + try: + self.stop() + except Exception: + self.proc.kill() + + def get_signed_txs(self, deposit_outpoint, deposit_value): + """ + Get the Unvault, Cancel, Emergency and Unvault Emergency (in this order) fully + signed transactions extracted, ready to be broadcast for this deposit UTXO info. + """ + return get_signed_txs( + STKS_XPRIVS, + self.deposit_desc, + self.unvault_desc, + self.cpfp_desc, + self.emer_addr, + deposit_outpoint, + deposit_value, + DERIV_INDEX, + ) + + def get_noise_conn(self): + """Create a new connection to the watchtower, performing the Noise handshake.""" + conn = NoiseConnection.from_name(b"Noise_KK_25519_ChaChaPoly_SHA256") + + conn.set_as_initiator() + conn.set_keypair_from_private_bytes(Keypair.STATIC, self.stk_noise_secret) + conn.set_keypair_from_private_bytes(Keypair.REMOTE_STATIC, self.noise_secret) + conn.start_handshake() + + sock = socket.socket() + sock.settimeout(TIMEOUT // 10) + sock.connect(("localhost", self.listen_port)) + msg = conn.write_message(b"practical_revault_0") + sock.sendall(msg) + resp = sock.recv(32 + 16) # Key size + Mac size + assert len(resp) > 0 + conn.read_message(resp) + + return sock, conn + + def send_msg(self, name, params, noise_conn=None): + """ + Send a message to the watchtower. If a Noise connection isn't provided + a new one is established. 
+ """ + if noise_conn is None: + (sock, conn) = self.get_noise_conn() + else: + (sock, conn) = noise_conn + + # Practical-revault specifies messages format as almost-JSONRPC + msg_id = random.randint(0, 2 ** 32) + msg = {"id": msg_id, "method": name, "params": params} + msg_serialized = json.dumps(msg).encode("utf-8") + logging.debug(f"Sending message {msg}") + + # We encrypt messages in two parts to length-prefix them + prefix = (len(msg_serialized) + 16).to_bytes(2, "big") + encrypted_header = conn.encrypt(prefix) + encrypted_body = conn.encrypt(msg_serialized) + sock.sendall(encrypted_header + encrypted_body) + + # Same for decryption, careful to read length first and then the body + resp_header = sock.recv(2 + 16) + assert len(resp_header) > 0 + resp_header = conn.decrypt(resp_header) + resp_len = int.from_bytes(resp_header, "big") + resp = sock.recv(resp_len) + assert len(resp) == resp_len + resp = conn.decrypt(resp) + + resp = json.loads(resp) + assert resp["id"] == msg_id, "Reusing the same Noise connection across threads?" + + return resp["result"] + + def send_sigs(self, sigs, txid, deposit_outpoint, deriv_index, noise_conn=None): + """ + Send a `sig` message to the watchtower, optionally using an existing + connection. + """ + params = { + "signatures": sigs, + "txid": txid, + "deposit_outpoint": deposit_outpoint, + "derivation_index": deriv_index, + } + + resp = self.send_msg("sig", params, noise_conn) + assert resp["txid"] == txid # Everything is synchronous + + return resp["ack"] + + def watch_vault(self, deposit_outpoint, deposit_value, deriv_index): + """The deposit transaction must be confirmed. 
The deposit value is in sats.""" + txs = self.get_signed_txs(deposit_outpoint, deposit_value) + emer_txid = self.bitcoind.rpc.decoderawtransaction(txs["emer"]["tx"])["txid"] + unemer_txid = self.bitcoind.rpc.decoderawtransaction(txs["unemer"]["tx"])[ + "txid" + ] + cancel_txid = self.bitcoind.rpc.decoderawtransaction(txs["cancel"]["tx"])[ + "txid" + ] + + noise_conn = self.get_noise_conn() + assert self.send_sigs( + txs["emer"]["sigs"], emer_txid, deposit_outpoint, DERIV_INDEX, noise_conn + ) + assert self.send_sigs( + txs["unemer"]["sigs"], + unemer_txid, + deposit_outpoint, + DERIV_INDEX, + noise_conn, + ) + assert self.send_sigs( + txs["cancel"]["sigs"], + cancel_txid, + deposit_outpoint, + DERIV_INDEX, + noise_conn, + ) + self.wait_for_log("Now watching for Unvault broadcast.") + + return txs diff --git a/tests/test_framework/utils.py b/tests/test_framework/utils.py new file mode 100644 index 0000000..cf664ed --- /dev/null +++ b/tests/test_framework/utils.py @@ -0,0 +1,312 @@ +""" +Lot of the code here is stolen from C-lightning's test suite. This is surely +Rusty Russell or Christian Decker who wrote most of this (I'd put some sats on +cdecker), so credits to them ! (MIT licensed) +""" +import itertools +import json +import logging +import os +import re +import subprocess +import threading +import time + + +TIMEOUT = int(os.getenv("TIMEOUT", 60)) +EXECUTOR_WORKERS = int(os.getenv("EXECUTOR_WORKERS", 20)) +VERBOSE = os.getenv("VERBOSE", "0") == "1" +LOG_LEVEL = os.getenv("LOG_LEVEL", "trace") +assert LOG_LEVEL in ["trace", "debug", "info", "warn", "error"] + +COIN = 10 ** 8 + +# FIXME: This is a hack until we have a python-revault-tx. 
We use static xprivs
+# and a static deposit address across all tests
+STKS_XPRIVS = [
+    "xprv9s21ZrQH143K3dHHJFdvqxsuDmR6uVmidU3ByetiTpc1Tyw9LD92iZBUiCCGBTqPULEjAZPPkZmhT7sxiSo47moNnELA1aZzDG6AQquzSdY",
+    "xprvA12WBRLLa6eYs8DucMdv2nYusFAwLxYvNbN54ixQmfRojyLG3NbZxCTAdgNfmhBoPKkNUXAxJK6nc2gd6NjJmZWihYQf5mPKn719kmtSPJj",
+    "xprvA1xrc2Pp7KkTneNq32UctAG5dHk4T2CAq6fmJeVQesXDi3HPUr361fSarv3VnBFHW14u9gm57eC7sdEM7muL8oc8uV3ctmUa4ZR3FTgM5Wp",
+    "xprvA12Gy5MjHkiswfVT1gPpsTFai1zSJbQ8bcDTQygfUf77VzvCtdQBa3X3CufPhTUYUaYCECdKyjjZz7Du6a9ckRh1BC7V8haub6F2kYMCct6",
+]
+STKS_XPUBS = [
+    "xpub661MyMwAqRbcG7MkQHAwD6pdmoFbJxVZzgxnn3JL2A8zLnGHskTHGMVxZUch5T2PHyQwxGtc2BTnw9swJUiXfKbzFggryY8AEwkc1amoCbm",
+    "xpub6E1ravsEQUCr5cJNiPAvPvVeRH1RkRGmjpHfs7N2KzxncmfQauupVzmeUxzZ4osSfJc2SC9fMcn1aAPEc1b89nhdvVjeLtQgCQHdnJ4ND42",
+    "xpub6ExD1XvhwhJm18TJ941dFJCpBKaYrUv2CKbN72u2DD4CaqcY2PMLZTm4iBvV8LMnNDnxjsC8Pk7chXMEw9ejesmZNX4dJsW7vbDBcASxNX7",
+    "xpub6E1dNatd88HBA9Zv7hvqEbCKG3pvi47yxq94DN6H2ze6NoFMSAiS7qqX4CUfDHrt6UsEHah8UPp1Bw2q9p2pcZHNvQxMiaoeErGUTKBmj4P",
+]
+DERIV_INDEX = 7651
+DEPOSIT_ADDRESS = "bcrt1qgprmrfkz5mucga0ec046v0sf8yg2y4za99c0h26ew5ycfx64sgdsl0u2j3"
+
+
+def wait_for(success, timeout=TIMEOUT, debug_fn=None):
+    """
+    Run success() either until it returns True, or until the timeout is reached.
+    debug_fn is logged at each call to success, it can be useful for debugging
+    when tests fail.
+
+    Raises ValueError if the timeout elapses before success() returns True.
+    """
+    start_time = time.time()
+    interval = 0.25
+    while not success():
+        # Only time out while success() is still False: the previous version
+        # could raise after a long sleep even though the condition had just
+        # been met, and its ValueError("Error waiting for {}", success) never
+        # actually formatted the message (two-argument ValueError).
+        if time.time() >= start_time + timeout:
+            raise ValueError("Error waiting for {}".format(success))
+        if debug_fn is not None:
+            logging.info(debug_fn())
+        time.sleep(interval)
+        # Exponential backoff between polls, capped at 5 seconds.
+        interval = min(interval * 2, 5)
+
+
+# FIXME: have a python-revault-tx lib to avoid this hack..
+def get_descriptors(stks_xpubs, cosigs_keys, mans_xpubs, mans_thresh, cpfp_xpubs, csv): + # tests/test_framework/../../contrib/tools/mscompiler/target/debug/mscompiler + mscompiler_dir = os.path.abspath( + os.path.join( + os.path.dirname(os.path.dirname(os.path.dirname(__file__))), + "contrib", + "tools", + "mscompiler", + ) + ) + cwd = os.getcwd() + os.chdir(mscompiler_dir) + try: + subprocess.check_call(["cargo", "build"]) + except subprocess.CalledProcessError as e: + logging.error(f"Error compiling mscompiler: {str(e)}") + raise e + finally: + os.chdir(cwd) + + mscompiler_bin = os.path.join(mscompiler_dir, "target", "debug", "mscompiler") + cmd = [ + mscompiler_bin, + f"{json.dumps(stks_xpubs)}", + f"{json.dumps(cosigs_keys)}", + f"{json.dumps(mans_xpubs)}", + str(mans_thresh), + f"{json.dumps(cpfp_xpubs)}", + str(csv), + ] + try: + descs_json = subprocess.check_output(cmd) + except subprocess.CalledProcessError as e: + logging.error(f"Error running mscompiler with command '{' '.join(cmd)}'") + raise e + + descs = json.loads(descs_json) + return ( + descs["deposit_descriptor"], + descs["unvault_descriptor"], + descs["cpfp_descriptor"], + ) + + +# FIXME: have a python-revault-tx lib to avoid this hack.. +def get_signed_txs( + stks_xprivs, + deposit_desc, + unvault_desc, + cpfp_desc, + emer_addr, + deposit_outpoint, + deposit_value, + deriv_index, +): + """ + Get the Unvault, Cancel, Emergency and Unvault Emergency fully signed + transactions extracted, ready to be broadcast. 
+ """ + # tests/test_framework/../../contrib/tools/txbuilder/target/debug/txbuilder + txbuilder_dir = os.path.abspath( + os.path.join( + os.path.dirname(os.path.dirname(os.path.dirname(__file__))), + "contrib", + "tools", + "txbuilder", + ) + ) + cwd = os.getcwd() + os.chdir(txbuilder_dir) + try: + subprocess.check_call(["cargo", "build"]) + except subprocess.CalledProcessError as e: + logging.error(f"Error compiling txbuilder: {str(e)}") + raise e + finally: + os.chdir(cwd) + + txbuilder_bin = os.path.join(txbuilder_dir, "target", "debug", "txbuilder") + cmd = [ + txbuilder_bin, + f"{json.dumps(stks_xprivs)}", + f"{str(deposit_desc)}", + f"{str(unvault_desc)}", + f"{str(cpfp_desc)}", + emer_addr, + deposit_outpoint, + str(int(deposit_value)), + str(deriv_index), + ] + try: + txs_json = subprocess.check_output(cmd) + except subprocess.CalledProcessError as e: + logging.error(f"Error running txbuilder with command '{' '.join(cmd)}'") + raise e + + return json.loads(txs_json) + + +class TailableProc(object): + """A monitorable process that we can start, stop and tail. + + This is the base class for the daemons. It allows us to directly + tail the processes and react to their output. + """ + + def __init__(self, outputDir=None, verbose=True): + self.logs = [] + self.logs_cond = threading.Condition(threading.RLock()) + self.env = os.environ.copy() + self.running = False + self.proc = None + self.outputDir = outputDir + self.logsearch_start = 0 + + # Set by inherited classes + self.cmd_line = [] + self.prefix = "" + + # Should we be logging lines we read from stdout? + self.verbose = verbose + + # A filter function that'll tell us whether to filter out the line (not + # pass it to the log matcher and not print it to stdout). 
+ self.log_filter = lambda _: False + + def start(self, stdin=None, stdout=None, stderr=None): + """Start the underlying process and start monitoring it.""" + logging.debug("Starting '%s'", " ".join(self.cmd_line)) + self.proc = subprocess.Popen( + self.cmd_line, + stdin=stdin, + stdout=stdout if stdout else subprocess.PIPE, + stderr=stderr if stderr else subprocess.PIPE, + env=self.env, + ) + self.thread = threading.Thread(target=self.tail) + self.thread.daemon = True + self.thread.start() + self.running = True + + def save_log(self): + if self.outputDir: + logpath = os.path.join(self.outputDir, "log") + with open(logpath, "w") as f: + for l in self.logs: + f.write(l + "\n") + + def stop(self, timeout=10): + self.save_log() + self.proc.terminate() + + # Now give it some time to react to the signal + rc = self.proc.wait(timeout) + + if rc is None: + self.proc.kill() + self.proc.wait() + + self.thread.join() + + return self.proc.returncode + + def kill(self): + """Kill process without giving it warning.""" + self.proc.kill() + self.proc.wait() + self.thread.join() + + def tail(self): + """Tail the stdout of the process and remember it. + + Stores the lines of output produced by the process in + self.logs and signals that a new line was read so that it can + be picked up by consumers. 
+        """
+        out = self.proc.stdout.readline
+        err = self.proc.stderr.readline
+        # The pipes are opened in binary mode, so readline() yields bytes and
+        # the iter() sentinel must be b"". With the previous "" (str) sentinel
+        # the sentinel never matched: stdout EOF hit the empty-line break below
+        # and the stderr iterator was never reached, dropping all stderr output.
+        for line in itertools.chain(iter(out, b""), iter(err, b"")):
+            if len(line) == 0:
+                # Defensive only: with a matching sentinel, iteration stops at
+                # EOF before an empty line is ever delivered here.
+                break
+            if self.log_filter(line.decode("utf-8")):
+                continue
+            if self.verbose:
+                logging.debug(f"{self.prefix}: {line.decode().rstrip()}")
+            with self.logs_cond:
+                self.logs.append(str(line.rstrip()))
+                # notify_all(): notifyAll() is a deprecated alias (Python 3.10+).
+                self.logs_cond.notify_all()
+        self.running = False
+        self.proc.stdout.close()
+        self.proc.stderr.close()
+
+    def is_in_log(self, regex, start=0):
+        """Look for `regex` in the logs."""
+
+        ex = re.compile(regex)
+        for l in self.logs[start:]:
+            if ex.search(l):
+                logging.debug("Found '%s' in logs", regex)
+                return l
+
+        logging.debug(f"{self.prefix} : Did not find {regex} in logs")
+        return None
+
+    def wait_for_logs(self, regexs, timeout=TIMEOUT):
+        """Look for `regexs` in the logs.
+
+        We tail the stdout of the process and look for each regex in `regexs`,
+        starting from last of the previous waited-for log entries (if any). We
+        fail if the timeout is exceeded or if the underlying process
+        exits before all the `regexs` were found.
+
+        If timeout is None, no time-out is applied. 
+ """ + logging.debug("Waiting for {} in the logs".format(regexs)) + + exs = [re.compile(r) for r in regexs] + start_time = time.time() + pos = self.logsearch_start + + while True: + if timeout is not None and time.time() > start_time + timeout: + print("Time-out: can't find {} in logs".format(exs)) + for r in exs: + if self.is_in_log(r): + print("({} was previously in logs!)".format(r)) + raise TimeoutError('Unable to find "{}" in logs.'.format(exs)) + + with self.logs_cond: + if pos >= len(self.logs): + if not self.running: + raise ValueError("Process died while waiting for logs") + self.logs_cond.wait(1) + continue + + for r in exs.copy(): + self.logsearch_start = pos + 1 + if r.search(self.logs[pos]): + logging.debug("Found '%s' in logs", r) + exs.remove(r) + break + if len(exs) == 0: + return self.logs[pos] + pos += 1 + + def wait_for_log(self, regex, timeout=TIMEOUT): + """Look for `regex` in the logs. + + Convenience wrapper for the common case of only seeking a single entry. + """ + return self.wait_for_logs([regex], timeout)