diff --git a/.github/workflows/integration_tests.yml b/.github/workflows/integration_tests.yml index ec47427dd..44c58e0b7 100644 --- a/.github/workflows/integration_tests.yml +++ b/.github/workflows/integration_tests.yml @@ -22,7 +22,7 @@ jobs: - name: Pull Relayer Docker Image run: | docker pull ghcr.io/near/pagoda-relayer-rs-fastauth - docker tag ghcr.io/near/pagoda-relayer-rs-fastauth pagoda-relayer-rs-fastauth + docker pull ghcr.io/near/sandbox - name: Set up Docker Buildx uses: docker/setup-buildx-action@v2 - name: Install stable toolchain diff --git a/Cargo.lock b/Cargo.lock index af7ea6dff..d23da5082 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -224,6 +224,12 @@ version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "23b62fc65de8e4e7f52534fb52b0f3ed04746ae267519eef2a83941e8085068b" +[[package]] +name = "arrayvec" +version = "0.7.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8da52d66c7071e2e3fa2a1e5c6d088fec47b593032b254f5e980de8ea54454d6" + [[package]] name = "async-channel" version = "1.8.0" @@ -519,7 +525,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "afa748e348ad3be8263be728124b24a24f268266f6f5d58af9d75f6a40b5c587" dependencies = [ "arrayref", - "arrayvec", + "arrayvec 0.5.2", "constant_time_eq", ] @@ -586,13 +592,15 @@ dependencies = [ [[package]] name = "bollard" -version = "0.14.0" +version = "0.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "af254ed2da4936ef73309e9597180558821cb16ae9bba4cb24ce6b612d8d80ed" +checksum = "c92fed694fd5a7468c971538351c61b9c115f1ae6ed411cd2800f0f299403a4b" dependencies = [ - "base64 0.21.0", + "base64 0.13.1", "bollard-stubs", "bytes", + "chrono", + "dirs-next", "futures-core", "futures-util", "hex 0.4.3", @@ -600,27 +608,27 @@ dependencies = [ "hyper", "hyperlocal", "log", - "pin-project-lite", + "pin-project", "serde", "serde_derive", "serde_json", - "serde_repr", "serde_urlencoded", "thiserror", "tokio", - "tokio-util 0.7.3", + "tokio-util 0.6.10", "url 2.3.1", "winapi", ] [[package]] name = "bollard-stubs" -version = "1.42.0-rc.7" +version = "1.41.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "602bda35f33aeb571cef387dcd4042c643a8bf689d8aaac2cc47ea24cb7bc7e0" +checksum = "ed2f2e73fffe9455141e170fb9c1feb0ac521ec7e7dcd47a7cab72a658490fb8" dependencies = [ + "chrono", "serde", - "serde_with", + "serde_with 1.14.0", ] [[package]] @@ -1128,7 +1136,7 @@ dependencies = [ "ff-zeroize", "generic-array 0.14.7", "hex 0.4.3", - "hmac", + "hmac 0.11.0", "lazy_static", "merkle-cbt", "num-integer", @@ -1207,14 +1215,38 @@ dependencies = [ "syn 2.0.15", ] +[[package]] +name = "darling" +version = "0.13.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a01d95850c592940db9b8194bc39f4bc0e89dee5c4265e4b1807c34a9aba453c" +dependencies = [ + "darling_core 0.13.4", + "darling_macro 0.13.4", +] + [[package]] name = "darling" version = "0.14.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7b750cb3417fd1b327431a470f388520309479ab0bf5e323505daf0290cd3850" dependencies = [ - "darling_core", - "darling_macro", + "darling_core 0.14.4", + "darling_macro 0.14.4", +] + +[[package]] +name = "darling_core" +version = "0.13.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "859d65a907b6852c9361e3185c862aae7fafd2887876799fa55f5f99dc40d610" +dependencies = [ + "fnv", + "ident_case", + "proc-macro2", + "quote", + 
"strsim", + "syn 1.0.109", ] [[package]] @@ -1231,13 +1263,24 @@ dependencies = [ "syn 1.0.109", ] +[[package]] +name = "darling_macro" +version = "0.13.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9c972679f83bdf9c42bd905396b6c3588a843a17f0f16dfcfa3e2c5d57441835" +dependencies = [ + "darling_core 0.13.4", + "quote", + "syn 1.0.109", +] + [[package]] name = "darling_macro" version = "0.14.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a4aab4dbc9f7611d8b55048a3a16d2d010c2c8334e46304b40ac1cc14bf3b48e" dependencies = [ - "darling_core", + "darling_core 0.14.4", "quote", "syn 1.0.109", ] @@ -1313,6 +1356,7 @@ dependencies = [ "block-buffer 0.10.4", "const-oid 0.9.2", "crypto-common", + "subtle", ] [[package]] @@ -1322,7 +1366,28 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3fd78930633bd1c6e35c4b42b1df7b0cbc6bc191146e512bb3bedf243fcc3901" dependencies = [ "libc", - "redox_users", + "redox_users 0.3.5", + "winapi", +] + +[[package]] +name = "dirs-next" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b98cf8ebf19c3d1b223e151f99a4f9f0690dca41414773390fc824184ac833e1" +dependencies = [ + "cfg-if", + "dirs-sys-next", +] + +[[package]] +name = "dirs-sys-next" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4ebda144c4fe02d1f7ea1a7d9641b6fc6b580adcfa024ae48797ecdeb6825b4d" +dependencies = [ + "libc", + "redox_users 0.4.3", "winapi", ] @@ -1340,7 +1405,7 @@ checksum = "43ee23aa5b4f68c7a092b5c3beb25f50c406adc75e2363634f242f28ab255372" dependencies = [ "der 0.4.5", "elliptic-curve", - "hmac", + "hmac 0.11.0", "signature 1.3.2", ] @@ -1780,7 +1845,7 @@ dependencies = [ "mime", "serde", "serde_json", - "serde_with", + "serde_with 2.3.2", "tokio", "tower-service", "url 1.7.2", @@ -1871,6 +1936,9 @@ name = "hashbrown" version = "0.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888" +dependencies = [ + "ahash 0.7.6", +] [[package]] name = "hashbrown" @@ -1945,6 +2013,15 @@ dependencies = [ "digest 0.9.0", ] +[[package]] +name = "hmac" +version = "0.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6c49c37c09c17a53d937dfbb742eb3a961d65a994e6bcdcf37e7399d0cc8ab5e" +dependencies = [ + "digest 0.10.6", +] + [[package]] name = "home" version = "0.5.4" @@ -2437,6 +2514,7 @@ version = "0.1.0" dependencies = [ "anyhow", "bollard", + "clap 4.2.4", "curv-kzen", "ed25519-dalek", "futures", @@ -2445,13 +2523,16 @@ dependencies = [ "mpc-recovery", "multi-party-eddsa", "near-crypto 0.16.1", + "near-units", "once_cell", "portpicker", "rand 0.8.5", "reqwest", "serde", "serde_json", + "testcontainers", "tokio", + "tokio-util 0.7.3", "workspaces", ] @@ -2887,6 +2968,38 @@ version = "0.16.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bc1279be274b9a49c2cb4b62541241a1ff6745cb77ca81ece7f949cfbc229bff" +[[package]] +name = "near-units" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "97a2b77f295d398589eeee51ad0887905ef1734fb12b45cb6d77bd7e401988b9" +dependencies = [ + "near-units-core", + "near-units-macro", +] + +[[package]] +name = "near-units-core" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "89aa2a7985de87a08ca35f28abd8d00f0f901e704257e6e029aadef981386bc6" 
+dependencies = [ + "num-format", + "regex", +] + +[[package]] +name = "near-units-macro" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "89ab45d066220846f9bd5c21e9ab88c47c892edd36f962ada78bf8308523171a" +dependencies = [ + "near-units-core", + "proc-macro2", + "quote", + "syn 1.0.109", +] + [[package]] name = "near-vm-errors" version = "0.15.0" @@ -2974,6 +3087,16 @@ dependencies = [ "zeroize", ] +[[package]] +name = "num-format" +version = "0.4.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a652d9771a63711fd3c3deb670acfbe5c30a4072e664d7a3bf5a9e1056ac72c3" +dependencies = [ + "arrayvec 0.7.2", + "itoa", +] + [[package]] name = "num-integer" version = "0.1.45" @@ -3783,6 +3906,17 @@ dependencies = [ "rust-argon2", ] +[[package]] +name = "redox_users" +version = "0.4.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b033d837a7cf162d7993aded9304e30a83213c648b6e389db233191f891e5c2b" +dependencies = [ + "getrandom 0.2.9", + "redox_syscall 0.2.16", + "thiserror", +] + [[package]] name = "reed-solomon-erasure" version = "4.0.2" @@ -4197,6 +4331,16 @@ dependencies = [ "serde", ] +[[package]] +name = "serde_with" +version = "1.14.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "678b5a069e50bf00ecd22d0cd8ddf7c236f68581b03db652061ed5eb13a312ff" +dependencies = [ + "serde", + "serde_with_macros 1.5.2", +] + [[package]] name = "serde_with" version = "2.3.2" @@ -4209,17 +4353,29 @@ dependencies = [ "indexmap", "serde", "serde_json", - "serde_with_macros", + "serde_with_macros 2.3.2", "time 0.3.20", ] +[[package]] +name = "serde_with_macros" +version = "1.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e182d6ec6f05393cc0e5ed1bf81ad6db3a8feedf8ee515ecdd369809bcce8082" +dependencies = [ + "darling 0.13.4", + "proc-macro2", + "quote", + "syn 1.0.109", +] + [[package]] name = "serde_with_macros" version = "2.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "859011bddcc11f289f07f467cc1fe01c7a941daa4d8f6c40d4d1c92eb6d9319c" dependencies = [ - "darling", + "darling 0.14.4", "proc-macro2", "quote", "syn 1.0.109", @@ -4526,6 +4682,26 @@ dependencies = [ "winapi-util", ] +[[package]] +name = "testcontainers" +version = "0.14.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0e2b1567ca8a2b819ea7b28c92be35d9f76fb9edb214321dcc86eb96023d1f87" +dependencies = [ + "async-trait", + "bollard", + "bollard-stubs", + "futures", + "hex 0.4.3", + "hmac 0.12.1", + "log", + "rand 0.8.5", + "serde", + "serde_json", + "sha2 0.10.6", + "tokio", +] + [[package]] name = "textwrap" version = "0.16.0" @@ -4733,8 +4909,12 @@ checksum = "cc463cd8deddc3770d20f9852143d50bf6094e640b485cb2e189a2099085ff45" dependencies = [ "bytes", "futures-core", + "futures-io", "futures-sink", + "futures-util", + "hashbrown 0.12.3", "pin-project-lite", + "slab", "tokio", "tracing", ] @@ -5478,7 +5658,7 @@ dependencies = [ [[package]] name = "workspaces" version = "0.7.0" -source = "git+https://github.com/near/workspaces-rs?branch=main#b24cf3cb409bae898275de08d318551941ba5414" +source = "git+https://github.com/near/workspaces-rs?branch=daniyar/custom-validator-key#0839ec65f7becdce5349b4500a93b9f54f5c5fdf" dependencies = [ "async-process", "async-trait", diff --git a/Dockerfile b/Dockerfile index a94649efe..15665992a 100644 --- a/Dockerfile +++ b/Dockerfile @@ -12,7 +12,7 @@ COPY . . 
RUN cargo build --release --package mpc-recovery FROM debian:bullseye-slim as runtime -RUN apt-get update && apt-get install --assume-yes libssl-dev ca-certificates +RUN apt-get update && apt-get install --assume-yes libssl-dev ca-certificates curl RUN update-ca-certificates COPY --from=builder /usr/src/app/target/release/mpc-recovery /usr/local/bin/mpc-recovery WORKDIR /usr/local/bin diff --git a/integration-tests/Cargo.toml b/integration-tests/Cargo.toml index 6492a52f0..b728efe8e 100644 --- a/integration-tests/Cargo.toml +++ b/integration-tests/Cargo.toml @@ -4,24 +4,30 @@ version = "0.1.0" edition = "2021" publish = false -[dev-dependencies] +[dependencies] anyhow = "1.0" -bollard = "0.14" +bollard = "0.11" +clap = { version = "4.2", features = ["derive", "env"] } +ed25519-dalek = { version = "1.0.1", features = ["serde"] } futures = "0.3" hex = "0.4" hyper = { version = "0.14", features = ["full"] } mpc-recovery = { path = "../mpc-recovery" } +multi-party-eddsa = { git = "https://github.com/DavidM-D/multi-party-eddsa.git", rev = "25ae4fdc5ff7819ae70e73ab4afacf1c24fc4da1" } near-crypto = "0.16.1" -once_cell = "1" +near-units = "0.2.0" portpicker = "0.1" -rand = "0.8" serde = "1" serde_json = "1" +testcontainers = { version = "0.14", features = ["experimental"] } tokio = { version = "1.0", features = ["full"] } -workspaces = { git = "https://github.com/near/workspaces-rs", branch = "main" } +workspaces = { git = "https://github.com/near/workspaces-rs", branch = "daniyar/custom-validator-key" } + +[dev-dependencies] +once_cell = "1" +rand = "0.8" +tokio-util = { version = "0.7", features = ["full"] } curv = { package = "curv-kzen", version = "0.9", default-features = false } -ed25519-dalek = {version = "1.0.1", features = ["serde"]} -multi-party-eddsa = { git = "https://github.com/DavidM-D/multi-party-eddsa.git", rev = "25ae4fdc5ff7819ae70e73ab4afacf1c24fc4da1" } reqwest = "0.11.16" [features] diff --git a/integration-tests/README.md b/integration-tests/README.md new file mode 100644 index 000000000..4be18d852 --- /dev/null +++ b/integration-tests/README.md @@ -0,0 +1,66 @@ +# Integration tests + +## Basic guide + +Running the integration tests requires the relayer and sandbox Docker images to be present on your machine: + +```bash +docker pull ghcr.io/near/pagoda-relayer-rs-fastauth +docker pull ghcr.io/near/sandbox +``` + +Now, build mpc-recovery from the project's root: + +```bash +docker build . -t near/mpc-recovery +``` + +**Note**: you will need to re-build the Docker image each time you make a code change and want to run the integration tests. + +Finally, run the integration tests: + +```bash +cargo test -p mpc-recovery-integration-tests +``` + +## FAQ + +### I want to run a test, but keep the Docker containers from being destroyed + +You can set the environment variable `TESTCONTAINERS=keep` to keep all of the Docker containers. For example: + +```bash +$ TESTCONTAINERS=keep cargo test -p mpc-recovery-integration-tests +``` + +### There are no logs anymore, how do I debug? + +The easiest way is to run one isolated test of your choosing while keeping the containers (see above): + +```bash +$ TESTCONTAINERS=keep cargo test -p mpc-recovery-integration-tests test_basic_action +``` + +Now you can run `docker ps` and it should list all of the containers related to your test (the most recent ones are always at the top, so look out for those).
For example: + +```bash +CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES +b2724d0c9530 near/mpc-recovery:latest "mpc-recovery start-…" 5 minutes ago Up 5 minutes 0.0.0.0:32792->19985/tcp, :::32792->19985/tcp fervent_moore +67308ab06c5d near/mpc-recovery:latest "mpc-recovery start-…" 5 minutes ago Up 5 minutes 0.0.0.0:32791->3000/tcp, :::32791->3000/tcp upbeat_volhard +65ec65384af4 near/mpc-recovery:latest "mpc-recovery start-…" 5 minutes ago Up 5 minutes 0.0.0.0:32790->3000/tcp, :::32790->3000/tcp friendly_easley +b4f90b1546ec near/mpc-recovery:latest "mpc-recovery start-…" 5 minutes ago Up 5 minutes 0.0.0.0:32789->3000/tcp, :::32789->3000/tcp vibrant_allen +934ec13d9146 ghcr.io/near/pagoda-relayer-rs-fastauth:latest "/usr/local/bin/entr…" 5 minutes ago Up 5 minutes 0.0.0.0:32788->16581/tcp, :::32788->16581/tcp sleepy_grothendieck +c505ead6eb18 redis:latest "docker-entrypoint.s…" 5 minutes ago Up 5 minutes 0.0.0.0:32787->6379/tcp, :::32787->6379/tcp trusting_lederberg +2843226b16a9 google/cloud-sdk:latest "gcloud beta emulato…" 5 minutes ago Up 5 minutes 0.0.0.0:32786->15805/tcp, :::32786->15805/tcp hungry_pasteur +3f4c70020a4c ghcr.io/near/sandbox:latest "near-sandbox --home…" 5 minutes ago Up 5 minutes practical_elbakyan +``` + +Now, you can inspect each container's logs according to your needs using `docker logs <container-id>`. You might also want to reproduce some components of the test manually by making `curl` requests to the leader node (its web port is exposed on your host machine; use the `docker ps` output above as a reference). + +### Re-building the Docker image is way too slow, is there a faster development feedback loop? + +We have a CLI tool that can instantiate a short-lived development environment that has everything except for the leader node set up. You can then seamlessly plug in your own leader node instance that you have set up manually (the tool gives you a CLI command to use as a starting point, but you can attach a debugger, enable extra logs, etc.). Try it out now (sets up 3 signer nodes): + +```bash +$ cargo run -p mpc-recovery-integration-tests -- test-leader 3 +``` diff --git a/integration-tests/src/containers.rs b/integration-tests/src/containers.rs new file mode 100644 index 000000000..1e5dcdb49 --- /dev/null +++ b/integration-tests/src/containers.rs @@ -0,0 +1,437 @@ +#![allow(clippy::too_many_arguments)] + +use anyhow::anyhow; +use bollard::Docker; +use ed25519_dalek::ed25519::signature::digest::{consts::U32, generic_array::GenericArray}; +use hyper::{Body, Client, Method, Request, StatusCode, Uri}; +use mpc_recovery::msg::{AddKeyRequest, AddKeyResponse, NewAccountRequest, NewAccountResponse}; +use multi_party_eddsa::protocols::ExpandedKeyPair; +use near_crypto::SecretKey; +use serde::{Deserialize, Serialize}; +use testcontainers::{ + clients::Cli, + core::{ExecCommand, WaitFor}, + images::generic::GenericImage, + Container, Image, RunnableImage, +}; +use workspaces::AccountId; + +pub struct DockerClient { + pub docker: Docker, + pub cli: Cli, +} + +impl DockerClient { + pub async fn get_network_ip_address<I: Image>( + &self, + container: &Container<'_, I>, + network: &str, + ) -> anyhow::Result<String> { + let network_settings = self + .docker + .inspect_container(container.id(), None) + .await? + .network_settings + .ok_or_else(|| anyhow!("missing NetworkSettings on container '{}'", container.id()))?; + let ip_address = network_settings + .networks + .ok_or_else(|| { + anyhow!( + "missing NetworkSettings.Networks on container '{}'", + container.id() + ) + })?
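+ // A container can be attached to several Docker networks; Networks is keyed by network name.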
.get(network) + .cloned() + .ok_or_else(|| { + anyhow!( + "container '{}' is not a part of network '{}'", + container.id(), + network + ) + })? + .ip_address + .ok_or_else(|| { + anyhow!( + "container '{}' belongs to network '{}', but is not assigned an IP address", + container.id(), + network + ) + })?; + + Ok(ip_address) + } +} + +impl Default for DockerClient { + fn default() -> Self { + Self { + docker: Docker::connect_with_local_defaults().unwrap(), + cli: Default::default(), + } + } +} + +pub struct Redis<'a> { + pub container: Container<'a, GenericImage>, + pub address: String, +} + +impl<'a> Redis<'a> { + pub async fn run(docker_client: &'a DockerClient, network: &str) -> anyhow::Result<Redis<'a>> { + let image = GenericImage::new("redis", "latest") + .with_wait_for(WaitFor::message_on_stdout("Ready to accept connections")); + let image: RunnableImage<GenericImage> = image.into(); + let image = image.with_network(network); + let container = docker_client.cli.run(image); + let address = docker_client + .get_network_ip_address(&container, network) + .await?; + + Ok(Redis { container, address }) + } +} + +pub struct Sandbox<'a> { + pub container: Container<'a, GenericImage>, + pub address: String, +} + +impl<'a> Sandbox<'a> { + pub const CONTAINER_RPC_PORT: u16 = 3000; + pub const CONTAINER_NETWORK_PORT: u16 = 3001; + + pub async fn run( + docker_client: &'a DockerClient, + network: &str, + ) -> anyhow::Result<Sandbox<'a>> { + let image = GenericImage::new("ghcr.io/near/sandbox", "latest") + .with_wait_for(WaitFor::seconds(2)) + .with_exposed_port(Self::CONTAINER_RPC_PORT); + let image: RunnableImage<GenericImage> = ( + image, + vec![ + "--rpc-addr".to_string(), + format!("0.0.0.0:{}", Self::CONTAINER_RPC_PORT), + "--network-addr".to_string(), + format!("0.0.0.0:{}", Self::CONTAINER_NETWORK_PORT), + ], + ) + .into(); + let image = image.with_network(network); + let container = docker_client.cli.run(image); + let address = docker_client + .get_network_ip_address(&container, network) + .await?; + + Ok(Sandbox { + container, + address: format!("http://{}:{}", address, Self::CONTAINER_RPC_PORT), + }) + } +} + +pub struct Relayer<'a> { + pub container: Container<'a, GenericImage>, + pub address: String, +} + +impl<'a> Relayer<'a> { + pub const CONTAINER_PORT: u16 = 3000; + + pub async fn run( + docker_client: &'a DockerClient, + network: &str, + near_rpc: &str, + redis_hostname: &str, + relayer_account_id: &AccountId, + relayer_account_sk: &SecretKey, + creator_account_id: &AccountId, + social_db_id: &AccountId, + social_account_id: &AccountId, + social_account_sk: &SecretKey, + ) -> anyhow::Result<Relayer<'a>> { + let image = GenericImage::new("ghcr.io/near/pagoda-relayer-rs-fastauth", "latest") + .with_wait_for(WaitFor::message_on_stdout("listening on")) + .with_exposed_port(Self::CONTAINER_PORT) + .with_env_var("RUST_LOG", "DEBUG") + .with_env_var("NETWORK", "custom") + .with_env_var("SERVER_PORT", Self::CONTAINER_PORT.to_string()) + .with_env_var("RELAYER_RPC_URL", near_rpc) + .with_env_var("RELAYER_ACCOUNT_ID", relayer_account_id.to_string()) + .with_env_var("REDIS_HOST", redis_hostname) + .with_env_var("PUBLIC_KEY", relayer_account_sk.public_key().to_string()) + .with_env_var("PRIVATE_KEY", relayer_account_sk.to_string()) + .with_env_var( + "RELAYER_WHITELISTED_CONTRACT", + creator_account_id.to_string(), + ) + .with_env_var("CUSTOM_SOCIAL_DB_ID", social_db_id.to_string()) + .with_env_var("STORAGE_ACCOUNT_ID", social_account_id.to_string()) + .with_env_var( + "STORAGE_PUBLIC_KEY", + social_account_sk.public_key().to_string(), + ) +
.with_env_var("STORAGE_PRIVATE_KEY", social_account_sk.to_string()); + let image: RunnableImage<GenericImage> = image.into(); + let image = image.with_network(network); + let container = docker_client.cli.run(image); + let ip_address = docker_client + .get_network_ip_address(&container, network) + .await?; + + Ok(Relayer { + container, + address: format!("http://{}:{}", ip_address, Self::CONTAINER_PORT), + }) + } +} + +pub struct Datastore<'a> { + pub container: Container<'a, GenericImage>, + pub address: String, +} + +impl<'a> Datastore<'a> { + pub const CONTAINER_PORT: u16 = 3000; + + pub async fn run( + docker_client: &'a DockerClient, + network: &str, + project_id: &str, + ) -> anyhow::Result<Datastore<'a>> { + let image = GenericImage::new("google/cloud-sdk", "latest") + .with_wait_for(WaitFor::message_on_stderr("Dev App Server is now running.")) + .with_exposed_port(Self::CONTAINER_PORT) + .with_entrypoint("gcloud") + .with_env_var( + "DATASTORE_EMULATOR_HOST", + format!("0.0.0.0:{}", Self::CONTAINER_PORT), + ) + .with_env_var("DATASTORE_PROJECT_ID", project_id); + let image: RunnableImage<GenericImage> = ( + image, + vec![ + "beta".to_string(), + "emulators".to_string(), + "datastore".to_string(), + "start".to_string(), + format!("--project={project_id}"), + "--host-port".to_string(), + format!("0.0.0.0:{}", Self::CONTAINER_PORT), + "--no-store-on-disk".to_string(), + ], + ) + .into(); + let image = image.with_network(network); + let container = docker_client.cli.run(image); + let ip_address = docker_client + .get_network_ip_address(&container, network) + .await?; + + Ok(Datastore { + container, + address: format!("http://{}:{}/", ip_address, Self::CONTAINER_PORT), + }) + } +} + +pub struct SignerNode<'a> { + pub container: Container<'a, GenericImage>, + pub address: String, + pub local_address: String, +} + +pub struct SignerNodeApi { + pub address: String, +} + +impl<'a> SignerNode<'a> { + // Container port used for the docker network, does not have to be unique + const CONTAINER_PORT: u16 = 3000; + + pub async fn run( + docker_client: &'a DockerClient, + network: &str, + node_id: u64, + sk_share: &ExpandedKeyPair, + cipher_key: &GenericArray<u8, U32>, + datastore_url: &str, + gcp_project_id: &str, + firebase_audience_id: &str, + ) -> anyhow::Result<SignerNode<'a>> { + let image: GenericImage = GenericImage::new("near/mpc-recovery", "latest") + .with_wait_for(WaitFor::Nothing) + .with_exposed_port(Self::CONTAINER_PORT) + .with_env_var("RUST_LOG", "mpc_recovery=DEBUG"); + let image: RunnableImage<GenericImage> = ( + image, + vec![ + "start-sign".to_string(), + "--node-id".to_string(), + node_id.to_string(), + "--sk-share".to_string(), + serde_json::to_string(&sk_share)?, + "--cipher-key".to_string(), + hex::encode(cipher_key), + "--web-port".to_string(), + Self::CONTAINER_PORT.to_string(), + "--pagoda-firebase-audience-id".to_string(), + firebase_audience_id.to_string(), + "--gcp-project-id".to_string(), + gcp_project_id.to_string(), + "--gcp-datastore-url".to_string(), + datastore_url.to_string(), + "--test".to_string(), + ], + ) + .into(); + let image = image.with_network(network); + let container = docker_client.cli.run(image); + let ip_address = docker_client + .get_network_ip_address(&container, network) + .await?; + let host_port = container.get_host_port_ipv4(Self::CONTAINER_PORT); + + container.exec(ExecCommand { + cmd: format!("bash -c 'while [[ \"$(curl -s -o /dev/null -w ''%{{http_code}}'' localhost:{})\" != \"200\" ]]; do sleep 1; done'", Self::CONTAINER_PORT), + ready_conditions: vec![WaitFor::message_on_stdout("node is ready to accept
connections")] }); + + Ok(SignerNode { + container, + address: format!("http://{ip_address}:{}", Self::CONTAINER_PORT), + local_address: format!("http://localhost:{host_port}"), + }) + } + + pub fn api(&self) -> SignerNodeApi { + SignerNodeApi { + address: self.local_address.clone(), + } + } +} + +pub struct LeaderNode<'a> { + pub container: Container<'a, GenericImage>, + pub address: String, +} + +pub struct LeaderNodeApi { + address: String, +} + +impl<'a> LeaderNode<'a> { + pub async fn run( + docker_client: &'a DockerClient, + network: &str, + sign_nodes: Vec<String>, + near_rpc: &str, + relayer_url: &str, + datastore_url: &str, + gcp_project_id: &str, + near_root_account: &AccountId, + account_creator_id: &AccountId, + account_creator_sk: &SecretKey, + firebase_audience_id: &str, + ) -> anyhow::Result<LeaderNode<'a>> { + let port = portpicker::pick_unused_port().expect("no free ports"); + + let image = GenericImage::new("near/mpc-recovery", "latest") + .with_wait_for(WaitFor::Nothing) + .with_exposed_port(port) + .with_env_var("RUST_LOG", "mpc_recovery=DEBUG"); + let mut cmd = vec![ + "start-leader".to_string(), + "--web-port".to_string(), + port.to_string(), + "--near-rpc".to_string(), + near_rpc.to_string(), + "--relayer-url".to_string(), + relayer_url.to_string(), + "--near-root-account".to_string(), + near_root_account.to_string(), + "--account-creator-id".to_string(), + account_creator_id.to_string(), + "--account-creator-sk".to_string(), + account_creator_sk.to_string(), + "--pagoda-firebase-audience-id".to_string(), + firebase_audience_id.to_string(), + "--gcp-project-id".to_string(), + gcp_project_id.to_string(), + "--gcp-datastore-url".to_string(), + datastore_url.to_string(), + "--test".to_string(), + ]; + for sign_node in sign_nodes { + cmd.push("--sign-nodes".to_string()); + cmd.push(sign_node); + } + let image: RunnableImage<GenericImage> = (image, cmd).into(); + let image = image.with_network(network); + let container = docker_client.cli.run(image); + let ip_address = docker_client + .get_network_ip_address(&container, network) + .await?; + + container.exec(ExecCommand { + cmd: format!("bash -c 'while [[ \"$(curl -s -o /dev/null -w ''%{{http_code}}'' localhost:{})\" != \"200\" ]]; do sleep 1; done'", port), + ready_conditions: vec![WaitFor::message_on_stdout("node is ready to accept connections")] + }); + + Ok(LeaderNode { + container, + address: format!("http://{ip_address}:{port}"), + }) + } + + pub fn api(&self) -> LeaderNodeApi { + LeaderNodeApi { + address: self.address.clone(), + } + } +} + +impl LeaderNodeApi { + async fn post<U, Req: Serialize, Resp>( + &self, + uri: U, + request: Req, + ) -> anyhow::Result<(StatusCode, Resp)> + where + Uri: TryFrom<U>, + <Uri as TryFrom<U>>::Error: Into<hyper::http::Error>, + for<'de> Resp: Deserialize<'de>, + { + let req = Request::builder() + .method(Method::POST) + .uri(uri) + .header("content-type", "application/json") + .body(Body::from(serde_json::to_string(&request)?))?; + + let client = Client::new(); + let response = client.request(req).await?; + let status = response.status(); + + let data = hyper::body::to_bytes(response).await?; + let response: Resp = serde_json::from_slice(&data)?; + + Ok((status, response)) + } + + pub async fn new_account( + &self, + request: NewAccountRequest, + ) -> anyhow::Result<(StatusCode, NewAccountResponse)> { + self.post(format!("{}/new_account", self.address), request) + .await + } + + pub async fn add_key( + &self, + request: AddKeyRequest, + ) -> anyhow::Result<(StatusCode, AddKeyResponse)> { + self.post(format!("{}/add_key", self.address), request) + .await + } +} diff --git
a/integration-tests/src/lib.rs b/integration-tests/src/lib.rs index 8b1378917..c85d8450b 100644 --- a/integration-tests/src/lib.rs +++ b/integration-tests/src/lib.rs @@ -1 +1,104 @@ +use bollard::exec::{CreateExecOptions, StartExecResults}; +use futures::StreamExt; +use near_crypto::{KeyFile, SecretKey}; +use near_units::parse_near; +use workspaces::{ + network::{Sandbox, ValidatorKey}, + AccountId, Worker, +}; +pub mod containers; +pub mod sandbox; + +async fn fetch_validator_keys( + docker_client: &containers::DockerClient, + sandbox: &containers::Sandbox<'_>, +) -> anyhow::Result<KeyFile> { + let create_result = docker_client + .docker + .create_exec( + sandbox.container.id(), + CreateExecOptions::<String> { + attach_stdout: Some(true), + attach_stderr: Some(true), + cmd: Some(vec![ + "cat".to_string(), + "/root/.near/validator_key.json".to_string(), + ]), + ..Default::default() + }, + ) + .await?; + + let start_result = docker_client + .docker + .start_exec(&create_result.id, None) + .await?; + + match start_result { + StartExecResults::Attached { mut output, .. } => { + let mut stream_contents = Vec::new(); + while let Some(chunk) = output.next().await { + stream_contents.extend_from_slice(&chunk?.into_bytes()); + } + + Ok(serde_json::from_slice(&stream_contents)?) + } + StartExecResults::Detached => unreachable!("unexpected detached output"), + } +} + +pub struct RelayerCtx<'a> { + pub sandbox: containers::Sandbox<'a>, + pub redis: containers::Redis<'a>, + pub relayer: containers::Relayer<'a>, + pub worker: Worker<Sandbox>, + pub creator_account_id: AccountId, + pub creator_account_sk: SecretKey, +} + +pub async fn initialize_relayer<'a>( + docker_client: &'a containers::DockerClient, + network: &str, +) -> anyhow::Result<RelayerCtx<'a>> { + let sandbox = containers::Sandbox::run(docker_client, network).await?; + let validator_key = fetch_validator_keys(docker_client, &sandbox).await?; + + let worker = workspaces::sandbox() + .rpc_addr(&sandbox.address) + .validator_key(ValidatorKey::Known( + validator_key.account_id.to_string().parse()?, + validator_key.secret_key.to_string().parse()?, + )) + .await?; + let social_db = sandbox::initialize_social_db(&worker).await?; + sandbox::initialize_linkdrop(&worker).await?; + let (relayer_account_id, relayer_account_sk) = sandbox::create_account(&worker).await?; + let (creator_account_id, creator_account_sk) = sandbox::create_account(&worker).await?; + let (social_account_id, social_account_sk) = sandbox::create_account(&worker).await?; + sandbox::up_funds_for_account(&worker, &social_account_id, parse_near!("1000 N")).await?; + + let redis = containers::Redis::run(docker_client, network).await?; + let relayer = containers::Relayer::run( + docker_client, + network, + &sandbox.address, + &redis.address, + &relayer_account_id, + &relayer_account_sk, + &creator_account_id, + social_db.id(), + &social_account_id, + &social_account_sk, + ) + .await?; + + Ok(RelayerCtx::<'a> { + sandbox, + redis, + relayer, + worker, + creator_account_id, + creator_account_sk, + }) +} diff --git a/integration-tests/src/main.rs b/integration-tests/src/main.rs new file mode 100644 index 000000000..edaac9696 --- /dev/null +++ b/integration-tests/src/main.rs @@ -0,0 +1,118 @@ +use clap::Parser; +use mpc_recovery::GenerateResult; +use mpc_recovery_integration_tests::containers; +use tokio::io::{stdin, AsyncReadExt}; + +const NETWORK: &str = "mpc_recovery_dev_network"; +const GCP_PROJECT_ID: &str = "mpc-recovery-dev-gcp-project"; +// TODO: figure out how to instantiate and use a local Firebase deployment +const
FIREBASE_AUDIENCE_ID: &str = "not-actually-used-in-integration-tests"; + +#[derive(Parser, Debug)] +enum Cli { + TestLeader { nodes: usize }, +} + +#[tokio::main] +async fn main() -> anyhow::Result<()> { + match Cli::parse() { + Cli::TestLeader { nodes } => { + let docker_client = containers::DockerClient::default(); + + let relayer_ctx_future = + mpc_recovery_integration_tests::initialize_relayer(&docker_client, NETWORK); + let datastore_future = + containers::Datastore::run(&docker_client, NETWORK, GCP_PROJECT_ID); + + let (relayer_ctx, datastore) = + futures::future::join(relayer_ctx_future, datastore_future).await; + let relayer_ctx = relayer_ctx?; + let datastore = datastore?; + + let GenerateResult { secrets, .. } = mpc_recovery::generate(nodes); + let mut signer_node_futures = Vec::new(); + for (i, (share, cipher_key)) in secrets.iter().enumerate().take(nodes) { + let signer_node = containers::SignerNode::run( + &docker_client, + NETWORK, + i as u64, + share, + cipher_key, + &datastore.address, + GCP_PROJECT_ID, + FIREBASE_AUDIENCE_ID, + ); + signer_node_futures.push(signer_node); + } + let signer_nodes = futures::future::join_all(signer_node_futures) + .await + .into_iter() + .collect::<Result<Vec<_>, _>>()?; + let signer_urls: &Vec<_> = &signer_nodes.iter().map(|n| n.address.clone()).collect(); + + let near_root_account = relayer_ctx.worker.root_account()?; + + let mut cmd = vec![ + "start-leader".to_string(), + "--web-port".to_string(), + "3000".to_string(), + "--near-rpc".to_string(), + format!( + "http://localhost:{}", + relayer_ctx + .sandbox + .container + .get_host_port_ipv4(containers::Sandbox::CONTAINER_RPC_PORT) + ), + "--relayer-url".to_string(), + format!( + "http://localhost:{}", + relayer_ctx + .relayer + .container + .get_host_port_ipv4(containers::Relayer::CONTAINER_PORT) + ), + "--near-root-account".to_string(), + near_root_account.id().to_string(), + "--account-creator-id".to_string(), + relayer_ctx.creator_account_id.to_string(), + "--account-creator-sk".to_string(), + relayer_ctx.creator_account_sk.to_string(), + "--pagoda-firebase-audience-id".to_string(), + FIREBASE_AUDIENCE_ID.to_string(), + "--gcp-project-id".to_string(), + GCP_PROJECT_ID.to_string(), + "--gcp-datastore-url".to_string(), + format!( + "http://localhost:{}", + datastore + .container + .get_host_port_ipv4(containers::Datastore::CONTAINER_PORT) + ), + "--test".to_string(), + ]; + for sign_node in signer_urls { + cmd.push("--sign-nodes".to_string()); + cmd.push(sign_node.clone()); + } + + println!("Please run the command below to start a leader node:"); + println!( + "RUST_LOG=mpc_recovery=debug cargo run --bin mpc-recovery -- {}", + cmd.join(" ") + ); + println!("===================================="); + println!("You can now interact with your local service manually. For example:"); + println!( + r#"curl -X POST -H "Content-Type: application/json" -d '{{"oidc_token": "validToken:1", "near_account_id": "abc45436676.near", "create_account_options": {{"full_access_keys": ["ed25519:4fnCz9NTEMhkfwAHDhFDkPS1mD58QHdRyago5n4vtCS2"]}}}}' http://localhost:3000/new_account"# + ); + + println!(); + println!("Press any key to exit and destroy all containers..."); + + while stdin().read(&mut [0]).await?
== 0 {} + } + }; + + Ok(()) +} diff --git a/integration-tests/src/sandbox.rs b/integration-tests/src/sandbox.rs new file mode 100644 index 000000000..8fb683103 --- /dev/null +++ b/integration-tests/src/sandbox.rs @@ -0,0 +1,81 @@ +use workspaces::{network::Sandbox, AccountId, Contract, Worker}; + +pub async fn initialize_social_db(worker: &Worker<Sandbox>) -> anyhow::Result<Contract> { + let social_db = worker + .import_contract(&"social.near".parse()?, &workspaces::mainnet().await?) + .transact() + .await?; + social_db + .call("new") + .max_gas() + .transact() + .await? + .into_result()?; + + Ok(social_db) +} + +// Linkdrop contains top-level account creation logic +pub async fn initialize_linkdrop(worker: &Worker<Sandbox>) -> anyhow::Result<()> { + let near_root_account = worker.root_account()?; + near_root_account + .deploy(include_bytes!("../linkdrop.wasm")) + .await? + .into_result()?; + near_root_account + .call(near_root_account.id(), "new") + .max_gas() + .transact() + .await? + .into_result()?; + + Ok(()) +} + +pub async fn create_account( + worker: &Worker<Sandbox>, +) -> anyhow::Result<(AccountId, near_crypto::SecretKey)> { + let (account_id, account_sk) = worker.dev_generate().await; + worker + .create_tla(account_id.clone(), account_sk.clone()) + .await? + .into_result()?; + + let account_sk: near_crypto::SecretKey = + serde_json::from_str(&serde_json::to_string(&account_sk)?)?; + + Ok((account_id, account_sk)) +} + +// Makes sure that the target account has at least the target amount of NEAR +pub async fn up_funds_for_account( + worker: &Worker<Sandbox>, + target_account_id: &AccountId, + target_amount: u128, +) -> anyhow::Result<()> { + // Max balance we can transfer out of a freshly created dev account + const DEV_ACCOUNT_AVAILABLE_BALANCE: u128 = 99 * 10u128.pow(24); + + let diff: u128 = target_amount.saturating_sub(worker.view_account(target_account_id).await?.balance); + // Integer ceiling division + let n = (diff + DEV_ACCOUNT_AVAILABLE_BALANCE - 1) / DEV_ACCOUNT_AVAILABLE_BALANCE; + let futures = (0..n).map(|_| async { + let tmp_account = worker.dev_create_account().await?; + tmp_account + .transfer_near(target_account_id, DEV_ACCOUNT_AVAILABLE_BALANCE) + .await? + .into_result()?; + tmp_account + .delete_account(target_account_id) + .await?
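+ // Deleting the temporary account sends its remaining balance to target_account_id (the beneficiary).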
+ .into_result()?; + + Ok::<(), anyhow::Error>(()) + }); + futures::future::join_all(futures) + .await + .into_iter() + .collect::<Result<Vec<_>, _>>()?; + + Ok(()) +} diff --git a/integration-tests/tests/docker/datastore.rs b/integration-tests/tests/docker/datastore.rs deleted file mode 100644 index 2a76ec7de..000000000 --- a/integration-tests/tests/docker/datastore.rs +++ /dev/null @@ -1,108 +0,0 @@ -use std::collections::HashMap; - -use crate::drop_container; -use bollard::{ - container::{Config, RemoveContainerOptions}, - image::CreateImageOptions, - service::{HostConfig, PortBinding}, - Docker, -}; -use futures::TryStreamExt; - -pub struct Datastore { - docker: Docker, - container_id: String, - pub address: String, -} - -impl Datastore { - pub async fn start(docker: &Docker, network: &str, project_id: &str) -> anyhow::Result<Self> { - super::create_network(docker, network).await?; - let web_port = portpicker::pick_unused_port().expect("no free ports"); - - let mut exposed_ports = HashMap::new(); - let mut port_bindings = HashMap::new(); - let empty = HashMap::<(), ()>::new(); - exposed_ports.insert(format!("{web_port}/tcp"), empty); - port_bindings.insert( - format!("{web_port}/tcp"), - Some(vec![PortBinding { - host_ip: None, - host_port: Some(web_port.to_string()), - }]), - ); - - docker - .create_image( - Some(CreateImageOptions { - from_image: "google/cloud-sdk:latest", - ..Default::default() - }), - None, - None, - ) - .try_collect::<Vec<_>>() - .await?; - - let config = Config { - image: Some("google/cloud-sdk:latest".to_string()), - tty: Some(true), - attach_stdout: Some(true), - attach_stderr: Some(true), - exposed_ports: Some(exposed_ports), - cmd: Some(vec![ - "gcloud".to_string(), - "beta".to_string(), - "emulators".to_string(), - "datastore".to_string(), - "start".to_string(), - format!("--project={project_id}"), - "--host-port".to_string(), - format!("0.0.0.0:{web_port}"), - "--no-store-on-disk".to_string(), - ]), - env: Some(vec![ - format!("DATASTORE_EMULATOR_HOST=0.0.0.0:{web_port}"), - format!("DATASTORE_PROJECT_ID={project_id}"), - ]), - host_config: Some(HostConfig { - network_mode: Some(network.to_string()), - port_bindings: Some(port_bindings), - ..Default::default() - }), - ..Default::default() - }; - - let container_id = docker - .create_container::<&str, String>(None, config) - .await? - .id; - - super::continuously_print_docker_output(docker, &container_id).await?; - docker - .start_container::<String>(&container_id, None) - .await?; - - let network_settings = docker - .inspect_container(&container_id, None) - .await?
- .network_settings - .unwrap(); - let ip_address = network_settings - .networks - .unwrap() - .get(network) - .cloned() - .unwrap() - .ip_address - .unwrap(); - - Ok(Self { - docker: docker.clone(), - container_id, - address: format!("http://{ip_address}:{web_port}/"), - }) - } -} - -drop_container!(Datastore); diff --git a/integration-tests/tests/docker/mod.rs b/integration-tests/tests/docker/mod.rs deleted file mode 100644 index 3bf129902..000000000 --- a/integration-tests/tests/docker/mod.rs +++ /dev/null @@ -1,328 +0,0 @@ -pub mod datastore; -pub mod redis; -pub mod relayer; - -use bollard::{ - container::{AttachContainerOptions, AttachContainerResults, Config, RemoveContainerOptions}, - network::CreateNetworkOptions, - service::{HostConfig, Ipam, PortBinding}, - Docker, -}; -use ed25519_dalek::ed25519::signature::digest::{consts::U32, generic_array::GenericArray}; -use futures::{lock::Mutex, StreamExt}; -use hyper::{Body, Client, Method, Request, StatusCode, Uri}; -use mpc_recovery::msg::{AddKeyRequest, AddKeyResponse, NewAccountRequest, NewAccountResponse}; -use multi_party_eddsa::protocols::ExpandedKeyPair; -use near_crypto::SecretKey; -use once_cell::sync::Lazy; -use serde::{Deserialize, Serialize}; -use std::collections::HashMap; -use tokio::io::AsyncWriteExt; -use workspaces::AccountId; - -static NETWORK_MUTEX: Lazy<Mutex<i32>> = Lazy::new(|| Mutex::new(0)); - -// Removing container is an asynchronous operation and hence has to be scheduled to execute -// outside of `drop`'s scope. This leads to problems when the drop happens right before the -// execution ends. The invoker needs to be aware of this behavior and give `drop` some time -// to finalize. -#[macro_export] -macro_rules! drop_container { - ( $container:ident ) => { - #[cfg(feature = "drop-containers")] - impl Drop for $container { - fn drop(&mut self) { - let container_id = self.container_id.clone(); - let docker = self.docker.clone(); - tokio::spawn(async move { - docker - .remove_container( - &container_id, - Some(RemoveContainerOptions { - force: true, - ..Default::default() - }), - ) - .await - }); - } - } - }; -} - -async fn continuously_print_docker_output(docker: &Docker, id: &str) -> anyhow::Result<()> { - let AttachContainerResults { mut output, .. } = docker - .attach_container( - id, - Some(AttachContainerOptions::<String> { - stdout: Some(true), - stderr: Some(true), - stream: Some(true), - ..Default::default() - }), - ) - .await?; - - // Asynchronous process that pipes docker attach output into stdout. - // Will die automatically once Docker container output is closed.
- tokio::spawn(async move { - let mut stdout = tokio::io::stdout(); - - while let Some(Ok(output)) = output.next().await { - stdout - .write_all(output.into_bytes().as_ref()) - .await - .unwrap(); - stdout.flush().await.unwrap(); - } - }); - - Ok(()) -} - -async fn start_mpc_node( - docker: &Docker, - network: &str, - cmd: Vec<String>, - web_port: u16, - expose_web_port: bool, -) -> anyhow::Result<(String, String)> { - let mut exposed_ports = HashMap::new(); - let mut port_bindings = HashMap::new(); - if expose_web_port { - let empty = HashMap::<(), ()>::new(); - exposed_ports.insert(format!("{web_port}/tcp"), empty); - port_bindings.insert( - format!("{web_port}/tcp"), - Some(vec![PortBinding { - host_ip: None, - host_port: Some(web_port.to_string()), - }]), - ); - } - - let mpc_recovery_config = Config { - image: Some("near/mpc-recovery:latest".to_string()), - tty: Some(true), - attach_stdout: Some(true), - attach_stderr: Some(true), - exposed_ports: Some(exposed_ports), - cmd: Some(cmd), - host_config: Some(HostConfig { - network_mode: Some(network.to_string()), - port_bindings: Some(port_bindings), - ..Default::default() - }), - env: Some(vec!["RUST_LOG=mpc_recovery=DEBUG".to_string()]), - ..Default::default() - }; - - let id = docker - .create_container::<&str, String>(None, mpc_recovery_config) - .await? - .id; - - continuously_print_docker_output(docker, &id).await?; - docker.start_container::<String>(&id, None).await?; - - let network_settings = docker - .inspect_container(&id, None) - .await? - .network_settings - .unwrap(); - let ip_address = network_settings - .networks - .unwrap() - .get(network) - .cloned() - .unwrap() - .ip_address - .unwrap(); - - Ok((id, ip_address)) -} - -async fn create_network(docker: &Docker, network: &str) -> anyhow::Result<()> { - let _lock = &NETWORK_MUTEX.lock().await; - let list = docker.list_networks::<&str>(None).await?; - if list.iter().any(|n| n.name == Some(network.to_string())) { - return Ok(()); - } - - let create_network_options = CreateNetworkOptions { - name: network, - check_duplicate: true, - driver: if cfg!(windows) { - "transparent" - } else { - "bridge" - }, - ipam: Ipam { - config: None, - ..Default::default() - }, - ..Default::default() - }; - let _response = &docker.create_network(create_network_options).await?; - - Ok(()) -} - -pub struct LeaderNode { - docker: Docker, - container_id: String, - pub address: String, -} - -impl LeaderNode { - #[allow(clippy::too_many_arguments)] - pub async fn start( - docker: &Docker, - network: &str, - sign_nodes: Vec<String>, - near_rpc: &str, - relayer_url: &str, - datastore_url: &str, - gcp_project_id: &str, - near_root_account: &AccountId, - account_creator_id: &AccountId, - account_creator_sk: &SecretKey, - pagoda_firebase_audience_id: &str, - ) -> anyhow::Result<LeaderNode> { - create_network(docker, network).await?; - let web_port = portpicker::pick_unused_port().expect("no free ports"); - - let mut cmd = vec![ - "start-leader".to_string(), - "--web-port".to_string(), - web_port.to_string(), - "--near-rpc".to_string(), - near_rpc.to_string(), - "--relayer-url".to_string(), - relayer_url.to_string(), - "--near-root-account".to_string(), - near_root_account.to_string(), - "--account-creator-id".to_string(), - account_creator_id.to_string(), - "--account-creator-sk".to_string(), - account_creator_sk.to_string(), - "--pagoda-firebase-audience-id".to_string(), - pagoda_firebase_audience_id.to_string(), - "--gcp-project-id".to_string(), - gcp_project_id.to_string(), - "--gcp-datastore-url".to_string(), - datastore_url.to_string(),
- "--test".to_string(), - ]; - for sign_node in sign_nodes { - cmd.push("--sign-nodes".to_string()); - cmd.push(sign_node); - } - - let (container_id, _) = start_mpc_node(docker, network, cmd, web_port, true).await?; - Ok(LeaderNode { - docker: docker.clone(), - container_id, - address: format!("http://localhost:{web_port}"), - }) - } - - async fn post<U, Req: Serialize, Resp>( - &self, - uri: U, - request: Req, - ) -> anyhow::Result<(StatusCode, Resp)> - where - Uri: TryFrom<U>, - <Uri as TryFrom<U>>::Error: Into<hyper::http::Error>, - for<'de> Resp: Deserialize<'de>, - { - let req = Request::builder() - .method(Method::POST) - .uri(uri) - .header("content-type", "application/json") - .body(Body::from(serde_json::to_string(&request)?))?; - - let client = Client::new(); - let response = client.request(req).await?; - let status = response.status(); - - let data = hyper::body::to_bytes(response).await?; - let response: Resp = serde_json::from_slice(&data)?; - - Ok((status, response)) - } - - pub async fn new_account( - &self, - request: NewAccountRequest, - ) -> anyhow::Result<(StatusCode, NewAccountResponse)> { - self.post(format!("{}/new_account", self.address), request) - .await - } - - pub async fn add_key( - &self, - request: AddKeyRequest, - ) -> anyhow::Result<(StatusCode, AddKeyResponse)> { - self.post(format!("{}/add_key", self.address), request) - .await - } -} - -pub struct SignNode { - docker: Docker, - container_id: String, - pub address: String, - /// For calling directly from tests - pub local_address: String, -} - -impl SignNode { - #[allow(clippy::too_many_arguments)] - pub async fn start( - docker: &Docker, - network: &str, - node_id: u64, - sk_share: &ExpandedKeyPair, - cipher_key: &GenericArray<u8, U32>, - datastore_url: &str, - gcp_project_id: &str, - pagoda_firebase_audience_id: &str, - ) -> anyhow::Result<SignNode> { - create_network(docker, network).await?; - let web_port = portpicker::pick_unused_port().expect("no free ports"); - - let cmd = vec![ - "start-sign".to_string(), - "--node-id".to_string(), - node_id.to_string(), - "--sk-share".to_string(), - serde_json::to_string(&sk_share)?, - "--cipher-key".to_string(), - hex::encode(cipher_key), - "--web-port".to_string(), - web_port.to_string(), - "--pagoda-firebase-audience-id".to_string(), - pagoda_firebase_audience_id.to_string(), - "--gcp-project-id".to_string(), - gcp_project_id.to_string(), - "--gcp-datastore-url".to_string(), - datastore_url.to_string(), - "--test".to_string(), - ]; - - let (container_id, ip_address) = - start_mpc_node(docker, network, cmd, web_port, true).await?; - - Ok(SignNode { - docker: docker.clone(), - container_id, - address: format!("http://{ip_address}:{web_port}"), - local_address: format!("http://localhost:{web_port}"), - }) - } -} - -drop_container!(LeaderNode); -drop_container!(SignNode); diff --git a/integration-tests/tests/docker/redis.rs b/integration-tests/tests/docker/redis.rs deleted file mode 100644 index b67a8cabf..000000000 --- a/integration-tests/tests/docker/redis.rs +++ /dev/null @@ -1,76 +0,0 @@ -use crate::drop_container; -use bollard::{ - container::{Config, RemoveContainerOptions}, - image::CreateImageOptions, - service::HostConfig, - Docker, -}; -use futures::TryStreamExt; - -pub struct Redis { - docker: Docker, - container_id: String, - pub hostname: String, -} - -impl Redis { - pub async fn start(docker: &Docker, network: &str) -> anyhow::Result<Self> { - super::create_network(docker, network).await?; - docker - .create_image( - Some(CreateImageOptions { - from_image: "redis:latest", - ..Default::default() - }), - None, - None, - ) - .try_collect::<Vec<_>>()
- .await?; - - let relayer_config = Config { - image: Some("redis:latest".to_string()), - tty: Some(true), - attach_stdout: Some(true), - attach_stderr: Some(true), - cmd: None, - host_config: Some(HostConfig { - network_mode: Some(network.to_string()), - ..Default::default() - }), - ..Default::default() - }; - - let container_id = docker - .create_container::<&str, String>(None, relayer_config) - .await? - .id; - - super::continuously_print_docker_output(docker, &container_id).await?; - docker - .start_container::<String>(&container_id, None) - .await?; - - let network_settings = docker - .inspect_container(&container_id, None) - .await? - .network_settings - .unwrap(); - let ip_address = network_settings - .networks - .unwrap() - .get(network) - .cloned() - .unwrap() - .ip_address - .unwrap(); - - Ok(Self { - docker: docker.clone(), - container_id, - hostname: ip_address, - }) - } -} - -drop_container!(Redis); diff --git a/integration-tests/tests/docker/relayer.rs b/integration-tests/tests/docker/relayer.rs deleted file mode 100644 index d689b7bd4..000000000 --- a/integration-tests/tests/docker/relayer.rs +++ /dev/null @@ -1,108 +0,0 @@ -use crate::drop_container; -use bollard::{ - container::{Config, RemoveContainerOptions}, - service::{HostConfig, PortBinding}, - Docker, -}; -use near_crypto::SecretKey; -use std::collections::HashMap; -use workspaces::AccountId; - -pub struct Relayer { - docker: Docker, - container_id: String, - pub address: String, -} - -impl Relayer { - #[allow(clippy::too_many_arguments)] // TODO: fix later - pub async fn start( - docker: &Docker, - network: &str, - near_rpc: &str, - redis_hostname: &str, - relayer_account_id: &AccountId, - relayer_account_sk: &SecretKey, - creator_account_id: &AccountId, - social_db_id: &AccountId, - social_account_id: &AccountId, - social_account_sk: &SecretKey, - ) -> anyhow::Result<Self> { - super::create_network(docker, network).await?; - let web_port = portpicker::pick_unused_port().expect("no free ports"); - - let mut exposed_ports = HashMap::new(); - let mut port_bindings = HashMap::new(); - let empty = HashMap::<(), ()>::new(); - exposed_ports.insert(format!("{web_port}/tcp"), empty); - port_bindings.insert( - format!("{web_port}/tcp"), - Some(vec![PortBinding { - host_ip: None, - host_port: Some(web_port.to_string()), - }]), - ); - - let relayer_config = Config { - image: Some("pagoda-relayer-rs-fastauth:latest".to_string()), - tty: Some(true), - attach_stdout: Some(true), - attach_stderr: Some(true), - exposed_ports: Some(exposed_ports), - cmd: None, - host_config: Some(HostConfig { - network_mode: Some(network.to_string()), - port_bindings: Some(port_bindings), - ..Default::default() - }), - env: Some(vec![ - "RUST_LOG=mpc_recovery=DEBUG".to_string(), - "NETWORK=custom".to_string(), - format!("SERVER_PORT={}", web_port), - format!("RELAYER_RPC_URL={}", near_rpc), - format!("RELAYER_ACCOUNT_ID={}", relayer_account_id), - format!("REDIS_HOST={}", redis_hostname), - format!("PUBLIC_KEY={}", relayer_account_sk.public_key()), - format!("PRIVATE_KEY={}", relayer_account_sk), - format!("RELAYER_WHITELISTED_CONTRACT={}", creator_account_id), - format!("CUSTOM_SOCIAL_DB_ID={}", social_db_id), - format!("STORAGE_ACCOUNT_ID={}", social_account_id), - format!("STORAGE_PUBLIC_KEY={}", social_account_sk.public_key()), - format!("STORAGE_PRIVATE_KEY={}", social_account_sk), - ]), - ..Default::default() - }; - - let container_id = docker - .create_container::<&str, String>(None, relayer_config) - .await?
- .id; - - super::continuously_print_docker_output(docker, &container_id).await?; - docker - .start_container::<String>(&container_id, None) - .await?; - - let network_settings = docker - .inspect_container(&container_id, None) - .await? - .network_settings - .unwrap(); - let ip_address = network_settings - .networks - .unwrap() - .get(network) - .cloned() - .unwrap() - .ip_address - .unwrap(); - - Ok(Self { - docker: docker.clone(), - container_id, - address: format!("http://{ip_address}:{web_port}"), - }) - } -} - -drop_container!(Relayer); diff --git a/integration-tests/tests/lib.rs b/integration-tests/tests/lib.rs index 32aca6eb8..2e4d2dba9 100644 --- a/integration-tests/tests/lib.rs +++ b/integration-tests/tests/lib.rs @@ -1,164 +1,82 @@ -mod docker; mod mpc; -use crate::docker::{LeaderNode, SignNode}; -use bollard::Docker; use curv::elliptic::curves::{Ed25519, Point}; -use docker::{datastore::Datastore, redis::Redis, relayer::Relayer}; use futures::future::BoxFuture; use mpc_recovery::GenerateResult; -use std::time::Duration; -use workspaces::{network::Sandbox, AccountId, Worker}; +use mpc_recovery_integration_tests::containers; +use workspaces::{network::Sandbox, Worker}; const NETWORK: &str = "mpc_recovery_integration_test_network"; const GCP_PROJECT_ID: &str = "mpc-recovery-gcp-project"; -#[cfg(target_os = "linux")] -const HOST_MACHINE_FROM_DOCKER: &str = "172.17.0.1"; -#[cfg(target_os = "macos")] -const HOST_MACHINE_FROM_DOCKER: &str = "docker.for.mac.localhost"; +// TODO: figure out how to instantiate and use a local Firebase deployment +const FIREBASE_AUDIENCE_ID: &str = "not actually used in integration tests"; pub struct TestContext<'a> { - leader_node: &'a LeaderNode, + leader_node: &'a containers::LeaderNodeApi, _pk_set: &'a Vec<Point<Ed25519>>, worker: &'a Worker<Sandbox>, - signer_nodes: &'a Vec<SignNode>, -} - -async fn create_account( - worker: &Worker<Sandbox>, -) -> anyhow::Result<(AccountId, near_crypto::SecretKey)> { - let (account_id, account_sk) = worker.dev_generate().await; - worker - .create_tla(account_id.clone(), account_sk.clone()) - .await? - .into_result()?; - - let account_sk: near_crypto::SecretKey = - serde_json::from_str(&serde_json::to_string(&account_sk)?)?; - - Ok((account_id, account_sk)) + signer_nodes: &'a Vec<containers::SignerNodeApi>, } async fn with_nodes<F>(nodes: usize, f: F) -> anyhow::Result<()> where F: for<'a> FnOnce(TestContext<'a>) -> BoxFuture<'a, anyhow::Result<()>>, { - let docker = Docker::connect_with_local_defaults()?; + let docker_client = containers::DockerClient::default(); - let GenerateResult { pk_set, secrets } = mpc_recovery::generate(nodes); - let worker = workspaces::sandbox().await?; - let social_db = worker - .import_contract(&"social.near".parse()?, &workspaces::mainnet().await?) - .transact() - .await?; - social_db - .call("new") - .max_gas() - .transact() - .await? - .into_result()?; + let relayer_ctx_future = + mpc_recovery_integration_tests::initialize_relayer(&docker_client, NETWORK); + let datastore_future = containers::Datastore::run(&docker_client, NETWORK, GCP_PROJECT_ID); - let near_root_account = worker.root_account()?; - near_root_account - .deploy(include_bytes!("../linkdrop.wasm")) - .await? - .into_result()?; - near_root_account - .call(near_root_account.id(), "new") - .max_gas() - .transact() - .await?
- .into_result()?; - let (relayer_account_id, relayer_account_sk) = create_account(&worker).await?; - let (creator_account_id, creator_account_sk) = create_account(&worker).await?; - let (social_account_id, social_account_sk) = create_account(&worker).await?; - up_funds_for_account(&worker, &social_account_id).await?; + let (relayer_ctx, datastore) = + futures::future::join(relayer_ctx_future, datastore_future).await; + let relayer_ctx = relayer_ctx?; + let datastore = datastore?; - let near_rpc = format!("http://{HOST_MACHINE_FROM_DOCKER}:{}", worker.rpc_port()); - let datastore = Datastore::start(&docker, NETWORK, GCP_PROJECT_ID).await?; - let redis = Redis::start(&docker, NETWORK).await?; - let relayer = Relayer::start( - &docker, - NETWORK, - &near_rpc, - &redis.hostname, - &relayer_account_id, - &relayer_account_sk, - &creator_account_id, - social_db.id(), - &social_account_id, - &social_account_sk, - ) - .await?; - tokio::time::sleep(Duration::from_millis(10000)).await; - - let pagoda_firebase_audience_id = "not actually used in integration tests"; - - let mut signer_nodes = Vec::new(); + let GenerateResult { pk_set, secrets } = mpc_recovery::generate(nodes); + let mut signer_node_futures = Vec::new(); for (i, (share, cipher_key)) in secrets.iter().enumerate().take(nodes) { - let addr = SignNode::start( - &docker, + let signer_node = containers::SignerNode::run( + &docker_client, NETWORK, i as u64, share, cipher_key, &datastore.address, GCP_PROJECT_ID, - pagoda_firebase_audience_id, - ) - .await?; - signer_nodes.push(addr); + FIREBASE_AUDIENCE_ID, + ); + signer_node_futures.push(signer_node); } - + let signer_nodes = futures::future::join_all(signer_node_futures) + .await + .into_iter() + .collect::<Result<Vec<_>, _>>()?; let signer_urls: &Vec<_> = &signer_nodes.iter().map(|n| n.address.clone()).collect(); - let leader_node = LeaderNode::start( - &docker, + let near_root_account = relayer_ctx.worker.root_account()?; + let leader_node = containers::LeaderNode::run( + &docker_client, NETWORK, signer_urls.clone(), - &near_rpc, - &relayer.address, + &relayer_ctx.sandbox.address, + &relayer_ctx.relayer.address, &datastore.address, GCP_PROJECT_ID, near_root_account.id(), - &creator_account_id, - &creator_account_sk, - pagoda_firebase_audience_id, + &relayer_ctx.creator_account_id, + &relayer_ctx.creator_account_sk, + FIREBASE_AUDIENCE_ID, ) .await?; - // Wait until all nodes initialize - tokio::time::sleep(Duration::from_millis(10000)).await; - - let result = f(TestContext { - leader_node: &leader_node, + f(TestContext { + leader_node: &leader_node.api(), _pk_set: &pk_set, - signer_nodes: &signer_nodes, - worker: &worker, + signer_nodes: &signer_nodes.iter().map(|n| n.api()).collect(), + worker: &relayer_ctx.worker, }) - .await; - - drop(datastore); - drop(leader_node); - drop(signer_nodes); - drop(relayer); - drop(redis); - - // Wait until all docker containers are destroyed. - // See `Drop` impl for `LeaderNode` and `SignNode` for more info.
- tokio::time::sleep(Duration::from_millis(2000)).await; - - result -} - -async fn up_funds_for_account(worker: &Worker<Sandbox>, id: &AccountId) -> anyhow::Result<()> { - const AMOUNT: u128 = 99 * 10u128.pow(24); - for _ in 0..10 { - let tmp_account = worker.dev_create_account().await?; - tmp_account.transfer_near(id, AMOUNT).await?.into_result()?; - tmp_account.delete_account(id).await?.into_result()?; - } - Ok(()) + .await } mod account { @@ -230,8 +148,8 @@ mod check { use crate::TestContext; use workspaces::AccountId; - pub async fn access_key_exists<'a>( - ctx: &TestContext<'a>, + pub async fn access_key_exists( + ctx: &TestContext<'_>, account_id: &AccountId, public_key: &str, ) -> anyhow::Result<()> { @@ -249,10 +167,7 @@ mod check { } } - pub async fn no_account<'a>( - ctx: &TestContext<'a>, - account_id: &AccountId, - ) -> anyhow::Result<()> { + pub async fn no_account(ctx: &TestContext<'_>, account_id: &AccountId) -> anyhow::Result<()> { if ctx.worker.view_account(account_id).await.is_err() { Ok(()) } else { diff --git a/integration-tests/tests/mpc/positive.rs b/integration-tests/tests/mpc/positive.rs index 1ea282b28..36f47813d 100644 --- a/integration-tests/tests/mpc/positive.rs +++ b/integration-tests/tests/mpc/positive.rs @@ -14,7 +14,7 @@ use std::time::Duration; use workspaces::types::AccessKeyPermission; #[tokio::test] -async fn test_trio() -> anyhow::Result<()> { +async fn test_aggregate_signatures() -> anyhow::Result<()> { with_nodes(3, |ctx| { Box::pin(async move { let payload: String = rand::thread_rng() @@ -23,13 +23,8 @@ async fn test_trio() -> anyhow::Result<()> { .map(char::from) .collect(); - // TODO integrate this better with testing let client = reqwest::Client::new(); - let signer_urls: Vec<_> = ctx - .signer_nodes - .iter() - .map(|s| s.local_address.clone()) - .collect(); + let signer_urls: Vec<_> = ctx.signer_nodes.iter().map(|s| s.address.clone()).collect(); let signature = sign( &client, @@ -51,7 +46,6 @@ async fn test_trio() -> anyhow::Result<()> { .await } -// TODO: write a test with real token #[tokio::test] async fn test_basic_action() -> anyhow::Result<()> { with_nodes(3, |ctx| { @@ -145,7 +139,7 @@ async fn test_basic_action() -> anyhow::Result<()> { #[tokio::test] async fn test_random_recovery_keys() -> anyhow::Result<()> { - with_nodes(4, |ctx| { + with_nodes(3, |ctx| { Box::pin(async move { let account_id = account::random(ctx.worker)?; let user_full_access_key = key::random(); diff --git a/mpc-recovery/src/leader_node/mod.rs b/mpc-recovery/src/leader_node/mod.rs index 0b3625b58..8c38182fc 100644 --- a/mpc-recovery/src/leader_node/mod.rs +++ b/mpc-recovery/src/leader_node/mod.rs @@ -12,6 +12,7 @@ use crate::transaction::{ get_add_key_delegate_action, get_create_account_delegate_action, get_local_signed_delegated_action, get_mpc_signed_delegated_action, }; +use axum::routing::get; use axum::{http::StatusCode, routing::post, Extension, Json, Router}; use curv::elliptic::curves::{Ed25519, Point}; use near_crypto::{ParseKeyError, PublicKey, SecretKey}; @@ -106,6 +107,14 @@ pub async fn run<T: OAuthTokenVerifier + 'static>(config: Config) { let cors_layer = tower_http::cors::CorsLayer::permissive(); let app = Router::new() + // healthcheck endpoint + .route( + "/", + get(|| async move { + tracing::info!("node is ready to accept connections"); + StatusCode::OK + }), + ) .route("/new_account", post(new_account::<T>)) .route("/add_key", post(add_key::<T>)) .layer(Extension(state)) diff --git a/mpc-recovery/src/sign_node/mod.rs b/mpc-recovery/src/sign_node/mod.rs index 4ce4b69bf..f86c3479d 100644 ---
a/mpc-recovery/src/sign_node/mod.rs +++ b/mpc-recovery/src/sign_node/mod.rs @@ -7,6 +7,7 @@ use crate::primitives::InternalAccountId; use crate::sign_node::pk_set::SignerNodePkSet; use crate::NodeId; use aes_gcm::Aes256Gcm; +use axum::routing::get; use axum::{http::StatusCode, routing::post, Extension, Json, Router}; use curv::elliptic::curves::{Ed25519, Point}; use multi_party_eddsa::protocols::{self, ExpandedKeyPair}; @@ -55,6 +56,14 @@ pub async fn run<T: OAuthTokenVerifier + 'static>(config: Config) { }; let app = Router::new() + // healthcheck endpoint + .route( + "/", + get(|| async move { + tracing::info!("node is ready to accept connections"); + StatusCode::OK + }), + ) .route("/commit", post(commit::<T>)) .route("/reveal", post(reveal)) .route("/signature_share", post(signature_share))
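For reviewers who want to poke at the new healthcheck endpoints by hand: both the leader and sign nodes now answer `GET /` with `200 OK`, which is the same condition the `curl`-based readiness loops in `containers.rs` poll for. A minimal sketch of a manual probe follows; the host port `32791` is just an example lifted from the `docker ps` output in the README, so substitute whatever port Docker mapped on your machine:

```bash
# Expect "200" once the node has finished starting up.
curl -s -o /dev/null -w '%{http_code}\n' http://localhost:32791/
```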