Add upgrade test
Add an integration test for upgrades. Collect multiple sequencer header
streams into one stream and assert the version in the headers. The first
few blocks are asserted to be the base version; then we check that all
sequencers are upgraded within a timeout period. Currently we only check
two sequencers, since that is how many have the API enabled. The same
test can be run against any consecutive base/upgrade version pair.

  * factor common logic into a module
  * add a sequencer version env var for setting the version to be tested
  * get the builder address from the API (instead of hard-coding it)
tbro committed Sep 30, 2024
1 parent c2b96e9 commit 8113ae1
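
For orientation, here is a minimal sketch (not the code from `tests/`) of the approach described above: merge each sequencer's header stream into a single stream, require that headers carry either the base or the upgraded version, and require that every node produces an upgraded header before a deadline. It assumes the `SequencerClient::subscribe_headers(height)` method added in this commit; the `Header::version()` accessor and the `assert_upgrade` helper name are illustrative assumptions.

use std::time::Duration;

use anyhow::{bail, Result};
use async_std::future::timeout;
use client::SequencerClient;
use espresso_types::Header;
use futures::{stream::select_all, StreamExt};
use vbs::version::Version;

/// Hypothetical helper: merge header streams from several sequencers and
/// check that every node reports `upgraded` before `deadline` elapses.
async fn assert_upgrade(
    clients: &[SequencerClient],
    base: Version,
    upgraded: Version,
    deadline: Duration,
) -> Result<()> {
    // Subscribe to each sequencer from its current tip, tagging every item
    // with the index of the node that produced it.
    let mut streams = Vec::new();
    for (i, client) in clients.iter().enumerate() {
        let height = client.get_height().await?;
        let headers = client.subscribe_headers(height).await?;
        streams.push(headers.map(move |h| (i, h)).boxed());
    }
    let mut merged = select_all(streams);

    let mut seen_upgraded = vec![false; clients.len()];
    timeout(deadline, async {
        while let Some((i, header)) = merged.next().await {
            let header: Header =
                header.map_err(|err| anyhow::anyhow!("stream error from node {i}: {err}"))?;
            let version = header.version(); // accessor name is an assumption
            // Every header must carry either the base or the upgraded version.
            if version != base && version != upgraded {
                bail!("unexpected version {version:?} from sequencer {i}");
            }
            if version == upgraded {
                seen_upgraded[i] = true;
            }
            // Done once all sequencers have produced an upgraded header.
            if seen_upgraded.iter().all(|done| *done) {
                return Ok(());
            }
        }
        bail!("header stream ended before all sequencers upgraded")
    })
    .await
    .map_err(|_| anyhow::anyhow!("timed out waiting for all sequencers to upgrade"))?
}

Merging with `select_all` lets a single loop observe all nodes, so one timeout bounds the whole check rather than one per sequencer.
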
Showing 12 changed files with 346 additions and 221 deletions.
4 changes: 3 additions & 1 deletion .env
@@ -147,4 +147,6 @@ INTEGRATION_TEST_EXPECTED_BLOCK_HEIGHT=10
INTEGRATION_TEST_HOST=localhost
INTEGRATION_TEST_PROTO=http

MARKETPLACE_SMOKE_TEST=false
# Version of sequencer protocol we want to test. If this is set to
# `03`, marketplace upgrade will be tested.
INTEGRATION_TEST_SEQUENCER_VERSION=02
3 changes: 2 additions & 1 deletion .github/workflows/test.yml
@@ -77,6 +77,7 @@ jobs:
- name: Test Integration
env:
NEXTEST_PROFILE: integration
INTEGRATION_TEST_SEQUENCER_VERSION: "02"
run: cargo nextest run --locked --release --all-features --verbose --nocapture
timeout-minutes: 40

@@ -89,7 +90,7 @@ jobs:
- name: Test Marketplace Integration
env:
NEXTEST_PROFILE: integration
MARKETPLACE_SMOKE_TEST: true
INTEGRATION_TEST_SEQUENCER_VERSION: "03"
run: cargo nextest run --locked --release --all-features --verbose --nocapture
timeout-minutes: 40

1 change: 1 addition & 0 deletions Cargo.lock

Some generated files are not rendered by default.

14 changes: 6 additions & 8 deletions client/src/lib.rs
@@ -1,6 +1,6 @@
use anyhow::Context;
use async_std::task::sleep;
use espresso_types::{FeeAccount, FeeAmount, FeeMerkleTree};
use espresso_types::{FeeAccount, FeeAmount, FeeMerkleTree, Header};
use ethers::types::Address;
use jf_merkle_tree::{
prelude::{MerkleProof, Sha3Node},
@@ -9,14 +9,12 @@ use jf_merkle_tree::{
use std::time::Duration;
use surf_disco::{
error::ClientError,
http::convert::DeserializeOwned,
socket::{Connection, Unsupported},
Url,
};
use vbs::version::StaticVersion;

pub type SequencerApiVersion = StaticVersion<0, 1>;
// pub type EspressoClient = surf_disco::Client<ClientError, SequencerApiVersion>;

#[derive(Clone, Debug)]
pub struct SequencerClient(surf_disco::Client<ClientError, SequencerApiVersion>);
@@ -46,13 +44,13 @@ impl SequencerClient {
.context("getting Espresso transaction count")
}

/// Subscribe to a stream to Block Headers
pub async fn subscribe_headers<FromServer: DeserializeOwned>(
/// Subscribe to a stream of Block Headers
pub async fn subscribe_headers(
&self,
connect: &str,
) -> anyhow::Result<Connection<FromServer, Unsupported, ClientError, SequencerApiVersion>> {
height: u64,
) -> anyhow::Result<Connection<Header, Unsupported, ClientError, SequencerApiVersion>> {
self.0
.socket(connect)
.socket(&format!("availability/stream/headers/{height}"))
.subscribe()
.await
.context("subscribing to Espresso headers")
Empty file added common/mod.rs
Empty file.
2 changes: 1 addition & 1 deletion justfile
@@ -9,7 +9,7 @@ demo *args:

demo-native:
cargo build --release
scripts/demo-native -D
scripts/demo-native

demo-native-mp:
cargo build --release
2 changes: 1 addition & 1 deletion sequencer/src/bin/espresso-bridge.rs
@@ -187,7 +187,7 @@ async fn deposit(opt: Deposit) -> anyhow::Result<()> {
// Wait for Espresso to catch up to the L1.
let espresso_height = espresso.get_height().await?;
let mut headers = espresso
.subscribe_headers(&format!("availability/stream/headers/{espresso_height}"))
.subscribe_headers(espresso_height)
.await?;
let espresso_block = loop {
let header: Header = match headers.next().await.context("header stream ended")? {
5 changes: 3 additions & 2 deletions tests/Cargo.toml
@@ -6,8 +6,8 @@ edition = { workspace = true }
publish = false

[[test]]
name = "test_demo_native"
path = "demo_native.rs"
name = "integration"
path = "main.rs"

[dev-dependencies]
anyhow = { workspace = true }
@@ -19,3 +19,4 @@ ethers = { workspace = true }
futures = { workspace = true }
reqwest = { workspace = true, features = ["json", "blocking"] }
surf-disco = { workspace = true }
vbs = { workspace = true }
255 changes: 255 additions & 0 deletions tests/common/mod.rs
@@ -0,0 +1,255 @@
use anyhow::{anyhow, Result};
use async_std::{future, task::sleep};
use client::SequencerClient;
use espresso_types::FeeAmount;
use ethers::prelude::*;
use futures::future::join_all;
use reqwest::blocking;
use std::{fmt, str::FromStr, time::Duration};
use surf_disco::Url;

const L1_PROVIDER_RETRY_INTERVAL: Duration = Duration::from_secs(1);
// TODO add to .env
const RECIPIENT_ADDRESS: &str = "0x0000000000000000000000000000000000000000";
/// Duration in seconds to wait before declaring the chain deceased.
const SEQUENCER_BLOCKS_TIMEOUT: u64 = 120;

#[derive(Clone, Debug)]
pub struct TestConfig {
pub load_generator_url: String,
pub provider: Provider<Http>,
pub espresso: SequencerClient,
pub builder_address: Address,
pub recipient_address: Address,
pub light_client_address: Address,
pub prover_url: String,
pub expected_block_height: u64,
pub timeout: f64,
pub sequencer_version: u8,
pub sequencer_clients: Vec<SequencerClient>,
pub initial_height: u64,
pub initial_txns: u64,
}

#[derive(Clone, Debug, Eq, PartialEq, PartialOrd, Ord)]
pub struct TestState {
pub block_height: Option<u64>,
pub txn_count: u64,
pub builder_balance: FeeAmount,
pub recipient_balance: FeeAmount,
pub light_client_update: u64,
}

impl fmt::Display for TestState {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let output = format!(
"
block_height: {}
transactions: {}
builder_balance: {}
recipient_balance: {}
light_client_updated: {}
",
self.block_height.map_or_else(|| "unknown".to_string(), |h| h.to_string()),
self.txn_count,
self.builder_balance,
self.recipient_balance,
self.light_client_update
);

write!(f, "{}", output)
}
}

fn url_from_port(port: String) -> Result<String> {
Ok(format!(
"{}://{}:{}",
dotenvy::var("INTEGRATION_TEST_PROTO")?,
dotenvy::var("INTEGRATION_TEST_HOST")?,
port
))
}

impl TestConfig {
pub async fn new() -> Result<Self> {
// We need to know the sequencer version we are testing in order
// to test against the correct services. It's tempting to try to
// proceed against any working services, but we would run the risk
// of testing against the wrong ones. For example, the legacy
// builder may be alive during the marketplace test.
let sequencer_version: u8 = dotenvy::var("INTEGRATION_TEST_SEQUENCER_VERSION")?
.parse()
.unwrap();

// The load generator port differs between the base (< v3) and marketplace (>= v3) configurations.
let load_generator_url = if sequencer_version >= 03 {
url_from_port(dotenvy::var(
"ESPRESSO_SUBMIT_TRANSACTIONS_PRIVATE_FALLBACK_PORT",
)?)?
} else {
url_from_port(dotenvy::var("ESPRESSO_SUBMIT_TRANSACTIONS_PRIVATE_PORT")?)?
};

let builder_url = if sequencer_version >= 03 {
let url = url_from_port(dotenvy::var("ESPRESSO_RESERVE_BUILDER_SERVER_PORT")?)?;

Url::from_str(&url)?
.join("bundle_info/builderaddress")
.unwrap()
} else {
let url = url_from_port(dotenvy::var("ESPRESSO_BUILDER_SERVER_PORT")?)?;
Url::from_str(&url)?
.join("block_info/builderaddress")
.unwrap()
};

println!("Waiting on Builder Address");
let builder_address = get_builder_address(builder_url).await;

let l1_provider_url = url_from_port(dotenvy::var("ESPRESSO_SEQUENCER_L1_PORT")?)?;
let sequencer_api_url = url_from_port(dotenvy::var("ESPRESSO_SEQUENCER1_API_PORT")?)?;
let sequencer_clients = vec![
dotenvy::var("ESPRESSO_SEQUENCER_API_PORT")?,
dotenvy::var("ESPRESSO_SEQUENCER1_API_PORT")?,
dotenvy::var("ESPRESSO_SEQUENCER2_API_PORT")?,
]
.iter()
.map(|port| url_from_port(port.clone()).unwrap())
.collect::<Vec<String>>()
.iter()
.map(|url| SequencerClient::new(Url::from_str(url).unwrap()))
.collect::<Vec<SequencerClient>>();

let prover_url = url_from_port(dotenvy::var("ESPRESSO_PROVER_SERVICE_PORT")?)?;

let light_client_proxy_address =
dotenvy::var("ESPRESSO_SEQUENCER_LIGHT_CLIENT_PROXY_ADDRESS")?;

// Number of blocks to wait before deeming the test successful
let expected_block_height = dotenvy::var("INTEGRATION_TEST_EXPECTED_BLOCK_HEIGHT")?
.parse::<u64>()
.unwrap();

// Set the time out. Give a little more leeway when we have a
// large `expected_block_height`.
let timeout = expected_block_height as f64
+ SEQUENCER_BLOCKS_TIMEOUT as f64
+ (expected_block_height as f64).sqrt();

println!("Waiting on Builder Address");

let client = SequencerClient::new(Url::from_str(&sequencer_api_url).unwrap());
let initial_height = client.get_height().await.unwrap();
let initial_txns = client.get_transaction_count().await.unwrap();

Ok(Self {
load_generator_url,
provider: Provider::<Http>::try_from(l1_provider_url)?
.interval(L1_PROVIDER_RETRY_INTERVAL),
espresso: client,
light_client_address: light_client_proxy_address.parse::<Address>().unwrap(),
builder_address,
recipient_address: RECIPIENT_ADDRESS.parse::<Address>().unwrap(),
prover_url,
expected_block_height,
timeout,
sequencer_version,
sequencer_clients,
initial_height,
initial_txns,
})
}

/// Number of blocks to wait before deeming the test successful
pub fn expected_block_height(&self) -> u64 {
self.expected_block_height
}
/// Get the latest block where we see a light client update
pub async fn latest_light_client_update(&self) -> u64 {
let filter = Filter::new()
.from_block(BlockNumber::Earliest)
.address(self.light_client_address);
// block number for latest light client update
let latest_light_client_update = self
.provider
.get_logs(&filter)
.await
.unwrap()
.last()
.unwrap()
.block_number;

latest_light_client_update.unwrap().as_u64()
}

/// Return current state of the test
pub async fn test_state(&self) -> TestState {
let block_height = self.espresso.get_height().await.ok();
let txn_count = self.espresso.get_transaction_count().await.unwrap();

let builder_balance = self
.espresso
.get_espresso_balance(self.builder_address, block_height)
.await
.unwrap();
let recipient_balance = self
.espresso
.get_espresso_balance(self.recipient_address, block_height)
.await
.unwrap();

let light_client_update = self.latest_light_client_update().await;

TestState {
block_height,
txn_count,
builder_balance,
recipient_balance,
light_client_update,
}
}

/// Check if services are healthy
pub async fn readiness(&self) -> Result<Vec<String>> {
join_all(vec![
wait_for_service(Url::from_str(&self.load_generator_url).unwrap(), 1000, 600),
wait_for_service(Url::from_str(&self.prover_url).unwrap(), 1000, 300),
])
.await
.into_iter()
.collect::<Result<Vec<String>>>()
}
}

/// Get Address from builder after waiting for builder to become ready.
pub async fn get_builder_address(url: Url) -> Address {
let _ = wait_for_service(url.clone(), 1000, 200).await;
for _ in 0..5 {
// Query the builder's address endpoint, retrying briefly if it is not ready yet.
if let Ok(body) = reqwest::blocking::get(url.clone()) {
return body.json::<Address>().unwrap();
} else {
sleep(Duration::from_millis(400)).await
}
}
panic!("Error: Failed to retrieve address from builder!");
}

async fn wait_for_service(url: Url, interval: u64, timeout: u64) -> Result<String> {
future::timeout(Duration::from_secs(timeout), async {
loop {
if let Ok(body) = blocking::get(format!("{url}/healthcheck")) {
return body.text().map_err(|e| {
anyhow!(
"Wait for service, could not decode response: ({}) {}",
url,
e
)
});
} else {
sleep(Duration::from_millis(interval)).await;
}
}
})
.await
.map_err(|e| anyhow!("Wait for service, timeout: ({}) {}", url, e))?
}