diff --git a/evm/src/executor/abi/mod.rs b/evm/src/executor/abi/mod.rs index f72ee46034f8..50dc16c90d9c 100644 --- a/evm/src/executor/abi/mod.rs +++ b/evm/src/executor/abi/mod.rs @@ -6,7 +6,7 @@ use std::collections::HashMap; /// /// This is the same address as the one used in DappTools's HEVM. /// `address(bytes20(uint160(uint256(keccak256('hevm cheat code')))))` -pub static CHEATCODE_ADDRESS: Address = H160([ +pub const CHEATCODE_ADDRESS: Address = H160([ 0x71, 0x09, 0x70, 0x9E, 0xcf, 0xa9, 0x1a, 0x80, 0x62, 0x6f, 0xf3, 0x98, 0x9d, 0x68, 0xf6, 0x7f, 0x5b, 0x1d, 0xd1, 0x2d, ]); @@ -95,6 +95,13 @@ ethers::contract::abigen!( createSelectFork(string)(uint256) selectFork(uint256) activeFork()(uint256) + makePersistent(address) + makePersistent(address,address) + makePersistent(address,address,address) + makePersistent(address[]) + revokePersistent(address) + revokePersistent(address[]) + isPersistent(address)(bool) rollFork(uint256) rollFork(uint256,uint256) rpcUrl(string)(string) diff --git a/evm/src/executor/backend/diagnostic.rs b/evm/src/executor/backend/diagnostic.rs new file mode 100644 index 000000000000..79050cb977c0 --- /dev/null +++ b/evm/src/executor/backend/diagnostic.rs @@ -0,0 +1,45 @@ +use crate::{ + executor::{backend::LocalForkId, inspector::Cheatcodes}, + Address, +}; +use foundry_common::fmt::UIfmt; + +/// Represents possible diagnostic cases on revert +#[derive(Debug, Clone)] +pub enum RevertDiagnostic { + /// The `contract` does not exist on the `active` fork but exist on other fork(s) + ContractExistsOnOtherForks { + contract: Address, + active: LocalForkId, + available_on: Vec, + }, + ContractDoesNotExist { + contract: Address, + active: LocalForkId, + }, +} + +// === impl RevertDiagnostic === + +impl RevertDiagnostic { + /// Converts the diagnostic to a readable error message + pub fn to_error_msg(&self, cheats: &Cheatcodes) -> String { + let get_label = |addr| cheats.labels.get(addr).cloned().unwrap_or_else(|| addr.pretty()); + + match self { + RevertDiagnostic::ContractExistsOnOtherForks { contract, active, available_on } => { + let contract_label = get_label(contract); + + format!( + r#"Contract {} does not exists on active fork with id `{}` + But exists on non active forks: `{:?}`"#, + contract_label, active, available_on + ) + } + RevertDiagnostic::ContractDoesNotExist { contract, .. } => { + let contract_label = get_label(contract); + format!("Contract {} does not exists", contract_label) + } + } + } +} diff --git a/evm/src/executor/backend/fuzz.rs b/evm/src/executor/backend/fuzz.rs index c71f447d268c..84b2209080e8 100644 --- a/evm/src/executor/backend/fuzz.rs +++ b/evm/src/executor/backend/fuzz.rs @@ -1,8 +1,6 @@ -use super::update_current_env_with_fork_env; use crate::{ - abi::CHEATCODE_ADDRESS, executor::{ - backend::{snapshot::BackendSnapshot, Backend, BackendDatabase, BackendInner, DatabaseExt}, + backend::{diagnostic::RevertDiagnostic, Backend, DatabaseExt, LocalForkId}, fork::{CreateFork, ForkId}, }, Address, @@ -11,19 +9,20 @@ use bytes::Bytes; use ethers::prelude::{H160, H256, U256}; use hashbrown::HashMap as Map; use revm::{ - db::{CacheDB, DatabaseRef}, - Account, AccountInfo, Database, Env, Inspector, Log, Return, SubRoutine, TransactOut, - TransactTo, + db::DatabaseRef, Account, AccountInfo, Database, Env, Inspector, Log, Return, SubRoutine, + TransactOut, }; -use tracing::{trace, warn}; +use std::borrow::Cow; +use tracing::trace; /// A wrapper around `Backend` that ensures only `revm::DatabaseRef` functions are called. 
/// /// Any changes made during its existence that affect the caching layer of the underlying Database -/// will result in a clone of the initial Database. Therefor, this backend type is something akin to -/// a clone-on-write `Backend` type. +/// will result in a clone of the initial Database. Therefore, this backend type is basically +/// a clone-on-write `Backend`, where cloning is only necessary if cheatcodes will modify the +/// `Backend` /// -/// Main purpose for this type is for fuzzing. A test function fuzzer will repeatedly execute the +/// Entire purpose of this type is for fuzzing. A test function fuzzer will repeatedly execute the /// function via immutable raw (no state changes) calls. /// /// **N.B.**: we're assuming cheatcodes that alter the state (like multi fork swapping) are niche. @@ -37,80 +36,17 @@ pub struct FuzzBackendWrapper<'a> { /// The underlying immutable `Backend` /// /// No calls on the `FuzzBackendWrapper` will ever persistently modify the `backend`'s state. - pub backend: &'a Backend, - /// active database clone that holds the currently active db, like reverted snapshots, selected - /// fork, etc. - db_override: Option>, - /// holds additional Backend data - inner: BackendInner, + pub backend: Cow<'a, Backend>, } // === impl FuzzBackendWrapper === impl<'a> FuzzBackendWrapper<'a> { - pub fn new(inner: &'a Backend) -> Self { - Self { backend: inner, db_override: None, inner: Default::default() } + pub fn new(backend: &'a Backend) -> Self { + Self { backend: Cow::Borrowed(backend) } } - /// Returns the currently active database - fn active_db(&self) -> &CacheDB { - self.db_override.as_ref().unwrap_or(&self.backend.db) - } - - /// Sets the database override - fn set_active(&mut self, db: CacheDB) { - self.db_override = Some(db) - } - - /// Sets the address of the `DSTest` contract that is being executed - pub fn set_test_contract(&mut self, addr: Address) -> &mut Self { - self.inner.test_contract_context = Some(addr); - self - } - - /// Returns the address of the set `DSTest` contract - pub fn test_contract_address(&self) -> Option
<Address>
{ - self.inner.test_contract_context - } - - /// Checks if the test contract associated with this backend failed, See - /// [Self::is_failed_test_contract] - pub fn is_failed(&self) -> bool { - self.backend.is_failed() || - self.inner.has_failure_snapshot || - self.test_contract_address() - .map(|addr| self.is_failed_test_contract(addr)) - .unwrap_or_default() - } - - /// Checks if the given test function failed - /// - /// DSTest will not revert inside its `assertEq`-like functions which allows - /// to test multiple assertions in 1 test function while also preserving logs. - /// Instead, it stores whether an `assert` failed in a boolean variable that we can read - pub fn is_failed_test_contract(&self, address: Address) -> bool { - /* - contract DSTest { - bool public IS_TEST = true; - // slot 0 offset 1 => second byte of slot0 - bool private _failed; - } - */ - let value = self.storage(address, U256::zero()); - - value.byte(1) != 0 - } - - /// In addition to the `_failed` variable, `DSTest::fail()` stores a failure - /// in "failed" - /// See - pub fn is_global_failure(&self) -> bool { - let index = U256::from(&b"failed"[..]); - let value = self.storage(CHEATCODE_ADDRESS, index); - value == U256::one() - } - - /// Executes the configured transaction of the `env` without commiting state changes + /// Executes the configured transaction of the `env` without committing state changes pub fn inspect_ref( &mut self, mut env: Env, @@ -119,22 +55,14 @@ impl<'a> FuzzBackendWrapper<'a> { where INSP: Inspector, { - if let TransactTo::Call(to) = env.tx.transact_to { - self.inner.test_contract_context = Some(to); - } revm::evm_inner::(&mut env, self, &mut inspector).transact() } } impl<'a> DatabaseExt for FuzzBackendWrapper<'a> { fn snapshot(&mut self, subroutine: &SubRoutine, env: &Env) -> U256 { - let id = self.inner.snapshots.insert(BackendSnapshot::new( - self.active_db().clone(), - subroutine.clone(), - env.clone(), - )); - trace!(target: "backend::fuzz", "Created new snapshot {}", id); - id + trace!("fuzz: create snapshot"); + self.backend.to_mut().snapshot(subroutine, env) } fn revert( @@ -143,135 +71,87 @@ impl<'a> DatabaseExt for FuzzBackendWrapper<'a> { subroutine: &SubRoutine, current: &mut Env, ) -> Option { - if let Some(mut snapshot) = - self.inner.snapshots.remove(id).or_else(|| self.backend.snapshots().get(id).cloned()) - { - // need to check whether DSTest's `failed` variable is set to `true` which means an - // error occurred either during the snapshot or even before - if self.is_failed() { - self.inner.has_failure_snapshot = true; - } - // merge additional logs - snapshot.merge(subroutine); - let BackendSnapshot { db, subroutine, env } = snapshot; - self.set_active(db); - update_current_env_with_fork_env(current, env); - - trace!(target: "backend::fuzz", "Reverted snapshot {}", id); - Some(subroutine) - } else { - warn!(target: "backend::fuzz", "No snapshot to revert for {}", id); - None - } + trace!(?id, "fuzz: revert snapshot"); + self.backend.to_mut().revert(id, subroutine, current) } - fn create_fork(&mut self, fork: CreateFork) -> eyre::Result { - let (id, fork) = self.backend.forks.create_fork(fork)?; - let id = self.inner.insert_new_fork(id, fork); - Ok(id) + fn create_fork( + &mut self, + fork: CreateFork, + subroutine: &SubRoutine, + ) -> eyre::Result { + trace!("fuzz: create fork"); + self.backend.to_mut().create_fork(fork, subroutine) } - fn select_fork(&mut self, id: U256, env: &mut Env) -> eyre::Result<()> { - let fork_id = self.ensure_fork_id(id).cloned()?; - let 
fork_env = self - .backend - .forks - .get_env(fork_id)? - .ok_or_else(|| eyre::eyre!("Requested fork `{}` does not exit", id))?; - let fork = self - .inner - .ensure_backend(id) - .or_else(|_| self.backend.inner.ensure_backend(id)) - .cloned()?; - - if let Some(ref mut db) = self.db_override { - db.db = BackendDatabase::Forked(fork, id); - } else { - let mut db = self.backend.db.clone(); - db.db = BackendDatabase::Forked(fork, id); - self.set_active(db); - } - - update_current_env_with_fork_env(env, fork_env); - Ok(()) + fn select_fork( + &mut self, + id: LocalForkId, + env: &mut Env, + subroutine: &mut SubRoutine, + ) -> eyre::Result<()> { + trace!(?id, "fuzz: select fork"); + self.backend.to_mut().select_fork(id, env, subroutine) } fn roll_fork( &mut self, env: &mut Env, block_number: U256, - id: Option, + id: Option, ) -> eyre::Result<()> { - let id = self.ensure_fork(id)?; - let (fork_id, fork) = self - .backend - .forks - .roll_fork(self.inner.ensure_fork_id(id).cloned()?, block_number.as_u64())?; - // this will update the local mapping - self.inner.update_fork_mapping(id, fork_id, fork); - if self.active_fork() == Some(id) { - // need to update the block number right away - env.block.number = block_number; - } - Ok(()) + trace!(?id, ?block_number, "fuzz: roll fork"); + self.backend.to_mut().roll_fork(env, block_number, id) + } + + fn active_fork_id(&self) -> Option { + self.backend.active_fork_id() + } + + fn ensure_fork(&self, id: Option) -> eyre::Result { + self.backend.ensure_fork(id) + } + + fn ensure_fork_id(&self, id: LocalForkId) -> eyre::Result<&ForkId> { + self.backend.ensure_fork_id(id) + } + + fn diagnose_revert( + &self, + callee: Address, + subroutine: &SubRoutine, + ) -> Option { + self.backend.diagnose_revert(callee, subroutine) } - fn active_fork(&self) -> Option { - self.active_db().db.as_fork() + fn is_persistent(&self, acc: &Address) -> bool { + self.backend.is_persistent(acc) } - fn ensure_fork(&self, id: Option) -> eyre::Result { - if let Some(id) = id { - if self.inner.issued_local_fork_ids.contains_key(&id) || - self.backend.inner.issued_local_fork_ids.contains_key(&id) - { - return Ok(id) - } - eyre::bail!("Requested fork `{}` does not exit", id) - } - if let Some(id) = self.active_fork() { - Ok(id) - } else { - eyre::bail!("No fork active") - } + fn remove_persistent_account(&mut self, account: &Address) -> bool { + self.backend.to_mut().remove_persistent_account(account) } - fn ensure_fork_id(&self, id: U256) -> eyre::Result<&ForkId> { - self.inner.ensure_fork_id(id).or_else(|_| self.backend.ensure_fork_id(id)) + fn add_persistent_account(&mut self, account: Address) -> bool { + self.backend.to_mut().add_persistent_account(account) } } impl<'a> DatabaseRef for FuzzBackendWrapper<'a> { fn basic(&self, address: H160) -> AccountInfo { - if let Some(ref db) = self.db_override { - DatabaseRef::basic(db, address) - } else { - DatabaseRef::basic(self.backend, address) - } + DatabaseRef::basic(self.backend.as_ref(), address) } fn code_by_hash(&self, code_hash: H256) -> Bytes { - if let Some(ref db) = self.db_override { - DatabaseRef::code_by_hash(db, code_hash) - } else { - DatabaseRef::code_by_hash(self.backend, code_hash) - } + DatabaseRef::code_by_hash(self.backend.as_ref(), code_hash) } fn storage(&self, address: H160, index: U256) -> U256 { - if let Some(ref db) = self.db_override { - DatabaseRef::storage(db, address, index) - } else { - DatabaseRef::storage(self.backend, address, index) - } + DatabaseRef::storage(self.backend.as_ref(), address, index) } fn 
block_hash(&self, number: U256) -> H256 { - if let Some(ref db) = self.db_override { - DatabaseRef::block_hash(db, number) - } else { - DatabaseRef::block_hash(self.backend, number) - } + DatabaseRef::block_hash(self.backend.as_ref(), number) } } diff --git a/evm/src/executor/backend/mod.rs b/evm/src/executor/backend/mod.rs index dfcbb34fc495..8167f75e29cc 100644 --- a/evm/src/executor/backend/mod.rs +++ b/evm/src/executor/backend/mod.rs @@ -9,19 +9,43 @@ use ethers::{ }; use hashbrown::HashMap as Map; use revm::{ - db::{CacheDB, DatabaseRef, EmptyDB}, - Account, AccountInfo, Database, DatabaseCommit, Env, Inspector, Log, Return, SubRoutine, - TransactOut, TransactTo, + db::{CacheDB, DatabaseRef}, + Account, AccountInfo, Database, DatabaseCommit, Env, InMemoryDB, Inspector, Log, Return, + SubRoutine, TransactOut, TransactTo, KECCAK_EMPTY, }; -use std::collections::HashMap; +use std::collections::{HashMap, HashSet}; use tracing::{trace, warn}; + mod fuzz; mod snapshot; pub use fuzz::FuzzBackendWrapper; +mod diagnostic; +pub use diagnostic::RevertDiagnostic; mod in_memory_db; -use crate::{abi::CHEATCODE_ADDRESS, executor::backend::snapshot::BackendSnapshot}; + +use crate::{ + abi::CHEATCODE_ADDRESS, + executor::{backend::snapshot::BackendSnapshot, inspector::DEFAULT_CREATE2_DEPLOYER}, + CALLER, +}; pub use in_memory_db::MemDb; +// A `revm::Database` that is used in forking mode +type ForkDB = CacheDB; + +/// Represents a numeric `ForkId` valid only for the existence of the `Backend`. +/// The difference between `ForkId` and `LocalForkId` is that `ForkId` tracks pairs of `endpoint + +/// block` which can be reused by multiple tests, whereas the `LocalForkId` is unique within a test +pub type LocalForkId = U256; + +/// Represents the index of a fork in the created forks vector +/// This is used for fast lookup +type ForkLookupIndex = usize; + +/// All accounts that will have persistent storage across fork swaps. See also [`clone_data()`] +const DEFAULT_PERSISTENT_ACCOUNTS: [H160; 3] = + [CALLER, CHEATCODE_ADDRESS, DEFAULT_CREATE2_DEPLOYER]; + /// An extension trait that allows us to easily extend the `revm::Inspector` capabilities #[auto_impl::auto_impl(&mut, Box)] pub trait DatabaseExt: Database { @@ -39,21 +63,30 @@ pub trait DatabaseExt: Database { /// **N.B.** While this reverts the state of the evm to the snapshot, it keeps new logs made /// since the snapshots was created. This way we can show logs that were emitted between /// snapshot and its revert. 
- /// This will also revert any changes in the `Env` and replace it with the caputured `Env` of + /// This will also revert any changes in the `Env` and replace it with the captured `Env` of /// `Self::snapshot` fn revert(&mut self, id: U256, subroutine: &SubRoutine, env: &mut Env) -> Option; /// Creates and also selects a new fork /// /// This is basically `create_fork` + `select_fork` - fn create_select_fork(&mut self, fork: CreateFork, env: &mut Env) -> eyre::Result { - let id = self.create_fork(fork)?; - self.select_fork(id, env)?; + fn create_select_fork( + &mut self, + fork: CreateFork, + env: &mut Env, + subroutine: &mut SubRoutine, + ) -> eyre::Result { + let id = self.create_fork(fork, subroutine)?; + self.select_fork(id, env, subroutine)?; Ok(id) } /// Creates a new fork but does _not_ select it - fn create_fork(&mut self, fork: CreateFork) -> eyre::Result; + fn create_fork( + &mut self, + fork: CreateFork, + subroutine: &SubRoutine, + ) -> eyre::Result; /// Selects the fork's state /// @@ -64,7 +97,12 @@ pub trait DatabaseExt: Database { /// # Errors /// /// Returns an error if no fork with the given `id` exists - fn select_fork(&mut self, id: U256, env: &mut Env) -> eyre::Result<()>; + fn select_fork( + &mut self, + id: LocalForkId, + env: &mut Env, + subroutine: &mut SubRoutine, + ) -> eyre::Result<()>; /// Updates the fork to given block number. /// @@ -77,11 +115,16 @@ pub trait DatabaseExt: Database { &mut self, env: &mut Env, block_number: U256, - id: Option, + id: Option, ) -> eyre::Result<()>; /// Returns the `ForkId` that's currently used in the database, if fork mode is on - fn active_fork(&self) -> Option; + fn active_fork_id(&self) -> Option; + + /// Whether the database is currently in forked + fn is_forked_mode(&self) -> bool { + self.active_fork_id().is_some() + } /// Ensures that an appropriate fork exits /// @@ -93,10 +136,62 @@ pub trait DatabaseExt: Database { /// Returns an error if the given `id` does not match any forks /// /// Returns an error if no fork exits - fn ensure_fork(&self, id: Option) -> eyre::Result; + fn ensure_fork(&self, id: Option) -> eyre::Result; /// Ensures that a corresponding `ForkId` exists for the given local `id` - fn ensure_fork_id(&self, id: U256) -> eyre::Result<&ForkId>; + fn ensure_fork_id(&self, id: LocalForkId) -> eyre::Result<&ForkId>; + + /// Handling multiple accounts/new contracts in a multifork environment can be challenging since + /// every fork has its own standalone storage section. So this can be a common error to run + /// into: + /// + /// ```solidity + /// function testCanDeploy() public { + /// cheats.selectFork(mainnetFork); + /// // contract created while on `mainnetFork` + /// DummyContract dummy = new DummyContract(); + /// // this will succeed + /// dummy.hello(); + /// + /// cheats.selectFork(optimismFork); + /// + /// cheats.expectRevert(); + /// // this will revert since `dummy` contract only exists on `mainnetFork` + /// dummy.hello(); + /// } + /// ``` + /// + /// If this happens (`dummy.hello()`), or more general, a call on an address that's not a + /// contract, revm will revert without useful context. This call will check in this context if + /// `address(dummy)` belongs to an existing contract and if not will check all other forks if + /// the contract is deployed there. + /// + /// Returns a more useful error message if that's the case + fn diagnose_revert(&self, callee: Address, subroutine: &SubRoutine) + -> Option; + + /// Returns true if the given account is currently marked as persistent. 
+ fn is_persistent(&self, acc: &Address) -> bool; + + /// Revokes persistent status from the given account. + fn remove_persistent_account(&mut self, account: &Address) -> bool; + + /// Marks the given account as persistent. + fn add_persistent_account(&mut self, account: Address) -> bool; + + /// Removes persistent status from all given accounts + fn remove_persistent_accounts(&mut self, accounts: impl IntoIterator) { + for acc in accounts { + self.remove_persistent_account(&acc); + } + } + + /// Extends the persistent accounts with the accounts the iterator yields. + fn extend_persistent_accounts(&mut self, accounts: impl IntoIterator) { + for acc in accounts { + self.add_persistent_account(acc); + } + } } /// Provides the underlying `revm::Database` implementation. @@ -118,11 +213,13 @@ pub trait DatabaseExt: Database { /// (`Backend::clone`). This way each contract uses its own encapsulated evm state. For in-memory /// testing, the database is just an owned `revm::InMemoryDB`. /// -/// The `db` if fork-mode basically consists of 2 halves: +/// Each `Fork`, identified by a unique id, uses completely separate storage, write operations are +/// performed only in the fork's own database, `ForkDB`. +/// +/// A `ForkDB` consists of 2 halves: /// - everything fetched from the remote is readonly /// - all local changes (instructed by the contract) are written to the backend's `db` and don't -/// alter the state of the remote client. This way a fork (`SharedBackend`), can be used by -/// multiple contracts at the same time. +/// alter the state of the remote client. /// /// # Fork swapping /// @@ -132,8 +229,10 @@ pub trait DatabaseExt: Database { /// When swapping forks (`Backend::select_fork()`) we also update the current `Env` of the `EVM` /// accordingly, so that all `block.*` config values match /// -/// **Note:** this only affects the readonly half of the `db`, local changes are persistent across -/// fork-state swaps. +/// When another for is selected [`DatabaseExt::select_fork()`] the entire storage, including +/// `Subroutine` is swapped, but the storage of the caller's and the test contract account is +/// _always_ cloned. This way a fork has entirely separate storage but data can still be shared +/// across fork boundaries via stack and contract variables. 
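An illustrative Solidity sketch (not part of this diff) of the fork-swapping semantics described above, assuming a `Cheats` interface bound to ds-test's `HEVM_ADDRESS` constant and RPC aliases `mainnet`/`optimism` configured under `rpc_endpoints`; `DummyContract` mirrors the example from the `diagnose_revert` docs. The test contract's own storage survives `selectFork` because it is persistent by default, while any other contract must be marked with `makePersistent` to keep its code and storage on every fork.

```solidity
// SPDX-License-Identifier: MIT
pragma solidity ^0.8.13;

import "ds-test/test.sol";

// Subset of the cheatcode interface added in this change (signatures as listed in the ABI above).
interface Cheats {
    function createFork(string calldata urlOrAlias) external returns (uint256);
    function selectFork(uint256 forkId) external;
    function makePersistent(address account) external;
    function isPersistent(address account) external returns (bool);
}

contract DummyContract {
    function hello() external pure returns (string memory) {
        return "hello";
    }
}

contract ForkSwapTest is DSTest {
    Cheats cheats = Cheats(HEVM_ADDRESS);

    // Lives in the test contract's storage, which is cloned into every selected fork.
    uint256 counter;

    function testDataSharedAcrossForks() public {
        uint256 mainnetFork = cheats.createFork("mainnet");   // alias assumed to be configured
        uint256 optimismFork = cheats.createFork("optimism"); // alias assumed to be configured

        cheats.selectFork(mainnetFork);
        counter = 42;
        DummyContract dummy = new DummyContract();
        // Without this, `dummy` would only exist in the mainnet fork's storage.
        cheats.makePersistent(address(dummy));

        cheats.selectFork(optimismFork);
        // The test contract is a default persistent account, so its storage came along.
        assertEq(counter, 42);
        // Persistent accounts keep code and storage on every fork.
        assertTrue(cheats.isPersistent(address(dummy)));
        dummy.hello();
    }
}
```

Without the `makePersistent(address(dummy))` call, the final `dummy.hello()` would revert on the Optimism fork, and the new `diagnose_revert` path would report that the contract only exists on the other fork.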
/// /// # Snapshotting /// @@ -151,9 +250,12 @@ pub trait DatabaseExt: Database { pub struct Backend { /// The access point for managing forks forks: MultiFork, - /// The database that holds the entire state, uses an internal database depending on current - /// state - pub db: CacheDB, + // The default in memory db + mem_db: InMemoryDB, + /// The currently active fork database + /// + /// If this is set, then the Backend is currently in forking mode + active_fork_ids: Option<(LocalForkId, ForkLookupIndex)>, /// holds additional Backend data inner: BackendInner, } @@ -171,46 +273,93 @@ impl Backend { /// if `fork` is `Some` this will launch with a `fork` database, otherwise with an in-memory /// database pub fn new(forks: MultiFork, fork: Option) -> Self { - let (db, launched_with) = if let Some(f) = fork { - let (id, fork) = forks.create_fork(f).expect("Unable to fork"); - (CacheDB::new(BackendDatabase::Forked(fork.clone(), U256::zero())), Some((id, fork))) - } else { - (CacheDB::new(BackendDatabase::InMemory(EmptyDB())), None) - }; // Note: this will take of registering the `fork` - Self { forks, db, inner: BackendInner::new(launched_with) } + let mut backend = Self { + forks, + mem_db: InMemoryDB::default(), + active_fork_ids: None, + inner: BackendInner { + persistent_accounts: HashSet::from(DEFAULT_PERSISTENT_ACCOUNTS), + ..Default::default() + }, + }; + + if let Some(fork) = fork { + let (fork_id, fork) = backend.forks.create_fork(fork).expect("Unable to create fork"); + let fork_db = ForkDB::new(fork); + backend.active_fork_ids = + Some(backend.inner.insert_new_fork(fork_id.clone(), fork_db, Default::default())); + backend.inner.launched_with_fork = Some(fork_id); + } + + backend } /// Creates a new instance with a `BackendDatabase::InMemory` cache layer for the `CacheDB` pub fn clone_empty(&self) -> Self { - let mut db = self.db.clone(); - db.db = BackendDatabase::InMemory(EmptyDB()); - Self { forks: self.forks.clone(), db, inner: Default::default() } + Self { + forks: self.forks.clone(), + mem_db: InMemoryDB::default(), + active_fork_ids: None, + inner: Default::default(), + } } pub fn insert_account_info(&mut self, address: H160, account: AccountInfo) { - self.db.insert_account_info(address, account) - } - - /// Returns all forks created by this backend - pub fn created_forks(&self) -> &HashMap { - &self.inner.created_forks + if let Some(db) = self.active_fork_db_mut() { + db.insert_account_info(address, account) + } else { + self.mem_db.insert_account_info(address, account) + } } /// Returns all snapshots created in this backend - pub fn snapshots(&self) -> &Snapshots>> { + pub fn snapshots(&self) -> &Snapshots> { &self.inner.snapshots } /// Sets the address of the `DSTest` contract that is being executed - pub fn set_test_contract(&mut self, addr: Address) -> &mut Self { - self.inner.test_contract_context = Some(addr); + /// + /// This will also mark the caller as persistent and remove the persistent status from the + /// previous test contract address + pub fn set_test_contract(&mut self, acc: Address) -> &mut Self { + trace!(?acc, "setting test account"); + // toggle the previous sender + if let Some(current) = self.inner.test_contract_address.take() { + self.remove_persistent_account(¤t); + } + + self.add_persistent_account(acc); + self.inner.test_contract_address = Some(acc); + self + } + + /// Sets the caller address + /// + /// This will also mark the caller as persistent and remove the persistent status from the + /// previous caller + pub fn set_caller(&mut self, acc: 
Address) -> &mut Self { + trace!(?acc, "setting caller account"); + // toggle the previous sender + if let Some(current) = self.inner.caller.take() { + if current != CALLER { + self.remove_persistent_account(¤t); + } + } + + self.add_persistent_account(acc); + self.inner.caller = Some(acc); self } /// Returns the address of the set `DSTest` contract pub fn test_contract_address(&self) -> Option
<Address>
{ - self.inner.test_contract_context + self.inner.test_contract_address + } + + /// Returns the set caller address + pub fn caller_address(&self) -> Option
<Address>
{ + self.inner.caller } /// Checks if the test contract associated with this backend failed, See @@ -249,6 +398,98 @@ impl Backend { value == U256::one() } + /// when creating or switching forks, we update the AccountInfo of the contract + pub(crate) fn update_fork_db(&self, subroutine: &mut SubRoutine, fork: &mut Fork) { + debug_assert!( + self.inner.test_contract_address.is_some(), + "Test contract address must be set" + ); + self.update_fork_db_contracts( + self.inner.persistent_accounts.iter().cloned(), + subroutine, + fork, + ) + } + + /// Copies the state of all `accounts` from the currently active db into the given `fork` + pub(crate) fn update_fork_db_contracts( + &self, + accounts: impl IntoIterator, + subroutine: &mut SubRoutine, + fork: &mut Fork, + ) { + if let Some((_, fork_idx)) = self.active_fork_ids.as_ref() { + let active = self.inner.get_fork(*fork_idx); + clone_data(accounts, &active.db, subroutine, fork) + } else { + clone_data(accounts, &self.mem_db, subroutine, fork) + } + } + + /// Returns the memory db used if not in forking mode + pub fn mem_db(&self) -> &InMemoryDB { + &self.mem_db + } + + /// Returns true if the `id` is currently active + pub fn is_active_fork(&self, id: LocalForkId) -> bool { + self.active_fork_ids.map(|(i, _)| i == id).unwrap_or_default() + } + + /// Returns the currently active `Fork`, if any + pub fn active_fork(&self) -> Option<&Fork> { + self.active_fork_ids.map(|(_, idx)| self.inner.get_fork(idx)) + } + + /// Returns the currently active `Fork`, if any + pub fn active_fork_mut(&mut self) -> Option<&mut Fork> { + self.active_fork_ids.map(|(_, idx)| self.inner.get_fork_mut(idx)) + } + + /// Returns the currently active `ForkDB`, if any + pub fn active_fork_db(&self) -> Option<&ForkDB> { + self.active_fork().map(|f| &f.db) + } + + /// Returns the currently active `ForkDB`, if any + fn active_fork_db_mut(&mut self) -> Option<&mut ForkDB> { + self.active_fork_mut().map(|f| &mut f.db) + } + + /// Creates a snapshot of the currently active database + pub(crate) fn create_db_snapshot(&self) -> BackendDatabaseSnapshot { + if let Some((id, idx)) = self.active_fork_ids { + let fork = self.inner.get_fork(idx).clone(); + let fork_id = self.inner.ensure_fork_id(id).cloned().expect("Exists; qed"); + BackendDatabaseSnapshot::Forked(id, fork_id, idx, Box::new(fork)) + } else { + BackendDatabaseSnapshot::InMemory(self.mem_db.clone()) + } + } + + /// Since each `Fork` tracks logs separately, we need to merge them to get _all_ of them + pub fn merged_logs(&self, mut logs: Vec) -> Vec { + if let Some((_, active)) = self.active_fork_ids { + let mut all_logs = Vec::with_capacity(logs.len()); + + self.inner + .forks + .iter() + .enumerate() + .filter_map(|(idx, f)| f.as_ref().map(|f| (idx, f))) + .for_each(|(idx, f)| { + if idx == active { + all_logs.append(&mut logs); + } else { + all_logs.extend(f.subroutine.logs.clone()) + } + }); + return all_logs + } + + logs + } + /// Executes the configured test call of the `env` without commiting state changes pub fn inspect_ref( &mut self, @@ -258,8 +499,9 @@ impl Backend { where INSP: Inspector, { + self.set_caller(env.tx.caller); if let TransactTo::Call(to) = env.tx.transact_to { - self.inner.test_contract_context = Some(to); + self.set_test_contract(to); } revm::evm_inner::(&mut env, self, &mut inspector).transact() } @@ -269,8 +511,9 @@ impl Backend { impl DatabaseExt for Backend { fn snapshot(&mut self, subroutine: &SubRoutine, env: &Env) -> U256 { + trace!("create snapshot"); let id = 
self.inner.snapshots.insert(BackendSnapshot::new( - self.db.clone(), + self.create_db_snapshot(), subroutine.clone(), env.clone(), )); @@ -284,6 +527,7 @@ impl DatabaseExt for Backend { subroutine: &SubRoutine, current: &mut Env, ) -> Option { + trace!(?id, "revert snapshot"); if let Some(mut snapshot) = self.inner.snapshots.remove(id) { // need to check whether DSTest's `failed` variable is set to `true` which means an // error occurred either during the snapshot or even before @@ -294,7 +538,16 @@ impl DatabaseExt for Backend { // merge additional logs snapshot.merge(subroutine); let BackendSnapshot { db, subroutine, env } = snapshot; - self.db = db; + match db { + BackendDatabaseSnapshot::InMemory(mem_db) => { + self.mem_db = mem_db; + } + BackendDatabaseSnapshot::Forked(id, fork_id, idx, fork) => { + self.inner.revert_snapshot(id, fork_id, idx, *fork); + self.active_fork_ids = Some((id, idx)) + } + } + update_current_env_with_fork_env(current, env); trace!(target: "backend", "Reverted snapshot {}", id); @@ -305,20 +558,50 @@ impl DatabaseExt for Backend { } } - fn create_fork(&mut self, fork: CreateFork) -> eyre::Result { - let (id, fork) = self.forks.create_fork(fork)?; - let id = self.inner.insert_new_fork(id, fork); + fn create_fork( + &mut self, + fork: CreateFork, + subroutine: &SubRoutine, + ) -> eyre::Result { + trace!("create fork"); + let (fork_id, fork) = self.forks.create_fork(fork)?; + let fork_db = ForkDB::new(fork); + let (id, _) = self.inner.insert_new_fork(fork_id, fork_db, subroutine.clone()); Ok(id) } - fn select_fork(&mut self, id: U256, env: &mut Env) -> eyre::Result<()> { + /// When switching forks we copy the shared state + fn select_fork( + &mut self, + id: LocalForkId, + env: &mut Env, + subroutine: &mut SubRoutine, + ) -> eyre::Result<()> { + trace!(?id, "select fork"); + if self.is_active_fork(id) { + // nothing to do + return Ok(()) + } + let fork_id = self.ensure_fork_id(id).cloned()?; + let idx = self.inner.ensure_fork_index(&fork_id)?; let fork_env = self .forks .get_env(fork_id)? 
.ok_or_else(|| eyre::eyre!("Requested fork `{}` does not exit", id))?; - let fork = self.inner.ensure_backend(id).cloned()?; - self.db.db = BackendDatabase::Forked(fork, id); + + // if currently active we need to update the subroutine to this point + if let Some(active) = self.active_fork_mut() { + active.subroutine = subroutine.clone(); + } + + // update the shared state and track + let mut fork = self.inner.take_fork(idx); + self.update_fork_db(subroutine, &mut fork); + self.inner.set_fork(idx, fork); + + self.active_fork_ids = Some((id, idx)); + // update the environment accordingly update_current_env_with_fork_env(env, fork_env); Ok(()) } @@ -327,152 +610,228 @@ impl DatabaseExt for Backend { &mut self, env: &mut Env, block_number: U256, - id: Option, + id: Option, ) -> eyre::Result<()> { + trace!(?id, ?block_number, "roll fork"); let id = self.ensure_fork(id)?; - let (fork_id, fork) = + let (fork_id, backend) = self.forks.roll_fork(self.inner.ensure_fork_id(id).cloned()?, block_number.as_u64())?; // this will update the local mapping - self.inner.update_fork_mapping(id, fork_id, fork); - if self.active_fork() == Some(id) { + self.inner.roll_fork(id, fork_id, backend)?; + if self.active_fork_id() == Some(id) { // need to update the block number right away env.block.number = block_number; } Ok(()) } - fn active_fork(&self) -> Option { - self.db.db.as_fork() + fn active_fork_id(&self) -> Option { + self.active_fork_ids.map(|(id, _)| id) } - fn ensure_fork(&self, id: Option) -> eyre::Result { + fn ensure_fork(&self, id: Option) -> eyre::Result { if let Some(id) = id { if self.inner.issued_local_fork_ids.contains_key(&id) { return Ok(id) } eyre::bail!("Requested fork `{}` does not exit", id) } - if let Some(id) = self.active_fork() { + if let Some(id) = self.active_fork_id() { Ok(id) } else { eyre::bail!("No fork active") } } - fn ensure_fork_id(&self, id: U256) -> eyre::Result<&ForkId> { + fn ensure_fork_id(&self, id: LocalForkId) -> eyre::Result<&ForkId> { self.inner.ensure_fork_id(id) } + + fn diagnose_revert( + &self, + callee: Address, + _subroutine: &SubRoutine, + ) -> Option { + let active_id = self.active_fork_id()?; + let active_fork = self.active_fork()?; + if !active_fork.is_contract(callee) { + // no contract for `callee` available on current fork, check if available on other forks + let mut available_on = Vec::new(); + for (id, fork) in self.inner.forks_iter().filter(|(id, _)| *id != active_id) { + if fork.is_contract(callee) { + available_on.push(id); + } + } + + return if available_on.is_empty() { + Some(RevertDiagnostic::ContractDoesNotExist { contract: callee, active: active_id }) + } else { + // likely user error: called a contract that's not available on active fork but is + // present other forks + Some(RevertDiagnostic::ContractExistsOnOtherForks { + contract: callee, + active: active_id, + available_on, + }) + } + } + None + } + + fn is_persistent(&self, acc: &Address) -> bool { + self.inner.persistent_accounts.contains(acc) + } + + fn remove_persistent_account(&mut self, account: &Address) -> bool { + trace!(?account, "remove persistent account"); + self.inner.persistent_accounts.remove(account) + } + + fn add_persistent_account(&mut self, account: Address) -> bool { + trace!(?account, "add persistent account"); + self.inner.persistent_accounts.insert(account) + } } impl DatabaseRef for Backend { fn basic(&self, address: H160) -> AccountInfo { - self.db.basic(address) + if let Some(db) = self.active_fork_db() { + db.basic(address) + } else { + 
self.mem_db.basic(address) + } } fn code_by_hash(&self, code_hash: H256) -> Bytes { - self.db.code_by_hash(code_hash) + if let Some(db) = self.active_fork_db() { + db.code_by_hash(code_hash) + } else { + self.mem_db.code_by_hash(code_hash) + } } fn storage(&self, address: H160, index: U256) -> U256 { - DatabaseRef::storage(&self.db, address, index) + if let Some(db) = self.active_fork_db() { + DatabaseRef::storage(db, address, index) + } else { + DatabaseRef::storage(&self.mem_db, address, index) + } } fn block_hash(&self, number: U256) -> H256 { - self.db.block_hash(number) + if let Some(db) = self.active_fork_db() { + db.block_hash(number) + } else { + self.mem_db.block_hash(number) + } } } impl<'a> DatabaseRef for &'a mut Backend { fn basic(&self, address: H160) -> AccountInfo { - DatabaseRef::basic(&self.db, address) + if let Some(db) = self.active_fork_db() { + DatabaseRef::basic(db, address) + } else { + DatabaseRef::basic(&self.mem_db, address) + } } fn code_by_hash(&self, code_hash: H256) -> Bytes { - DatabaseRef::code_by_hash(&self.db, code_hash) + if let Some(db) = self.active_fork_db() { + DatabaseRef::code_by_hash(db, code_hash) + } else { + DatabaseRef::code_by_hash(&self.mem_db, code_hash) + } } fn storage(&self, address: H160, index: U256) -> U256 { - DatabaseRef::storage(&self.db, address, index) + if let Some(db) = self.active_fork_db() { + DatabaseRef::storage(db, address, index) + } else { + DatabaseRef::storage(&self.mem_db, address, index) + } } fn block_hash(&self, number: U256) -> H256 { - DatabaseRef::block_hash(&self.db, number) + if let Some(db) = self.active_fork_db() { + DatabaseRef::block_hash(db, number) + } else { + DatabaseRef::block_hash(&self.mem_db, number) + } } } impl DatabaseCommit for Backend { fn commit(&mut self, changes: Map) { - self.db.commit(changes) + if let Some(db) = self.active_fork_db_mut() { + db.commit(changes) + } else { + self.mem_db.commit(changes) + } } } impl Database for Backend { fn basic(&mut self, address: H160) -> AccountInfo { - self.db.basic(address) + if let Some(db) = self.active_fork_db_mut() { + db.basic(address) + } else { + self.mem_db.basic(address) + } } fn code_by_hash(&mut self, code_hash: H256) -> Bytes { - self.db.code_by_hash(code_hash) + if let Some(db) = self.active_fork_db_mut() { + db.code_by_hash(code_hash) + } else { + self.mem_db.code_by_hash(code_hash) + } } fn storage(&mut self, address: H160, index: U256) -> U256 { - Database::storage(&mut self.db, address, index) + if let Some(db) = self.active_fork_db_mut() { + Database::storage(db, address, index) + } else { + Database::storage(&mut self.mem_db, address, index) + } } fn block_hash(&mut self, number: U256) -> H256 { - self.db.block_hash(number) + if let Some(db) = self.active_fork_db_mut() { + db.block_hash(number) + } else { + self.mem_db.block_hash(number) + } } } /// Variants of a [revm::Database] #[derive(Debug, Clone)] -pub enum BackendDatabase { +pub enum BackendDatabaseSnapshot { /// Simple in-memory [revm::Database] - InMemory(EmptyDB), - /// A [revm::Database] that forks of a remote location and can have multiple consumers of the - /// same data - Forked(SharedBackend, U256), + InMemory(InMemoryDB), + /// Contains the entire forking mode database + Forked(LocalForkId, ForkId, ForkLookupIndex, Box), } -// === impl BackendDatabase === - -impl BackendDatabase { - /// Returns the `ForkId` if in fork mode - pub fn as_fork(&self) -> Option { - match self { - BackendDatabase::InMemory(_) => None, - BackendDatabase::Forked(_, id) => Some(*id), - } - } 
+/// Represents a fork +#[derive(Debug, Clone)] +pub struct Fork { + db: ForkDB, + subroutine: SubRoutine, } -impl DatabaseRef for BackendDatabase { - fn basic(&self, address: H160) -> AccountInfo { - match self { - BackendDatabase::InMemory(inner) => inner.basic(address), - BackendDatabase::Forked(inner, _) => inner.basic(address), - } - } - - fn code_by_hash(&self, address: H256) -> Bytes { - match self { - BackendDatabase::InMemory(inner) => inner.code_by_hash(address), - BackendDatabase::Forked(inner, _) => inner.code_by_hash(address), - } - } - - fn storage(&self, address: H160, index: U256) -> U256 { - match self { - BackendDatabase::InMemory(inner) => inner.storage(address, index), - BackendDatabase::Forked(inner, _) => inner.storage(address, index), - } - } +// === impl Fork === - fn block_hash(&self, number: U256) -> H256 { - match self { - BackendDatabase::InMemory(inner) => inner.block_hash(number), - BackendDatabase::Forked(inner, _) => inner.block_hash(number), - } +impl Fork { + /// Returns true if the account is a contract + pub fn is_contract(&self, acc: Address) -> bool { + self.db.basic(acc).code_hash != KECCAK_EMPTY || + self.subroutine + .state + .get(&acc) + .map(|acc| acc.info.code_hash != KECCAK_EMPTY) + .unwrap_or_default() } } @@ -486,7 +845,7 @@ pub struct BackendInner { pub launched_with_fork: Option, /// This tracks numeric fork ids and the `ForkId` used by the handler. /// - /// This is neceessary, because there can be multiple `Backends` associated with a single + /// This is necessary, because there can be multiple `Backends` associated with a single /// `ForkId` which is only a pair of endpoint + block. Since an existing fork can be /// modified (e.g. `roll_fork`), but this should only affect the fork that's unique for the /// test and not the `ForkId` @@ -495,11 +854,15 @@ pub struct BackendInner { /// is basically creating(or reusing) another `ForkId` that's then mapped to the previous /// issued _local_ numeric identifier, that remains constant, even if the underlying fork /// backend changes. - pub issued_local_fork_ids: HashMap, - /// tracks all created forks - pub created_forks: HashMap, + pub issued_local_fork_ids: HashMap, + /// tracks all the created forks + /// Contains the index of the corresponding `ForkDB` in the `forks` vec + pub created_forks: HashMap, + /// Holds all created fork databases + // Note: data is stored in an `Option` so we can remove it without reshuffling + pub forks: Vec>, /// Contains snapshots made at a certain point - pub snapshots: Snapshots>>, + pub snapshots: Snapshots>, /// Tracks whether there was a failure in a snapshot that was reverted /// /// The Test contract contains a bool variable that is set to true when an `assert` function @@ -514,54 +877,131 @@ pub struct BackendInner { /// /// This address can be used to inspect the state of the contract when a test is being /// executed. E.g. the `_failed` variable of `DSTest` - pub test_contract_context: Option
<Address>
, + pub test_contract_address: Option
<Address>
, + /// Tracks the caller of the test function + pub caller: Option
<Address>
, /// Tracks numeric identifiers for forks - pub next_fork_id: U256, + pub next_fork_id: LocalForkId, + /// All accounts that should be kept persistent when switching forks. + /// This means all accounts stored here _don't_ use a separate storage section on each fork + /// instead the use only one that's persistent across fork swaps. + /// + /// See also [`clone_data()`] + pub persistent_accounts: HashSet
<Address>
, } // === impl BackendInner === impl BackendInner { - /// Creates a new instance that tracks the fork used at launch - pub fn new(launched_with: Option<(ForkId, SharedBackend)>) -> Self { - launched_with.map(|(id, backend)| Self::forked(id, backend)).unwrap_or_default() + pub fn ensure_fork_id(&self, id: LocalForkId) -> eyre::Result<&ForkId> { + self.issued_local_fork_ids + .get(&id) + .ok_or_else(|| eyre::eyre!("No matching fork found for {}", id)) } - pub fn forked(id: ForkId, fork: SharedBackend) -> Self { - let launched_with_fork = Some(id.clone()); - let mut backend = Self { launched_with_fork, ..Default::default() }; - backend.insert_new_fork(id, fork); - backend + pub fn ensure_fork_index(&self, id: &ForkId) -> eyre::Result { + self.created_forks + .get(id) + .copied() + .ok_or_else(|| eyre::eyre!("No matching fork found for {}", id)) + } + + /// Returns the underlying + #[track_caller] + fn get_fork(&self, idx: ForkLookupIndex) -> &Fork { + debug_assert!(idx < self.forks.len(), "fork lookup index must exist"); + self.forks[idx].as_ref().unwrap() + } + + /// Returns the underlying + #[track_caller] + fn get_fork_mut(&mut self, idx: ForkLookupIndex) -> &mut Fork { + debug_assert!(idx < self.forks.len(), "fork lookup index must exist"); + self.forks[idx].as_mut().unwrap() + } + + /// Removes the fork + fn take_fork(&mut self, idx: ForkLookupIndex) -> Fork { + debug_assert!(idx < self.forks.len(), "fork lookup index must exist"); + self.forks[idx].take().unwrap() } - pub fn ensure_fork_id(&self, id: U256) -> eyre::Result<&ForkId> { + fn set_fork(&mut self, idx: ForkLookupIndex, fork: Fork) { + self.forks[idx] = Some(fork) + } + + /// Returns an iterator over Forks + pub fn forks_iter(&self) -> impl Iterator + '_ { self.issued_local_fork_ids - .get(&id) - .ok_or_else(|| eyre::eyre!("No matching fork found for {}", id)) + .iter() + .map(|(id, fork_id)| (*id, self.get_fork(self.created_forks[fork_id]))) } - /// Ensures that the `SharedBackend` for the given `id` exits - pub fn ensure_backend(&self, id: U256) -> eyre::Result<&SharedBackend> { - self.get_backend(id).ok_or_else(|| eyre::eyre!("No matching fork found for {}", id)) + /// Reverts the entire fork database + pub fn revert_snapshot( + &mut self, + id: LocalForkId, + fork_id: ForkId, + idx: ForkLookupIndex, + fork: Fork, + ) { + self.created_forks.insert(fork_id.clone(), idx); + self.issued_local_fork_ids.insert(id, fork_id); + self.set_fork(idx, fork) } - /// Returns the matching `SharedBackend` for the givne id - fn get_backend(&self, id: U256) -> Option<&SharedBackend> { - self.issued_local_fork_ids.get(&id).and_then(|id| self.created_forks.get(id)) + /// Updates the fork and the local mapping and returns the new index for the `fork_db` + pub fn update_fork_mapping( + &mut self, + id: LocalForkId, + fork_id: ForkId, + db: ForkDB, + subroutine: SubRoutine, + ) -> ForkLookupIndex { + let idx = self.forks.len(); + self.issued_local_fork_ids.insert(id, fork_id.clone()); + self.created_forks.insert(fork_id, idx); + + let fork = Fork { db, subroutine }; + self.forks.push(Some(fork)); + idx } - /// Updates the fork and the local mapping - pub fn update_fork_mapping(&mut self, id: U256, fork: ForkId, backend: SharedBackend) { - self.created_forks.insert(fork.clone(), backend); - self.issued_local_fork_ids.insert(id, fork); + pub fn roll_fork( + &mut self, + id: LocalForkId, + new_fork_id: ForkId, + backend: SharedBackend, + ) -> eyre::Result { + let fork_id = self.ensure_fork_id(id)?; + let idx = self.ensure_fork_index(fork_id)?; + + 
if let Some(f) = self.forks[idx].as_mut() { + f.db.db = backend; + } + + // update mappings + self.issued_local_fork_ids.insert(id, new_fork_id.clone()); + self.created_forks.insert(new_fork_id, idx); + Ok(idx) } - /// Issues a new local fork identifier - pub fn insert_new_fork(&mut self, fork: ForkId, backend: SharedBackend) -> U256 { - self.created_forks.insert(fork.clone(), backend); + /// Inserts a _new_ `ForkDB` and issues a new local fork identifier + /// + /// Also returns the index where the `ForDB` is stored + pub fn insert_new_fork( + &mut self, + fork_id: ForkId, + db: ForkDB, + subroutine: SubRoutine, + ) -> (LocalForkId, ForkLookupIndex) { + let idx = self.forks.len(); + self.created_forks.insert(fork_id.clone(), idx); let id = self.next_id(); - self.issued_local_fork_ids.insert(id, fork); - id + self.issued_local_fork_ids.insert(id, fork_id); + let fork = Fork { db, subroutine }; + self.forks.push(Some(fork)); + (id, idx) } fn next_id(&mut self) -> U256 { @@ -569,6 +1009,16 @@ impl BackendInner { self.next_fork_id += U256::one(); id } + + /// Returns the number of issued ids + pub fn len(&self) -> usize { + self.issued_local_fork_ids.len() + } + + /// Returns true if no forks are issued + pub fn is_empty(&self) -> bool { + self.issued_local_fork_ids.is_empty() + } } /// This updates the currently used env with the fork's environment @@ -576,3 +1026,28 @@ pub(crate) fn update_current_env_with_fork_env(current: &mut Env, fork: Env) { current.block = fork.block; current.cfg = fork.cfg; } + +/// Clones the data of the given `accounts` from the `active` database into the `fork_db` +/// This includes the data held in storage (`CacheDB`) and kept in the `Subroutine` +pub(crate) fn clone_data( + accounts: impl IntoIterator, + active: &CacheDB, + active_subroutine: &mut SubRoutine, + fork: &mut Fork, +) { + for addr in accounts.into_iter() { + trace!(?addr, "cloning data"); + let acc = active.accounts.get(&addr).cloned().unwrap_or_default(); + if let Some(code) = active.contracts.get(&acc.info.code_hash).cloned() { + fork.db.contracts.insert(acc.info.code_hash, code); + } + fork.db.accounts.insert(addr, acc); + + if let Some(acc) = active_subroutine.state.get(&addr).cloned() { + trace!(?addr, "updating subroutine account data"); + fork.subroutine.state.insert(addr, acc); + } + } + + *active_subroutine = fork.subroutine.clone(); +} diff --git a/evm/src/executor/fork/multi.rs b/evm/src/executor/fork/multi.rs index f2a8288171c1..99de26bd456e 100644 --- a/evm/src/executor/fork/multi.rs +++ b/evm/src/executor/fork/multi.rs @@ -113,6 +113,7 @@ impl MultiFork { /// /// If no matching fork backend exists it will be created pub fn create_fork(&self, fork: CreateFork) -> eyre::Result<(ForkId, SharedBackend)> { + trace!("Creating new fork, url={}, block={:?}", fork.url, fork.evm_opts.fork_block_number); let (sender, rx) = oneshot_channel(); let req = Request::CreateFork(Box::new(fork), sender); self.handler.clone().try_send(req).map_err(|e| eyre::eyre!("{:?}", e))?; @@ -123,6 +124,7 @@ impl MultiFork { /// /// If no matching fork backend exists it will be created pub fn roll_fork(&self, fork: ForkId, block: u64) -> eyre::Result<(ForkId, SharedBackend)> { + trace!(?fork, ?block, "rolling fork"); let (sender, rx) = oneshot_channel(); let req = Request::RollFork(fork, block, sender); self.handler.clone().try_send(req).map_err(|e| eyre::eyre!("{:?}", e))?; @@ -131,6 +133,7 @@ impl MultiFork { /// Returns the `Env` of the given fork, if any pub fn get_env(&self, fork: ForkId) -> eyre::Result> { + 
trace!(?fork, "getting env config"); let (sender, rx) = oneshot_channel(); let req = Request::GetEnv(fork, sender); self.handler.clone().try_send(req).map_err(|e| eyre::eyre!("{:?}", e))?; @@ -141,8 +144,10 @@ impl MultiFork { /// /// Returns `None` if no matching fork backend is available. pub fn get_fork(&self, id: impl Into) -> eyre::Result> { + let id = id.into(); + trace!(?id, "get fork backend"); let (sender, rx) = oneshot_channel(); - let req = Request::GetFork(id.into(), sender); + let req = Request::GetFork(id, sender); self.handler.clone().try_send(req).map_err(|e| eyre::eyre!("{:?}", e))?; Ok(rx.recv()?) } @@ -226,6 +231,8 @@ impl MultiForkHandler { fn create_fork(&mut self, fork: CreateFork, sender: CreateSender) { let fork_id = create_fork_id(&fork.url, fork.evm_opts.fork_block_number); + trace!(?fork_id, "created new forkId"); + if let Some(fork) = self.forks.get_mut(&fork_id) { fork.num_senders += 1; let _ = sender.send(Ok((fork_id, fork.backend.clone()))); diff --git a/evm/src/executor/inspector/cheatcodes/config.rs b/evm/src/executor/inspector/cheatcodes/config.rs index 5b86ee418425..08bd46fd539a 100644 --- a/evm/src/executor/inspector/cheatcodes/config.rs +++ b/evm/src/executor/inspector/cheatcodes/config.rs @@ -3,6 +3,7 @@ use bytes::Bytes; use foundry_config::{cache::StorageCachingConfig, Config, ResolvedRpcEndpoints}; use std::path::{Path, PathBuf}; +use tracing::trace; use super::util; @@ -38,10 +39,12 @@ impl CheatsConfig { allowed_paths.extend(config.libs.clone()); allowed_paths.extend(config.allow_paths.clone()); + let rpc_endpoints = config.rpc_endpoints.clone().resolved(); + trace!(?rpc_endpoints, "using resolved rpc endpoints"); Self { ffi: evm_opts.ffi, rpc_storage_caching: config.rpc_storage_caching.clone(), - rpc_endpoints: config.rpc_endpoints.clone().resolved(), + rpc_endpoints, root: config.__root.0.clone(), allowed_paths, evm_opts: evm_opts.clone(), diff --git a/evm/src/executor/inspector/cheatcodes/expect.rs b/evm/src/executor/inspector/cheatcodes/expect.rs index e6f204c6d921..9f0561bfbf73 100644 --- a/evm/src/executor/inspector/cheatcodes/expect.rs +++ b/evm/src/executor/inspector/cheatcodes/expect.rs @@ -1,5 +1,3 @@ -use std::cmp::Ordering; - use super::Cheatcodes; use crate::{ abi::HEVMCalls, @@ -11,6 +9,7 @@ use ethers::{ types::{Address, H160, U256}, }; use revm::{return_ok, Database, EVMData, Return}; +use std::cmp::Ordering; /// For some cheatcodes we may internally change the status of the call, i.e. in `expectRevert`. /// Solidity will see a successful call and attempt to decode the return data. 
Therefore, we need @@ -72,37 +71,39 @@ pub fn handle_expect_revert( _ => None, }; - let (err, actual_revert): (_, Bytes) = match string_data { - Some(data) => { - // It's a revert string, so we do some conversion to perform the check - let decoded_data = ethers::prelude::Bytes::decode(data) - .expect("String error code, but data can't be decoded as bytes"); + let stringify = |data: &[u8]| { + String::decode(data) + .ok() + .or_else(|| String::from_utf8(data.to_vec()).ok()) + .unwrap_or_else(|| format!("0x{}", hex::encode(data))) + }; + + let (err, actual_revert): (_, Bytes) = if let Some(data) = string_data { + // It's a revert string, so we do some conversion to perform the check + let decoded_data = ethers::prelude::Bytes::decode(data) + .expect("String error code, but data can't be decoded as bytes"); - ( - format!( - "Error != expected error: '{}' != '{}'", - String::from_utf8(decoded_data.to_vec()) - .ok() - .unwrap_or_else(|| hex::encode(&decoded_data)), - String::from_utf8(expected_revert.to_vec()) - .ok() - .unwrap_or_else(|| hex::encode(&expected_revert)) - ) - .encode() - .into(), - decoded_data.0, + ( + format!( + "Error != expected error: '{}' != '{}'", + stringify(&decoded_data), + stringify(expected_revert), ) - } - _ => ( + .encode() + .into(), + decoded_data.0, + ) + } else { + ( format!( - "Error != expected error: 0x{} != 0x{}", - hex::encode(&retdata), - hex::encode(&expected_revert) + "Error != expected error: {} != {}", + stringify(&retdata), + stringify(expected_revert), ) .encode() .into(), retdata, - ), + ) }; if actual_revert == expected_revert { diff --git a/evm/src/executor/inspector/cheatcodes/fork.rs b/evm/src/executor/inspector/cheatcodes/fork.rs index 78b832dabbff..8fff66e026e3 100644 --- a/evm/src/executor/inspector/cheatcodes/fork.rs +++ b/evm/src/executor/inspector/cheatcodes/fork.rs @@ -29,9 +29,37 @@ pub fn apply( .map(|id| id.encode().into()) } HEVMCalls::SelectFork(fork_id) => select_fork(data, fork_id.0), + HEVMCalls::MakePersistent0(acc) => { + data.db.add_persistent_account(acc.0); + Ok(Default::default()) + } + HEVMCalls::MakePersistent1(acc) => { + data.db.extend_persistent_accounts(acc.0.clone()); + Ok(Default::default()) + } + HEVMCalls::MakePersistent2(acc) => { + data.db.add_persistent_account(acc.0); + data.db.add_persistent_account(acc.1); + Ok(Default::default()) + } + HEVMCalls::MakePersistent3(acc) => { + data.db.add_persistent_account(acc.0); + data.db.add_persistent_account(acc.1); + data.db.add_persistent_account(acc.2); + Ok(Default::default()) + } + HEVMCalls::IsPersistent(acc) => Ok(data.db.is_persistent(&acc.0).encode().into()), + HEVMCalls::RevokePersistent0(acc) => { + data.db.remove_persistent_account(&acc.0); + Ok(Default::default()) + } + HEVMCalls::RevokePersistent1(acc) => { + data.db.remove_persistent_accounts(acc.0.clone()); + Ok(Default::default()) + } HEVMCalls::ActiveFork(_) => data .db - .active_fork() + .active_fork_id() .map(|id| id.encode().into()) .ok_or_else(|| util::encode_error("No active fork")), HEVMCalls::RollFork0(fork) => { @@ -69,7 +97,10 @@ pub fn apply( /// Selects the given fork id fn select_fork(data: &mut EVMData, fork_id: U256) -> Result { - data.db.select_fork(fork_id, data.env).map(|_| Default::default()).map_err(util::encode_error) + data.db + .select_fork(fork_id, data.env, &mut data.subroutine) + .map(|_| Default::default()) + .map_err(util::encode_error) } /// Creates and then also selects the new fork @@ -80,7 +111,7 @@ fn create_select_fork( block: Option, ) -> Result { let fork = 
create_fork_request(state, url_or_alias, block, data)?; - data.db.create_select_fork(fork, data.env).map_err(util::encode_error) + data.db.create_select_fork(fork, data.env, &mut data.subroutine).map_err(util::encode_error) } /// Creates a new fork @@ -91,7 +122,7 @@ fn create_fork( block: Option, ) -> Result { let fork = create_fork_request(state, url_or_alias, block, data)?; - data.db.create_fork(fork).map_err(util::encode_error) + data.db.create_fork(fork, &data.subroutine).map_err(util::encode_error) } /// Creates the request object for a new fork request diff --git a/evm/src/executor/inspector/cheatcodes/mod.rs b/evm/src/executor/inspector/cheatcodes/mod.rs index a4a4e61daacf..3200f591b6d9 100644 --- a/evm/src/executor/inspector/cheatcodes/mod.rs +++ b/evm/src/executor/inspector/cheatcodes/mod.rs @@ -20,6 +20,7 @@ use ethers::{ }; use revm::{ opcode, BlockEnv, CallInputs, CreateInputs, EVMData, Gas, Inspector, Interpreter, Return, + TransactTo, }; use std::{ collections::{BTreeMap, HashMap, VecDeque}, @@ -49,6 +50,7 @@ pub mod util; pub use util::{DEFAULT_CREATE2_DEPLOYER, MISSING_CREATE2_DEPLOYER}; mod config; +use crate::executor::backend::RevertDiagnostic; pub use config::CheatsConfig; /// An inspector that handles calls to various cheatcodes, each with their own behavior. @@ -78,6 +80,9 @@ pub struct Cheatcodes { /// Expected revert information pub expected_revert: Option, + /// Additional diagnostic for reverts + pub fork_revert_diagnostic: Option, + /// Recorded storage reads and writes pub accesses: Option, @@ -176,6 +181,40 @@ where Return::Continue } + fn step(&mut self, interpreter: &mut Interpreter, _: &mut EVMData<'_, DB>, _: bool) -> Return { + // Record writes and reads if `record` has been called + if let Some(storage_accesses) = &mut self.accesses { + match interpreter.contract.code[interpreter.program_counter()] { + opcode::SLOAD => { + let key = try_or_continue!(interpreter.stack().peek(0)); + storage_accesses + .reads + .entry(interpreter.contract().address) + .or_insert_with(Vec::new) + .push(key); + } + opcode::SSTORE => { + let key = try_or_continue!(interpreter.stack().peek(0)); + + // An SSTORE does an SLOAD internally + storage_accesses + .reads + .entry(interpreter.contract().address) + .or_insert_with(Vec::new) + .push(key); + storage_accesses + .writes + .entry(interpreter.contract().address) + .or_insert_with(Vec::new) + .push(key); + } + _ => (), + } + } + + Return::Continue + } + fn log(&mut self, _: &mut EVMData<'_, DB>, address: &Address, topics: &[H256], data: &Bytes) { // Match logs if `expectEmit` has been called if !self.expected_emits.is_empty() { @@ -306,40 +345,6 @@ where } } - fn step(&mut self, interpreter: &mut Interpreter, _: &mut EVMData<'_, DB>, _: bool) -> Return { - // Record writes and reads if `record` has been called - if let Some(storage_accesses) = &mut self.accesses { - match interpreter.contract.code[interpreter.program_counter()] { - opcode::SLOAD => { - let key = try_or_continue!(interpreter.stack().peek(0)); - storage_accesses - .reads - .entry(interpreter.contract().address) - .or_insert_with(Vec::new) - .push(key); - } - opcode::SSTORE => { - let key = try_or_continue!(interpreter.stack().peek(0)); - - // An SSTORE does an SLOAD internally - storage_accesses - .reads - .entry(interpreter.contract().address) - .or_insert_with(Vec::new) - .push(key); - storage_accesses - .writes - .entry(interpreter.contract().address) - .or_insert_with(Vec::new) - .push(key); - } - _ => (), - } - } - - Return::Continue - } - fn call_end( &mut 
self, data: &mut EVMData<'_, DB>, @@ -431,6 +436,30 @@ where } } + // if there's a revert and a previous call was diagnosed as fork related revert then we can + // return a better error here + if status == Return::Revert { + if let Some(err) = self.fork_revert_diagnostic.take() { + return (status, remaining_gas, err.to_error_msg(self).encode().into()) + } + } + + // this will ensure we don't have false positives when trying to diagnose reverts in fork + // mode + let _ = self.fork_revert_diagnostic.take(); + + // try to diagnose reverts in multi-fork mode where a call is made to an address that does + // not exist + if let TransactTo::Call(test_contract) = data.env.tx.transact_to { + // if a call to a different contract than the original test contract returned with + // `Stop` we check if the contract actually exists on the active fork + if data.db.is_forked_mode() && status == Return::Stop && call.contract != test_contract + { + self.fork_revert_diagnostic = + data.db.diagnose_revert(call.contract, &data.subroutine); + } + } + (status, remaining_gas, retdata) } diff --git a/evm/src/executor/inspector/stack.rs b/evm/src/executor/inspector/stack.rs index 483d50e79cfc..684a12bc20a9 100644 --- a/evm/src/executor/inspector/stack.rs +++ b/evm/src/executor/inspector/stack.rs @@ -212,9 +212,10 @@ where is_static, ); - // If the inspector returns a different status we assume it wants to tell us - // something - if new_status != status { + // If the inspector returns a different status or a revert with a non-empty message, + // we assume it wants to tell us something + if new_status != status || (new_status == Return::Revert && new_retdata != retdata) + { return (new_status, new_gas, new_retdata) } } diff --git a/evm/src/executor/mod.rs b/evm/src/executor/mod.rs index 4670b6cb9a07..67c7ea94bcd7 100644 --- a/evm/src/executor/mod.rs +++ b/evm/src/executor/mod.rs @@ -81,7 +81,7 @@ impl Executor { ) -> Self { // Need to create a non-empty contract on the cheatcodes address so `extcodesize` checks // does not fail - backend.db.insert_account_info( + backend.insert_account_info( CHEATCODE_ADDRESS, revm::AccountInfo { code: Some(Bytes::from_static(&[1])), ..Default::default() }, ); @@ -171,10 +171,11 @@ impl Executor { pub fn setup( &mut self, from: Option
, - address: Address, + to: Address, ) -> std::result::Result, EvmError> { let from = from.unwrap_or(CALLER); - self.call_committing::<(), _, _>(from, address, "setUp()", (), 0.into(), None) + self.backend_mut().set_test_contract(to).set_caller(from); + self.call_committing::<(), _, _>(from, to, "setUp()", (), 0.into(), None) } /// Performs a call to an account on the current state of the VM. @@ -327,6 +328,9 @@ impl Executor { let (status, out, gas, state_changeset, logs) = self.backend_mut().inspect_ref(env, &mut inspector); + // if there are multiple forks we need to merge them + let logs = self.backend.merged_logs(logs); + let executed_call = ExecutedCall { status, out, gas, state_changeset, logs, stipend }; let call_result = convert_executed_call(inspector, executed_call)?; @@ -369,6 +373,9 @@ impl Executor { let env = self.build_env(from, TransactTo::Call(to), calldata, value); let mut db = FuzzBackendWrapper::new(self.backend()); let (status, out, gas, state_changeset, logs) = db.inspect_ref(env, &mut inspector); + + let logs = db.backend.merged_logs(logs); + let executed_call = ExecutedCall { status, out, gas, state_changeset, logs, stipend }; convert_executed_call(inspector, executed_call) } @@ -664,7 +671,7 @@ fn convert_executed_call( _ => Bytes::default(), }; - let InspectorData { logs, labels, traces, debug, cheatcodes, coverage, .. } = + let InspectorData { logs, labels, traces, coverage, debug, cheatcodes } = inspector.collect_inspector_states(); let transactions = if let Some(cheats) = cheatcodes { diff --git a/evm/src/fuzz/mod.rs b/evm/src/fuzz/mod.rs index 6f3807a10617..cccc1a923bf2 100644 --- a/evm/src/fuzz/mod.rs +++ b/evm/src/fuzz/mod.rs @@ -61,7 +61,11 @@ impl<'a> FuzzedExecutor<'a> { let counterexample: RefCell<(Bytes, RawCallResult)> = RefCell::new(Default::default()); // Stores fuzz state for use with [fuzz_calldata_from_state] - let state: EvmFuzzState = build_initial_state(&self.executor.backend().db); + let state: EvmFuzzState = if let Some(fork_db) = self.executor.backend().active_fork_db() { + build_initial_state(fork_db) + } else { + build_initial_state(self.executor.backend().mem_db()) + }; // TODO: We should have a `FuzzerOpts` struct where we can configure the fuzzer. When we // have that, we should add a way to configure strategy weights diff --git a/evm/src/lib.rs b/evm/src/lib.rs index 9eaeb10780e6..a3b32f8b9126 100644 --- a/evm/src/lib.rs +++ b/evm/src/lib.rs @@ -36,7 +36,7 @@ use serde::{Deserialize, Serialize}; /// /// The address was derived from `address(uint160(uint256(keccak256("foundry default caller"))))` /// and is equal to 0x1804c8AB1F12E6bbf3894d4083f33e07309d1f38. -pub static CALLER: Address = H160([ +pub const CALLER: Address = H160([ 0x18, 0x04, 0xc8, 0xAB, 0x1F, 0x12, 0xE6, 0xbb, 0xF3, 0x89, 0x4D, 0x40, 0x83, 0xF3, 0x3E, 0x07, 0x30, 0x9D, 0x1F, 0x38, ]); diff --git a/forge/src/multi_runner.rs b/forge/src/multi_runner.rs index b97123b774be..f3624ce37297 100644 --- a/forge/src/multi_runner.rs +++ b/forge/src/multi_runner.rs @@ -1189,13 +1189,41 @@ Reason: `setEnv` failed to set an environment variable `{}={}`", ); } - /// Executes all fork cheatcodes + /// Executes reverting fork test + #[test] + fn test_cheats_fork_revert() { + let mut runner = runner(); + let suite_result = runner + .test( + &Filter::new( + "testNonExistingContractRevert", + ".*", + &format!(".*cheats{}Fork", RE_PATH_SEPARATOR), + ), + None, + true, + ) + .unwrap(); + assert_eq!(suite_result.len(), 1); + + for (_, SuiteResult { test_results, .. 
}) in suite_result { + for (_, result) in test_results { + assert_eq!( + result.reason.unwrap(), + "Contract 0xCe71065D4017F316EC606Fe4422e11eB2c47c246 does not exists on active fork with id `1`\n But exists on non active forks: `[0]`" + ); + } + } + } + + /// Executes all non-reverting fork cheatcodes #[test] fn test_cheats_fork() { let mut runner = runner(); let suite_result = runner .test( - &Filter::new(".*", ".*", &format!(".*cheats{}Fork", RE_PATH_SEPARATOR)), + &Filter::new(".*", ".*", &format!(".*cheats{}Fork", RE_PATH_SEPARATOR)) + .exclude_tests(".*Revert"), None, true, ) diff --git a/forge/src/runner.rs b/forge/src/runner.rs index 098f73f4eb55..2150c507ce10 100644 --- a/forge/src/runner.rs +++ b/forge/src/runner.rs @@ -66,6 +66,8 @@ impl<'a> ContractRunner<'a> { /// Deploys the test contract inside the runner from the sending account, and optionally runs /// the `setUp` function on the test contract. pub fn setup(&mut self, setup: bool) -> Result { + trace!(?setup, "Setting test contract"); + // We max out their balance so that they can deploy and make calls. self.executor.set_balance(self.sender, U256::MAX); self.executor.set_balance(CALLER, U256::MAX); diff --git a/forge/src/test_helpers.rs b/forge/src/test_helpers.rs index 9e906ec899af..0819b9cdad1f 100644 --- a/forge/src/test_helpers.rs +++ b/forge/src/test_helpers.rs @@ -95,6 +95,7 @@ pub mod filter { test_regex: Regex, contract_regex: Regex, path_regex: Regex, + exclude_tests: Option<Regex>, } impl Filter { @@ -103,21 +104,37 @@ pub mod filter { test_regex: Regex::new(test_pattern).unwrap(), contract_regex: Regex::new(contract_pattern).unwrap(), path_regex: Regex::new(path_pattern).unwrap(), + exclude_tests: None, } } + /// An additional pattern of test names to exclude + /// + /// This is a workaround since the regex crate does not support negative lookaheads + pub fn exclude_tests(mut self, pattern: &str) -> Self { + self.exclude_tests = Some(Regex::new(pattern).unwrap()); + self + } + pub fn matches_all() -> Self { Filter { test_regex: Regex::new(".*").unwrap(), contract_regex: Regex::new(".*").unwrap(), path_regex: Regex::new(".*").unwrap(), + exclude_tests: None, } } } impl TestFilter for Filter { fn matches_test(&self, test_name: impl AsRef<str>) -> bool { - self.test_regex.is_match(test_name.as_ref()) + let test_name = test_name.as_ref(); + if let Some(ref exclude) = self.exclude_tests { + if exclude.is_match(test_name) { + return false + } + } + self.test_regex.is_match(test_name) } fn matches_contract(&self, contract_name: impl AsRef<str>) -> bool { diff --git a/testdata/cheats/Cheats.sol b/testdata/cheats/Cheats.sol index ca75a94ada9a..d6b8aa295982 100644 --- a/testdata/cheats/Cheats.sol +++ b/testdata/cheats/Cheats.sol @@ -157,9 +157,20 @@ interface Cheats { function createSelectFork(string calldata) external returns(uint256); // Takes a fork identifier created by `createFork` and sets the corresponding forked state as active. function selectFork(uint256) external; - /// Returns the currently active fork - /// Reverts if no fork is currently active + // Returns the currently active fork + // Reverts if no fork is currently active function activeFork() external returns(uint256); + // Marks that the account(s) should use persistent storage across fork swaps.
+ // That is, changes made to the state of this account will be kept when switching forks + function makePersistent(address) external; + function makePersistent(address, address) external; + function makePersistent(address, address, address) external; + function makePersistent(address[] calldata) external; + // Revokes persistent status from the address, previously added via `makePersistent` + function revokePersistent(address) external; + function revokePersistent(address[] calldata) external; + // Returns true if the account is marked as persistent + function isPersistent(address) external returns (bool); // Updates the currently active fork to given block number // This is similar to `roll` but for the currently active fork function rollFork(uint256) external; diff --git a/testdata/cheats/Fork.t.sol b/testdata/cheats/Fork.t.sol index 533c80086236..9d69ae91d2cc 100644 --- a/testdata/cheats/Fork.t.sol +++ b/testdata/cheats/Fork.t.sol @@ -20,10 +20,13 @@ contract ForkTest is DSTest { uint256 forkA; uint256 forkB; + uint256 testValue; + // this will create two _different_ forks during setup function setUp() public { forkA = cheats.createFork("https://eth-mainnet.alchemyapi.io/v2/Lc7oIGYeL_QvInzI0Wiu_pOZZDEKBrdf", mainblock); forkB = cheats.createFork("https://eth-mainnet.alchemyapi.io/v2/9VWGraLx0tMiSWx05WH-ywgSVmMxs66W", mainblock - 1); + testValue = 999; } // ensures forks use different ids @@ -31,6 +34,12 @@ assert(forkA != forkB); } + // ensures we can create and select in one step + function testCreateSelect() public { + uint256 fork = cheats.createSelectFork("https://eth-mainnet.alchemyapi.io/v2/Lc7oIGYeL_QvInzI0Wiu_pOZZDEKBrdf"); + assertEq(fork, cheats.activeFork()); + } + // ensures forks use different ids function testCanSwitchForks() public { cheats.selectFork(forkA); @@ -39,7 +48,7 @@ cheats.selectFork(forkA); } - function testLocalStatePersistent() public { + function testForksHaveSeparatedStorage() public { cheats.selectFork(forkA); // read state from forkA assert( @@ -48,8 +57,9 @@ cheats.selectFork(forkB); // read state from forkB + uint256 forkBbalance = WETH.balanceOf(0x0000000000000000000000000000000000000000); assert( - WETH.balanceOf(0x0000000000000000000000000000000000000000) != 1 + forkBbalance != 1 ); cheats.selectFork(forkA); @@ -62,8 +72,33 @@ cheats.store(WETH_TOKEN_ADDR, zero_address_balance_slot, value); assertEq(WETH.balanceOf(0x0000000000000000000000000000000000000000), 1, "Cheatcode did not change value at the storage slot."); - // switch forks and ensure local modified state is persistent + // switch forks and ensure the balance on forkB remains untouched cheats.selectFork(forkB); - assertEq(WETH.balanceOf(0x0000000000000000000000000000000000000000), 1, "Cheatcode did not change value at the storage slot."); + assert( + forkBbalance != 1 + ); + // balance of forkB is untouched + assertEq(WETH.balanceOf(0x0000000000000000000000000000000000000000), forkBbalance, "Balance on forkB should remain untouched by changes made on forkA."); + } + + function testCanShareDataAcrossSwaps() public { + assertEq(testValue, 999); + + uint256 val = 300; + cheats.selectFork(forkA); + assertEq(val, 300); + + testValue = 100; + + cheats.selectFork(forkB); + assertEq(val, 300); + assertEq(testValue, 100); + + val = 99; + testValue = 300; + + cheats.selectFork(forkA); + assertEq(val, 99); + assertEq(testValue, 300); } } \ No newline at end of file diff --git 
a/testdata/cheats/Fork2.t.sol b/testdata/cheats/Fork2.t.sol index cffc1d445381..ea9fb11c1068 100644 --- a/testdata/cheats/Fork2.t.sol +++ b/testdata/cheats/Fork2.t.sol @@ -23,8 +23,7 @@ contract MyContract { function ensureBlockHash() public view { require( - blockhash(block.number - 1) == blockHash, - "Block Hash does not match" + blockhash(block.number - 1) == blockHash, "Block Hash does not match" ); } } @@ -76,24 +75,6 @@ contract ForkTest is DSTest { assert(mainHash != optimismHash); } - function testCanSwitchContracts() public { - cheats.selectFork(mainnetFork); - MyContract contract1 = new MyContract(mainnetFork); - - contract1.ensureForkId(mainnetFork); // Valid - contract1.ensureBlockHash(); // Valid - - cheats.selectFork(optimismFork); - - cheats.expectRevert("ForkId does not match"); - contract1.ensureForkId(optimismFork); - - contract1.ensureForkId(mainnetFork); // Valid - - cheats.expectRevert("Block Hash does not match"); - contract1.ensureBlockHash(); - } - // test that we can switch between forks, and "roll" blocks function testCanRollFork() public { cheats.selectFork(mainnetFork); @@ -101,7 +82,7 @@ cheats.selectFork(otherMain); uint256 mainBlock = block.number; - uint256 forkedBlock = 14_608_400; + uint256 forkedBlock = 14608400; uint256 otherFork = cheats.createFork("rpcAlias", forkedBlock); cheats.selectFork(otherFork); assertEq(block.number, forkedBlock); @@ -116,4 +97,59 @@ cheats.selectFork(otherMain); assertEq(block.number, mainBlock + 1); } + + // checks that an account marked as persistent keeps its code and storage across fork swaps + function testMarkPersistent() public { + assert(cheats.isPersistent(msg.sender)); + assert(cheats.isPersistent(address(this))); + + cheats.selectFork(mainnetFork); + + DummyContract dummy = new DummyContract(); + assert(!cheats.isPersistent(address(dummy))); + + uint256 expectedValue = 99; + dummy.set(expectedValue); + + cheats.selectFork(optimismFork); + + cheats.selectFork(mainnetFork); + assertEq(dummy.val(), expectedValue); + cheats.makePersistent(address(dummy)); + assert(cheats.isPersistent(address(dummy))); + + cheats.selectFork(optimismFork); + // the account is now marked as persistent and the contract is persistent across swaps + dummy.hello(); + assertEq(dummy.val(), expectedValue); + } + + // checks the revert diagnostic for a call to a contract that does not exist on the active fork + function testNonExistingContractRevert() public { + cheats.selectFork(mainnetFork); + DummyContract dummy = new DummyContract(); + + // this will succeed since `dummy` is deployed on the currently active fork + string memory msg = dummy.hello(); + + address dummyAddress = address(dummy); + + cheats.selectFork(optimismFork); + assertEq(dummyAddress, address(dummy)); + + // this will revert since `dummy` does not exist on the currently active fork + string memory msg2 = dummy.hello(); + } +} + +contract DummyContract { + uint256 public val; + + function hello() external view returns (string memory) { + return "hello"; + } + + function set(uint256 _val) public { + val = _val; + } }
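For reviewers, a minimal sketch of how the new persistence cheatcodes introduced in this diff are intended to compose. This is illustrative only: the `CheatsSketch` interface below merely mirrors the relevant declarations added to testdata/cheats/Cheats.sol, and the `Dummy` contract and the "rpcAlias" endpoint are placeholders rather than part of this change.

pragma solidity >=0.8.0;

// Subset of the cheatcode interface used by this sketch (mirrors testdata/cheats/Cheats.sol).
interface CheatsSketch {
    function createSelectFork(string calldata) external returns (uint256);
    function selectFork(uint256) external;
    function makePersistent(address) external;
    function revokePersistent(address) external;
    function isPersistent(address) external returns (bool);
}

// Placeholder contract that is deployed on one fork only.
contract Dummy {
    uint256 public val;
    function set(uint256 _val) public { val = _val; }
}

contract PersistenceSketch {
    // Same cheatcode address derivation as the rest of the test suite.
    CheatsSketch constant cheats =
        CheatsSketch(address(uint160(uint256(keccak256("hevm cheat code")))));

    function testPersistenceSketch() public {
        uint256 forkA = cheats.createSelectFork("rpcAlias");

        Dummy dummy = new Dummy(); // exists on forkA only
        dummy.set(42);

        // Without this, `dummy` would not exist after switching away from forkA,
        // and calling it would trigger the new revert diagnostic.
        cheats.makePersistent(address(dummy));
        require(cheats.isPersistent(address(dummy)));

        cheats.createSelectFork("rpcAlias"); // create and switch to a second fork
        require(dummy.val() == 42); // code and storage carried over

        cheats.revokePersistent(address(dummy));
        require(!cheats.isPersistent(address(dummy)));

        cheats.selectFork(forkA); // switch back to the original fork
    }
}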