diff --git a/.github/assets/check_wasm.sh b/.github/assets/check_wasm.sh index b313c32ce33d4..8d53f457af915 100755 --- a/.github/assets/check_wasm.sh +++ b/.github/assets/check_wasm.sh @@ -35,7 +35,6 @@ exclude_crates=( reth-ethereum-payload-builder reth-etl reth-evm-ethereum - reth-evm-optimism reth-execution-errors reth-exex reth-exex-test-utils @@ -49,8 +48,9 @@ exclude_crates=( reth-node-ethereum reth-node-events reth-node-metrics - reth-node-optimism reth-optimism-cli + reth-optimism-evm + reth-optimism-node reth-optimism-payload-builder reth-optimism-rpc reth-payload-builder diff --git a/.github/workflows/integration.yml b/.github/workflows/integration.yml index 103a87706bcae..82bd5705a3200 100644 --- a/.github/workflows/integration.yml +++ b/.github/workflows/integration.yml @@ -47,7 +47,7 @@ jobs: name: Run tests run: | cargo nextest run \ - --locked -p reth-node-optimism --features "optimism" + --locked -p reth-optimism-node --features "optimism" integration-success: name: integration success diff --git a/Cargo.lock b/Cargo.lock index 7509d0f2020c7..540a5e978f133 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -734,16 +734,15 @@ dependencies = [ [[package]] name = "alloy-trie" -version = "0.5.3" +version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0a46c9c4fdccda7982e7928904bd85fe235a0404ee3d7e197fff13d61eac8b4f" +checksum = "e9703ce68b97f8faae6f7739d1e003fc97621b856953cbcdbb2b515743f23288" dependencies = [ "alloy-primitives", "alloy-rlp", "arbitrary", "derive_arbitrary", "derive_more", - "hashbrown 0.14.5", "nybbles", "proptest", "proptest-derive", @@ -2763,7 +2762,6 @@ dependencies = [ "futures-util", "reth", "reth-node-ethereum", - "reth-rpc-types", ] [[package]] @@ -5215,8 +5213,8 @@ dependencies = [ "clap", "reth-cli-util", "reth-node-builder", - "reth-node-optimism", "reth-optimism-cli", + "reth-optimism-node", "reth-optimism-rpc", "reth-provider", ] @@ -6397,6 +6395,7 @@ name = "reth-blockchain-tree" version = "1.0.7" dependencies = [ "alloy-consensus", + "alloy-eips", "alloy-genesis", "alloy-primitives", "aquamarine", @@ -7397,11 +7396,14 @@ name = "reth-exex" version = "1.0.7" dependencies = [ "alloy-consensus", + "alloy-eips", "alloy-genesis", "alloy-primitives", + "dashmap 6.1.0", "eyre", "futures", "metrics", + "parking_lot 0.12.3", "reth-blockchain-tree", "reth-chain-state", "reth-chainspec", @@ -7544,6 +7546,7 @@ dependencies = [ "rand 0.8.5", "rand_xorshift", "reth-mdbx-sys", + "smallvec", "tempfile", "thiserror", "tracing", @@ -8071,14 +8074,14 @@ dependencies = [ "reth-db-common", "reth-downloaders", "reth-errors", - "reth-evm-optimism", "reth-execution-types", "reth-network-p2p", "reth-node-builder", "reth-node-core", "reth-node-events", - "reth-node-optimism", "reth-optimism-chainspec", + "reth-optimism-evm", + "reth-optimism-node", "reth-optimism-primitives", "reth-primitives", "reth-provider", @@ -8109,6 +8112,30 @@ dependencies = [ "tracing", ] +[[package]] +name = "reth-optimism-evm" +version = "1.0.7" +dependencies = [ + "alloy-consensus", + "alloy-genesis", + "alloy-primitives", + "reth-chainspec", + "reth-ethereum-forks", + "reth-evm", + "reth-execution-errors", + "reth-execution-types", + "reth-optimism-chainspec", + "reth-optimism-consensus", + "reth-optimism-forks", + "reth-primitives", + "reth-prune-types", + "reth-revm", + "revm", + "revm-primitives", + "thiserror", + "tracing", +] + [[package]] name = "reth-optimism-forks" version = "1.0.7" @@ -8120,6 +8147,58 @@ dependencies = [ "serde", ] +[[package]] +name = 
"reth-optimism-node" +version = "1.0.7" +dependencies = [ + "alloy-genesis", + "alloy-primitives", + "alloy-rpc-types-engine", + "async-trait", + "clap", + "eyre", + "jsonrpsee", + "jsonrpsee-types", + "op-alloy-rpc-types-engine", + "parking_lot 0.12.3", + "reqwest", + "reth", + "reth-auto-seal-consensus", + "reth-basic-payload-builder", + "reth-beacon-consensus", + "reth-chainspec", + "reth-consensus", + "reth-db", + "reth-discv5", + "reth-e2e-test-utils", + "reth-evm", + "reth-network", + "reth-node-api", + "reth-node-builder", + "reth-optimism-chainspec", + "reth-optimism-consensus", + "reth-optimism-evm", + "reth-optimism-forks", + "reth-optimism-payload-builder", + "reth-optimism-rpc", + "reth-payload-builder", + "reth-primitives", + "reth-provider", + "reth-revm", + "reth-rpc", + "reth-rpc-eth-api", + "reth-rpc-eth-types", + "reth-rpc-types", + "reth-rpc-types-compat", + "reth-tracing", + "reth-transaction-pool", + "serde", + "serde_json", + "thiserror", + "tokio", + "tracing", +] + [[package]] name = "reth-optimism-payload-builder" version = "1.0.7" @@ -8133,9 +8212,9 @@ dependencies = [ "reth-chain-state", "reth-chainspec", "reth-evm", - "reth-evm-optimism", "reth-execution-types", "reth-optimism-consensus", + "reth-optimism-evm", "reth-optimism-forks", "reth-payload-builder", "reth-payload-primitives", @@ -8178,12 +8257,12 @@ dependencies = [ "reqwest", "reth-chainspec", "reth-evm", - "reth-evm-optimism", "reth-network-api", "reth-node-api", "reth-node-builder", "reth-optimism-chainspec", "reth-optimism-consensus", + "reth-optimism-evm", "reth-optimism-forks", "reth-primitives", "reth-provider", @@ -10831,9 +10910,9 @@ dependencies = [ [[package]] name = "tracy-client" -version = "0.17.3" +version = "0.17.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "373db47331c3407b343538df77eea2516884a0b126cdfb4b135acfd400015dd7" +checksum = "746b078c6a09ebfd5594609049e07116735c304671eaab06ce749854d23435bc" dependencies = [ "loom", "once_cell", @@ -10843,9 +10922,9 @@ dependencies = [ [[package]] name = "tracy-client-sys" -version = "0.24.0" +version = "0.24.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "49cf0064dcb31c99aa1244c1b93439359e53f72ed217eef5db50abd442241e9a" +checksum = "68613466112302fdbeabc5fa55f7d57462a0b247d5a6b7d7e09401fb471a144d" dependencies = [ "cc", ] diff --git a/Cargo.toml b/Cargo.toml index 4ef2b272db44a..7387029568d7e 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -333,7 +333,7 @@ reth-ethereum-payload-builder = { path = "crates/ethereum/payload" } reth-etl = { path = "crates/etl" } reth-evm = { path = "crates/evm" } reth-evm-ethereum = { path = "crates/ethereum/evm" } -reth-evm-optimism = { path = "crates/optimism/evm" } +reth-optimism-evm = { path = "crates/optimism/evm" } reth-execution-errors = { path = "crates/evm/execution-errors" } reth-execution-types = { path = "crates/evm/execution-types" } reth-exex = { path = "crates/exex/exex" } @@ -360,7 +360,7 @@ reth-node-core = { path = "crates/node/core" } reth-node-ethereum = { path = "crates/ethereum/node" } reth-node-events = { path = "crates/node/events" } reth-node-metrics = { path = "crates/node/metrics" } -reth-node-optimism = { path = "crates/optimism/node" } +reth-optimism-node = { path = "crates/optimism/node" } reth-node-types = { path = "crates/node/types" } reth-optimism-chainspec = { path = "crates/optimism/chainspec" } reth-optimism-cli = { path = "crates/optimism/cli" } @@ -426,7 +426,7 @@ alloy-dyn-abi = "0.8.0" alloy-primitives = { 
version = "0.8.4", default-features = false } alloy-rlp = "0.3.4" alloy-sol-types = "0.8.0" -alloy-trie = { version = "0.5", default-features = false } +alloy-trie = { version = "0.6", default-features = false } alloy-consensus = { version = "0.3.6", default-features = false } alloy-eips = { version = "0.3.6", default-features = false } diff --git a/book/developers/exex/hello-world.md b/book/developers/exex/hello-world.md index 3c90e5a693d08..facb07e5307f2 100644 --- a/book/developers/exex/hello-world.md +++ b/book/developers/exex/hello-world.md @@ -125,7 +125,7 @@ async fn my_exex(mut ctx: ExExContext) -> eyre:: if let Some(committed_chain) = notification.committed_chain() { ctx.events - .send(ExExEvent::FinishedHeight(committed_chain.tip().number))?; + .send(ExExEvent::FinishedHeight(committed_chain.tip().num_hash()))?; } } diff --git a/book/developers/exex/how-it-works.md b/book/developers/exex/how-it-works.md index 7fd179bf91559..228711d3fea48 100644 --- a/book/developers/exex/how-it-works.md +++ b/book/developers/exex/how-it-works.md @@ -23,4 +23,4 @@ event to signify what blocks have been processed. This event is used by Reth to An ExEx will only receive notifications for block numbers greater than the block in the most recently emitted `FinishedHeight` event. -To clarify: if an ExEx emits `ExExEvent::FinishedHeight(0)` it will receive notifications for any `block_number > 0`. +To clarify: if an ExEx emits `ExExEvent::FinishedHeight` for `block #0` it will receive notifications for any `block_number > 0`. diff --git a/book/developers/exex/remote.md b/book/developers/exex/remote.md index 2db5074e1df75..4344e28b34fc8 100644 --- a/book/developers/exex/remote.md +++ b/book/developers/exex/remote.md @@ -279,7 +279,7 @@ async fn remote_exex( while let Some(notification) = ctx.notifications.next().await { if let Some(committed_chain) = notification.committed_chain() { ctx.events - .send(ExExEvent::FinishedHeight(committed_chain.tip().number))?; + .send(ExExEvent::FinishedHeight(committed_chain.tip().num_hash()))?; } info!("Notification sent to the gRPC server"); @@ -388,7 +388,7 @@ async fn remote_exex( while let Some(notification) = ctx.notifications.next().await { if let Some(committed_chain) = notification.committed_chain() { ctx.events - .send(ExExEvent::FinishedHeight(committed_chain.tip().number))?; + .send(ExExEvent::FinishedHeight(committed_chain.tip().num_hash()))?; } info!(?notification, "Notification sent to the gRPC server"); diff --git a/book/developers/exex/tracking-state.md b/book/developers/exex/tracking-state.md index 4d3bbd0a35ae1..52c73e6180297 100644 --- a/book/developers/exex/tracking-state.md +++ b/book/developers/exex/tracking-state.md @@ -57,7 +57,7 @@ impl Future for MyExEx { if let Some(committed_chain) = notification.committed_chain() { this.ctx .events - .send(ExExEvent::FinishedHeight(committed_chain.tip().number))?; + .send(ExExEvent::FinishedHeight(committed_chain.tip().num_hash()))?; } } @@ -152,7 +152,7 @@ impl Future for MyExEx { this.ctx .events - .send(ExExEvent::FinishedHeight(committed_chain.tip().number))?; + .send(ExExEvent::FinishedHeight(committed_chain.tip().num_hash()))?; } if let Some(first_block) = this.first_block { diff --git a/crates/blockchain-tree/Cargo.toml b/crates/blockchain-tree/Cargo.toml index d0718c97b8ccd..cff117c92b05b 100644 --- a/crates/blockchain-tree/Cargo.toml +++ b/crates/blockchain-tree/Cargo.toml @@ -32,6 +32,7 @@ reth-node-types.workspace = true # ethereum alloy-primitives.workspace = true +alloy-eips.workspace = true # 
common parking_lot.workspace = true diff --git a/crates/blockchain-tree/src/block_buffer.rs b/crates/blockchain-tree/src/block_buffer.rs index 99729af0fae5b..e116463e4af67 100644 --- a/crates/blockchain-tree/src/block_buffer.rs +++ b/crates/blockchain-tree/src/block_buffer.rs @@ -183,8 +183,9 @@ impl BlockBuffer { #[cfg(test)] mod tests { use crate::BlockBuffer; + use alloy_eips::BlockNumHash; use alloy_primitives::BlockHash; - use reth_primitives::{BlockNumHash, SealedBlockWithSenders}; + use reth_primitives::SealedBlockWithSenders; use reth_testing_utils::generators::{self, random_block, BlockParams, Rng}; use std::collections::HashMap; diff --git a/crates/blockchain-tree/src/block_indices.rs b/crates/blockchain-tree/src/block_indices.rs index 23c63bf6d243a..0c48b3b9ce853 100644 --- a/crates/blockchain-tree/src/block_indices.rs +++ b/crates/blockchain-tree/src/block_indices.rs @@ -2,10 +2,11 @@ use super::state::SidechainId; use crate::canonical_chain::CanonicalChain; +use alloy_eips::BlockNumHash; use alloy_primitives::{BlockHash, BlockNumber}; use linked_hash_set::LinkedHashSet; use reth_execution_types::Chain; -use reth_primitives::{BlockNumHash, SealedBlockWithSenders}; +use reth_primitives::SealedBlockWithSenders; use std::collections::{btree_map, hash_map, BTreeMap, BTreeSet, HashMap, HashSet}; /// Internal indices of the blocks and chains. diff --git a/crates/blockchain-tree/src/blockchain_tree.rs b/crates/blockchain-tree/src/blockchain_tree.rs index 5d3bffe8968d4..e0feee2cdc2c5 100644 --- a/crates/blockchain-tree/src/blockchain_tree.rs +++ b/crates/blockchain-tree/src/blockchain_tree.rs @@ -5,6 +5,7 @@ use crate::{ state::{SidechainId, TreeState}, AppendableChain, BlockIndices, BlockchainTreeConfig, ExecutionData, TreeExternals, }; +use alloy_eips::{BlockNumHash, ForkBlock}; use alloy_primitives::{BlockHash, BlockNumber, B256, U256}; use reth_blockchain_tree_api::{ error::{BlockchainTreeError, CanonicalError, InsertBlockError, InsertBlockErrorKind}, @@ -16,8 +17,8 @@ use reth_execution_errors::{BlockExecutionError, BlockValidationError}; use reth_execution_types::{Chain, ExecutionOutcome}; use reth_node_types::NodeTypesWithDB; use reth_primitives::{ - BlockNumHash, EthereumHardfork, ForkBlock, GotExpected, Hardforks, Receipt, SealedBlock, - SealedBlockWithSenders, SealedHeader, StaticFileSegment, + EthereumHardfork, GotExpected, Hardforks, Receipt, SealedBlock, SealedBlockWithSenders, + SealedHeader, StaticFileSegment, }; use reth_provider::{ providers::ProviderNodeTypes, BlockExecutionWriter, BlockNumReader, BlockWriter, diff --git a/crates/blockchain-tree/src/bundle.rs b/crates/blockchain-tree/src/bundle.rs index 226afd8fab59e..6f62d4136bb76 100644 --- a/crates/blockchain-tree/src/bundle.rs +++ b/crates/blockchain-tree/src/bundle.rs @@ -1,7 +1,7 @@ //! [`ExecutionDataProvider`] implementations used by the tree. 
+use alloy_eips::ForkBlock; use alloy_primitives::{BlockHash, BlockNumber}; -use reth_primitives::ForkBlock; use reth_provider::{BlockExecutionForkProvider, ExecutionDataProvider, ExecutionOutcome}; use std::collections::BTreeMap; diff --git a/crates/blockchain-tree/src/canonical_chain.rs b/crates/blockchain-tree/src/canonical_chain.rs index e3dc596ba0e72..7dcd466f7d64a 100644 --- a/crates/blockchain-tree/src/canonical_chain.rs +++ b/crates/blockchain-tree/src/canonical_chain.rs @@ -1,5 +1,5 @@ +use alloy_eips::BlockNumHash; use alloy_primitives::{BlockHash, BlockNumber}; -use reth_primitives::BlockNumHash; use std::collections::BTreeMap; /// This keeps track of (non-finalized) blocks of the canonical chain. diff --git a/crates/blockchain-tree/src/chain.rs b/crates/blockchain-tree/src/chain.rs index 596458e20390c..393e525d5ae20 100644 --- a/crates/blockchain-tree/src/chain.rs +++ b/crates/blockchain-tree/src/chain.rs @@ -5,6 +5,7 @@ use super::externals::TreeExternals; use crate::BundleStateDataRef; +use alloy_eips::ForkBlock; use alloy_primitives::{BlockHash, BlockNumber, U256}; use reth_blockchain_tree_api::{ error::{BlockchainTreeError, InsertBlockErrorKind}, @@ -14,7 +15,7 @@ use reth_consensus::{Consensus, ConsensusError, PostExecutionInput}; use reth_evm::execute::{BlockExecutorProvider, Executor}; use reth_execution_errors::BlockExecutionError; use reth_execution_types::{Chain, ExecutionOutcome}; -use reth_primitives::{ForkBlock, GotExpected, SealedBlockWithSenders, SealedHeader}; +use reth_primitives::{GotExpected, SealedBlockWithSenders, SealedHeader}; use reth_provider::{ providers::{BundleStateProvider, ConsistentDbView, ProviderNodeTypes}, FullExecutionDataProvider, ProviderError, StateRootProvider, TryIntoHistoricalStateProvider, diff --git a/crates/blockchain-tree/src/noop.rs b/crates/blockchain-tree/src/noop.rs index 76e59a47792f0..925b8f03add7b 100644 --- a/crates/blockchain-tree/src/noop.rs +++ b/crates/blockchain-tree/src/noop.rs @@ -1,3 +1,4 @@ +use alloy_eips::BlockNumHash; use alloy_primitives::{BlockHash, BlockNumber}; use reth_blockchain_tree_api::{ self, @@ -5,7 +6,7 @@ use reth_blockchain_tree_api::{ BlockValidationKind, BlockchainTreeEngine, BlockchainTreeViewer, CanonicalOutcome, InsertPayloadOk, }; -use reth_primitives::{BlockNumHash, Receipt, SealedBlock, SealedBlockWithSenders, SealedHeader}; +use reth_primitives::{Receipt, SealedBlock, SealedBlockWithSenders, SealedHeader}; use reth_provider::{ BlockchainTreePendingStateProvider, CanonStateNotificationSender, CanonStateNotifications, CanonStateSubscriptions, FullExecutionDataProvider, diff --git a/crates/blockchain-tree/src/shareable.rs b/crates/blockchain-tree/src/shareable.rs index 333527b83ef79..8e6cceccdd19c 100644 --- a/crates/blockchain-tree/src/shareable.rs +++ b/crates/blockchain-tree/src/shareable.rs @@ -1,6 +1,7 @@ //! Wrapper around `BlockchainTree` that allows for it to be shared. 
use super::BlockchainTree; +use alloy_eips::BlockNumHash; use alloy_primitives::{BlockHash, BlockNumber}; use parking_lot::RwLock; use reth_blockchain_tree_api::{ @@ -10,7 +11,7 @@ use reth_blockchain_tree_api::{ }; use reth_evm::execute::BlockExecutorProvider; use reth_node_types::NodeTypesWithDB; -use reth_primitives::{BlockNumHash, Receipt, SealedBlock, SealedBlockWithSenders, SealedHeader}; +use reth_primitives::{Receipt, SealedBlock, SealedBlockWithSenders, SealedHeader}; use reth_provider::{ providers::ProviderNodeTypes, BlockchainTreePendingStateProvider, CanonStateSubscriptions, FullExecutionDataProvider, ProviderError, diff --git a/crates/chain-state/src/in_memory.rs b/crates/chain-state/src/in_memory.rs index 1563dad64759f..fc142dd03a71b 100644 --- a/crates/chain-state/src/in_memory.rs +++ b/crates/chain-state/src/in_memory.rs @@ -233,7 +233,7 @@ impl CanonicalInMemoryState { pub fn set_pending_block(&self, pending: ExecutedBlock) { // fetch the state of the pending block's parent block let parent = self.state_by_hash(pending.block().parent_hash); - let pending = BlockState::with_parent(pending, parent.map(|p| (*p).clone())); + let pending = BlockState::with_parent(pending, parent); self.inner.in_memory_state.pending.send_modify(|p| { p.replace(pending); }); @@ -261,8 +261,7 @@ impl CanonicalInMemoryState { // insert the new blocks for block in new_blocks { let parent = blocks.get(&block.block().parent_hash).cloned(); - let block_state = - BlockState::with_parent(block.clone(), parent.map(|p| (*p).clone())); + let block_state = BlockState::with_parent(block.clone(), parent); let hash = block_state.hash(); let number = block_state.number(); @@ -329,8 +328,7 @@ impl CanonicalInMemoryState { for block in old_blocks { let parent = blocks.get(&block.block().parent_hash).cloned(); - let block_state = - BlockState::with_parent(block.clone(), parent.map(|p| (*p).clone())); + let block_state = BlockState::with_parent(block.clone(), parent); let hash = block_state.hash(); let number = block_state.number(); @@ -342,10 +340,7 @@ impl CanonicalInMemoryState { // also shift the pending state if it exists self.inner.in_memory_state.pending.send_modify(|p| { if let Some(p) = p.as_mut() { - p.parent = blocks - .get(&p.block().block.parent_hash) - .cloned() - .map(|p| Box::new((*p).clone())); + p.parent = blocks.get(&p.block().block.parent_hash).cloned(); } }); } @@ -517,22 +512,12 @@ impl CanonicalInMemoryState { MemoryOverlayStateProvider::new(historical, in_memory) } - /// Returns an iterator over all canonical blocks in the in-memory state, from newest to oldest. + /// Returns an iterator over all __canonical blocks__ in the in-memory state, from newest to + /// oldest (highest to lowest). + /// + /// This iterator contains a snapshot of the in-memory state at the time of the call. pub fn canonical_chain(&self) -> impl Iterator> { - let pending = self.inner.in_memory_state.pending.borrow().clone(); - let head = self.inner.in_memory_state.head_state(); - - // this clone is cheap because we only expect to keep in memory a few - // blocks and all of them are Arcs. - let blocks = self.inner.in_memory_state.blocks.read().clone(); - - std::iter::once(pending).filter_map(|p| p.map(Arc::new)).chain(std::iter::successors( - head, - move |state| { - let parent_hash = state.block().block().parent_hash; - blocks.get(&parent_hash).cloned() - }, - )) + self.inner.in_memory_state.head_state().into_iter().flat_map(|head| head.iter()) } /// Returns a `TransactionSigned` for the given `TxHash` if found. 
@@ -594,7 +579,7 @@ pub struct BlockState {
     /// The executed block that determines the state after this block has been executed.
     block: ExecutedBlock,
     /// The block's parent block if it exists.
-    parent: Option<Box<BlockState>>,
+    parent: Option<Arc<BlockState>>,
 }
 
 #[allow(dead_code)]
@@ -605,8 +590,8 @@ impl BlockState {
     }
 
     /// [`BlockState`] constructor with parent.
-    pub fn with_parent(block: ExecutedBlock, parent: Option<BlockState>) -> Self {
-        Self { block, parent: parent.map(Box::new) }
+    pub const fn with_parent(block: ExecutedBlock, parent: Option<Arc<BlockState>>) -> Self {
+        Self { block, parent }
     }
 
     /// Returns the hash and block of the on disk block this state can be traced back to.
@@ -666,8 +651,12 @@ impl BlockState {
             .unwrap_or_default()
     }
 
-    /// Returns a vector of parent `BlockStates`.
-    /// The block state order in the output vector is newest to oldest.
+    /// Returns a vector of __parent__ `BlockStates`.
+    ///
+    /// The block state order in the output vector is newest to oldest (highest to lowest):
+    /// `[5,4,3,2,1]`
+    ///
+    /// Note: This does not include self.
     pub fn parent_state_chain(&self) -> Vec<&Self> {
         let mut parents = Vec::new();
         let mut current = self.parent.as_deref();
@@ -681,8 +670,8 @@ impl BlockState {
     }
 
     /// Returns a vector of `BlockStates` representing the entire in memory chain.
-    /// The block state order in the output vector is newest to oldest, including
-    /// self as the first element.
+    /// The block state order in the output vector is newest to oldest (highest to lowest),
+    /// including self as the first element.
     pub fn chain(&self) -> Vec<&Self> {
         let mut chain = vec![self];
         self.append_parent_chain(&mut chain);
@@ -693,6 +682,13 @@ impl BlockState {
     pub fn append_parent_chain<'a>(&'a self, chain: &mut Vec<&'a Self>) {
         chain.extend(self.parent_state_chain());
     }
+
+    /// Returns an iterator over the atomically captured chain of in memory blocks.
+    ///
+    /// This yields the blocks from newest to oldest (highest to lowest).
+    pub fn iter(self: Arc<Self>) -> impl Iterator<Item = Arc<Self>> {
+        std::iter::successors(Some(self), |state| state.parent.clone())
+    }
 }
 
 /// Represents an executed block stored in-memory. 
@@ -870,7 +866,7 @@ mod tests { for i in 1..=num_blocks { let mut state = create_mock_state(test_block_builder, i, parent_hash); if let Some(parent) = parent_state { - state.parent = Some(Box::new(parent)); + state.parent = Some(Arc::new(parent)); } parent_hash = state.hash(); parent_state = Some(state.clone()); @@ -1166,7 +1162,7 @@ mod tests { // Check the pending state assert_eq!( state.pending_state().unwrap(), - BlockState::with_parent(block2.clone(), Some(BlockState::new(block1))) + BlockState::with_parent(block2.clone(), Some(Arc::new(BlockState::new(block1)))) ); // Check the pending block @@ -1201,14 +1197,14 @@ mod tests { let block2 = test_block_builder.get_executed_block_with_number(2, block1.block().hash()); let block3 = test_block_builder.get_executed_block_with_number(3, block2.block().hash()); - let state1 = BlockState::new(block1.clone()); - let state2 = BlockState::with_parent(block2.clone(), Some(state1.clone())); - let state3 = BlockState::with_parent(block3.clone(), Some(state2.clone())); + let state1 = Arc::new(BlockState::new(block1.clone())); + let state2 = Arc::new(BlockState::with_parent(block2.clone(), Some(state1.clone()))); + let state3 = Arc::new(BlockState::with_parent(block3.clone(), Some(state2.clone()))); let mut blocks = HashMap::default(); - blocks.insert(block1.block().hash(), Arc::new(state1)); - blocks.insert(block2.block().hash(), Arc::new(state2)); - blocks.insert(block3.block().hash(), Arc::new(state3)); + blocks.insert(block1.block().hash(), state1); + blocks.insert(block2.block().hash(), state2); + blocks.insert(block3.block().hash(), state3); let mut numbers = BTreeMap::new(); numbers.insert(1, block1.block().hash()); @@ -1267,20 +1263,17 @@ mod tests { #[test] fn test_canonical_in_memory_state_canonical_chain_multiple_blocks() { - let mut blocks = HashMap::default(); - let mut numbers = BTreeMap::new(); let mut parent_hash = B256::random(); let mut block_builder = TestBlockBuilder::default(); + let state = CanonicalInMemoryState::empty(); for i in 1..=3 { let block = block_builder.get_executed_block_with_number(i, parent_hash); let hash = block.block().hash(); - blocks.insert(hash, Arc::new(BlockState::new(block.clone()))); - numbers.insert(i, hash); + state.update_blocks(Some(block), None); parent_hash = hash; } - let state = CanonicalInMemoryState::new(blocks, numbers, None, None); let chain: Vec<_> = state.canonical_chain().collect(); assert_eq!(chain.len(), 3); @@ -1289,31 +1282,27 @@ mod tests { assert_eq!(chain[2].number(), 1); } + // ensures the pending block is not part of the canonical chain #[test] fn test_canonical_in_memory_state_canonical_chain_with_pending_block() { - let mut blocks = HashMap::default(); - let mut numbers = BTreeMap::new(); let mut parent_hash = B256::random(); let mut block_builder = TestBlockBuilder::default(); + let state = CanonicalInMemoryState::empty(); for i in 1..=2 { let block = block_builder.get_executed_block_with_number(i, parent_hash); let hash = block.block().hash(); - blocks.insert(hash, Arc::new(BlockState::new(block.clone()))); - numbers.insert(i, hash); + state.update_blocks(Some(block), None); parent_hash = hash; } let pending_block = block_builder.get_executed_block_with_number(3, parent_hash); - let pending_state = BlockState::new(pending_block); - - let state = CanonicalInMemoryState::new(blocks, numbers, Some(pending_state), None); + state.set_pending_block(pending_block); let chain: Vec<_> = state.canonical_chain().collect(); - assert_eq!(chain.len(), 3); - assert_eq!(chain[0].number(), 3); 
-        assert_eq!(chain[1].number(), 2);
-        assert_eq!(chain[2].number(), 1);
+        assert_eq!(chain.len(), 2);
+        assert_eq!(chain[0].number(), 2);
+        assert_eq!(chain[1].number(), 1);
     }
 
     #[test]
diff --git a/crates/cli/util/src/allocator.rs b/crates/cli/util/src/allocator.rs
index b5974e2245f82..ee13e7c61cb52 100644
--- a/crates/cli/util/src/allocator.rs
+++ b/crates/cli/util/src/allocator.rs
@@ -12,6 +12,7 @@ cfg_if::cfg_if! {
 cfg_if::cfg_if! {
     if #[cfg(feature = "tracy-allocator")] {
         type AllocatorWrapper = tracy_client::ProfiledAllocator<AllocatorInner>;
+        tracy_client::register_demangler!();
         const fn new_allocator_wrapper() -> AllocatorWrapper {
             AllocatorWrapper::new(AllocatorInner {}, 100)
         }
@@ -23,9 +24,6 @@
     }
 }
 
-#[cfg(feature = "tracy-allocator")]
-tracy_client::register_demangler!();
 
 /// Custom allocator.
 pub type Allocator = AllocatorWrapper;
 
diff --git a/crates/engine/local/Cargo.toml b/crates/engine/local/Cargo.toml
index 286b9f836aa4f..f045bb6fda1d0 100644
--- a/crates/engine/local/Cargo.toml
+++ b/crates/engine/local/Cargo.toml
@@ -11,6 +11,7 @@ exclude.workspace = true
 [dependencies]
 # reth
 reth-beacon-consensus.workspace = true
+reth-chain-state.workspace = true
 reth-engine-tree.workspace = true
 reth-node-types.workspace = true
 reth-payload-builder.workspace = true
diff --git a/crates/engine/local/src/service.rs b/crates/engine/local/src/service.rs
index d276dc5c1f8a0..c9794ecfabb0f 100644
--- a/crates/engine/local/src/service.rs
+++ b/crates/engine/local/src/service.rs
@@ -7,8 +7,9 @@
 //! building at a fixed interval.
 
 use crate::miner::MiningMode;
-use alloy_primitives::B256;
+use eyre::eyre;
 use reth_beacon_consensus::EngineNodeTypes;
+use reth_chain_state::{CanonicalInMemoryState, ExecutedBlock, NewCanonicalChain};
 use reth_engine_tree::persistence::PersistenceHandle;
 use reth_payload_builder::PayloadBuilderHandle;
 use reth_payload_primitives::{
@@ -17,12 +18,12 @@
 use reth_provider::ProviderFactory;
 use reth_prune::PrunerWithFactory;
 use reth_stages_api::MetricEventsSender;
-use std::fmt::Formatter;
 use tokio::sync::oneshot;
 use tracing::debug;
 
 /// Provides a local dev service engine that can be used to drive the
 /// chain forward. 
+#[derive(Debug)] pub struct LocalEngineService where N: EngineNodeTypes, @@ -32,30 +33,14 @@ where payload_builder: PayloadBuilderHandle, /// The payload attribute builder for the engine payload_attributes_builder: B, + /// Keep track of the Canonical chain state that isn't persisted on disk yet + canonical_in_memory_state: CanonicalInMemoryState, /// A handle to the persistence layer persistence_handle: PersistenceHandle, - /// The hash of the current head - head: B256, /// The mining mode for the engine mode: MiningMode, } -impl std::fmt::Debug for LocalEngineService -where - N: EngineNodeTypes, - B: PayloadAttributesBuilder::PayloadAttributes>, -{ - fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { - f.debug_struct("LocalEngineService") - .field("payload_builder", &self.payload_builder) - .field("payload_attributes_builder", &self.payload_attributes_builder) - .field("persistence_handle", &self.persistence_handle) - .field("head", &self.head) - .field("mode", &self.mode) - .finish() - } -} - impl LocalEngineService where N: EngineNodeTypes, @@ -67,14 +52,20 @@ where payload_attributes_builder: B, provider: ProviderFactory, pruner: PrunerWithFactory>, + canonical_in_memory_state: CanonicalInMemoryState, sync_metrics_tx: MetricEventsSender, - head: B256, mode: MiningMode, ) -> Self { let persistence_handle = PersistenceHandle::spawn_service(provider, pruner, sync_metrics_tx); - Self { payload_builder, payload_attributes_builder, persistence_handle, head, mode } + Self { + payload_builder, + payload_attributes_builder, + canonical_in_memory_state, + persistence_handle, + mode, + } } /// Spawn the [`LocalEngineService`] on a tokio green thread. The service will poll the payload @@ -86,8 +77,8 @@ where payload_attributes_builder: B, provider: ProviderFactory, pruner: PrunerWithFactory>, + canonical_in_memory_state: CanonicalInMemoryState, sync_metrics_tx: MetricEventsSender, - head: B256, mode: MiningMode, ) { let engine = Self::new( @@ -95,8 +86,8 @@ where payload_attributes_builder, provider, pruner, + canonical_in_memory_state, sync_metrics_tx, - head, mode, ); @@ -112,26 +103,29 @@ where (&mut self.mode).await; // Start a new payload building job - let new_head = self.build_and_save_payload().await; + let executed_block = self.build_and_save_payload().await; - if new_head.is_err() { - debug!(target: "local_engine", err = ?new_head.unwrap_err(), "failed payload building"); + if executed_block.is_err() { + debug!(target: "local_engine", err = ?executed_block.unwrap_err(), "failed payload building"); continue } + let block = executed_block.expect("not error"); - // Update the head - self.head = new_head.expect("not error"); + let res = self.update_canonical_in_memory_state(block); + if res.is_err() { + debug!(target: "local_engine", err = ?res.unwrap_err(), "failed canonical state update"); + } } } /// Builds a payload by initiating a new payload job via the [`PayloadBuilderHandle`], - /// saving the execution outcome to persistence and returning the current head of the - /// chain. - async fn build_and_save_payload(&self) -> eyre::Result { + /// saving the execution outcome to persistence and returning the executed block. 
+ async fn build_and_save_payload(&self) -> eyre::Result { let payload_attributes = self.payload_attributes_builder.build()?; + let parent = self.canonical_in_memory_state.get_canonical_head().hash(); let payload_builder_attributes = ::PayloadBuilderAttributes::try_new( - self.head, + parent, payload_attributes, ) .map_err(|_| eyre::eyre!("failed to fetch payload attributes"))?; @@ -142,22 +136,38 @@ where .await? .await?; - let block = payload.executed_block().map(|block| vec![block]).unwrap_or_default(); + let executed_block = + payload.executed_block().ok_or_else(|| eyre!("missing executed block"))?; let (tx, rx) = oneshot::channel(); - let _ = self.persistence_handle.save_blocks(block, tx); + let _ = self.persistence_handle.save_blocks(vec![executed_block.clone()], tx); // Wait for the persistence_handle to complete - let new_head = rx.await?.ok_or_else(|| eyre::eyre!("missing new head"))?; + let _ = rx.await?.ok_or_else(|| eyre!("missing new head"))?; + + Ok(executed_block) + } + + /// Update the canonical in memory state and send notification for a new canon state to + /// all the listeners. + fn update_canonical_in_memory_state(&self, executed_block: ExecutedBlock) -> eyre::Result<()> { + let chain = NewCanonicalChain::Commit { new: vec![executed_block] }; + let tip = chain.tip().header.clone(); + let notification = chain.to_chain_notification(); - Ok(new_head.hash) + // Update the tracked in-memory state with the new chain + self.canonical_in_memory_state.update_chain(chain); + self.canonical_in_memory_state.set_canonical_head(tip); + + // Sends an event to all active listeners about the new canonical chain + self.canonical_in_memory_state.notify_canon_state(notification); + Ok(()) } } #[cfg(test)] mod tests { use super::*; - use alloy_primitives::B256; use reth_chainspec::MAINNET; use reth_config::PruneConfig; use reth_db::test_utils::{create_test_rw_db, create_test_static_files_dir}; @@ -201,20 +211,20 @@ mod tests { let provider = ProviderFactory::>::new( create_test_rw_db(), MAINNET.clone(), - StaticFileProvider::read_write(static_dir_path).unwrap(), + StaticFileProvider::read_write(static_dir_path)?, ); let pruner = PrunerBuilder::new(PruneConfig::default()) .build_with_provider_factory(provider.clone()); + // Create an empty canonical in memory state + let canonical_in_memory_state = CanonicalInMemoryState::empty(); + // Start the payload builder service let payload_handle = spawn_test_payload_service::(); // Sync metric channel let (sync_metrics_tx, _) = unbounded_channel(); - // Get the attributes for start of block building - let genesis_hash = B256::random(); - // Launch the LocalEngineService in interval mode let period = Duration::from_secs(1); LocalEngineService::spawn_new( @@ -222,13 +232,17 @@ mod tests { TestPayloadAttributesBuilder, provider.clone(), pruner, + canonical_in_memory_state, sync_metrics_tx, - genesis_hash, MiningMode::interval(period), ); + // Check that we have no block for now + let block = provider.block_by_number(0)?; + assert!(block.is_none()); + // Wait 4 intervals - tokio::time::sleep(4 * period).await; + tokio::time::sleep(2 * period).await; // Assert a block has been build let block = provider.block_by_number(0)?; @@ -246,11 +260,14 @@ mod tests { let provider = ProviderFactory::>::new( create_test_rw_db(), MAINNET.clone(), - StaticFileProvider::read_write(static_dir_path).unwrap(), + StaticFileProvider::read_write(static_dir_path)?, ); let pruner = PrunerBuilder::new(PruneConfig::default()) .build_with_provider_factory(provider.clone()); + // 
Create an empty canonical in memory state + let canonical_in_memory_state = CanonicalInMemoryState::empty(); + // Start the payload builder service let payload_handle = spawn_test_payload_service::(); @@ -260,17 +277,14 @@ mod tests { // Sync metric channel let (sync_metrics_tx, _) = unbounded_channel(); - // Get the attributes for start of block building - let genesis_hash = B256::random(); - // Launch the LocalEngineService in instant mode LocalEngineService::spawn_new( payload_handle, TestPayloadAttributesBuilder, provider.clone(), pruner, + canonical_in_memory_state, sync_metrics_tx, - genesis_hash, MiningMode::instant(pool.clone()), ); @@ -295,4 +309,54 @@ mod tests { Ok(()) } + + #[tokio::test] + async fn test_canonical_chain_subscription() -> eyre::Result<()> { + reth_tracing::init_test_tracing(); + + // Start the provider and the pruner + let (_, static_dir_path) = create_test_static_files_dir(); + let provider = ProviderFactory::>::new( + create_test_rw_db(), + MAINNET.clone(), + StaticFileProvider::read_write(static_dir_path)?, + ); + let pruner = PrunerBuilder::new(PruneConfig::default()) + .build_with_provider_factory(provider.clone()); + + // Create an empty canonical in memory state + let canonical_in_memory_state = CanonicalInMemoryState::empty(); + let mut notifications = canonical_in_memory_state.subscribe_canon_state(); + + // Start the payload builder service + let payload_handle = spawn_test_payload_service::(); + + // Start a transaction pool + let pool = testing_pool(); + + // Sync metric channel + let (sync_metrics_tx, _) = unbounded_channel(); + + // Launch the LocalEngineService in instant mode + LocalEngineService::spawn_new( + payload_handle, + TestPayloadAttributesBuilder, + provider.clone(), + pruner, + canonical_in_memory_state, + sync_metrics_tx, + MiningMode::instant(pool.clone()), + ); + + // Add a transaction to the pool + let transaction = MockTransaction::legacy().with_gas_price(10); + pool.add_transaction(Default::default(), transaction).await?; + + // Check a notification is received for block 0 + let res = notifications.recv().await?; + + assert_eq!(res.tip().number, 0); + + Ok(()) + } } diff --git a/crates/evm/execution-errors/src/trie.rs b/crates/evm/execution-errors/src/trie.rs index 306cd6750a9a6..c85819ee74def 100644 --- a/crates/evm/execution-errors/src/trie.rs +++ b/crates/evm/execution-errors/src/trie.rs @@ -101,6 +101,9 @@ pub enum TrieWitnessError { /// Missing target node. #[display("target node missing from proof {_0:?}")] MissingTargetNode(Nibbles), + /// Unexpected empty root. 
+ #[display("unexpected empty root: {_0:?}")] + UnexpectedEmptyRoot(Nibbles), } impl From for ProviderError { diff --git a/crates/exex/exex/Cargo.toml b/crates/exex/exex/Cargo.toml index cbb2214192b4f..f10775e245872 100644 --- a/crates/exex/exex/Cargo.toml +++ b/crates/exex/exex/Cargo.toml @@ -34,6 +34,7 @@ reth-tracing.workspace = true # alloy alloy-primitives.workspace = true +alloy-eips.workspace = true ## async futures.workspace = true @@ -41,8 +42,10 @@ tokio-util.workspace = true tokio.workspace = true ## misc +dashmap.workspace = true eyre.workspace = true metrics.workspace = true +parking_lot.workspace = true serde_json.workspace = true tracing.workspace = true diff --git a/crates/exex/exex/src/event.rs b/crates/exex/exex/src/event.rs index c26c1c5344b20..1215ea2a502a8 100644 --- a/crates/exex/exex/src/event.rs +++ b/crates/exex/exex/src/event.rs @@ -1,4 +1,4 @@ -use alloy_primitives::BlockNumber; +use reth_primitives::BlockNumHash; /// Events emitted by an `ExEx`. #[derive(Debug, Clone, Copy, PartialEq, Eq)] @@ -9,5 +9,5 @@ pub enum ExExEvent { /// meaning that Reth is allowed to prune them. /// /// On reorgs, it's possible for the height to go down. - FinishedHeight(BlockNumber), + FinishedHeight(BlockNumHash), } diff --git a/crates/exex/exex/src/manager.rs b/crates/exex/exex/src/manager.rs index 3230e003b28db..ada9e7a4b315b 100644 --- a/crates/exex/exex/src/manager.rs +++ b/crates/exex/exex/src/manager.rs @@ -1,11 +1,12 @@ -use crate::{wal::Wal, ExExEvent, ExExNotification, ExExNotifications, FinishedExExHeight}; -use alloy_primitives::BlockNumber; +use crate::{ + wal::Wal, ExExEvent, ExExNotification, ExExNotifications, FinishedExExHeight, WalHandle, +}; use futures::StreamExt; use metrics::Gauge; use reth_chain_state::ForkChoiceStream; use reth_chainspec::Head; use reth_metrics::{metrics::Counter, Metrics}; -use reth_primitives::SealedHeader; +use reth_primitives::{BlockNumHash, SealedHeader}; use reth_tracing::tracing::debug; use std::{ collections::VecDeque, @@ -24,6 +25,12 @@ use tokio::sync::{ }; use tokio_util::sync::{PollSendError, PollSender, ReusableBoxFuture}; +/// Default max size of the internal state notifications buffer. +/// +/// 1024 notifications in the buffer is 3.5 hours of mainnet blocks, +/// or 17 minutes of 1-second blocks. +pub const DEFAULT_EXEX_MANAGER_CAPACITY: usize = 1024; + /// Metrics for an `ExEx`. #[derive(Metrics)] #[metrics(scope = "exex")] @@ -51,10 +58,10 @@ pub struct ExExHandle { receiver: UnboundedReceiver, /// The ID of the next notification to send to this `ExEx`. next_notification_id: usize, - /// The finished block number of the `ExEx`. + /// The finished block of the `ExEx`. /// /// If this is `None`, the `ExEx` has not emitted a `FinishedHeight` event. - finished_height: Option, + finished_height: Option, } impl ExExHandle { @@ -67,10 +74,12 @@ impl ExExHandle { node_head: Head, provider: P, executor: E, + wal_handle: WalHandle, ) -> (Self, UnboundedSender, ExExNotifications) { let (notification_tx, notification_rx) = mpsc::channel(1); let (event_tx, event_rx) = mpsc::unbounded_channel(); - let notifications = ExExNotifications::new(node_head, provider, executor, notification_rx); + let notifications = + ExExNotifications::new(node_head, provider, executor, notification_rx, wal_handle); ( Self { @@ -101,11 +110,11 @@ impl ExExHandle { // Skip the chain commit notification if the finished height of the ExEx is // higher than or equal to the tip of the new notification. // I.e., the ExEx has already processed the notification. 
- if finished_height >= new.tip().number { + if finished_height.number >= new.tip().number { debug!( exex_id = %self.id, %notification_id, - %finished_height, + ?finished_height, new_tip = %new.tip().number, "Skipping notification" ); @@ -373,7 +382,7 @@ impl Future for ExExManager { // update watch channel block number let finished_height = self.exex_handles.iter_mut().try_fold(u64::MAX, |curr, exex| { - exex.finished_height.map_or(Err(()), |height| Ok(height.min(curr))) + exex.finished_height.map_or(Err(()), |height| Ok(height.number.min(curr))) }); if let Ok(finished_height) = finished_height { let _ = self.finished_height.send(FinishedExExHeight::Height(finished_height)); @@ -521,77 +530,64 @@ mod tests { #[tokio::test] async fn test_delivers_events() { + let temp_dir = tempfile::tempdir().unwrap(); + let wal = Wal::new(temp_dir.path()).unwrap(); + let (mut exex_handle, event_tx, mut _notification_rx) = - ExExHandle::new("test_exex".to_string(), Head::default(), (), ()); + ExExHandle::new("test_exex".to_string(), Head::default(), (), (), wal.handle()); // Send an event and check that it's delivered correctly - event_tx.send(ExExEvent::FinishedHeight(42)).unwrap(); + let event = ExExEvent::FinishedHeight(BlockNumHash::new(42, B256::random())); + event_tx.send(event).unwrap(); let received_event = exex_handle.receiver.recv().await.unwrap(); - assert_eq!(received_event, ExExEvent::FinishedHeight(42)); + assert_eq!(received_event, event); } #[tokio::test] async fn test_has_exexs() { let temp_dir = tempfile::tempdir().unwrap(); + let wal = Wal::new(temp_dir.path()).unwrap(); + let (exex_handle_1, _, _) = - ExExHandle::new("test_exex_1".to_string(), Head::default(), (), ()); + ExExHandle::new("test_exex_1".to_string(), Head::default(), (), (), wal.handle()); - assert!(!ExExManager::new( - vec![], - 0, - Wal::new(temp_dir.path()).unwrap(), - empty_finalized_header_stream() - ) - .handle - .has_exexs()); + assert!(!ExExManager::new(vec![], 0, wal.clone(), empty_finalized_header_stream()) + .handle + .has_exexs()); - assert!(ExExManager::new( - vec![exex_handle_1], - 0, - Wal::new(temp_dir.path()).unwrap(), - empty_finalized_header_stream() - ) - .handle - .has_exexs()); + assert!(ExExManager::new(vec![exex_handle_1], 0, wal, empty_finalized_header_stream()) + .handle + .has_exexs()); } #[tokio::test] async fn test_has_capacity() { let temp_dir = tempfile::tempdir().unwrap(); + let wal = Wal::new(temp_dir.path()).unwrap(); + let (exex_handle_1, _, _) = - ExExHandle::new("test_exex_1".to_string(), Head::default(), (), ()); + ExExHandle::new("test_exex_1".to_string(), Head::default(), (), (), wal.handle()); - assert!(!ExExManager::new( - vec![], - 0, - Wal::new(temp_dir.path()).unwrap(), - empty_finalized_header_stream() - ) - .handle - .has_capacity()); + assert!(!ExExManager::new(vec![], 0, wal.clone(), empty_finalized_header_stream()) + .handle + .has_capacity()); - assert!(ExExManager::new( - vec![exex_handle_1], - 10, - Wal::new(temp_dir.path()).unwrap(), - empty_finalized_header_stream() - ) - .handle - .has_capacity()); + assert!(ExExManager::new(vec![exex_handle_1], 10, wal, empty_finalized_header_stream()) + .handle + .has_capacity()); } #[test] fn test_push_notification() { let temp_dir = tempfile::tempdir().unwrap(); - let (exex_handle, _, _) = ExExHandle::new("test_exex".to_string(), Head::default(), (), ()); + let wal = Wal::new(temp_dir.path()).unwrap(); + + let (exex_handle, _, _) = + ExExHandle::new("test_exex".to_string(), Head::default(), (), (), wal.handle()); // Create a mock 
ExExManager and add the exex_handle to it - let mut exex_manager = ExExManager::new( - vec![exex_handle], - 10, - Wal::new(temp_dir.path()).unwrap(), - empty_finalized_header_stream(), - ); + let mut exex_manager = + ExExManager::new(vec![exex_handle], 10, wal, empty_finalized_header_stream()); // Define the notification for testing let mut block1 = SealedBlockWithSenders::default(); @@ -634,16 +630,15 @@ mod tests { #[test] fn test_update_capacity() { let temp_dir = tempfile::tempdir().unwrap(); - let (exex_handle, _, _) = ExExHandle::new("test_exex".to_string(), Head::default(), (), ()); + let wal = Wal::new(temp_dir.path()).unwrap(); + + let (exex_handle, _, _) = + ExExHandle::new("test_exex".to_string(), Head::default(), (), (), wal.handle()); // Create a mock ExExManager and add the exex_handle to it let max_capacity = 5; - let mut exex_manager = ExExManager::new( - vec![exex_handle], - max_capacity, - Wal::new(temp_dir.path()).unwrap(), - empty_finalized_header_stream(), - ); + let mut exex_manager = + ExExManager::new(vec![exex_handle], max_capacity, wal, empty_finalized_header_stream()); // Push some notifications to fill part of the buffer let mut block1 = SealedBlockWithSenders::default(); @@ -674,14 +669,17 @@ mod tests { #[tokio::test] async fn test_updates_block_height() { let temp_dir = tempfile::tempdir().unwrap(); + let wal = Wal::new(temp_dir.path()).unwrap(); + let (exex_handle, event_tx, mut _notification_rx) = - ExExHandle::new("test_exex".to_string(), Head::default(), (), ()); + ExExHandle::new("test_exex".to_string(), Head::default(), (), (), wal.handle()); // Check initial block height assert!(exex_handle.finished_height.is_none()); // Update the block height via an event - event_tx.send(ExExEvent::FinishedHeight(42)).unwrap(); + let block = BlockNumHash::new(42, B256::random()); + event_tx.send(ExExEvent::FinishedHeight(block)).unwrap(); // Create a mock ExExManager and add the exex_handle to it let exex_manager = ExExManager::new( @@ -699,7 +697,7 @@ mod tests { // Check that the block height was updated let updated_exex_handle = &pinned_manager.exex_handles[0]; - assert_eq!(updated_exex_handle.finished_height, Some(42)); + assert_eq!(updated_exex_handle.finished_height, Some(block)); // Get the receiver for the finished height let mut receiver = pinned_manager.handle.finished_height(); @@ -717,15 +715,20 @@ mod tests { #[tokio::test] async fn test_updates_block_height_lower() { let temp_dir = tempfile::tempdir().unwrap(); + let wal = Wal::new(temp_dir.path()).unwrap(); + // Create two `ExExHandle` instances let (exex_handle1, event_tx1, _) = - ExExHandle::new("test_exex1".to_string(), Head::default(), (), ()); + ExExHandle::new("test_exex1".to_string(), Head::default(), (), (), wal.handle()); let (exex_handle2, event_tx2, _) = - ExExHandle::new("test_exex2".to_string(), Head::default(), (), ()); + ExExHandle::new("test_exex2".to_string(), Head::default(), (), (), wal.handle()); + + let block1 = BlockNumHash::new(42, B256::random()); + let block2 = BlockNumHash::new(10, B256::random()); // Send events to update the block heights of the two handles, with the second being lower - event_tx1.send(ExExEvent::FinishedHeight(42)).unwrap(); - event_tx2.send(ExExEvent::FinishedHeight(10)).unwrap(); + event_tx1.send(ExExEvent::FinishedHeight(block1)).unwrap(); + event_tx2.send(ExExEvent::FinishedHeight(block2)).unwrap(); let exex_manager = ExExManager::new( vec![exex_handle1, exex_handle2], @@ -756,18 +759,23 @@ mod tests { #[tokio::test] async fn 
test_updates_block_height_greater() { let temp_dir = tempfile::tempdir().unwrap(); + let wal = Wal::new(temp_dir.path()).unwrap(); + // Create two `ExExHandle` instances let (exex_handle1, event_tx1, _) = - ExExHandle::new("test_exex1".to_string(), Head::default(), (), ()); + ExExHandle::new("test_exex1".to_string(), Head::default(), (), (), wal.handle()); let (exex_handle2, event_tx2, _) = - ExExHandle::new("test_exex2".to_string(), Head::default(), (), ()); + ExExHandle::new("test_exex2".to_string(), Head::default(), (), (), wal.handle()); // Assert that the initial block height is `None` for the first `ExExHandle`. assert!(exex_handle1.finished_height.is_none()); + let block1 = BlockNumHash::new(42, B256::random()); + let block2 = BlockNumHash::new(100, B256::random()); + // Send events to update the block heights of the two handles, with the second being higher. - event_tx1.send(ExExEvent::FinishedHeight(42)).unwrap(); - event_tx2.send(ExExEvent::FinishedHeight(100)).unwrap(); + event_tx1.send(ExExEvent::FinishedHeight(block1)).unwrap(); + event_tx2.send(ExExEvent::FinishedHeight(block2)).unwrap(); let exex_manager = ExExManager::new( vec![exex_handle1, exex_handle2], @@ -802,8 +810,10 @@ mod tests { #[tokio::test] async fn test_exex_manager_capacity() { let temp_dir = tempfile::tempdir().unwrap(); + let wal = Wal::new(temp_dir.path()).unwrap(); + let (exex_handle_1, _, _) = - ExExHandle::new("test_exex_1".to_string(), Head::default(), (), ()); + ExExHandle::new("test_exex_1".to_string(), Head::default(), (), (), wal.handle()); // Create an ExExManager with a small max capacity let max_capacity = 2; @@ -846,8 +856,11 @@ mod tests { #[tokio::test] async fn exex_handle_new() { + let temp_dir = tempfile::tempdir().unwrap(); + let wal = Wal::new(temp_dir.path()).unwrap(); + let (mut exex_handle, _, mut notifications) = - ExExHandle::new("test_exex".to_string(), Head::default(), (), ()); + ExExHandle::new("test_exex".to_string(), Head::default(), (), (), wal.handle()); // Check initial state assert_eq!(exex_handle.id, "test_exex"); @@ -889,11 +902,14 @@ mod tests { #[tokio::test] async fn test_notification_if_finished_height_gt_chain_tip() { + let temp_dir = tempfile::tempdir().unwrap(); + let wal = Wal::new(temp_dir.path()).unwrap(); + let (mut exex_handle, _, mut notifications) = - ExExHandle::new("test_exex".to_string(), Head::default(), (), ()); + ExExHandle::new("test_exex".to_string(), Head::default(), (), (), wal.handle()); // Set finished_height to a value higher than the block tip - exex_handle.finished_height = Some(15); + exex_handle.finished_height = Some(BlockNumHash::new(15, B256::random())); let mut block1 = SealedBlockWithSenders::default(); block1.block.header.set_hash(B256::new([0x01; 32])); @@ -931,8 +947,11 @@ mod tests { #[tokio::test] async fn test_sends_chain_reorged_notification() { + let temp_dir = tempfile::tempdir().unwrap(); + let wal = Wal::new(temp_dir.path()).unwrap(); + let (mut exex_handle, _, mut notifications) = - ExExHandle::new("test_exex".to_string(), Head::default(), (), ()); + ExExHandle::new("test_exex".to_string(), Head::default(), (), (), wal.handle()); let notification = ExExNotification::ChainReorged { old: Arc::new(Chain::default()), @@ -941,7 +960,7 @@ mod tests { // Even if the finished height is higher than the tip of the new chain, the reorg // notification should be received - exex_handle.finished_height = Some(u64::MAX); + exex_handle.finished_height = Some(BlockNumHash::new(u64::MAX, B256::random())); let mut cx = 
Context::from_waker(futures::task::noop_waker_ref()); @@ -962,14 +981,17 @@ mod tests { #[tokio::test] async fn test_sends_chain_reverted_notification() { + let temp_dir = tempfile::tempdir().unwrap(); + let wal = Wal::new(temp_dir.path()).unwrap(); + let (mut exex_handle, _, mut notifications) = - ExExHandle::new("test_exex".to_string(), Head::default(), (), ()); + ExExHandle::new("test_exex".to_string(), Head::default(), (), (), wal.handle()); let notification = ExExNotification::ChainReverted { old: Arc::new(Chain::default()) }; // Even if the finished height is higher than the tip of the new chain, the reorg // notification should be received - exex_handle.finished_height = Some(u64::MAX); + exex_handle.finished_height = Some(BlockNumHash::new(u64::MAX, B256::random())); let mut cx = Context::from_waker(futures::task::noop_waker_ref()); @@ -994,6 +1016,7 @@ mod tests { let temp_dir = tempfile::tempdir().unwrap(); let mut wal = Wal::new(temp_dir.path()).unwrap(); + let block = random_block(&mut generators::rng(), 0, Default::default()) .seal_with_senders() .ok_or_eyre("failed to recover senders")?; @@ -1005,7 +1028,8 @@ mod tests { let (tx, rx) = watch::channel(None); let finalized_header_stream = ForkChoiceStream::new(rx); - let (exex_handle, _, _) = ExExHandle::new("test_exex".to_string(), Head::default(), (), ()); + let (exex_handle, _, _) = + ExExHandle::new("test_exex".to_string(), Head::default(), (), (), wal.handle()); let mut exex_manager = std::pin::pin!(ExExManager::new(vec![exex_handle], 1, wal, finalized_header_stream)); diff --git a/crates/exex/exex/src/notifications.rs b/crates/exex/exex/src/notifications.rs index 54d7959dc5e86..369a0586c0c53 100644 --- a/crates/exex/exex/src/notifications.rs +++ b/crates/exex/exex/src/notifications.rs @@ -1,6 +1,4 @@ -use crate::{BackfillJobFactory, ExExNotification, StreamBackfillJob}; -use alloy_primitives::U256; -use eyre::OptionExt; +use crate::{BackfillJobFactory, ExExNotification, StreamBackfillJob, WalHandle}; use futures::{Stream, StreamExt}; use reth_chainspec::Head; use reth_evm::execute::BlockExecutorProvider; @@ -21,6 +19,7 @@ pub struct ExExNotifications { provider: P, executor: E, notifications: Receiver, + wal_handle: WalHandle, } impl Debug for ExExNotifications { @@ -40,8 +39,9 @@ impl ExExNotifications { provider: P, executor: E, notifications: Receiver, + wal_handle: WalHandle, ) -> Self { - Self { node_head, provider, executor, notifications } + Self { node_head, provider, executor, notifications, wal_handle } } /// Receives the next value for this receiver. @@ -113,6 +113,7 @@ where self.provider, self.executor, self.notifications, + self.wal_handle, head, ) } @@ -134,13 +135,16 @@ pub struct ExExNotificationsWithHead { provider: P, executor: E, notifications: Receiver, + wal_handle: WalHandle, exex_head: ExExHead, - pending_sync: bool, + /// If true, then we need to check if the ExEx head is on the canonical chain and if not, + /// revert its head. + pending_check_canonical: bool, + /// If true, then we need to check if the ExEx head is behind the node head and if so, backfill + /// the missing blocks. + pending_check_backfill: bool, /// The backfill job to run before consuming any notifications. backfill_job: Option>, - /// Whether we're currently waiting for the node head to catch up to the same height as the - /// ExEx head. 
- node_head_catchup_in_progress: bool, } impl ExExNotificationsWithHead @@ -154,6 +158,7 @@ where provider: P, executor: E, notifications: Receiver, + wal_handle: WalHandle, exex_head: ExExHead, ) -> Self { Self { @@ -161,91 +166,78 @@ where provider, executor, notifications, + wal_handle, exex_head, - pending_sync: true, + pending_check_canonical: true, + pending_check_backfill: true, backfill_job: None, - node_head_catchup_in_progress: false, } } - /// Compares the node head against the ExEx head, and synchronizes them in case of a mismatch. + /// Checks if the ExEx head is on the canonical chain. + /// + /// If the head block is not found in the database, it means we're not on the canonical chain + /// and we need to revert the notification with the ExEx head block. + fn check_canonical(&mut self) -> eyre::Result> { + if self.provider.header(&self.exex_head.block.hash)?.is_some() { + debug!(target: "exex::notifications", "ExEx head is on the canonical chain"); + return Ok(None) + } + + // If the head block is not found in the database, it means we're not on the canonical + // chain. + + // Get the committed notification for the head block from the WAL. + let Some(notification) = + self.wal_handle.get_committed_notification_by_block_hash(&self.exex_head.block.hash)? + else { + return Err(eyre::eyre!( + "Could not find notification for block hash {:?} in the WAL", + self.exex_head.block.hash + )) + }; + + // Update the head block hash to the parent hash of the first committed block. + let committed_chain = notification.committed_chain().unwrap(); + let new_exex_head = + (committed_chain.first().parent_hash, committed_chain.first().number - 1).into(); + debug!(target: "exex::notifications", old_exex_head = ?self.exex_head.block, new_exex_head = ?new_exex_head, "ExEx head updated"); + self.exex_head.block = new_exex_head; + + // Return an inverted notification. See the documentation for + // `ExExNotification::into_inverted`. + Ok(Some(notification.into_inverted())) + } + + /// Compares the node head against the ExEx head, and backfills if needed. + /// + /// CAUTON: This method assumes that the ExEx head is <= the node head, and that it's on the + /// canonical chain. /// /// Possible situations are: - /// - ExEx is behind the node head (`node_head.number < exex_head.number`). - /// - ExEx is on the canonical chain (`exex_head.hash` is found in the node database). - /// Backfill from the node database. - /// - ExEx is not on the canonical chain (`exex_head.hash` is not found in the node database). - /// Unwind the ExEx to the first block matching between the ExEx and the node, and then - /// bacfkill from the node database. - /// - ExEx is at the same block number (`node_head.number == exex_head.number`). - /// - ExEx is on the canonical chain (`exex_head.hash` is found in the node database). Nothing - /// to do. - /// - ExEx is not on the canonical chain (`exex_head.hash` is not found in the node database). - /// Unwind the ExEx to the first block matching between the ExEx and the node, and then - /// backfill from the node database. - /// - ExEx is ahead of the node head (`node_head.number > exex_head.number`). Wait until the - /// node head catches up to the ExEx head, and then repeat the synchronization process. - fn synchronize(&mut self) -> eyre::Result<()> { + /// - ExEx is behind the node head (`node_head.number < exex_head.number`). Backfill from the + /// node database. + /// - ExEx is at the same block number as the node head (`node_head.number == + /// exex_head.number`). 
+ + /// Compares the node head against the ExEx head, and backfills if needed. + /// + /// CAUTION: This method assumes that the ExEx head is <= the node head, and that it's on the + /// canonical chain. /// /// Possible situations are: - /// - ExEx is behind the node head (`node_head.number < exex_head.number`). - /// - ExEx is on the canonical chain (`exex_head.hash` is found in the node database). - /// Backfill from the node database. - /// - ExEx is not on the canonical chain (`exex_head.hash` is not found in the node database). - /// Unwind the ExEx to the first block matching between the ExEx and the node, and then - /// bacfkill from the node database. - /// - ExEx is at the same block number (`node_head.number == exex_head.number`). - /// - ExEx is on the canonical chain (`exex_head.hash` is found in the node database). Nothing - /// to do. - /// - ExEx is not on the canonical chain (`exex_head.hash` is not found in the node database). - /// Unwind the ExEx to the first block matching between the ExEx and the node, and then - /// backfill from the node database. - /// - ExEx is ahead of the node head (`node_head.number > exex_head.number`). Wait until the - /// node head catches up to the ExEx head, and then repeat the synchronization process. - fn synchronize(&mut self) -> eyre::Result<()> { + /// - ExEx is behind the node head (`exex_head.number < node_head.number`). Backfill from the + /// node database. + /// - ExEx is at the same block number as the node head (`node_head.number == + /// exex_head.number`). Nothing to do. + fn check_backfill(&mut self) -> eyre::Result<()> { debug!(target: "exex::manager", "Synchronizing ExEx head"); let backfill_job_factory = BackfillJobFactory::new(self.executor.clone(), self.provider.clone()); match self.exex_head.block.number.cmp(&self.node_head.number) { std::cmp::Ordering::Less => { - // ExEx is behind the node head - - if let Some(exex_header) = self.provider.header(&self.exex_head.block.hash)? { - // ExEx is on the canonical chain - debug!(target: "exex::manager", "ExEx is behind the node head and on the canonical chain"); - - if exex_header.number != self.exex_head.block.number { - eyre::bail!("ExEx head number does not match the hash") - } - - // ExEx is on the canonical chain, start backfill - let backfill = backfill_job_factory - .backfill(self.exex_head.block.number + 1..=self.node_head.number) - .into_stream(); - self.backfill_job = Some(backfill); - } else { - debug!(target: "exex::manager", "ExEx is behind the node head and not on the canonical chain"); - // ExEx is not on the canonical chain, first unwind it and then backfill - - // TODO(alexey): unwind and backfill - self.backfill_job = None; - } + // ExEx is behind the node head, start backfill + debug!(target: "exex::manager", "ExEx is behind the node head and on the canonical chain, starting backfill"); + let backfill = backfill_job_factory + .backfill(self.exex_head.block.number + 1..=self.node_head.number) + .into_stream(); + self.backfill_job = Some(backfill); } - #[allow(clippy::branches_sharing_code)] std::cmp::Ordering::Equal => { - // ExEx is at the same block height as the node head - - if let Some(exex_header) = self.provider.header(&self.exex_head.block.hash)? { - // ExEx is on the canonical chain - debug!(target: "exex::manager", "ExEx is at the same block height as the node head and on the canonical chain"); - - if exex_header.number != self.exex_head.block.number { - eyre::bail!("ExEx head number does not match the hash") - } - - // ExEx is on the canonical chain and the same as the node head, no need to - // backfill - self.backfill_job = None; - } else { - // ExEx is not on the canonical chain, first unwind it and then backfill - debug!(target: "exex::manager", "ExEx is at the same block height as the node head but not on the canonical chain"); - - // TODO(alexey): unwind and backfill - self.backfill_job = None; - } + debug!(target: "exex::manager", "ExEx is at the node head"); } std::cmp::Ordering::Greater => { - debug!(target: "exex::manager", "ExEx is ahead of the node head"); - - // ExEx is ahead of the node head - - // TODO(alexey): wait until the node head is at the same height as the ExEx head - // and then repeat the process above - self.node_head_catchup_in_progress = true; + return Err(eyre::eyre!("ExEx is ahead of the node head")) } }; @@ -263,9 +255,18 @@ where fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> { let this = self.get_mut(); - if this.pending_sync { - this.synchronize()?; - this.pending_sync = false; + if this.pending_check_canonical { + if let Some(canonical_notification) = this.check_canonical()?
{ + return Poll::Ready(Some(Ok(canonical_notification))) + } + + // ExEx head is on the canonical chain, we no longer need to check it + this.pending_check_canonical = false; + } + + if this.pending_check_backfill { + this.check_backfill()?; + this.pending_check_backfill = false; } if let Some(backfill_job) = &mut this.backfill_job { @@ -279,81 +280,36 @@ where this.backfill_job = None; } - loop { - let Some(notification) = ready!(this.notifications.poll_recv(cx)) else { - return Poll::Ready(None) - }; - - // 1. Either committed or reverted chain from the notification. - // 2. Block number of the tip of the canonical chain: - // - For committed chain, it's the tip block number. - // - For reverted chain, it's the block number preceding the first block in the chain. - let (chain, tip) = notification - .committed_chain() - .map(|chain| (chain.clone(), chain.tip().number)) - .or_else(|| { - notification - .reverted_chain() - .map(|chain| (chain.clone(), chain.first().number - 1)) - }) - .unzip(); - - if this.node_head_catchup_in_progress { - // If we are waiting for the node head to catch up to the same height as the ExEx - // head, then we need to check if the ExEx is on the canonical chain. - - // Query the chain from the new notification for the ExEx head block number. - let exex_head_block = chain - .as_ref() - .and_then(|chain| chain.blocks().get(&this.exex_head.block.number)); - - // Compare the hash of the block from the new notification to the ExEx head - // hash. - if let Some((block, tip)) = exex_head_block.zip(tip) { - if block.hash() == this.exex_head.block.hash { - // ExEx is on the canonical chain, proceed with the notification - this.node_head_catchup_in_progress = false; - } else { - // ExEx is not on the canonical chain, synchronize - let tip = - this.provider.sealed_header(tip)?.ok_or_eyre("node head not found")?; - this.node_head = Head::new( - tip.number, - tip.hash(), - tip.difficulty, - U256::MAX, - tip.timestamp, - ); - this.synchronize()?; - } - } - } + let Some(notification) = ready!(this.notifications.poll_recv(cx)) else { + return Poll::Ready(None) + }; - if notification - .committed_chain() - .or_else(|| notification.reverted_chain()) - .map_or(false, |chain| chain.first().number > this.exex_head.block.number) - { - return Poll::Ready(Some(Ok(notification))) - } + if let Some(committed_chain) = notification.committed_chain() { + this.exex_head.block = committed_chain.tip().num_hash(); + } else if let Some(reverted_chain) = notification.reverted_chain() { + let first_block = reverted_chain.first(); + this.exex_head.block = (first_block.parent_hash, first_block.number - 1).into(); } + + Poll::Ready(Some(Ok(notification))) } } #[cfg(test)] mod tests { - use std::future::poll_fn; + use crate::Wal; use super::*; use alloy_consensus::Header; + use alloy_eips::BlockNumHash; use eyre::OptionExt; use futures::StreamExt; use reth_db_common::init::init_genesis; use reth_evm_ethereum::execute::EthExecutorProvider; - use reth_primitives::{Block, BlockNumHash}; + use reth_primitives::Block; use reth_provider::{ providers::BlockchainProvider2, test_utils::create_test_provider_factory, BlockWriter, - Chain, + Chain, DatabaseProviderFactory, }; use reth_testing_utils::generators::{self, random_block, BlockParams}; use tokio::sync::mpsc; @@ -362,6 +318,9 @@ mod tests { async fn exex_notifications_behind_head_canonical() -> eyre::Result<()> { let mut rng = generators::rng(); + let temp_dir = tempfile::tempdir().unwrap(); + let wal = Wal::new(temp_dir.path()).unwrap(); + let 
provider_factory = create_test_provider_factory(); let genesis_hash = init_genesis(&provider_factory)?; let genesis_block = provider_factory @@ -412,6 +371,7 @@ mod tests { provider, EthExecutorProvider::mainnet(), notifications_rx, + wal.handle(), ) .with_head(exex_head); @@ -437,14 +397,11 @@ mod tests { Ok(()) } - #[ignore] - #[tokio::test] - async fn exex_notifications_behind_head_non_canonical() -> eyre::Result<()> { - Ok(()) - } - #[tokio::test] async fn exex_notifications_same_head_canonical() -> eyre::Result<()> { + let temp_dir = tempfile::tempdir().unwrap(); + let wal = Wal::new(temp_dir.path()).unwrap(); + let provider_factory = create_test_provider_factory(); let genesis_hash = init_genesis(&provider_factory)?; let genesis_block = provider_factory @@ -485,6 +442,7 @@ mod tests { provider, EthExecutorProvider::mainnet(), notifications_rx, + wal.handle(), ) .with_head(exex_head); @@ -494,16 +452,13 @@ mod tests { Ok(()) } - #[ignore] #[tokio::test] async fn exex_notifications_same_head_non_canonical() -> eyre::Result<()> { - Ok(()) - } - - #[tokio::test] - async fn test_notifications_ahead_of_head() -> eyre::Result<()> { let mut rng = generators::rng(); + let temp_dir = tempfile::tempdir().unwrap(); + let mut wal = Wal::new(temp_dir.path()).unwrap(); + let provider_factory = create_test_provider_factory(); let genesis_hash = init_genesis(&provider_factory)?; let genesis_block = provider_factory @@ -512,53 +467,135 @@ mod tests { let provider = BlockchainProvider2::new(provider_factory)?; + let node_head_block = random_block( + &mut rng, + genesis_block.number + 1, + BlockParams { parent: Some(genesis_hash), tx_count: Some(0), ..Default::default() }, + ) + .seal_with_senders() + .ok_or_eyre("failed to recover senders")?; + let node_head = Head { + number: node_head_block.number, + hash: node_head_block.hash(), + ..Default::default() + }; + let provider_rw = provider.database_provider_rw()?; + provider_rw.insert_block(node_head_block)?; + provider_rw.commit()?; + let node_head_notification = ExExNotification::ChainCommitted { + new: Arc::new( + BackfillJobFactory::new(EthExecutorProvider::mainnet(), provider.clone()) + .backfill(node_head.number..=node_head.number) + .next() + .ok_or_else(|| eyre::eyre!("failed to backfill"))??, + ), + }; + let exex_head_block = random_block( &mut rng, genesis_block.number + 1, BlockParams { parent: Some(genesis_hash), tx_count: Some(0), ..Default::default() }, ); + let exex_head = ExExHead { block: exex_head_block.num_hash() }; + let exex_head_notification = ExExNotification::ChainCommitted { + new: Arc::new(Chain::new( + vec![exex_head_block + .clone() + .seal_with_senders() + .ok_or_eyre("failed to recover senders")?], + Default::default(), + None, + )), + }; + wal.commit(&exex_head_notification)?; - let node_head = - Head { number: genesis_block.number, hash: genesis_hash, ..Default::default() }; - let exex_head = ExExHead { - block: BlockNumHash { number: exex_head_block.number, hash: exex_head_block.hash() }, + let new_notification = ExExNotification::ChainCommitted { + new: Arc::new(Chain::new( + vec![random_block( + &mut rng, + node_head.number + 1, + BlockParams { parent: Some(node_head.hash), ..Default::default() }, + ) + .seal_with_senders() + .ok_or_eyre("failed to recover senders")?], + Default::default(), + None, + )), }; let (notifications_tx, notifications_rx) = mpsc::channel(1); - notifications_tx - .send(ExExNotification::ChainCommitted { - new: Arc::new(Chain::new( - vec![exex_head_block - .clone() - .seal_with_senders() - 
.ok_or_eyre("failed to recover senders")?], - Default::default(), - None, - )), - }) - .await?; + notifications_tx.send(new_notification.clone()).await?; let mut notifications = ExExNotifications::new( node_head, provider, EthExecutorProvider::mainnet(), notifications_rx, + wal.handle(), ) .with_head(exex_head); - // First notification is skipped because the node is catching up with the ExEx - let new_notification = poll_fn(|cx| Poll::Ready(notifications.poll_next_unpin(cx))).await; - assert!(new_notification.is_pending()); + // First notification is the revert of the ExEx head block to get back to the canonical + // chain + assert_eq!( + notifications.next().await.transpose()?, + Some(exex_head_notification.into_inverted()) + ); + // Second notification is the backfilled block from the canonical chain to get back to the + // canonical tip + assert_eq!(notifications.next().await.transpose()?, Some(node_head_notification)); + // Third notification is the actual notification that we sent before + assert_eq!(notifications.next().await.transpose()?, Some(new_notification)); + + Ok(()) + } - // Imitate the node catching up with the ExEx by sending a notification for the missing - // block - let notification = ExExNotification::ChainCommitted { + #[tokio::test] + async fn test_notifications_ahead_of_head() -> eyre::Result<()> { + reth_tracing::init_test_tracing(); + let mut rng = generators::rng(); + + let temp_dir = tempfile::tempdir().unwrap(); + let mut wal = Wal::new(temp_dir.path()).unwrap(); + + let provider_factory = create_test_provider_factory(); + let genesis_hash = init_genesis(&provider_factory)?; + let genesis_block = provider_factory + .block(genesis_hash.into())? + .ok_or_else(|| eyre::eyre!("genesis block not found"))?; + + let provider = BlockchainProvider2::new(provider_factory)?; + + let exex_head_block = random_block( + &mut rng, + genesis_block.number + 1, + BlockParams { parent: Some(genesis_hash), tx_count: Some(0), ..Default::default() }, + ); + let exex_head_notification = ExExNotification::ChainCommitted { + new: Arc::new(Chain::new( + vec![exex_head_block + .clone() + .seal_with_senders() + .ok_or_eyre("failed to recover senders")?], + Default::default(), + None, + )), + }; + wal.commit(&exex_head_notification)?; + + let node_head = + Head { number: genesis_block.number, hash: genesis_hash, ..Default::default() }; + let exex_head = ExExHead { + block: BlockNumHash { number: exex_head_block.number, hash: exex_head_block.hash() }, + }; + + let new_notification = ExExNotification::ChainCommitted { new: Arc::new(Chain::new( vec![random_block( &mut rng, - exex_head_block.number + 1, - BlockParams { parent: Some(exex_head_block.hash()), ..Default::default() }, + genesis_block.number + 1, + BlockParams { parent: Some(genesis_hash), ..Default::default() }, ) .seal_with_senders() .ok_or_eyre("failed to recover senders")?], @@ -566,10 +603,29 @@ mod tests { None, )), }; - notifications_tx.send(notification.clone()).await?; - // Second notification is received because the node caught up with the ExEx - assert_eq!(notifications.next().await.transpose()?, Some(notification)); + let (notifications_tx, notifications_rx) = mpsc::channel(1); + + notifications_tx.send(new_notification.clone()).await?; + + let mut notifications = ExExNotifications::new( + node_head, + provider, + EthExecutorProvider::mainnet(), + notifications_rx, + wal.handle(), + ) + .with_head(exex_head); + + // First notification is the revert of the ExEx head block to get back to the canonical + // chain + 
assert_eq!( + notifications.next().await.transpose()?, + Some(exex_head_notification.into_inverted()) + ); + + // Second notification is the actual notification that we sent before + assert_eq!(notifications.next().await.transpose()?, Some(new_notification)); Ok(()) } diff --git a/crates/exex/exex/src/wal/cache.rs b/crates/exex/exex/src/wal/cache.rs index 25719d11bf938..cef27369eb63c 100644 --- a/crates/exex/exex/src/wal/cache.rs +++ b/crates/exex/exex/src/wal/cache.rs @@ -1,61 +1,79 @@ use std::collections::{BTreeMap, VecDeque}; +use alloy_eips::BlockNumHash; +use alloy_primitives::B256; +use dashmap::DashMap; +use parking_lot::RwLock; use reth_exex_types::ExExNotification; -use reth_primitives::BlockNumHash; -/// The block cache of the WAL. Acts as a mapping of `File ID -> List of Blocks`. -/// -/// For each notification written to the WAL, there will be an entry per block written to -/// the cache with the same file ID. I.e. for each notification, there may be multiple blocks in the -/// cache. +/// The block cache of the WAL. /// /// This cache is needed to avoid walking the WAL directory every time we want to find a -/// notification corresponding to a block. +/// notification corresponding to a block or a block corresponding to a hash. #[derive(Debug)] -pub struct BlockCache(BTreeMap<u64, VecDeque<CachedBlock>>); +pub struct BlockCache { + /// A mapping of `File ID -> List of Blocks`. + /// + /// For each notification written to the WAL, there will be an entry per block written to + /// the cache with the same file ID. I.e. for each notification, there may be multiple blocks + /// in the cache. + files: RwLock<BTreeMap<u64, VecDeque<CachedBlock>>>, + /// A mapping of committed blocks `Block Hash -> Block`. + /// + /// For each [`ExExNotification::ChainCommitted`] notification, there will be an entry per + /// block. + committed_blocks: DashMap<B256, (u64, CachedBlock)>, +} impl BlockCache { /// Creates a new instance of [`BlockCache`]. - pub(super) const fn new() -> Self { - Self(BTreeMap::new()) + pub(super) fn new() -> Self { + Self { files: RwLock::new(BTreeMap::new()), committed_blocks: DashMap::new() } } /// Returns `true` if the cache is empty. pub(super) fn is_empty(&self) -> bool { - self.0.is_empty() + self.files.read().is_empty() } /// Returns a front-to-back iterator. pub(super) fn iter(&self) -> impl Iterator<Item = (u64, CachedBlock)> + '_ { - self.0.iter().flat_map(|(k, v)| v.iter().map(move |b| (*k, *b))) + self.files + .read() + .iter() + .flat_map(|(k, v)| v.iter().map(move |b| (*k, *b))) + .collect::<Vec<_>>() + .into_iter() } /// Provides a reference to the first block from the cache, or `None` if the cache is /// empty. pub(super) fn front(&self) -> Option<(u64, CachedBlock)> { - self.0.first_key_value().and_then(|(k, v)| v.front().map(|b| (*k, *b))) + self.files.read().first_key_value().and_then(|(k, v)| v.front().map(|b| (*k, *b))) } /// Provides a reference to the last block from the cache, or `None` if the cache is /// empty. pub(super) fn back(&self) -> Option<(u64, CachedBlock)> { - self.0.last_key_value().and_then(|(k, v)| v.back().map(|b| (*k, *b))) + self.files.read().last_key_value().and_then(|(k, v)| v.back().map(|b| (*k, *b))) } /// Removes the notification with the given file ID. - pub(super) fn remove_notification(&mut self, key: u64) -> Option<VecDeque<CachedBlock>> { - self.0.remove(&key) + pub(super) fn remove_notification(&self, key: u64) -> Option<VecDeque<CachedBlock>> { + self.files.write().remove(&key) } /// Pops the first block from the cache. If it resulted in the whole file entry being empty, /// it will also remove the file entry.
- pub(super) fn pop_front(&mut self) -> Option<(u64, CachedBlock)> { - let first_entry = self.0.first_entry()?; + pub(super) fn pop_front(&self) -> Option<(u64, CachedBlock)> { + let mut files = self.files.write(); + + let first_entry = files.first_entry()?; let key = *first_entry.key(); let blocks = first_entry.into_mut(); let first_block = blocks.pop_front().unwrap(); if blocks.is_empty() { - self.0.remove(&key); + files.remove(&key); } Some((key, first_block)) @@ -63,55 +81,58 @@ impl BlockCache { /// Pops the last block from the cache. If it resulted in the whole file entry being empty, /// it will also remove the file entry. - pub(super) fn pop_back(&mut self) -> Option<(u64, CachedBlock)> { - let last_entry = self.0.last_entry()?; + pub(super) fn pop_back(&self) -> Option<(u64, CachedBlock)> { + let mut files = self.files.write(); + + let last_entry = files.last_entry()?; let key = *last_entry.key(); let blocks = last_entry.into_mut(); let last_block = blocks.pop_back().unwrap(); if blocks.is_empty() { - self.0.remove(&key); + files.remove(&key); } Some((key, last_block)) } - /// Appends a block to the back of the specified file entry. - pub(super) fn insert(&mut self, file_id: u64, block: CachedBlock) { - self.0.entry(file_id).or_default().push_back(block); + /// Returns the file ID for the notification containing the given committed block hash, if it + /// exists. + pub(super) fn get_file_id_by_committed_block_hash(&self, block_hash: &B256) -> Option<u64> { + self.committed_blocks.get(block_hash).map(|entry| entry.0) } /// Inserts the blocks from the notification into the cache with the given file ID. /// /// First, inserts the reverted blocks (if any), then the committed blocks (if any). pub(super) fn insert_notification_blocks_with_file_id( - &mut self, + &self, file_id: u64, notification: &ExExNotification, ) { + let mut files = self.files.write(); + let reverted_chain = notification.reverted_chain(); let committed_chain = notification.committed_chain(); if let Some(reverted_chain) = reverted_chain { for block in reverted_chain.blocks().values() { - self.insert( - file_id, - CachedBlock { - action: CachedBlockAction::Revert, - block: (block.number, block.hash()).into(), - }, - ); + files.entry(file_id).or_default().push_back(CachedBlock { + action: CachedBlockAction::Revert, + block: (block.number, block.hash()).into(), + parent_hash: block.parent_hash, + }); } } if let Some(committed_chain) = committed_chain { for block in committed_chain.blocks().values() { - self.insert( - file_id, - CachedBlock { - action: CachedBlockAction::Commit, - block: (block.number, block.hash()).into(), - }, - ); + let cached_block = CachedBlock { + action: CachedBlockAction::Commit, + block: (block.number, block.hash()).into(), + parent_hash: block.parent_hash, + }; + files.entry(file_id).or_default().push_back(cached_block); + self.committed_blocks.insert(block.hash(), (file_id, cached_block)); } } } @@ -122,6 +143,8 @@ pub(super) struct CachedBlock { pub(super) action: CachedBlockAction, /// The block number and hash of the block. pub(super) block: BlockNumHash, + /// The hash of the parent block. + pub(super) parent_hash: B256, }
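The split into an ordered `files` map behind an `RwLock` and a `committed_blocks` `DashMap` means hash lookups from shared `WalHandle`s never contend with the ordered walks used by `finalize`. A minimal sketch of that read path, assuming the same `dashmap` crate as this diff (`[u8; 32]` stands in for `B256`):

```rust
use dashmap::DashMap;

// Minimal sketch: the committed-block index maps a block hash to a
// (file ID, block number) pair, so a point lookup never touches the
// ordered `files` map or its RwLock.
type Hash = [u8; 32];

fn main() {
    let committed_blocks: DashMap<Hash, (u64, u64)> = DashMap::new();

    let hash = [0xab; 32];
    committed_blocks.insert(hash, (7, 100));

    // Mirrors BlockCache::get_file_id_by_committed_block_hash: copy out the
    // file ID and drop the shard guard immediately.
    let file_id = committed_blocks.get(&hash).map(|entry| entry.0);
    assert_eq!(file_id, Some(7));
}
```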
#[derive(Debug, Clone, Copy, PartialEq, Eq)] diff --git a/crates/exex/exex/src/wal/mod.rs b/crates/exex/exex/src/wal/mod.rs index 0b699883ead32..d7aea3aafdfaa 100644 --- a/crates/exex/exex/src/wal/mod.rs +++ b/crates/exex/exex/src/wal/mod.rs @@ -3,39 +3,76 @@ mod cache; pub use cache::BlockCache; mod storage; +use eyre::OptionExt; pub use storage::Storage; -use std::path::Path; +use std::{path::Path, sync::Arc}; +use alloy_eips::BlockNumHash; use reth_exex_types::ExExNotification; -use reth_primitives::BlockNumHash; +use reth_primitives::B256; use reth_tracing::tracing::{debug, instrument}; /// WAL is a write-ahead log (WAL) that stores the notifications sent to ExExes. /// /// WAL is backed by a directory of binary files represented by [`Storage`] and a block cache /// represented by [`BlockCache`]. The role of the block cache is to avoid walking the WAL directory -/// and decoding notifications every time we want to rollback/finalize the WAL. +/// and decoding notifications every time we want to iterate or finalize the WAL. /// /// The expected mode of operation is as follows: /// 1. On every new canonical chain notification, call [`Wal::commit`]. -/// 2. When ExEx is on a wrong fork, rollback the WAL using [`Wal::rollback`]. The caller is -/// expected to create reverts from the removed notifications and backfill the blocks between the -/// returned block and the given rollback block. After that, commit new notifications as usual -/// with [`Wal::commit`]. -/// 3. When the chain is finalized, call [`Wal::finalize`] to prevent the infinite growth of the +/// 2. When the chain is finalized, call [`Wal::finalize`] to prevent the infinite growth of the /// WAL. -#[derive(Debug)] +#[derive(Debug, Clone)] pub struct Wal { + inner: Arc<WalInner>, +} + +impl Wal { + /// Creates a new instance of [`Wal`]. + pub fn new(directory: impl AsRef<Path>) -> eyre::Result<Self> { + Ok(Self { inner: Arc::new(WalInner::new(directory)?) }) + } + + /// Returns a read-only handle to the WAL. + pub fn handle(&self) -> WalHandle { + WalHandle { wal: self.inner.clone() } + } + + /// Commits the notification to WAL. + pub fn commit(&mut self, notification: &ExExNotification) -> eyre::Result<()> { + self.inner.commit(notification) + } + + /// Finalizes the WAL to the given block, inclusive. + /// + /// 1. Finds a notification with first unfinalized block (first notification containing a + /// committed block higher than `to_block`). + /// 2. Removes the notifications from the beginning of WAL until the found notification. If this + /// notification includes both finalized and non-finalized blocks, it will not be removed. + pub fn finalize(&self, to_block: BlockNumHash) -> eyre::Result<()> { + self.inner.finalize(to_block) + } + + /// Returns an iterator over all notifications in the WAL. + pub fn iter_notifications( + &self, + ) -> eyre::Result<Box<dyn Iterator<Item = eyre::Result<ExExNotification>> + '_>> { + self.inner.iter_notifications() + } +} + +/// Inner type for the WAL. +#[derive(Debug)] +struct WalInner { /// The underlying WAL storage backed by a file. storage: Storage, /// WAL block cache. See [`cache::BlockCache`] docs for more details. block_cache: BlockCache, } -impl Wal { - /// Creates a new instance of [`Wal`]. - pub fn new(directory: impl AsRef<Path>) -> eyre::Result<Self> { +impl WalInner { + fn new(directory: impl AsRef<Path>) -> eyre::Result<Self> { let mut wal = Self { storage: Storage::new(directory)?, block_cache: BlockCache::new() }; wal.fill_block_cache()?; Ok(wal) } @@ -66,12 +103,11 @@ impl Wal { Ok(()) }
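Taken together, the intended call pattern from the docs above is small: one writer commits canonical notifications, read-only handles serve point lookups, and `finalize` trims the log. A hedged usage sketch against the API as added here (unverified against the full crate; `eyre` assumed for error handling, and `Wal`/`WalHandle` assumed to be re-exported from `reth-exex` as the other files in this diff use them):

```rust
use alloy_eips::BlockNumHash;
use alloy_primitives::B256;
use reth_exex::{ExExNotification, Wal};

// Lifecycle sketch using only the methods added in this diff; the directory,
// notifications, and block values are supplied by the caller.
fn wal_lifecycle(
    dir: &std::path::Path,
    canonical: &[ExExNotification],
    finalized: BlockNumHash,
    some_block_hash: B256,
) -> eyre::Result<()> {
    let mut wal = Wal::new(dir)?;

    // 1. Commit every new canonical chain notification.
    for notification in canonical {
        wal.commit(notification)?;
    }

    // Cheap, shareable read view: this is what ExExHandle and
    // ExExNotificationsWithHead receive.
    let handle = wal.handle();
    // Point lookup served by the committed-blocks index, as in check_canonical.
    let _found: Option<ExExNotification> =
        handle.get_committed_notification_by_block_hash(&some_block_hash)?;

    // 2. Once the chain is finalized, trim the log up to that block.
    wal.finalize(finalized)?;
    Ok(())
}
```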
- /// Commits the notification to WAL. #[instrument(target = "exex::wal", skip_all, fields( reverted_block_range = ?notification.reverted_chain().as_ref().map(|chain| chain.range()), committed_block_range = ?notification.committed_chain().as_ref().map(|chain| chain.range()) ))] - pub fn commit(&mut self, notification: &ExExNotification) -> eyre::Result<()> { + fn commit(&self, notification: &ExExNotification) -> eyre::Result<()> { let file_id = self.block_cache.back().map_or(0, |block| block.0 + 1); self.storage.write_notification(file_id, notification)?; @@ -81,79 +117,6 @@ Ok(()) } - /// Rollbacks the WAL to the given block, inclusive. - /// - /// 1. Walks the WAL from the end and searches for the first notification where committed chain - /// contains a block with the same number and hash as `to_block`. - /// 2. If the notification is found, truncates the WAL. It means that if the found notification - /// contains both given block and blocks before it, the whole notification will be truncated. - /// - /// # Returns - /// - /// 1. The block number and hash of the lowest removed block. - /// 2. The notifications that were removed. - #[instrument(target = "exex::wal", skip(self))] - pub fn rollback( - &mut self, - to_block: BlockNumHash, - ) -> eyre::Result<Option<(BlockNumHash, Vec<ExExNotification>)>> { - // First, pop items from the back of the cache until we find the notification with the - // specified block. When found, save the file ID of that notification. - let mut remove_from_file_id = None; - let mut remove_to_file_id = None; - let mut lowest_removed_block = None; - while let Some((file_id, block)) = self.block_cache.pop_back() { - debug!(?file_id, ?block, "Popped back block from the block cache"); - if block.action.is_commit() && block.block.number == to_block.number { - debug!( - ?file_id, - ?block, - ?remove_from_file_id, - ?lowest_removed_block, - "Found the requested block" - ); - - if block.block.hash != to_block.hash { - eyre::bail!("block hash mismatch in WAL") - } - - remove_from_file_id = Some(file_id); - - let notification = self.storage.read_notification(file_id)?; - lowest_removed_block = notification - .committed_chain() - .as_ref() - .map(|chain| chain.first()) - .map(|block| (block.number, block.hash()).into()); - - break - } - - remove_from_file_id = Some(file_id); - remove_to_file_id.get_or_insert(file_id); - } - - // If the specified block is still not found, we can't do anything and just return. The - // cache was empty. - let Some((remove_from_file_id, remove_to_file_id)) = - remove_from_file_id.zip(remove_to_file_id) - else { - debug!("No blocks were rolled back"); - return Ok(None) - }; - - // Remove the rest of the block cache entries for the file ID that we found. - self.block_cache.remove_notification(remove_from_file_id); - debug!(?remove_from_file_id, "Block cache was rolled back"); - - // Remove notifications from the storage. - let removed_notifications = - self.storage.take_notifications(remove_from_file_id..=remove_to_file_id)?; - debug!(removed_notifications = ?removed_notifications.len(), "Storage was rolled back"); - - Ok(Some((lowest_removed_block.expect("qed"), removed_notifications))) - } - /// Finalizes the WAL to the given block, inclusive. /// /// 1. Finds a notification with first unfinalized block (first notification containing a /// committed block higher than `to_block`). /// 2. Removes the notifications from the beginning of WAL until the found notification. If this /// notification includes both finalized and non-finalized blocks, it will not be removed.
#[instrument(target = "exex::wal", skip(self))] - pub fn finalize(&mut self, to_block: BlockNumHash) -> eyre::Result<()> { + fn finalize(&self, to_block: BlockNumHash) -> eyre::Result<()> { // First, walk cache to find the file ID of the notification with the finalized block and // save the file ID with the first unfinalized block. Do not remove any notifications // yet. @@ -174,7 +137,10 @@ block.block.number == to_block.number && block.block.hash == to_block.hash { - let notification = self.storage.read_notification(file_id)?; + let notification = self + .storage + .read_notification(file_id)? + .ok_or_eyre("notification not found")?; if notification.committed_chain().unwrap().blocks().len() == 1 { unfinalized_from_file_id = Some( block_cache.peek().map(|(file_id, _)| *file_id).unwrap_or(u64::MAX), ); @@ -229,7 +195,7 @@ } /// Returns an iterator over all notifications in the WAL. - pub(crate) fn iter_notifications( + fn iter_notifications( &self, ) -> eyre::Result<Box<dyn Iterator<Item = eyre::Result<ExExNotification>> + '_>> { let Some(range) = self.storage.files_range()? else { @@ -240,6 +206,27 @@ } } +/// A read-only handle to the WAL that can be shared. +#[derive(Debug)] +pub struct WalHandle { + wal: Arc<WalInner>, +} + +impl WalHandle { + /// Returns the notification for the given committed block hash if it exists. + pub fn get_committed_notification_by_block_hash( + &self, + block_hash: &B256, + ) -> eyre::Result<Option<ExExNotification>> { + let Some(file_id) = self.wal.block_cache.get_file_id_by_committed_block_hash(block_hash) + else { + return Ok(None) + }; + + self.wal.storage.read_notification(file_id) + } +} + #[cfg(test)] mod tests { use std::sync::Arc; @@ -257,9 +244,10 @@ }; fn read_notifications(wal: &Wal) -> eyre::Result<Vec<ExExNotification>> { - let Some(files_range) = wal.storage.files_range()? else { return Ok(Vec::new()) }; + let Some(files_range) = wal.inner.storage.files_range()?
else { return Ok(Vec::new()) }; - wal.storage + wal.inner + .storage .iter_notifications(files_range) .map(|entry| Ok(entry?.1)) .collect::<eyre::Result<_>>() @@ -274,7 +262,7 @@ // Create an instance of the WAL in a temporary directory let temp_dir = tempfile::tempdir()?; let mut wal = Wal::new(&temp_dir)?; - assert!(wal.block_cache.is_empty()); + assert!(wal.inner.block_cache.is_empty()); // Create 4 canonical blocks and one reorged block with number 2 let blocks = random_block_range(&mut rng, 0..=3, BlockRangeParams::default()) . @@ -339,6 +327,7 @@ CachedBlock { action: CachedBlockAction::Commit, block: (blocks[0].number, blocks[0].hash()).into(), + parent_hash: blocks[0].parent_hash, }, ), ( @@ -346,11 +335,15 @@ CachedBlock { action: CachedBlockAction::Commit, block: (blocks[1].number, blocks[1].hash()).into(), + parent_hash: blocks[1].parent_hash, }, ), ]; wal.commit(&committed_notification_1)?; - assert_eq!(wal.block_cache.iter().collect::<Vec<_>>(), committed_notification_1_cache); + assert_eq!( + wal.inner.block_cache.iter().collect::<Vec<_>>(), + committed_notification_1_cache + ); assert_eq!(read_notifications(&wal)?, vec![committed_notification_1.clone()]); // Second notification (revert block 1) @@ -361,43 +354,11 @@ CachedBlock { action: CachedBlockAction::Revert, block: (blocks[1].number, blocks[1].hash()).into(), + parent_hash: blocks[1].parent_hash, }, )]; assert_eq!( - wal.block_cache.iter().collect::<Vec<_>>(), - [committed_notification_1_cache.clone(), reverted_notification_cache.clone()].concat() - ); - assert_eq!( - read_notifications(&wal)?, - vec![committed_notification_1.clone(), reverted_notification.clone()] - ); - - // Now, rollback to block 1 and verify that both the block cache and the storage are - // empty. We expect the rollback to delete the first notification (commit block 0, 1), - // because we can't delete blocks partly from the notification, and also the second - // notification (revert block 1). Additionally, check that the block that the rolled - // back to is the block with number 0.
- let rollback_result = wal.rollback((blocks[1].number, blocks[1].hash()).into())?; - assert_eq!(wal.block_cache.iter().collect::<Vec<_>>(), vec![]); - assert_eq!(read_notifications(&wal)?, vec![]); - assert_eq!( - rollback_result, - Some(( - (blocks[0].number, blocks[0].hash()).into(), - vec![committed_notification_1.clone(), reverted_notification.clone()] - )) - ); - - // Commit notifications 1 and 2 again - wal.commit(&committed_notification_1)?; - assert_eq!( - wal.block_cache.iter().collect::<Vec<_>>(), - [committed_notification_1_cache.clone()].concat() - ); - assert_eq!(read_notifications(&wal)?, vec![committed_notification_1.clone()]); - wal.commit(&reverted_notification)?; - assert_eq!( - wal.block_cache.iter().collect::<Vec<_>>(), + wal.inner.block_cache.iter().collect::<Vec<_>>(), [committed_notification_1_cache.clone(), reverted_notification_cache.clone()].concat() ); assert_eq!( @@ -414,6 +375,7 @@ CachedBlock { action: CachedBlockAction::Commit, block: (block_1_reorged.number, block_1_reorged.hash()).into(), + parent_hash: block_1_reorged.parent_hash, }, ), ( @@ -421,11 +383,12 @@ CachedBlock { action: CachedBlockAction::Commit, block: (blocks[2].number, blocks[2].hash()).into(), + parent_hash: blocks[2].parent_hash, }, ), ]; assert_eq!( - wal.block_cache.iter().collect::<Vec<_>>(), + wal.inner.block_cache.iter().collect::<Vec<_>>(), [ committed_notification_1_cache.clone(), reverted_notification_cache.clone(), @@ -451,6 +414,7 @@ CachedBlock { action: CachedBlockAction::Revert, block: (blocks[2].number, blocks[2].hash()).into(), + parent_hash: blocks[2].parent_hash, }, ), ( @@ -458,6 +422,7 @@ CachedBlock { action: CachedBlockAction::Commit, block: (block_2_reorged.number, block_2_reorged.hash()).into(), + parent_hash: block_2_reorged.parent_hash, }, ), ( @@ -465,11 +430,12 @@ CachedBlock { action: CachedBlockAction::Commit, block: (blocks[3].number, blocks[3].hash()).into(), + parent_hash: blocks[3].parent_hash, }, ), ]; assert_eq!( - wal.block_cache.iter().collect::<Vec<_>>(), + wal.inner.block_cache.iter().collect::<Vec<_>>(), [ committed_notification_1_cache, reverted_notification_cache, @@ -494,7 +460,7 @@ // the notifications before it. wal.finalize((block_1_reorged.number, block_1_reorged.hash()).into())?; assert_eq!( - wal.block_cache.iter().collect::<Vec<_>>(), + wal.inner.block_cache.iter().collect::<Vec<_>>(), [committed_notification_2_cache, reorged_notification_cache].concat() ); assert_eq!(read_notifications(&wal)?, vec![committed_notification_2, reorged_notification]); diff --git a/crates/exex/exex/src/wal/storage.rs b/crates/exex/exex/src/wal/storage.rs index 766d70b072749..817d57d193f04 100644 --- a/crates/exex/exex/src/wal/storage.rs +++ b/crates/exex/exex/src/wal/storage.rs @@ -1,6 +1,5 @@ use std::{ fs::File, - io::{Read, Write}, ops::RangeInclusive, path::{Path, PathBuf}, }; @@ -14,7 +13,7 @@ /// /// Each notification is represented by a single file that contains a MessagePack-encoded /// notification. -#[derive(Debug)] +#[derive(Debug, Clone)] pub struct Storage { /// The path to the WAL file. path: PathBuf, @@ -81,39 +80,31 @@ Ok(range.count()) } - /// Removes notifications from the storage according to the given range. - /// - /// # Returns - /// - /// Notifications that were removed.
- pub(super) fn take_notifications( - &self, - range: RangeInclusive<u64>, - ) -> eyre::Result<Vec<ExExNotification>> { - let notifications = self.iter_notifications(range).collect::<eyre::Result<Vec<_>>>()?; - - for (id, _) in &notifications { - self.remove_notification(*id); - } - - Ok(notifications.into_iter().map(|(_, notification)| notification).collect()) - } - pub(super) fn iter_notifications( &self, range: RangeInclusive<u64>, ) -> impl Iterator<Item = eyre::Result<(u64, ExExNotification)>> + '_ { - range.map(move |id| self.read_notification(id).map(|notification| (id, notification))) + range.map(move |id| { + let notification = self.read_notification(id)?.ok_or_eyre("notification not found")?; + + Ok((id, notification)) + }) } /// Reads the notification from the file with the given id. #[instrument(target = "exex::wal::storage", skip(self))] - pub(super) fn read_notification(&self, file_id: u64) -> eyre::Result<ExExNotification> { + pub(super) fn read_notification(&self, file_id: u64) -> eyre::Result<Option<ExExNotification>> { let file_path = self.file_path(file_id); debug!(?file_path, "Reading notification from WAL"); - let mut file = File::open(&file_path)?; - read_notification(&mut file) + let mut file = match File::open(&file_path) { + Ok(file) => file, + Err(err) if err.kind() == std::io::ErrorKind::NotFound => return Ok(None), + Err(err) => return Err(err.into()), + }; + + // TODO(alexey): use rmp-serde when Alloy and Reth serde issues are resolved + Ok(serde_json::from_reader(&mut file)?) } /// Writes the notification to the file with the given id. @@ -126,27 +117,13 @@ impl Storage { let file_path = self.file_path(file_id); debug!(?file_path, "Writing notification to WAL"); - let mut file = File::create_new(&file_path)?; - write_notification(&mut file, notification)?; - - Ok(()) + Ok(reth_fs_util::atomic_write_file(&file_path, |file| { + // TODO(alexey): use rmp-serde when Alloy and Reth serde issues are resolved + serde_json::to_writer(file, notification) + })?) } } -// TODO(alexey): use rmp-serde when Alloy and Reth serde issues are resolved - -fn write_notification(mut w: &mut impl Write, notification: &ExExNotification) -> eyre::Result<()> { - // rmp_serde::encode::write(w, notification)?; - serde_json::to_writer(&mut w, notification)?; - w.flush()?; - Ok(()) -} - -fn read_notification(r: &mut impl Read) -> eyre::Result<ExExNotification> { - // Ok(rmp_serde::from_read(r)?) - Ok(serde_json::from_reader(r)?)
-} - #[cfg(test)] mod tests { use std::sync::Arc; @@ -181,7 +158,7 @@ mod tests { let file_id = 0; storage.write_notification(file_id, &notification)?; let deserialized_notification = storage.read_notification(file_id)?; - assert_eq!(deserialized_notification, notification); + assert_eq!(deserialized_notification, Some(notification)); Ok(()) } diff --git a/crates/exex/test-utils/src/lib.rs b/crates/exex/test-utils/src/lib.rs index 4117c0c73c9a4..3a9b8dc0ab42e 100644 --- a/crates/exex/test-utils/src/lib.rs +++ b/crates/exex/test-utils/src/lib.rs @@ -20,7 +20,7 @@ use reth_db_common::init::init_genesis; use reth_ethereum_engine_primitives::EthereumEngineValidator; use reth_evm::test_utils::MockExecutorProvider; use reth_execution_types::Chain; -use reth_exex::{ExExContext, ExExEvent, ExExNotification, ExExNotifications}; +use reth_exex::{ExExContext, ExExEvent, ExExNotification, ExExNotifications, Wal}; use reth_network::{config::SecretKey, NetworkConfigBuilder, NetworkManager}; use reth_node_api::{ FullNodeTypes, FullNodeTypesAdapter, NodeTypes, NodeTypesWithDBAdapter, NodeTypesWithEngine, @@ -41,7 +41,7 @@ use reth_node_ethereum::{ EthEngineTypes, EthEvmConfig, }; use reth_payload_builder::noop::NoopPayloadBuilderService; -use reth_primitives::{Head, SealedBlockWithSenders}; +use reth_primitives::{BlockNumHash, Head, SealedBlockWithSenders}; use reth_provider::{ providers::{BlockchainProvider, StaticFileProvider}, BlockReader, ProviderFactory, @@ -49,6 +49,7 @@ use reth_provider::{ use reth_tasks::TaskManager; use reth_transaction_pool::test_utils::{testing_pool, TestPool}; use std::{ + env::temp_dir, fmt::Debug, future::{poll_fn, Future}, sync::Arc, @@ -222,7 +223,7 @@ impl TestExExHandle { /// Asserts that the Execution Extension emitted a `FinishedHeight` event with the correct /// height. #[track_caller] - pub fn assert_event_finished_height(&mut self, height: u64) -> eyre::Result<()> { + pub fn assert_event_finished_height(&mut self, height: BlockNumHash) -> eyre::Result<()> { let event = self.events_rx.try_recv()?; assert_eq!(event, ExExEvent::FinishedHeight(height)); Ok(()) @@ -310,6 +311,8 @@ pub async fn test_exex_context_with_chain_spec( components.provider.clone(), components.components.executor.clone(), notifications_rx, + // TODO(alexey): do we want to expose WAL to the user? + Wal::new(temp_dir())?.handle(), ); let ctx = ExExContext { diff --git a/crates/fs-util/src/lib.rs b/crates/fs-util/src/lib.rs index 91e60c313f8ee..f77632cc89196 100644 --- a/crates/fs-util/src/lib.rs +++ b/crates/fs-util/src/lib.rs @@ -8,8 +8,8 @@ #![cfg_attr(not(test), warn(unused_crate_dependencies))] use serde::{de::DeserializeOwned, Serialize}; use std::{ - fs::{self, File, ReadDir}, - io::{self, BufWriter, Write}, + fs::{self, File, OpenOptions, ReadDir}, + io::{self, BufWriter, Error, ErrorKind, Write}, path::{Path, PathBuf}, }; @@ -138,6 +138,14 @@ pub enum FsPathError { /// The path related to the operation. path: PathBuf, }, + /// Error variant for failed fsync operation with additional path context. + #[error("failed to sync path {path:?}: {source}")] + Fsync { + /// The source `io::Error`. + source: io::Error, + /// The path related to the operation. + path: PathBuf, + }, } impl FsPathError { @@ -195,6 +203,11 @@ pub fn metadata(source: io::Error, path: impl Into<PathBuf>) -> Self { Self::Metadata { source, path: path.into() } } + + /// Returns the complementary error variant for `fsync`.
+ pub fn fsync(source: io::Error, path: impl Into<PathBuf>) -> Self { + Self::Fsync { source, path: path.into() } + } } /// Wrapper for `std::fs::read_to_string` @@ -277,3 +290,61 @@ pub fn write_json_file<T: Serialize>(path: &Path, obj: &T) -> Result<()> { .map_err(|source| FsPathError::WriteJson { source, path: path.into() })?; writer.flush().map_err(|e| FsPathError::write(e, path)) } + +/// Writes atomically to file. +/// +/// 1. Creates a temporary file with a `.tmp` extension in the same file directory. +/// 2. Writes content with `write_fn`. +/// 3. Fsyncs the temp file to disk. +/// 4. Renames the temp file to the target path. +/// 5. Fsyncs the file directory. +/// +/// Atomic writes are hard: +/// * +/// * +pub fn atomic_write_file<F, E>(file_path: &Path, write_fn: F) -> Result<()> +where + F: FnOnce(&mut File) -> std::result::Result<(), E>, + E: Into<Box<dyn std::error::Error + Send + Sync>>, +{ + let mut tmp_path = file_path.to_path_buf(); + tmp_path.set_extension("tmp"); + + // Write to the temporary file + let mut file = + File::create(&tmp_path).map_err(|err| FsPathError::create_file(err, &tmp_path))?; + + write_fn(&mut file).map_err(|err| FsPathError::Write { + source: Error::new(ErrorKind::Other, err.into()), + path: tmp_path.clone(), + })?; + + // fsync() file + file.sync_all().map_err(|err| FsPathError::fsync(err, &tmp_path))?; + + // Rename file, not move + rename(&tmp_path, file_path)?; + + // fsync() directory + if let Some(parent) = file_path.parent() { + #[cfg(windows)] + OpenOptions::new() + .read(true) + .write(true) + .custom_flags(0x02000000) // FILE_FLAG_BACKUP_SEMANTICS + .open(parent) + .map_err(|err| FsPathError::open(err, parent))? + .sync_all() + .map_err(|err| FsPathError::fsync(err, parent))?; + + #[cfg(not(windows))] + OpenOptions::new() + .read(true) + .open(parent) + .map_err(|err| FsPathError::open(err, parent))? + .sync_all() + .map_err(|err| FsPathError::fsync(err, parent))?; + } + + Ok(()) +}
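The WAL storage above routes its notification writes through this helper; any other caller follows the same shape. A minimal usage sketch (the file name and payload are invented for illustration; `eyre` assumed for error handling):

```rust
use std::io::Write;
use std::path::Path;

// Sketch: atomically replace a small JSON file. If the process dies
// mid-write, the target keeps its previous contents, because the rename
// only happens after the temp file has been fsynced.
fn save_state(dir: &Path) -> eyre::Result<()> {
    let target = dir.join("state.json");
    reth_fs_util::atomic_write_file(&target, |file| file.write_all(b"{\"height\":100}"))?;
    Ok(())
}
```

The closing directory fsync is what makes the rename itself durable; on Windows that requires reopening the parent directory with `FILE_FLAG_BACKUP_SEMANTICS`, which is why the helper special-cases it.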
diff --git a/crates/net/ecies/src/error.rs b/crates/net/ecies/src/error.rs index f476e00c4c1d6..79965f73303f3 100644 --- a/crates/net/ecies/src/error.rs +++ b/crates/net/ecies/src/error.rs @@ -13,6 +13,11 @@ impl ECIESError { pub fn into_inner(self) -> ECIESErrorImpl { *self.inner } + + /// Returns a reference to the inner error + pub const fn inner(&self) -> &ECIESErrorImpl { + &self.inner + } } impl fmt::Display for ECIESError { diff --git a/crates/net/network/src/error.rs b/crates/net/network/src/error.rs index 055bcddd647b7..2709c4a290758 100644 --- a/crates/net/network/src/error.rs +++ b/crates/net/network/src/error.rs @@ -3,6 +3,7 @@ use std::{fmt, io, io::ErrorKind, net::SocketAddr}; use reth_dns_discovery::resolver::ResolveError; +use reth_ecies::ECIESErrorImpl; use reth_eth_wire::{ errors::{EthHandshakeError, EthStreamError, P2PHandshakeError, P2PStreamError}, DisconnectReason, @@ -206,7 +207,17 @@ impl SessionError for PendingSessionHandshakeError { fn merits_discovery_ban(&self) -> bool { match self { Self::Eth(eth) => eth.merits_discovery_ban(), - Self::Ecies(_) => true, + Self::Ecies(err) => matches!( + err.inner(), + ECIESErrorImpl::TagCheckDecryptFailed | + ECIESErrorImpl::TagCheckHeaderFailed | + ECIESErrorImpl::TagCheckBodyFailed | + ECIESErrorImpl::InvalidAuthData | + ECIESErrorImpl::InvalidAckData | + ECIESErrorImpl::InvalidHeader | + ECIESErrorImpl::Secp256k1(_) | + ECIESErrorImpl::InvalidHandshake { .. } + ), Self::Timeout => false, } } @@ -214,7 +225,17 @@ fn is_fatal_protocol_error(&self) -> bool { match self { Self::Eth(eth) => eth.is_fatal_protocol_error(), - Self::Ecies(_) => true, + Self::Ecies(err) => matches!( + err.inner(), + ECIESErrorImpl::TagCheckDecryptFailed | + ECIESErrorImpl::TagCheckHeaderFailed | + ECIESErrorImpl::TagCheckBodyFailed | + ECIESErrorImpl::InvalidAuthData | + ECIESErrorImpl::InvalidAckData | + ECIESErrorImpl::InvalidHeader | + ECIESErrorImpl::Secp256k1(_) | + ECIESErrorImpl::InvalidHandshake { .. } + ), Self::Timeout => false, } } diff --git a/crates/node/builder/src/builder/mod.rs b/crates/node/builder/src/builder/mod.rs index 2bd43d3c6ac79..61141f0677cce 100644 --- a/crates/node/builder/src/builder/mod.rs +++ b/crates/node/builder/src/builder/mod.rs @@ -80,7 +80,7 @@ pub type RethFullAdapter = FullNodeTypesAdapter< /// configured components and can interact with the node. /// /// There are convenience functions for networks that come with a preset of types and components via -/// the [Node] trait, see `reth_node_ethereum::EthereumNode` or `reth_node_optimism::OptimismNode`. +/// the [Node] trait, see `reth_node_ethereum::EthereumNode` or `reth_optimism_node::OptimismNode`. /// /// The [`NodeBuilder::node`] function configures the node's types and components in one step. /// diff --git a/crates/node/builder/src/launch/exex.rs b/crates/node/builder/src/launch/exex.rs index d037200869c44..816335d3dbdfd 100644 --- a/crates/node/builder/src/launch/exex.rs +++ b/crates/node/builder/src/launch/exex.rs @@ -5,7 +5,9 @@ use std::{fmt, fmt::Debug}; use futures::future; use reth_chain_state::ForkChoiceSubscriptions; use reth_chainspec::EthChainSpec; -use reth_exex::{ExExContext, ExExHandle, ExExManager, ExExManagerHandle, Wal}; +use reth_exex::{ + ExExContext, ExExHandle, ExExManager, ExExManagerHandle, Wal, DEFAULT_EXEX_MANAGER_CAPACITY, +}; use reth_node_api::{FullNodeComponents, NodeTypes}; use reth_primitives::Head; use reth_provider::CanonStateSubscriptions; @@ -45,6 +47,15 @@ impl ExExLauncher { return Ok(None) } + let exex_wal = Wal::new( + config_container + .config + .datadir + .clone() + .resolve_datadir(config_container.config.chain.chain()) + .exex_wal(), + )?; + let mut exex_handles = Vec::with_capacity(extensions.len()); let mut exexes = Vec::with_capacity(extensions.len()); @@ -55,6 +66,7 @@ impl ExExLauncher { head, components.provider().clone(), components.block_executor().clone(), + exex_wal.handle(), ); exex_handles.push(handle); @@ -96,17 +108,9 @@ impl ExExLauncher { // spawn exex manager debug!(target: "reth::cli", "spawning exex manager"); // todo(onbjerg): rm magic number - let exex_wal = Wal::new( - config_container - .config - .datadir - .clone() - .resolve_datadir(config_container.config.chain.chain()) - .exex_wal(), - )?; let exex_manager = ExExManager::new( exex_handles, - 1024, + DEFAULT_EXEX_MANAGER_CAPACITY, exex_wal, components.provider().finalized_block_stream(), ); diff --git a/crates/optimism/bin/Cargo.toml b/crates/optimism/bin/Cargo.toml index eee07c0be876b..08e13ae3800ed 100644 --- a/crates/optimism/bin/Cargo.toml +++ b/crates/optimism/bin/Cargo.toml @@ -14,7 +14,7 @@ reth-cli-util.workspace = true reth-optimism-cli.workspace = true reth-provider.workspace = true reth-optimism-rpc.workspace = true -reth-node-optimism.workspace = true +reth-optimism-node.workspace = true clap = { workspace = true, features = ["derive", "env"] } @@ -28,9 +28,9 @@ jemalloc =
["reth-cli-util/jemalloc"] jemalloc-prof = ["reth-cli-util/jemalloc-prof"] tracy-allocator = ["reth-cli-util/tracy-allocator"] -asm-keccak = ["reth-optimism-cli/asm-keccak", "reth-node-optimism/asm-keccak"] +asm-keccak = ["reth-optimism-cli/asm-keccak", "reth-optimism-node/asm-keccak"] -optimism = ["reth-optimism-cli/optimism", "reth-node-optimism/optimism"] +optimism = ["reth-optimism-cli/optimism", "reth-optimism-node/optimism"] [[bin]] name = "op-reth" diff --git a/crates/optimism/bin/src/main.rs b/crates/optimism/bin/src/main.rs index f5a88798a06b4..58d5ba7a438d4 100644 --- a/crates/optimism/bin/src/main.rs +++ b/crates/optimism/bin/src/main.rs @@ -5,8 +5,8 @@ use clap::Parser; use reth_node_builder::{engine_tree_config::TreeConfig, EngineNodeLauncher}; -use reth_node_optimism::{args::RollupArgs, node::OptimismAddOns, OptimismNode}; use reth_optimism_cli::{chainspec::OpChainSpecParser, Cli}; +use reth_optimism_node::{args::RollupArgs, node::OptimismAddOns, OptimismNode}; use reth_optimism_rpc::SequencerClient; use reth_provider::providers::BlockchainProvider2; diff --git a/crates/optimism/cli/Cargo.toml b/crates/optimism/cli/Cargo.toml index e54ab9be7c40a..99d1641e36438 100644 --- a/crates/optimism/cli/Cargo.toml +++ b/crates/optimism/cli/Cargo.toml @@ -24,7 +24,7 @@ reth-stages.workspace = true reth-static-file.workspace = true reth-execution-types.workspace = true reth-node-core.workspace = true -reth-node-optimism.workspace = true +reth-optimism-node.workspace = true reth-primitives.workspace = true ## optimism @@ -37,7 +37,7 @@ reth-node-events.workspace = true reth-network-p2p.workspace = true reth-errors.workspace = true reth-config.workspace = true -reth-evm-optimism.workspace = true +reth-optimism-evm.workspace = true reth-cli.workspace = true reth-cli-runner.workspace = true reth-node-builder.workspace = true @@ -71,14 +71,14 @@ reth-cli-commands.workspace = true [features] optimism = [ "reth-primitives/optimism", - "reth-evm-optimism/optimism", + "reth-optimism-evm/optimism", "reth-provider/optimism", "reth-node-core/optimism", - "reth-node-optimism/optimism", + "reth-optimism-node/optimism", ] asm-keccak = [ "alloy-primitives/asm-keccak", "reth-node-core/asm-keccak", - "reth-node-optimism/asm-keccak", + "reth-optimism-node/asm-keccak", "reth-primitives/asm-keccak", ] diff --git a/crates/optimism/cli/src/commands/build_pipeline.rs b/crates/optimism/cli/src/commands/build_pipeline.rs index b546a1a515a26..b2ac97eef2d17 100644 --- a/crates/optimism/cli/src/commands/build_pipeline.rs +++ b/crates/optimism/cli/src/commands/build_pipeline.rs @@ -8,13 +8,13 @@ use reth_downloaders::{ headers::reverse_headers::ReverseHeadersDownloaderBuilder, }; use reth_errors::ProviderError; -use reth_evm_optimism::OpExecutorProvider; use reth_network_p2p::{ bodies::downloader::BodyDownloader, headers::downloader::{HeaderDownloader, SyncTarget}, }; use reth_node_builder::NodeTypesWithDB; use reth_node_events::node::NodeEvent; +use reth_optimism_evm::OpExecutorProvider; use reth_provider::{BlockNumReader, ChainSpecProvider, HeaderProvider, ProviderFactory}; use reth_prune::PruneModes; use reth_stages::{sets::DefaultStages, Pipeline, StageSet}; diff --git a/crates/optimism/cli/src/lib.rs b/crates/optimism/cli/src/lib.rs index 137463ee2ffa3..ea8a77087fafa 100644 --- a/crates/optimism/cli/src/lib.rs +++ b/crates/optimism/cli/src/lib.rs @@ -40,13 +40,13 @@ use reth_cli::chainspec::ChainSpecParser; use reth_cli_commands::node::NoArgs; use reth_cli_runner::CliRunner; use reth_db::DatabaseEnv; -use 
reth_evm_optimism::OpExecutorProvider; use reth_node_builder::{NodeBuilder, WithLaunchContext}; use reth_node_core::{ args::LogArgs, version::{LONG_VERSION, SHORT_VERSION}, }; -use reth_node_optimism::OptimismNode; +use reth_optimism_evm::OpExecutorProvider; +use reth_optimism_node::OptimismNode; use reth_tracing::FileWorkerGuard; use tracing::info; diff --git a/crates/optimism/evm/Cargo.toml b/crates/optimism/evm/Cargo.toml index 0f427ee4caf45..c0be459167fff 100644 --- a/crates/optimism/evm/Cargo.toml +++ b/crates/optimism/evm/Cargo.toml @@ -1,5 +1,5 @@ [package] -name = "reth-evm-optimism" +name = "reth-optimism-evm" version.workspace = true edition.workspace = true rust-version.workspace = true diff --git a/crates/optimism/evm/src/l1.rs b/crates/optimism/evm/src/l1.rs index a736a52fb5500..ad2107307c8f9 100644 --- a/crates/optimism/evm/src/l1.rs +++ b/crates/optimism/evm/src/l1.rs @@ -353,7 +353,7 @@ mod tests { let expected_l1_blob_base_fee = U256::from_be_bytes(hex!( "0000000000000000000000000000000000000000000000000000000d5ea528d2" // 57422457042 )); - let expecte_l1_blob_base_fee_scalar = U256::from(810949); + let expected_l1_blob_base_fee_scalar = U256::from(810949); // test @@ -362,7 +362,7 @@ mod tests { assert_eq!(l1_block_info.l1_base_fee, expected_l1_base_fee); assert_eq!(l1_block_info.l1_base_fee_scalar, expected_l1_base_fee_scalar); assert_eq!(l1_block_info.l1_blob_base_fee, Some(expected_l1_blob_base_fee)); - assert_eq!(l1_block_info.l1_blob_base_fee_scalar, Some(expecte_l1_blob_base_fee_scalar)); + assert_eq!(l1_block_info.l1_blob_base_fee_scalar, Some(expected_l1_blob_base_fee_scalar)); } #[test] diff --git a/crates/optimism/node/Cargo.toml b/crates/optimism/node/Cargo.toml index 89238eb9c5eec..fdfb9bf6cee03 100644 --- a/crates/optimism/node/Cargo.toml +++ b/crates/optimism/node/Cargo.toml @@ -1,5 +1,5 @@ [package] -name = "reth-node-optimism" +name = "reth-optimism-node" version.workspace = true edition.workspace = true rust-version.workspace = true @@ -36,7 +36,7 @@ reth-rpc.workspace = true # op-reth reth-optimism-payload-builder.workspace = true -reth-evm-optimism.workspace = true +reth-optimism-evm.workspace = true reth-optimism-rpc.workspace = true reth-optimism-chainspec.workspace = true reth-optimism-consensus.workspace = true @@ -82,7 +82,7 @@ optimism = [ "reth-primitives/optimism", "reth-provider/optimism", "reth-rpc-types-compat/optimism", - "reth-evm-optimism/optimism", + "reth-optimism-evm/optimism", "reth-optimism-payload-builder/optimism", "reth-beacon-consensus/optimism", "reth-revm/optimism", diff --git a/crates/optimism/node/src/lib.rs b/crates/optimism/node/src/lib.rs index 9d7d6f3554c85..768f4d94efd5f 100644 --- a/crates/optimism/node/src/lib.rs +++ b/crates/optimism/node/src/lib.rs @@ -26,4 +26,4 @@ pub use reth_optimism_payload_builder::{ OptimismBuiltPayload, OptimismPayloadBuilder, OptimismPayloadBuilderAttributes, }; -pub use reth_evm_optimism::*; +pub use reth_optimism_evm::*; diff --git a/crates/optimism/node/src/node.rs b/crates/optimism/node/src/node.rs index 8614e8d60db1a..2a28d44e2a0d6 100644 --- a/crates/optimism/node/src/node.rs +++ b/crates/optimism/node/src/node.rs @@ -5,7 +5,6 @@ use std::sync::Arc; use reth_basic_payload_builder::{BasicPayloadJobGenerator, BasicPayloadJobGeneratorConfig}; use reth_chainspec::ChainSpec; use reth_evm::ConfigureEvm; -use reth_evm_optimism::{OpExecutorProvider, OptimismEvmConfig}; use reth_network::{NetworkHandle, NetworkManager}; use reth_node_api::{EngineValidator, FullNodeComponents, NodeAddOns}; use 
reth_node_builder::{ @@ -18,6 +17,7 @@ use reth_node_builder::{ }; use reth_optimism_chainspec::OpChainSpec; use reth_optimism_consensus::OptimismBeaconConsensus; +use reth_optimism_evm::{OpExecutorProvider, OptimismEvmConfig}; use reth_optimism_rpc::OpEthApi; use reth_payload_builder::{PayloadBuilderHandle, PayloadBuilderService}; use reth_primitives::Header; diff --git a/crates/optimism/node/src/txpool.rs b/crates/optimism/node/src/txpool.rs index 5c04309acea80..3bd5d6fd38db8 100644 --- a/crates/optimism/node/src/txpool.rs +++ b/crates/optimism/node/src/txpool.rs @@ -2,7 +2,7 @@ use alloy_eips::eip2718::Encodable2718; use parking_lot::RwLock; use reth_chainspec::ChainSpec; -use reth_evm_optimism::RethL1BlockInfo; +use reth_optimism_evm::RethL1BlockInfo; use reth_primitives::{Block, GotExpected, InvalidTransactionError, SealedBlock}; use reth_provider::{BlockReaderIdExt, StateProviderFactory}; use reth_revm::L1BlockInfo; @@ -99,7 +99,7 @@ where /// Update the L1 block info. fn update_l1_block_info(&self, block: &Block) { self.block_info.timestamp.store(block.timestamp, Ordering::Relaxed); - if let Ok(cost_addition) = reth_evm_optimism::extract_l1_info(block) { + if let Ok(cost_addition) = reth_optimism_evm::extract_l1_info(block) { *self.block_info.l1_block_info.write() = cost_addition; } } diff --git a/crates/optimism/node/tests/e2e/utils.rs b/crates/optimism/node/tests/e2e/utils.rs index 6b8e07a42e38b..a8dda7b9956be 100644 --- a/crates/optimism/node/tests/e2e/utils.rs +++ b/crates/optimism/node/tests/e2e/utils.rs @@ -5,10 +5,10 @@ use alloy_primitives::{Address, B256}; use reth::{rpc::types::engine::PayloadAttributes, tasks::TaskManager}; use reth_chainspec::ChainSpecBuilder; use reth_e2e_test_utils::{transaction::TransactionTestContext, wallet::Wallet, NodeHelperType}; -use reth_node_optimism::{ +use reth_optimism_chainspec::BASE_MAINNET; +use reth_optimism_node::{ node::OptimismAddOns, OptimismBuiltPayload, OptimismNode, OptimismPayloadBuilderAttributes, }; -use reth_optimism_chainspec::BASE_MAINNET; use reth_payload_builder::EthPayloadBuilderAttributes; use tokio::sync::Mutex; diff --git a/crates/optimism/node/tests/it/builder.rs b/crates/optimism/node/tests/it/builder.rs index cc9c772c027be..8d5cc1554e3db 100644 --- a/crates/optimism/node/tests/it/builder.rs +++ b/crates/optimism/node/tests/it/builder.rs @@ -3,7 +3,7 @@ use reth_db::test_utils::create_test_rw_db; use reth_node_api::FullNodeComponents; use reth_node_builder::{NodeBuilder, NodeConfig}; -use reth_node_optimism::node::{OptimismAddOns, OptimismNode}; +use reth_optimism_node::node::{OptimismAddOns, OptimismNode}; #[test] fn test_basic_setup() { diff --git a/crates/optimism/payload/Cargo.toml b/crates/optimism/payload/Cargo.toml index 86bbd91c8b2e1..c83b0fc73741e 100644 --- a/crates/optimism/payload/Cargo.toml +++ b/crates/optimism/payload/Cargo.toml @@ -28,8 +28,8 @@ reth-trie.workspace = true reth-chain-state.workspace = true # op-reth -reth-evm-optimism.workspace = true reth-optimism-consensus.workspace = true +reth-optimism-evm.workspace = true reth-optimism-forks.workspace = true # ethereum @@ -52,6 +52,6 @@ optimism = [ "reth-primitives/optimism", "reth-provider/optimism", "reth-rpc-types-compat/optimism", - "reth-evm-optimism/optimism", + "reth-optimism-evm/optimism", "reth-revm/optimism", ] diff --git a/crates/optimism/payload/src/builder.rs b/crates/optimism/payload/src/builder.rs index 84e69cb92e73e..878e9cf224de9 100644 --- a/crates/optimism/payload/src/builder.rs +++ b/crates/optimism/payload/src/builder.rs @@ 
-224,7 +224,7 @@ where // blocks will always have at least a single transaction in them (the L1 info transaction), // so we can safely assume that this will always be triggered upon the transition and that // the above check for empty blocks will never be hit on OP chains. - reth_evm_optimism::ensure_create2_deployer( + reth_optimism_evm::ensure_create2_deployer( chain_spec.clone(), attributes.payload_attributes.timestamp, &mut db, diff --git a/crates/optimism/rpc/Cargo.toml b/crates/optimism/rpc/Cargo.toml index 4edbd690e05ee..e347be2317d4a 100644 --- a/crates/optimism/rpc/Cargo.toml +++ b/crates/optimism/rpc/Cargo.toml @@ -28,8 +28,8 @@ reth-node-builder.workspace = true reth-chainspec.workspace = true # op-reth -reth-evm-optimism.workspace = true reth-optimism-consensus.workspace = true +reth-optimism-evm.workspace = true reth-optimism-forks.workspace = true # ethereum @@ -61,7 +61,7 @@ reth-optimism-chainspec.workspace = true [features] optimism = [ - "reth-evm-optimism/optimism", + "reth-optimism-evm/optimism", "reth-primitives/optimism", "reth-provider/optimism", "reth-rpc-eth-api/optimism", diff --git a/crates/optimism/rpc/src/error.rs b/crates/optimism/rpc/src/error.rs index 35bc147986a97..b4d349e1cc455 100644 --- a/crates/optimism/rpc/src/error.rs +++ b/crates/optimism/rpc/src/error.rs @@ -2,7 +2,7 @@ use alloy_rpc_types::error::EthRpcErrorCode; use jsonrpsee_types::error::INTERNAL_ERROR_CODE; -use reth_evm_optimism::OptimismBlockExecutionError; +use reth_optimism_evm::OptimismBlockExecutionError; use reth_primitives::revm_primitives::{InvalidTransaction, OptimismInvalidTransaction}; use reth_rpc_eth_api::AsEthApiError; use reth_rpc_eth_types::EthApiError; diff --git a/crates/optimism/rpc/src/eth/block.rs b/crates/optimism/rpc/src/eth/block.rs index 41267a1e78231..da799e140d8e8 100644 --- a/crates/optimism/rpc/src/eth/block.rs +++ b/crates/optimism/rpc/src/eth/block.rs @@ -44,7 +44,7 @@ where let block = block.unseal(); let l1_block_info = - reth_evm_optimism::extract_l1_info(&block).map_err(OpEthApiError::from)?; + reth_optimism_evm::extract_l1_info(&block).map_err(OpEthApiError::from)?; return block .body diff --git a/crates/optimism/rpc/src/eth/receipt.rs b/crates/optimism/rpc/src/eth/receipt.rs index b07459c778cf0..aa53ef81feb41 100644 --- a/crates/optimism/rpc/src/eth/receipt.rs +++ b/crates/optimism/rpc/src/eth/receipt.rs @@ -7,8 +7,8 @@ use op_alloy_rpc_types::{ receipt::L1BlockInfo, OpTransactionReceipt, OptimismTransactionReceiptFields, }; use reth_chainspec::ChainSpec; -use reth_evm_optimism::RethL1BlockInfo; use reth_node_api::{FullNodeComponents, NodeTypes}; +use reth_optimism_evm::RethL1BlockInfo; use reth_optimism_forks::OptimismHardforks; use reth_primitives::{Receipt, TransactionMeta, TransactionSigned, TxType}; use reth_provider::ChainSpecProvider; @@ -43,7 +43,7 @@ where let block = block.unseal(); let l1_block_info = - reth_evm_optimism::extract_l1_info(&block).map_err(OpEthApiError::from)?; + reth_optimism_evm::extract_l1_info(&block).map_err(OpEthApiError::from)?; Ok(OpReceiptBuilder::new( &self.inner.provider().chain_spec(), @@ -356,7 +356,7 @@ mod test { }; let l1_block_info = - reth_evm_optimism::extract_l1_info(&block).expect("should extract l1 info"); + reth_optimism_evm::extract_l1_info(&block).expect("should extract l1 info"); // test assert!(OP_MAINNET.is_fjord_active_at_timestamp(BLOCK_124665056_TIMESTAMP)); diff --git a/crates/primitives-traits/src/integer_list.rs b/crates/primitives-traits/src/integer_list.rs index 767fb3ec30a1c..570c96c9fdafa 
100644
--- a/crates/primitives-traits/src/integer_list.rs
+++ b/crates/primitives-traits/src/integer_list.rs
@@ -4,7 +4,7 @@ use core::fmt;
 use derive_more::Deref;
 use roaring::RoaringTreemap;
 use serde::{
-    de::{SeqAccess, Unexpected, Visitor},
+    de::{SeqAccess, Visitor},
     ser::SerializeSeq,
     Deserialize, Deserializer, Serialize, Serializer,
 };
@@ -16,34 +16,54 @@ pub struct IntegerList(pub RoaringTreemap);
 
 impl fmt::Debug for IntegerList {
     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
-        let vec: Vec<u64> = self.0.iter().collect();
-        write!(f, "IntegerList {vec:?}")
+        f.write_str("IntegerList")?;
+        f.debug_list().entries(self.0.iter()).finish()
     }
 }
 
 impl IntegerList {
+    /// Creates a new empty `IntegerList`.
+    pub fn empty() -> Self {
+        Self(RoaringTreemap::new())
+    }
+
     /// Creates an `IntegerList` from a list of integers.
     ///
-    /// # Returns
-    ///
-    /// Returns an error if the list is empty or not pre-sorted.
-    pub fn new<T: AsRef<[u64]>>(list: T) -> Result<Self, RoaringBitmapError> {
-        Ok(Self(
-            RoaringTreemap::from_sorted_iter(list.as_ref().iter().copied())
-                .map_err(|_| RoaringBitmapError::InvalidInput)?,
-        ))
+    /// Returns an error if the list is not pre-sorted.
+    pub fn new(list: impl IntoIterator<Item = u64>) -> Result<Self, IntegerListError> {
+        RoaringTreemap::from_sorted_iter(list)
+            .map(Self)
+            .map_err(|_| IntegerListError::UnsortedInput)
     }
 
     /// Creates an `IntegerList` from a pre-sorted list of integers.
     ///
     /// # Panics
     ///
-    /// Panics if the list is empty or not pre-sorted.
-    pub fn new_pre_sorted<T: AsRef<[u64]>>(list: T) -> Self {
-        Self(
-            RoaringTreemap::from_sorted_iter(list.as_ref().iter().copied())
-                .expect("IntegerList must be pre-sorted and non-empty"),
-        )
+    /// Panics if the list is not pre-sorted.
+    #[inline]
+    #[track_caller]
+    pub fn new_pre_sorted(list: impl IntoIterator<Item = u64>) -> Self {
+        Self::new(list).expect("IntegerList must be pre-sorted")
+    }
+
+    /// Appends a list of integers to the current list.
+    pub fn append(&mut self, list: impl IntoIterator<Item = u64>) -> Result<u64, IntegerListError> {
+        self.0.append(list).map_err(|_| IntegerListError::UnsortedInput)
+    }
+
+    /// Pushes a new integer to the list.
+    pub fn push(&mut self, value: u64) -> Result<(), IntegerListError> {
+        if self.0.push(value) {
+            Ok(())
+        } else {
+            Err(IntegerListError::UnsortedInput)
+        }
+    }
+
+    /// Clears the list.
+    pub fn clear(&mut self) {
+        self.0.clear();
+    }
 
     /// Serializes a [`IntegerList`] into a sequence of bytes.
@@ -59,36 +79,21 @@ impl IntegerList {
     }
 
     /// Deserializes a sequence of bytes into a proper [`IntegerList`].
-    pub fn from_bytes(data: &[u8]) -> Result<Self, RoaringBitmapError> {
+    pub fn from_bytes(data: &[u8]) -> Result<Self, IntegerListError> {
         Ok(Self(
             RoaringTreemap::deserialize_from(data)
-                .map_err(|_| RoaringBitmapError::FailedToDeserialize)?,
+                .map_err(|_| IntegerListError::FailedToDeserialize)?,
         ))
     }
 }
 
-macro_rules! impl_uint {
-    ($($w:tt),+) => {
-        $(
-            impl From<Vec<$w>> for IntegerList {
-                fn from(v: Vec<$w>) -> Self {
-                    Self::new_pre_sorted(v.iter().map(|v| *v as u64).collect::<Vec<_>>())
-                }
-            }
-        )+
-    };
-}
-
-impl_uint!(usize, u64, u32, u8, u16);
-
 impl Serialize for IntegerList {
     fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
     where
         S: Serializer,
     {
-        let vec = self.0.iter().collect::<Vec<u64>>();
         let mut seq = serializer.serialize_seq(Some(self.len() as usize))?;
-        for e in vec {
+        for e in &self.0 {
             seq.serialize_element(&e)?;
         }
         seq.end()
@@ -107,12 +112,11 @@ impl<'de> Visitor<'de> for IntegerListVisitor {
     where
         E: SeqAccess<'de>,
     {
-        let mut list = Vec::new();
+        let mut list = IntegerList::empty();
         while let Some(item) = seq.next_element()? {
-            list.push(item);
+            list.push(item).map_err(serde::de::Error::custom)?;
         }
-
-        IntegerList::new(list).map_err(|_| serde::de::Error::invalid_value(Unexpected::Seq, &self))
+        Ok(list)
     }
 }
@@ -132,17 +136,17 @@ use arbitrary::{Arbitrary, Unstructured};
 impl<'a> Arbitrary<'a> for IntegerList {
     fn arbitrary(u: &mut Unstructured<'a>) -> Result<Self, arbitrary::Error> {
         let mut nums: Vec<u64> = Vec::arbitrary(u)?;
-        nums.sort();
+        nums.sort_unstable();
         Self::new(nums).map_err(|_| arbitrary::Error::IncorrectFormat)
     }
 }
 
 /// Primitives error type.
 #[derive(Debug, derive_more::Display, derive_more::Error)]
-pub enum RoaringBitmapError {
-    /// The provided input is invalid.
-    #[display("the provided input is invalid")]
-    InvalidInput,
+pub enum IntegerListError {
+    /// The provided input is unsorted.
+    #[display("the provided input is unsorted")]
+    UnsortedInput,
     /// Failed to deserialize data into type.
     #[display("failed to deserialize data into type")]
     FailedToDeserialize,
@@ -152,6 +156,12 @@ pub enum RoaringBitmapError {
 mod tests {
     use super::*;
 
+    #[test]
+    fn empty_list() {
+        assert_eq!(IntegerList::empty().len(), 0);
+        assert_eq!(IntegerList::new_pre_sorted(std::iter::empty()).len(), 0);
+    }
+
     #[test]
     fn test_integer_list() {
         let original_list = [1, 2, 3];
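The rewritten constructors above change the `IntegerList` contract: an empty list is now valid, and unsorted input is surfaced as `IntegerListError::UnsortedInput` instead of panicking inside the constructor. A minimal standalone sketch of that contract, assuming only the `roaring` crate as a dependency (the type and error names mirror the diff, not the full reth API):

```rust
use roaring::RoaringTreemap;

/// Sketch of the new constructor contract.
#[derive(Debug)]
struct IntegerList(RoaringTreemap);

#[derive(Debug, PartialEq)]
enum IntegerListError {
    UnsortedInput,
}

impl IntegerList {
    /// Builds the list from a pre-sorted iterator, reporting unsorted
    /// input as an error instead of panicking.
    fn new(list: impl IntoIterator<Item = u64>) -> Result<Self, IntegerListError> {
        RoaringTreemap::from_sorted_iter(list)
            .map(Self)
            .map_err(|_| IntegerListError::UnsortedInput)
    }
}

fn main() {
    // Sorted input is accepted, and so is the empty list now.
    assert!(IntegerList::new([1, 2, 3]).is_ok());
    assert!(IntegerList::new(std::iter::empty()).is_ok());
    // Out-of-order input becomes a recoverable error.
    assert_eq!(IntegerList::new([3, 1, 2]).unwrap_err(), IntegerListError::UnsortedInput);
}
```

`RoaringTreemap::from_sorted_iter` already rejects out-of-order values, so the wrapper no longer needs `AsRef<[u64]>` input or an intermediate copy.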
diff --git a/crates/primitives-traits/src/lib.rs b/crates/primitives-traits/src/lib.rs
index e5c57de74b9c2..5445ce467114d 100644
--- a/crates/primitives-traits/src/lib.rs
+++ b/crates/primitives-traits/src/lib.rs
@@ -21,7 +21,7 @@ pub mod account;
 pub use account::{Account, Bytecode};
 
 mod integer_list;
-pub use integer_list::{IntegerList, RoaringBitmapError};
+pub use integer_list::{IntegerList, IntegerListError};
 
 pub mod request;
 pub use request::{Request, Requests};
diff --git a/crates/prune/prune/src/segments/user/account_history.rs b/crates/prune/prune/src/segments/user/account_history.rs
index 016d9a22fba2f..01f8c0850a187 100644
--- a/crates/prune/prune/src/segments/user/account_history.rs
+++ b/crates/prune/prune/src/segments/user/account_history.rs
@@ -275,10 +275,8 @@ mod tests {
             .iter()
             .filter(|(key, _)| key.highest_block_number > last_pruned_block_number)
             .map(|(key, blocks)| {
-                let new_blocks = blocks
-                    .iter()
-                    .skip_while(|block| *block <= last_pruned_block_number)
-                    .collect::<Vec<_>>();
+                let new_blocks =
+                    blocks.iter().skip_while(|block| *block <= last_pruned_block_number);
                 (key.clone(), BlockNumberList::new_pre_sorted(new_blocks))
             })
             .collect::<Vec<_>>();
diff --git a/crates/prune/prune/src/segments/user/storage_history.rs b/crates/prune/prune/src/segments/user/storage_history.rs
index 5291d822cefaf..315ad750a8b77 100644
--- a/crates/prune/prune/src/segments/user/storage_history.rs
+++ b/crates/prune/prune/src/segments/user/storage_history.rs
@@ -281,10 +281,8 @@ mod tests {
             .iter()
             .filter(|(key, _)| key.sharded_key.highest_block_number > last_pruned_block_number)
             .map(|(key, blocks)| {
-                let new_blocks = blocks
-                    .iter()
-                    .skip_while(|block| *block <= last_pruned_block_number)
-                    .collect::<Vec<_>>();
+                let new_blocks =
+                    blocks.iter().skip_while(|block| *block <= last_pruned_block_number);
                 (key.clone(), BlockNumberList::new_pre_sorted(new_blocks))
             })
             .collect::<Vec<_>>();
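Both prune tests above now feed the `skip_while` adapter directly into `BlockNumberList::new_pre_sorted`, which accepts any `impl IntoIterator<Item = u64>`. A small standalone illustration of why the intermediate `collect::<Vec<_>>()` can be dropped:

```rust
// A sorted list stays sorted after skipping a prefix, so the adapter can be
// consumed directly by a sorted-input constructor without re-collecting.
fn main() {
    let blocks: Vec<u64> = vec![100, 200, 300, 400, 500];
    let last_pruned_block_number = 200u64;

    let new_blocks = blocks
        .iter()
        .copied()
        .skip_while(|block| *block <= last_pruned_block_number);

    assert_eq!(new_blocks.collect::<Vec<_>>(), vec![300, 400, 500]);
}
```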
diff --git a/crates/rpc/ipc/src/server/rpc_service.rs b/crates/rpc/ipc/src/server/rpc_service.rs
index 2f645605da72c..5e89c6a0d7f31 100644
--- a/crates/rpc/ipc/src/server/rpc_service.rs
+++ b/crates/rpc/ipc/src/server/rpc_service.rs
@@ -6,8 +6,8 @@ use jsonrpsee::{
         IdProvider,
     },
     types::{error::reject_too_many_subscriptions, ErrorCode, ErrorObject, Request},
-    BoundedSubscriptions, ConnectionId, Extensions, MethodCallback, MethodResponse, MethodSink,
-    Methods, SubscriptionState,
+    BoundedSubscriptions, ConnectionId, MethodCallback, MethodResponse, MethodSink, Methods,
+    SubscriptionState,
 };
 use std::sync::Arc;
 
@@ -58,7 +58,7 @@ impl<'a> RpcServiceT<'a> for RpcService {
         let params = req.params();
         let name = req.method_name();
         let id = req.id().clone();
-        let extensions = Extensions::new();
+        let extensions = req.extensions.clone();
 
         match self.methods.method_with_name(name) {
             None => {
diff --git a/crates/rpc/rpc-api/src/engine.rs b/crates/rpc/rpc-api/src/engine.rs
index d489d0dd7f643..50181d23a75c3 100644
--- a/crates/rpc/rpc-api/src/engine.rs
+++ b/crates/rpc/rpc-api/src/engine.rs
@@ -219,7 +219,7 @@ pub trait EngineApi<Engine: EngineTypes> {
     #[method(name = "getBlobsV1")]
     async fn get_blobs_v1(
        &self,
-        transaction_ids: Vec<B256>,
+        versioned_hashes: Vec<B256>,
     ) -> RpcResult<Vec<Option<BlobAndProofV1>>>;
 }
 
diff --git a/crates/rpc/rpc/src/eth/bundle.rs b/crates/rpc/rpc/src/eth/bundle.rs
index dd8f75898bfee..2d169f1c540a9 100644
--- a/crates/rpc/rpc/src/eth/bundle.rs
+++ b/crates/rpc/rpc/src/eth/bundle.rs
@@ -2,7 +2,7 @@
 
 use std::sync::Arc;
 
-use alloy_primitives::{keccak256, U256};
+use alloy_primitives::{Keccak256, U256};
 use alloy_rpc_types_mev::{EthCallBundle, EthCallBundleResponse, EthCallBundleTransactionResult};
 use jsonrpsee::core::RpcResult;
 use reth_chainspec::EthChainSpec;
@@ -161,7 +161,7 @@ where
         let mut coinbase_balance_after_tx = initial_coinbase;
         let mut total_gas_used = 0u64;
         let mut total_gas_fess = U256::ZERO;
-        let mut hash_bytes = Vec::with_capacity(32 * transactions.len());
+        let mut hasher = Keccak256::new();
 
         let mut evm = Call::evm_config(&eth_api).evm_with_env(db, env);
 
@@ -179,7 +179,7 @@ where
 
             let tx = tx.into_transaction();
 
-            hash_bytes.extend_from_slice(tx.hash().as_slice());
+            hasher.update(tx.hash());
             let gas_price = tx
                 .effective_tip_per_gas(basefee)
                 .ok_or_else(|| RpcInvalidTransactionError::FeeCapTooLow)
@@ -244,7 +244,7 @@ where
                 coinbase_diff.checked_div(U256::from(total_gas_used)).unwrap_or_default();
             let res = EthCallBundleResponse {
                 bundle_gas_price,
-                bundle_hash: keccak256(&hash_bytes),
+                bundle_hash: hasher.finalize(),
                 coinbase_diff,
                 eth_sent_to_coinbase,
                 gas_fees: total_gas_fess,
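The `eth_callBundle` change above replaces buffer concatenation with incremental hashing. A quick standalone check, assuming the `alloy-primitives` crate, that the two approaches produce the same bundle hash:

```rust
use alloy_primitives::{keccak256, Keccak256};

fn main() {
    let tx_hashes = [keccak256(b"tx1"), keccak256(b"tx2"), keccak256(b"tx3")];

    // Old approach: concatenate all hash bytes, then hash once.
    let mut hash_bytes = Vec::with_capacity(32 * tx_hashes.len());
    for hash in &tx_hashes {
        hash_bytes.extend_from_slice(hash.as_slice());
    }
    let expected = keccak256(&hash_bytes);

    // New approach: stream each hash into an incremental hasher,
    // avoiding the intermediate allocation entirely.
    let mut hasher = Keccak256::new();
    for hash in &tx_hashes {
        hasher.update(hash);
    }
    assert_eq!(hasher.finalize(), expected);
}
```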
diff --git a/crates/rpc/rpc/src/eth/filter.rs b/crates/rpc/rpc/src/eth/filter.rs
index 9c6562606b746..6be507501f6f3 100644
--- a/crates/rpc/rpc/src/eth/filter.rs
+++ b/crates/rpc/rpc/src/eth/filter.rs
@@ -698,28 +698,28 @@ impl Iterator for BlockRangeInclusiveIter {
 #[cfg(test)]
 mod tests {
     use super::*;
-    use rand::{thread_rng, Rng};
+    use rand::Rng;
+    use reth_testing_utils::generators;
 
     #[test]
     fn test_block_range_iter() {
-        for _ in 0..100 {
-            let mut rng = thread_rng();
-            let start = rng.gen::<u32>() as u64;
-            let end = start.saturating_add(rng.gen::<u32>() as u64);
-            let step = rng.gen::<u16>() as u64;
-            let range = start..=end;
-            let mut iter = BlockRangeInclusiveIter::new(range.clone(), step);
-            let (from, mut end) = iter.next().unwrap();
-            assert_eq!(from, start);
-            assert_eq!(end, (from + step).min(*range.end()));
-
-            for (next_from, next_end) in iter {
-                // ensure range starts with previous end + 1
-                assert_eq!(next_from, end + 1);
-                end = next_end;
-            }
-
-            assert_eq!(end, *range.end());
+        let mut rng = generators::rng();
+
+        let start = rng.gen::<u32>() as u64;
+        let end = start.saturating_add(rng.gen::<u32>() as u64);
+        let step = rng.gen::<u16>() as u64;
+        let range = start..=end;
+        let mut iter = BlockRangeInclusiveIter::new(range.clone(), step);
+        let (from, mut end) = iter.next().unwrap();
+        assert_eq!(from, start);
+        assert_eq!(end, (from + step).min(*range.end()));
+
+        for (next_from, next_end) in iter {
+            // ensure range starts with previous end + 1
+            assert_eq!(next_from, end + 1);
+            end = next_end;
         }
+
+        assert_eq!(end, *range.end());
     }
 }
diff --git a/crates/stages/stages/src/stages/hashing_storage.rs b/crates/stages/stages/src/stages/hashing_storage.rs
index ba1e03c1a296f..1862a3248ded6 100644
--- a/crates/stages/stages/src/stages/hashing_storage.rs
+++ b/crates/stages/stages/src/stages/hashing_storage.rs
@@ -134,7 +134,7 @@ where
                     B256::from_slice(&addr_key[..32]),
                     StorageEntry {
                         key: B256::from_slice(&addr_key[32..]),
-                        value: CompactU256::decompress(value)?.into(),
+                        value: CompactU256::decompress_owned(value)?.into(),
                     },
                 )?;
             }
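The switch to `CompactU256::decompress_owned` above leans on the borrowed/owned split this PR introduces on the `Decompress` trait (see the `crates/storage/db-api/src/table.rs` hunk later in this diff). A self-contained sketch of the pattern; the `RawBytes` type and the pared-down trait here are illustrative, not the real reth definitions:

```rust
#[derive(Debug)]
struct DatabaseError;

/// Pared-down version of the borrowed/owned split: `decompress` borrows the
/// database slice, while `decompress_owned` lets implementations reuse an
/// owned buffer when the backend hands one over.
trait Decompress: Sized {
    fn decompress(value: &[u8]) -> Result<Self, DatabaseError>;

    fn decompress_owned(value: Vec<u8>) -> Result<Self, DatabaseError> {
        Self::decompress(&value)
    }
}

#[derive(Debug, PartialEq)]
struct RawBytes(Vec<u8>);

impl Decompress for RawBytes {
    fn decompress(value: &[u8]) -> Result<Self, DatabaseError> {
        Ok(Self(value.to_vec()))
    }

    // Owned input is moved straight into the value, skipping the copy.
    fn decompress_owned(value: Vec<u8>) -> Result<Self, DatabaseError> {
        Ok(Self(value))
    }
}

fn main() {
    let from_slice = RawBytes::decompress(&[1, 2, 3]).unwrap();
    let from_owned = RawBytes::decompress_owned(vec![1, 2, 3]).unwrap();
    assert_eq!(from_slice, from_owned);
}
```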
diff --git a/crates/stages/stages/src/stages/index_account_history.rs b/crates/stages/stages/src/stages/index_account_history.rs
index e0fcde2b194f5..1e96419807714 100644
--- a/crates/stages/stages/src/stages/index_account_history.rs
+++ b/crates/stages/stages/src/stages/index_account_history.rs
@@ -118,7 +118,7 @@ where
             collector,
             first_sync,
             ShardedKey::new,
-            ShardedKey::<Address>::decode,
+            ShardedKey::<Address>::decode_owned,
             |key| key.key,
         )?;
 
@@ -182,7 +182,7 @@ mod tests {
     }
 
     fn list(list: &[u64]) -> BlockNumberList {
-        BlockNumberList::new(list).unwrap()
+        BlockNumberList::new(list.iter().copied()).unwrap()
     }
 
     fn cast(
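`ShardedKey::<Address>::decode_owned` defaults to the borrowed `decode`, which (as the `sharded_key.rs` hunk later in this diff shows) now peels the trailing big-endian block number off with `split_last_chunk`. A standalone sketch of that wire layout; the helper functions are illustrative, not reth APIs:

```rust
// `ShardedKey` layout: inner key bytes followed by a big-endian u64
// `highest_block_number` suffix.
fn encode_sharded(key: [u8; 20], highest_block_number: u64) -> Vec<u8> {
    let mut out = key.to_vec();
    out.extend_from_slice(&highest_block_number.to_be_bytes());
    out
}

fn decode_sharded(value: &[u8]) -> Option<([u8; 20], u64)> {
    // Same trick as the new `Decode` impl: split off the trailing 8-byte
    // chunk without manual index arithmetic.
    let (key, num) = value.split_last_chunk::<8>()?;
    Some((key.try_into().ok()?, u64::from_be_bytes(*num)))
}

fn main() {
    let address = [0x11; 20];
    let encoded = encode_sharded(address, 300);
    assert_eq!(encoded.len(), 28);
    assert_eq!(decode_sharded(&encoded), Some((address, 300)));
}
```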
diff --git a/crates/stages/stages/src/stages/index_storage_history.rs b/crates/stages/stages/src/stages/index_storage_history.rs
index 4af2cb3efea2e..ee68e934f4ed0 100644
--- a/crates/stages/stages/src/stages/index_storage_history.rs
+++ b/crates/stages/stages/src/stages/index_storage_history.rs
@@ -124,7 +124,7 @@ where
             |AddressStorageKey((address, storage_key)), highest_block_number| {
                 StorageShardedKey::new(address, storage_key, highest_block_number)
             },
-            StorageShardedKey::decode,
+            StorageShardedKey::decode_owned,
             |key| AddressStorageKey((key.address, key.sharded_key.key)),
         )?;
 
@@ -197,7 +197,7 @@ mod tests {
     }
 
     fn list(list: &[u64]) -> BlockNumberList {
-        BlockNumberList::new(list).unwrap()
+        BlockNumberList::new(list.iter().copied()).unwrap()
     }
 
     fn cast(
diff --git a/crates/stages/stages/src/stages/utils.rs b/crates/stages/stages/src/stages/utils.rs
index 7cdab4ff24489..caf039faca108 100644
--- a/crates/stages/stages/src/stages/utils.rs
+++ b/crates/stages/stages/src/stages/utils.rs
@@ -54,11 +54,11 @@ where
     let mut cache: HashMap<P, Vec<u64>> = HashMap::default();
 
     let mut collect = |cache: &HashMap<P, Vec<u64>>| {
-        for (key, indice_list) in cache {
-            let last = indice_list.last().expect("qed");
+        for (key, indices) in cache {
+            let last = indices.last().expect("qed");
             collector.insert(
                 sharded_key_factory(*key, *last),
-                BlockNumberList::new_pre_sorted(indice_list),
+                BlockNumberList::new_pre_sorted(indices.iter().copied()),
             )?;
         }
         Ok::<(), StageError>(())
diff --git a/crates/storage/db-api/src/models/accounts.rs b/crates/storage/db-api/src/models/accounts.rs
index 338a3a06f6004..94922632129b9 100644
--- a/crates/storage/db-api/src/models/accounts.rs
+++ b/crates/storage/db-api/src/models/accounts.rs
@@ -64,11 +64,9 @@ impl Encode for BlockNumberAddress {
 }
 
 impl Decode for BlockNumberAddress {
-    fn decode<B: AsRef<[u8]>>(value: B) -> Result<Self, DatabaseError> {
-        let value = value.as_ref();
+    fn decode(value: &[u8]) -> Result<Self, DatabaseError> {
         let num = u64::from_be_bytes(value[..8].try_into().map_err(|_| DatabaseError::Decode)?);
         let hash = Address::from_slice(&value[8..]);
-
         Ok(Self((num, hash)))
     }
 }
@@ -97,11 +95,9 @@ impl Encode for AddressStorageKey {
 }
 
 impl Decode for AddressStorageKey {
-    fn decode<B: AsRef<[u8]>>(value: B) -> Result<Self, DatabaseError> {
-        let value = value.as_ref();
+    fn decode(value: &[u8]) -> Result<Self, DatabaseError> {
         let address = Address::from_slice(&value[..20]);
         let storage_key = StorageKey::from_slice(&value[20..]);
-
         Ok(Self((address, storage_key)))
     }
 }
@@ -127,7 +123,7 @@ mod tests {
         let encoded = Encode::encode(key);
         assert_eq!(encoded, bytes);
 
-        let decoded: BlockNumberAddress = Decode::decode(encoded).unwrap();
+        let decoded: BlockNumberAddress = Decode::decode(&encoded).unwrap();
         assert_eq!(decoded, key);
     }
 
@@ -152,7 +148,7 @@ mod tests {
         let encoded = Encode::encode(key);
         assert_eq!(encoded, bytes);
 
-        let decoded: AddressStorageKey = Decode::decode(encoded).unwrap();
+        let decoded: AddressStorageKey = Decode::decode(&encoded).unwrap();
         assert_eq!(decoded, key);
     }
 
diff --git a/crates/storage/db-api/src/models/blocks.rs b/crates/storage/db-api/src/models/blocks.rs
index b48baf6d6b26f..7268d82dd3cca 100644
--- a/crates/storage/db-api/src/models/blocks.rs
+++ b/crates/storage/db-api/src/models/blocks.rs
@@ -29,9 +29,6 @@ mod tests {
         let mut ommer = StoredBlockOmmers::default();
         ommer.ommers.push(Header::default());
         ommer.ommers.push(Header::default());
-
assert_eq!( - ommer.clone(), - StoredBlockOmmers::decompress::>(ommer.compress()).unwrap() - ); + assert_eq!(ommer.clone(), StoredBlockOmmers::decompress(&ommer.compress()).unwrap()); } } diff --git a/crates/storage/db-api/src/models/integer_list.rs b/crates/storage/db-api/src/models/integer_list.rs index f47605bf88b52..480b52a9e2c09 100644 --- a/crates/storage/db-api/src/models/integer_list.rs +++ b/crates/storage/db-api/src/models/integer_list.rs @@ -12,13 +12,14 @@ impl Compress for IntegerList { fn compress(self) -> Self::Compressed { self.to_bytes() } + fn compress_to_buf>(self, buf: &mut B) { self.to_mut_bytes(buf) } } impl Decompress for IntegerList { - fn decompress>(value: B) -> Result { - Self::from_bytes(value.as_ref()).map_err(|_| DatabaseError::Decode) + fn decompress(value: &[u8]) -> Result { + Self::from_bytes(value).map_err(|_| DatabaseError::Decode) } } diff --git a/crates/storage/db-api/src/models/mod.rs b/crates/storage/db-api/src/models/mod.rs index 6e832a0314f45..9e7e8957b5a9e 100644 --- a/crates/storage/db-api/src/models/mod.rs +++ b/crates/storage/db-api/src/models/mod.rs @@ -42,10 +42,10 @@ macro_rules! impl_uints { } impl Decode for $name { - fn decode>(value: B) -> Result { + fn decode(value: &[u8]) -> Result { Ok( $name::from_be_bytes( - value.as_ref().try_into().map_err(|_| $crate::DatabaseError::Decode)? + value.try_into().map_err(|_| $crate::DatabaseError::Decode)? ) ) } @@ -65,8 +65,12 @@ impl Encode for Vec { } impl Decode for Vec { - fn decode>(value: B) -> Result { - Ok(value.as_ref().to_vec()) + fn decode(value: &[u8]) -> Result { + Ok(value.to_vec()) + } + + fn decode_owned(value: Vec) -> Result { + Ok(value) } } @@ -79,8 +83,8 @@ impl Encode for Address { } impl Decode for Address { - fn decode>(value: B) -> Result { - Ok(Self::from_slice(value.as_ref())) + fn decode(value: &[u8]) -> Result { + Ok(Self::from_slice(value)) } } @@ -93,8 +97,8 @@ impl Encode for B256 { } impl Decode for B256 { - fn decode>(value: B) -> Result { - Ok(Self::new(value.as_ref().try_into().map_err(|_| DatabaseError::Decode)?)) + fn decode(value: &[u8]) -> Result { + Ok(Self::new(value.try_into().map_err(|_| DatabaseError::Decode)?)) } } @@ -107,8 +111,12 @@ impl Encode for String { } impl Decode for String { - fn decode>(value: B) -> Result { - Self::from_utf8(value.as_ref().to_vec()).map_err(|_| DatabaseError::Decode) + fn decode(value: &[u8]) -> Result { + Self::decode_owned(value.to_vec()) + } + + fn decode_owned(value: Vec) -> Result { + Self::from_utf8(value).map_err(|_| DatabaseError::Decode) } } @@ -124,9 +132,8 @@ impl Encode for StoredNibbles { } impl Decode for StoredNibbles { - fn decode>(value: B) -> Result { - let buf = value.as_ref(); - Ok(Self::from_compact(buf, buf.len()).0) + fn decode(value: &[u8]) -> Result { + Ok(Self::from_compact(value, value.len()).0) } } @@ -142,9 +149,8 @@ impl Encode for StoredNibblesSubKey { } impl Decode for StoredNibblesSubKey { - fn decode>(value: B) -> Result { - let buf = value.as_ref(); - Ok(Self::from_compact(buf, buf.len()).0) + fn decode(value: &[u8]) -> Result { + Ok(Self::from_compact(value, value.len()).0) } } @@ -159,9 +165,8 @@ impl Encode for PruneSegment { } impl Decode for PruneSegment { - fn decode>(value: B) -> Result { - let buf = value.as_ref(); - Ok(Self::from_compact(buf, buf.len()).0) + fn decode(value: &[u8]) -> Result { + Ok(Self::from_compact(value, value.len()).0) } } @@ -177,9 +182,8 @@ impl Encode for ClientVersion { } impl Decode for ClientVersion { - fn decode>(value: B) -> Result { - let buf = 
value.as_ref(); - Ok(Self::from_compact(buf, buf.len()).0) + fn decode(value: &[u8]) -> Result { + Ok(Self::from_compact(value, value.len()).0) } } @@ -196,9 +200,8 @@ macro_rules! impl_compression_for_compact { } impl Decompress for $name { - fn decompress>(value: B) -> Result<$name, $crate::DatabaseError> { - let value = value.as_ref(); - let (obj, _) = Compact::from_compact(&value, value.len()); + fn decompress(value: &[u8]) -> Result<$name, $crate::DatabaseError> { + let (obj, _) = Compact::from_compact(value, value.len()); Ok(obj) } } @@ -236,23 +239,20 @@ impl_compression_for_compact!( macro_rules! impl_compression_fixed_compact { ($($name:tt),+) => { $( - impl Compress for $name - { + impl Compress for $name { type Compressed = Vec; - fn compress_to_buf>(self, buf: &mut B) { - let _ = Compact::to_compact(&self, buf); - } - fn uncompressable_ref(&self) -> Option<&[u8]> { Some(self.as_ref()) } + + fn compress_to_buf>(self, buf: &mut B) { + let _ = Compact::to_compact(&self, buf); + } } - impl Decompress for $name - { - fn decompress>(value: B) -> Result<$name, $crate::DatabaseError> { - let value = value.as_ref(); + impl Decompress for $name { + fn decompress(value: &[u8]) -> Result<$name, $crate::DatabaseError> { let (obj, _) = Compact::from_compact(&value, value.len()); Ok(obj) } diff --git a/crates/storage/db-api/src/models/sharded_key.rs b/crates/storage/db-api/src/models/sharded_key.rs index dd8702a4812bf..d1de1bd400c4e 100644 --- a/crates/storage/db-api/src/models/sharded_key.rs +++ b/crates/storage/db-api/src/models/sharded_key.rs @@ -16,7 +16,7 @@ pub const NUM_OF_INDICES_IN_SHARD: usize = 2_000; /// `Address | 200` -> data is from block 0 to 200. /// /// `Address | 300` -> data is from block 201 to 300. -#[derive(Debug, Default, Clone, Eq, PartialEq, Ord, PartialOrd, Serialize, Deserialize)] +#[derive(Debug, Default, Clone, Eq, PartialEq, Ord, PartialOrd, Serialize, Deserialize, Hash)] pub struct ShardedKey { /// The key for this type. 
pub key: T, @@ -43,11 +43,7 @@ impl ShardedKey { } } -impl Encode for ShardedKey -where - T: Encode, - Vec: From<::Encoded>, -{ +impl Encode for ShardedKey { type Encoded = Vec; fn encode(self) -> Self::Encoded { @@ -57,30 +53,11 @@ where } } -impl Decode for ShardedKey -where - T: Decode, -{ - fn decode>(value: B) -> Result { - let value = value.as_ref(); - - let tx_num_index = value.len() - 8; - - let highest_tx_number = u64::from_be_bytes( - value[tx_num_index..].try_into().map_err(|_| DatabaseError::Decode)?, - ); - let key = T::decode(&value[..tx_num_index])?; - +impl Decode for ShardedKey { + fn decode(value: &[u8]) -> Result { + let (key, highest_tx_number) = value.split_last_chunk().unwrap(); + let key = T::decode(key)?; + let highest_tx_number = u64::from_be_bytes(*highest_tx_number); Ok(Self::new(key, highest_tx_number)) } } - -impl Hash for ShardedKey -where - T: Hash, -{ - fn hash(&self, state: &mut H) { - self.key.hash(state); - self.highest_block_number.hash(state); - } -} diff --git a/crates/storage/db-api/src/models/storage_sharded_key.rs b/crates/storage/db-api/src/models/storage_sharded_key.rs index b6538256e6299..5fd79ba655c1a 100644 --- a/crates/storage/db-api/src/models/storage_sharded_key.rs +++ b/crates/storage/db-api/src/models/storage_sharded_key.rs @@ -61,8 +61,7 @@ impl Encode for StorageShardedKey { } impl Decode for StorageShardedKey { - fn decode>(value: B) -> Result { - let value = value.as_ref(); + fn decode(value: &[u8]) -> Result { let tx_num_index = value.len() - 8; let highest_tx_number = u64::from_be_bytes( diff --git a/crates/storage/db-api/src/scale.rs b/crates/storage/db-api/src/scale.rs index 99382a4a91793..591635be054e6 100644 --- a/crates/storage/db-api/src/scale.rs +++ b/crates/storage/db-api/src/scale.rs @@ -22,7 +22,7 @@ where } fn compress_to_buf>(self, buf: &mut B) { - buf.put_slice(&parity_scale_codec::Encode::encode(&self)) + parity_scale_codec::Encode::encode_to(&self, OutputCompat::wrap_mut(buf)); } } @@ -30,8 +30,8 @@ impl Decompress for T where T: ScaleValue + parity_scale_codec::Decode + Sync + Send + std::fmt::Debug, { - fn decompress>(value: B) -> Result { - parity_scale_codec::Decode::decode(&mut value.as_ref()).map_err(|_| DatabaseError::Decode) + fn decompress(mut value: &[u8]) -> Result { + parity_scale_codec::Decode::decode(&mut value).map_err(|_| DatabaseError::Decode) } } @@ -50,3 +50,22 @@ impl sealed::Sealed for Vec {} impl_compression_for_scale!(U256); impl_compression_for_scale!(u8, u32, u16, u64); + +#[repr(transparent)] +struct OutputCompat(B); + +impl OutputCompat { + fn wrap_mut(buf: &mut B) -> &mut Self { + unsafe { std::mem::transmute(buf) } + } +} + +impl parity_scale_codec::Output for OutputCompat { + fn write(&mut self, bytes: &[u8]) { + self.0.put_slice(bytes); + } + + fn push_byte(&mut self, byte: u8) { + self.0.put_u8(byte); + } +} diff --git a/crates/storage/db-api/src/table.rs b/crates/storage/db-api/src/table.rs index 6d3f52198d28a..963457af05c36 100644 --- a/crates/storage/db-api/src/table.rs +++ b/crates/storage/db-api/src/table.rs @@ -38,11 +38,11 @@ pub trait Compress: Send + Sync + Sized + Debug { /// Trait that will transform the data to be read from the DB. pub trait Decompress: Send + Sync + Sized + Debug { /// Decompresses data coming from the database. - fn decompress>(value: B) -> Result; + fn decompress(value: &[u8]) -> Result; /// Decompresses owned data coming from the database. 
fn decompress_owned(value: Vec) -> Result { - Self::decompress(value) + Self::decompress(&value) } } @@ -58,7 +58,12 @@ pub trait Encode: Send + Sync + Sized + Debug { /// Trait that will transform the data to be read from the DB. pub trait Decode: Send + Sync + Sized + Debug { /// Decodes data coming from the database. - fn decode>(value: B) -> Result; + fn decode(value: &[u8]) -> Result; + + /// Decodes owned data coming from the database. + fn decode_owned(value: Vec) -> Result { + Self::decode(&value) + } } /// Generic trait that enforces the database key to implement [`Encode`] and [`Decode`]. diff --git a/crates/storage/db-api/src/utils.rs b/crates/storage/db-api/src/utils.rs index b9ee6277e9595..65ed5b6c01d4d 100644 --- a/crates/storage/db-api/src/utils.rs +++ b/crates/storage/db-api/src/utils.rs @@ -10,8 +10,7 @@ macro_rules! impl_fixed_arbitrary { fn arbitrary(u: &mut Unstructured<'a>) -> Result { let mut buffer = vec![0; $size]; u.fill_buffer(buffer.as_mut_slice())?; - - Decode::decode(buffer).map_err(|_| arbitrary::Error::IncorrectFormat) + Decode::decode_owned(buffer).map_err(|_| arbitrary::Error::IncorrectFormat) } } @@ -26,7 +25,7 @@ macro_rules! impl_fixed_arbitrary { fn arbitrary_with(args: Self::Parameters) -> Self::Strategy { use proptest::strategy::Strategy; proptest::collection::vec(proptest::arbitrary::any_with::(args), $size) - .prop_map(move |vec| Decode::decode(vec).unwrap()) + .prop_map(move |vec| Decode::decode_owned(vec).unwrap()) } } )+ diff --git a/crates/storage/db/benches/criterion.rs b/crates/storage/db/benches/criterion.rs index 6d273a8ce93ca..7ac9566d80c54 100644 --- a/crates/storage/db/benches/criterion.rs +++ b/crates/storage/db/benches/criterion.rs @@ -87,7 +87,7 @@ where |input| { { for (_, k, _, _) in input { - let _ = ::Key::decode(k); + let _ = ::Key::decode(&k); } }; black_box(()); @@ -115,7 +115,7 @@ where |input| { { for (_, _, _, v) in input { - let _ = ::Value::decompress(v); + let _ = ::Value::decompress(&v); } }; black_box(()); diff --git a/crates/storage/db/benches/iai.rs b/crates/storage/db/benches/iai.rs index ebcf6c8a42c0b..167cd0860e260 100644 --- a/crates/storage/db/benches/iai.rs +++ b/crates/storage/db/benches/iai.rs @@ -25,7 +25,7 @@ macro_rules! impl_iai_callgrind_inner { #[library_benchmark] pub fn $decompress() { for (_, _, _, comp) in black_box(load_vectors::()) { - let _ = black_box(::Value::decompress(comp)); + let _ = black_box(::Value::decompress(&comp)); } } @@ -39,7 +39,7 @@ macro_rules! impl_iai_callgrind_inner { #[library_benchmark] pub fn $decode() { for (_, enc, _, _) in black_box(load_vectors::()) { - let _ = black_box(::Key::decode(enc)); + let _ = black_box(::Key::decode(&enc)); } } diff --git a/crates/storage/db/benches/utils.rs b/crates/storage/db/benches/utils.rs index 72d121aa75ccb..9700ef94b241c 100644 --- a/crates/storage/db/benches/utils.rs +++ b/crates/storage/db/benches/utils.rs @@ -1,7 +1,5 @@ -#![cfg(feature = "test-utils")] #![allow(missing_docs)] - -use std::{path::Path, sync::Arc}; +#![cfg(feature = "test-utils")] use alloy_primitives::Bytes; use reth_db::{test_utils::create_test_rw_db_with_path, DatabaseEnv}; @@ -11,6 +9,7 @@ use reth_db_api::{ Database, }; use reth_fs_util as fs; +use std::{path::Path, sync::Arc}; /// Path where the DB is initialized for benchmarks. 
#[allow(dead_code)] diff --git a/crates/storage/db/src/implementation/mdbx/cursor.rs b/crates/storage/db/src/implementation/mdbx/cursor.rs index c908bad459424..756a622bcb035 100644 --- a/crates/storage/db/src/implementation/mdbx/cursor.rs +++ b/crates/storage/db/src/implementation/mdbx/cursor.rs @@ -81,7 +81,7 @@ macro_rules! compress_to_buf_or_ref { if let Some(value) = $value.uncompressable_ref() { Some(value) } else { - $self.buf.truncate(0); + $self.buf.clear(); $value.compress_to_buf(&mut $self.buf); None } diff --git a/crates/storage/db/src/implementation/mdbx/mod.rs b/crates/storage/db/src/implementation/mdbx/mod.rs index 8b4a136c300d8..1deb86ba614f7 100644 --- a/crates/storage/db/src/implementation/mdbx/mod.rs +++ b/crates/storage/db/src/implementation/mdbx/mod.rs @@ -1319,7 +1319,7 @@ mod tests { for i in 1..5 { let key = ShardedKey::new(real_key, i * 100); - let list: IntegerList = vec![i * 100u64].into(); + let list = IntegerList::new_pre_sorted([i * 100u64]); db.update(|tx| tx.put::(key.clone(), list.clone()).expect("")) .unwrap(); @@ -1340,7 +1340,7 @@ mod tests { .expect("should be able to retrieve it."); assert_eq!(ShardedKey::new(real_key, 200), key); - let list200: IntegerList = vec![200u64].into(); + let list200 = IntegerList::new_pre_sorted([200u64]); assert_eq!(list200, list); } // Seek greatest index @@ -1357,7 +1357,7 @@ mod tests { .expect("should be able to retrieve it."); assert_eq!(ShardedKey::new(real_key, 400), key); - let list400: IntegerList = vec![400u64].into(); + let list400 = IntegerList::new_pre_sorted([400u64]); assert_eq!(list400, list); } } diff --git a/crates/storage/db/src/lockfile.rs b/crates/storage/db/src/lockfile.rs index 828cec6e7d293..0b2e31a968e2d 100644 --- a/crates/storage/db/src/lockfile.rs +++ b/crates/storage/db/src/lockfile.rs @@ -7,7 +7,7 @@ use reth_tracing::tracing::error; use std::{ path::{Path, PathBuf}, process, - sync::Arc, + sync::{Arc, OnceLock}, }; use sysinfo::{ProcessRefreshKind, RefreshKind, System}; @@ -91,7 +91,7 @@ impl StorageLockInner { } } -#[derive(Debug)] +#[derive(Clone, Debug)] struct ProcessUID { /// OS process identifier pid: usize, @@ -102,14 +102,16 @@ struct ProcessUID { impl ProcessUID { /// Creates [`Self`] for the provided PID. fn new(pid: usize) -> Option { - System::new_with_specifics(RefreshKind::new().with_processes(ProcessRefreshKind::new())) - .process(pid.into()) - .map(|process| Self { pid, start_time: process.start_time() }) + let mut system = System::new(); + let pid2 = sysinfo::Pid::from(pid); + system.refresh_process_specifics(pid2, ProcessRefreshKind::new()); + system.process(pid2).map(|process| Self { pid, start_time: process.start_time() }) } /// Creates [`Self`] from own process. fn own() -> Self { - Self::new(process::id() as usize).expect("own process") + static CACHE: OnceLock = OnceLock::new(); + CACHE.get_or_init(|| Self::new(process::id() as usize).expect("own process")).clone() } /// Parses [`Self`] from a file. diff --git a/crates/storage/db/src/tables/codecs/fuzz/inputs.rs b/crates/storage/db/src/tables/codecs/fuzz/inputs.rs index 2c944e158eb18..bb26e8b9e217f 100644 --- a/crates/storage/db/src/tables/codecs/fuzz/inputs.rs +++ b/crates/storage/db/src/tables/codecs/fuzz/inputs.rs @@ -10,12 +10,7 @@ pub struct IntegerListInput(pub Vec); impl From for IntegerList { fn from(list: IntegerListInput) -> Self { let mut v = list.0; - - // Empty lists are not supported by `IntegerList`, so we want to skip these cases. 
- if v.is_empty() { - return vec![1u64].into() - } - v.sort(); - v.into() + v.sort_unstable(); + Self::new_pre_sorted(v) } } diff --git a/crates/storage/db/src/tables/codecs/fuzz/mod.rs b/crates/storage/db/src/tables/codecs/fuzz/mod.rs index 846ed17e1f1a1..e64a3841df49e 100644 --- a/crates/storage/db/src/tables/codecs/fuzz/mod.rs +++ b/crates/storage/db/src/tables/codecs/fuzz/mod.rs @@ -30,13 +30,12 @@ macro_rules! impl_fuzzer_with_input { /// Encodes and decodes table types returning its encoded size and the decoded object. /// This method is used for benchmarking, so its parameter should be the actual type that is being tested. - pub fn encode_and_decode(obj: $name) -> (usize, $name) - { + pub fn encode_and_decode(obj: $name) -> (usize, $name) { let data = table::$encode::$encode_method(obj); let size = data.len(); // Some `data` might be a fixed array. - (size, table::$decode::$decode_method(data.to_vec()).expect("failed to decode")) + (size, table::$decode::$decode_method(&data).expect("failed to decode")) } #[cfg(test)] diff --git a/crates/storage/db/src/tables/mod.rs b/crates/storage/db/src/tables/mod.rs index 835d1486dafe4..384139618163f 100644 --- a/crates/storage/db/src/tables/mod.rs +++ b/crates/storage/db/src/tables/mod.rs @@ -429,8 +429,8 @@ impl Encode for ChainStateKey { } impl Decode for ChainStateKey { - fn decode>(value: B) -> Result { - if value.as_ref() == [0] { + fn decode(value: &[u8]) -> Result { + if value == [0] { Ok(Self::LastFinalizedBlock) } else { Err(reth_db_api::DatabaseError::Decode) diff --git a/crates/storage/db/src/tables/raw.rs b/crates/storage/db/src/tables/raw.rs index 1e8fa56b3603c..6b6de41613eb4 100644 --- a/crates/storage/db/src/tables/raw.rs +++ b/crates/storage/db/src/tables/raw.rs @@ -96,8 +96,12 @@ impl Encode for RawKey { // Decode impl Decode for RawKey { - fn decode>(key: B) -> Result { - Ok(Self { key: key.as_ref().to_vec(), _phantom: std::marker::PhantomData }) + fn decode(value: &[u8]) -> Result { + Ok(Self { key: value.to_vec(), _phantom: std::marker::PhantomData }) + } + + fn decode_owned(value: Vec) -> Result { + Ok(Self { key: value, _phantom: std::marker::PhantomData }) } } @@ -168,8 +172,8 @@ impl Compress for RawValue { } impl Decompress for RawValue { - fn decompress>(value: B) -> Result { - Ok(Self { value: value.as_ref().to_vec(), _phantom: std::marker::PhantomData }) + fn decompress(value: &[u8]) -> Result { + Ok(Self { value: value.to_vec(), _phantom: std::marker::PhantomData }) } fn decompress_owned(value: Vec) -> Result { diff --git a/crates/storage/db/src/tables/utils.rs b/crates/storage/db/src/tables/utils.rs index 616d1038264e7..0948ee108f687 100644 --- a/crates/storage/db/src/tables/utils.rs +++ b/crates/storage/db/src/tables/utils.rs @@ -6,7 +6,7 @@ use std::borrow::Cow; /// Helper function to decode a `(key, value)` pair. 
pub(crate) fn decoder<'a, T>( - kv: (Cow<'a, [u8]>, Cow<'a, [u8]>), + (k, v): (Cow<'a, [u8]>, Cow<'a, [u8]>), ) -> Result, DatabaseError> where T: Table, @@ -14,11 +14,11 @@ where T::Value: Decompress, { Ok(( - match kv.0 { + match k { Cow::Borrowed(k) => Decode::decode(k)?, - Cow::Owned(k) => Decode::decode(k)?, + Cow::Owned(k) => Decode::decode_owned(k)?, }, - match kv.1 { + match v { Cow::Borrowed(v) => Decompress::decompress(v)?, Cow::Owned(v) => Decompress::decompress_owned(v)?, }, diff --git a/crates/storage/libmdbx-rs/Cargo.toml b/crates/storage/libmdbx-rs/Cargo.toml index 8056b68557b81..fa10a73cb330b 100644 --- a/crates/storage/libmdbx-rs/Cargo.toml +++ b/crates/storage/libmdbx-rs/Cargo.toml @@ -19,14 +19,16 @@ byteorder = "1" derive_more.workspace = true indexmap = "2" parking_lot.workspace = true +smallvec.workspace = true thiserror.workspace = true -dashmap = { workspace = true, features = ["inline"], optional = true } tracing.workspace = true +dashmap = { workspace = true, features = ["inline"], optional = true } + [features] default = [] return-borrowed = [] -read-tx-timeouts = ["dashmap", "dashmap/inline"] +read-tx-timeouts = ["dep:dashmap"] [dev-dependencies] pprof = { workspace = true, features = [ diff --git a/crates/storage/libmdbx-rs/src/database.rs b/crates/storage/libmdbx-rs/src/database.rs index 1c4739b2bee71..c8733889160e8 100644 --- a/crates/storage/libmdbx-rs/src/database.rs +++ b/crates/storage/libmdbx-rs/src/database.rs @@ -4,7 +4,7 @@ use crate::{ Environment, Transaction, }; use ffi::MDBX_db_flags_t; -use std::{ffi::CString, ptr}; +use std::{ffi::CStr, ptr}; /// A handle to an individual database in an environment. /// @@ -27,8 +27,13 @@ impl Database { name: Option<&str>, flags: MDBX_db_flags_t, ) -> Result { - let c_name = name.map(|n| CString::new(n).unwrap()); - let name_ptr = if let Some(c_name) = &c_name { c_name.as_ptr() } else { ptr::null() }; + let mut c_name_buf = smallvec::SmallVec::<[u8; 32]>::new(); + let c_name = name.map(|n| { + c_name_buf.extend_from_slice(n.as_bytes()); + c_name_buf.push(0); + CStr::from_bytes_with_nul(&c_name_buf).unwrap() + }); + let name_ptr = if let Some(c_name) = c_name { c_name.as_ptr() } else { ptr::null() }; let mut dbi: ffi::MDBX_dbi = 0; txn.txn_execute(|txn_ptr| { mdbx_result(unsafe { ffi::mdbx_dbi_open(txn_ptr, name_ptr, flags, &mut dbi) }) diff --git a/crates/storage/nippy-jar/src/lib.rs b/crates/storage/nippy-jar/src/lib.rs index a720192d6a051..bdc950aa38a78 100644 --- a/crates/storage/nippy-jar/src/lib.rs +++ b/crates/storage/nippy-jar/src/lib.rs @@ -17,7 +17,7 @@ use memmap2::Mmap; use serde::{Deserialize, Serialize}; use std::{ error::Error as StdError, - fs::{File, OpenOptions}, + fs::File, ops::Range, path::{Path, PathBuf}, }; @@ -250,35 +250,9 @@ impl NippyJar { /// Writes all necessary configuration to file. fn freeze_config(&self) -> Result<(), NippyJarError> { - // Atomic writes are hard: - let mut tmp_path = self.config_path(); - tmp_path.set_extension(".tmp"); - - // Write to temporary file - let mut file = File::create(&tmp_path)?; - bincode::serialize_into(&mut file, &self)?; - - // fsync() file - file.sync_all()?; - - // Rename file, not move - reth_fs_util::rename(&tmp_path, self.config_path())?; - - // fsync() dir - if let Some(parent) = tmp_path.parent() { - //custom_flags() is only available on Windows - #[cfg(windows)] - OpenOptions::new() - .read(true) - .write(true) - .custom_flags(0x02000000) // FILE_FLAG_BACKUP_SEMANTICS - .open(parent)? 
- .sync_all()?; - - #[cfg(not(windows))] - OpenOptions::new().read(true).open(parent)?.sync_all()?; - } - Ok(()) + Ok(reth_fs_util::atomic_write_file(&self.config_path(), |file| { + bincode::serialize_into(file, &self) + })?) } } diff --git a/crates/storage/provider/src/providers/database/provider.rs b/crates/storage/provider/src/providers/database/provider.rs index 6e026f5c910ab..7159720bf3715 100644 --- a/crates/storage/provider/src/providers/database/provider.rs +++ b/crates/storage/provider/src/providers/database/provider.rs @@ -1356,7 +1356,7 @@ impl DatabaseProvider { }; self.tx.put::( sharded_key_factory(partial_key, highest_block_number), - BlockNumberList::new_pre_sorted(list), + BlockNumberList::new_pre_sorted(list.iter().copied()), )?; } } diff --git a/crates/transaction-pool/src/pool/mod.rs b/crates/transaction-pool/src/pool/mod.rs index 2ba101721b08c..4c1a7f2c29bbc 100644 --- a/crates/transaction-pool/src/pool/mod.rs +++ b/crates/transaction-pool/src/pool/mod.rs @@ -463,6 +463,7 @@ where } if let Some(replaced) = added.replaced_blob_transaction() { + debug!(target: "txpool", "[{:?}] delete replaced blob sidecar", replaced); // delete the replaced transaction from the blob store self.delete_blob(replaced); } @@ -579,9 +580,11 @@ where /// Notify all listeners about a blob sidecar for a newly inserted blob (eip4844) transaction. fn on_new_blob_sidecar(&self, tx_hash: &TxHash, sidecar: &BlobTransactionSidecar) { - let sidecar = Arc::new(sidecar.clone()); - let mut sidecar_listeners = self.blob_transaction_sidecar_listener.lock(); + if sidecar_listeners.is_empty() { + return + } + let sidecar = Arc::new(sidecar.clone()); sidecar_listeners.retain_mut(|listener| { let new_blob_event = NewBlobSidecar { tx_hash: *tx_hash, sidecar: sidecar.clone() }; match listener.sender.try_send(new_blob_event) { @@ -798,6 +801,7 @@ where /// Inserts a blob transaction into the blob store fn insert_blob(&self, hash: TxHash, blob: BlobTransactionSidecar) { + debug!(target: "txpool", "[{:?}] storing blob sidecar", hash); if let Err(err) = self.blob_store.insert(hash, blob) { warn!(target: "txpool", %err, "[{:?}] failed to insert blob", hash); self.blob_store_metrics.blobstore_failed_inserts.increment(1); diff --git a/crates/trie/common/src/proofs.rs b/crates/trie/common/src/proofs.rs index df32b1cb9f6a9..b35edd96d560c 100644 --- a/crates/trie/common/src/proofs.rs +++ b/crates/trie/common/src/proofs.rs @@ -5,12 +5,13 @@ use alloy_primitives::{keccak256, Address, Bytes, B256, U256}; use alloy_rlp::{encode_fixed_size, Decodable}; use alloy_trie::{ nodes::TrieNode, - proof::{verify_proof, ProofVerificationError}, + proof::{verify_proof, ProofNodes, ProofVerificationError}, EMPTY_ROOT_HASH, }; +use itertools::Itertools; use reth_primitives_traits::{constants::KECCAK_EMPTY, Account}; use serde::{Deserialize, Serialize}; -use std::collections::{BTreeMap, HashMap}; +use std::collections::HashMap; /// The state multiproof of target accounts and multiproofs of their storage tries. /// Multiproof is effectively a state subtrie that only contains the nodes @@ -18,7 +19,7 @@ use std::collections::{BTreeMap, HashMap}; #[derive(Clone, Default, Debug)] pub struct MultiProof { /// State trie multiproof for requested accounts. - pub account_subtree: BTreeMap, + pub account_subtree: ProofNodes, /// Storage trie multiproofs. pub storages: HashMap, } @@ -36,8 +37,8 @@ impl MultiProof { // Retrieve the account proof. 
let proof = self .account_subtree - .iter() - .filter(|(path, _)| nibbles.starts_with(path)) + .matching_nodes_iter(&nibbles) + .sorted_by(|a, b| a.0.cmp(b.0)) .map(|(_, node)| node.clone()) .collect::>(); @@ -82,12 +83,12 @@ pub struct StorageMultiProof { /// Storage trie root. pub root: B256, /// Storage multiproof for requested slots. - pub subtree: BTreeMap, + pub subtree: ProofNodes, } impl Default for StorageMultiProof { fn default() -> Self { - Self { root: EMPTY_ROOT_HASH, subtree: BTreeMap::default() } + Self { root: EMPTY_ROOT_HASH, subtree: Default::default() } } } @@ -99,8 +100,8 @@ impl StorageMultiProof { // Retrieve the storage proof. let proof = self .subtree - .iter() - .filter(|(path, _)| nibbles.starts_with(path)) + .matching_nodes_iter(&nibbles) + .sorted_by(|a, b| a.0.cmp(b.0)) .map(|(_, node)| node.clone()) .collect::>(); diff --git a/crates/trie/common/src/root.rs b/crates/trie/common/src/root.rs index 600e818ebbaab..20f3ba1366d57 100644 --- a/crates/trie/common/src/root.rs +++ b/crates/trie/common/src/root.rs @@ -75,7 +75,7 @@ pub fn state_root_unhashed>( pub fn state_root_unsorted>( state: impl IntoIterator, ) -> B256 { - state_root(state.into_iter().sorted_by_key(|(key, _)| *key)) + state_root(state.into_iter().sorted_unstable_by_key(|(key, _)| *key)) } /// Calculates the root hash of the state represented as MPT. @@ -105,7 +105,7 @@ pub fn storage_root_unhashed(storage: impl IntoIterator) -> /// Sorts and calculates the root hash of account storage trie. /// See [`storage_root`] for more info. pub fn storage_root_unsorted(storage: impl IntoIterator) -> B256 { - storage_root(storage.into_iter().sorted_by_key(|(key, _)| *key)) + storage_root(storage.into_iter().sorted_unstable_by_key(|(key, _)| *key)) } /// Calculates the root hash of account storage trie. diff --git a/crates/trie/db/src/proof.rs b/crates/trie/db/src/proof.rs index 9416d078090cb..1d5fda84cc5bc 100644 --- a/crates/trie/db/src/proof.rs +++ b/crates/trie/db/src/proof.rs @@ -78,7 +78,6 @@ impl<'a, TX: DbTx> DatabaseProof<'a, TX> &state_sorted, )) .with_prefix_sets_mut(input.prefix_sets) - .with_targets(targets) - .multiproof() + .multiproof(targets) } } diff --git a/crates/trie/trie/src/proof.rs b/crates/trie/trie/src/proof.rs index 69b648ba001dc..3e9ca5783814b 100644 --- a/crates/trie/trie/src/proof.rs +++ b/crates/trie/trie/src/proof.rs @@ -1,7 +1,7 @@ use crate::{ hashed_cursor::{HashedCursorFactory, HashedStorageCursor}, node_iter::{TrieElement, TrieNodeIter}, - prefix_set::TriePrefixSetsMut, + prefix_set::{PrefixSetMut, TriePrefixSetsMut}, trie_cursor::TrieCursorFactory, walker::TrieWalker, HashBuilder, Nibbles, @@ -30,18 +30,15 @@ pub struct Proof { hashed_cursor_factory: H, /// A set of prefix sets that have changes. prefix_sets: TriePrefixSetsMut, - /// Proof targets. - targets: HashMap>, } impl Proof { - /// Create a new [Proof] instance. + /// Create a new [`Proof`] instance. pub fn new(t: T, h: H) -> Self { Self { trie_cursor_factory: t, hashed_cursor_factory: h, prefix_sets: TriePrefixSetsMut::default(), - targets: HashMap::default(), } } @@ -51,7 +48,6 @@ impl Proof { trie_cursor_factory, hashed_cursor_factory: self.hashed_cursor_factory, prefix_sets: self.prefix_sets, - targets: self.targets, } } @@ -61,7 +57,6 @@ impl Proof { trie_cursor_factory: self.trie_cursor_factory, hashed_cursor_factory, prefix_sets: self.prefix_sets, - targets: self.targets, } } @@ -70,22 +65,11 @@ impl Proof { self.prefix_sets = prefix_sets; self } - - /// Set the target account and slots. 
- pub fn with_target(self, target: (B256, HashSet)) -> Self { - self.with_targets(HashMap::from_iter([target])) - } - - /// Set the target accounts and slots. - pub fn with_targets(mut self, targets: HashMap>) -> Self { - self.targets = targets; - self - } } impl Proof where - T: TrieCursorFactory, + T: TrieCursorFactory + Clone, H: HashedCursorFactory + Clone, { /// Generate an account proof from intermediate nodes. @@ -95,23 +79,28 @@ where slots: &[B256], ) -> Result { Ok(self - .with_target((keccak256(address), slots.iter().map(keccak256).collect())) - .multiproof()? + .multiproof(HashMap::from_iter([( + keccak256(address), + slots.iter().map(keccak256).collect(), + )]))? .account_proof(address, slots)?) } /// Generate a state multiproof according to specified targets. - pub fn multiproof(&self) -> Result { + pub fn multiproof( + mut self, + mut targets: HashMap>, + ) -> Result { let hashed_account_cursor = self.hashed_cursor_factory.hashed_account_cursor()?; let trie_cursor = self.trie_cursor_factory.account_trie_cursor()?; // Create the walker. let mut prefix_set = self.prefix_sets.account_prefix_set.clone(); - prefix_set.extend(self.targets.keys().map(Nibbles::unpack)); + prefix_set.extend(targets.keys().map(Nibbles::unpack)); let walker = TrieWalker::new(trie_cursor, prefix_set.freeze()); // Create a hash builder to rebuild the root node since it is not available in the database. - let retainer = ProofRetainer::from_iter(self.targets.keys().map(Nibbles::unpack)); + let retainer = ProofRetainer::from_iter(targets.keys().map(Nibbles::unpack)); let mut hash_builder = HashBuilder::default().with_proof_retainer(retainer); let mut storages = HashMap::default(); @@ -123,7 +112,19 @@ where hash_builder.add_branch(node.key, node.value, node.children_are_in_trie); } TrieElement::Leaf(hashed_address, account) => { - let storage_multiproof = self.storage_multiproof(hashed_address)?; + let storage_prefix_set = self + .prefix_sets + .storage_prefix_sets + .remove(&hashed_address) + .unwrap_or_default(); + let proof_targets = targets.remove(&hashed_address).unwrap_or_default(); + let storage_multiproof = StorageProof::new_hashed( + self.trie_cursor_factory.clone(), + self.hashed_cursor_factory.clone(), + hashed_address, + ) + .with_prefix_set_mut(storage_prefix_set) + .storage_proof(proof_targets)?; // Encode account account_rlp.clear(); @@ -136,32 +137,69 @@ where } } let _ = hash_builder.root(); - Ok(MultiProof { account_subtree: hash_builder.take_proofs(), storages }) + Ok(MultiProof { account_subtree: hash_builder.take_proof_nodes(), storages }) + } +} + +/// Generates storage merkle proofs. +#[derive(Debug)] +pub struct StorageProof { + /// The factory for traversing trie nodes. + trie_cursor_factory: T, + /// The factory for hashed cursors. + hashed_cursor_factory: H, + /// The hashed address of an account. + hashed_address: B256, + /// The set of storage slot prefixes that have changed. + prefix_set: PrefixSetMut, +} + +impl StorageProof { + /// Create a new [`StorageProof`] instance. + pub fn new(t: T, h: H, address: Address) -> Self { + Self::new_hashed(t, h, keccak256(address)) + } + + /// Create a new [`StorageProof`] instance with hashed address. + pub fn new_hashed(t: T, h: H, hashed_address: B256) -> Self { + Self { + trie_cursor_factory: t, + hashed_cursor_factory: h, + hashed_address, + prefix_set: PrefixSetMut::default(), + } } - /// Generate a storage multiproof according to specified targets. 
- pub fn storage_multiproof( - &self, - hashed_address: B256, + /// Set the changed prefixes. + pub fn with_prefix_set_mut(mut self, prefix_set: PrefixSetMut) -> Self { + self.prefix_set = prefix_set; + self + } +} + +impl StorageProof +where + T: TrieCursorFactory, + H: HashedCursorFactory, +{ + /// Generate storage proof. + pub fn storage_proof( + mut self, + targets: HashSet, ) -> Result { let mut hashed_storage_cursor = - self.hashed_cursor_factory.hashed_storage_cursor(hashed_address)?; + self.hashed_cursor_factory.hashed_storage_cursor(self.hashed_address)?; // short circuit on empty storage if hashed_storage_cursor.is_storage_empty()? { return Ok(StorageMultiProof::default()) } - let target_nibbles = self - .targets - .get(&hashed_address) - .map_or(Vec::new(), |slots| slots.iter().map(Nibbles::unpack).collect()); + let target_nibbles = targets.into_iter().map(Nibbles::unpack).collect::>(); + self.prefix_set.extend(target_nibbles.clone()); - let mut prefix_set = - self.prefix_sets.storage_prefix_sets.get(&hashed_address).cloned().unwrap_or_default(); - prefix_set.extend(target_nibbles.clone()); - let trie_cursor = self.trie_cursor_factory.storage_trie_cursor(hashed_address)?; - let walker = TrieWalker::new(trie_cursor, prefix_set.freeze()); + let trie_cursor = self.trie_cursor_factory.storage_trie_cursor(self.hashed_address)?; + let walker = TrieWalker::new(trie_cursor, self.prefix_set.freeze()); let retainer = ProofRetainer::from_iter(target_nibbles); let mut hash_builder = HashBuilder::default().with_proof_retainer(retainer); @@ -181,6 +219,6 @@ where } let root = hash_builder.root(); - Ok(StorageMultiProof { root, subtree: hash_builder.take_proofs() }) + Ok(StorageMultiProof { root, subtree: hash_builder.take_proof_nodes() }) } } diff --git a/crates/trie/trie/src/state.rs b/crates/trie/trie/src/state.rs index d634f05f0f39b..3b0af5cd879b6 100644 --- a/crates/trie/trie/src/state.rs +++ b/crates/trie/trie/src/state.rs @@ -100,6 +100,36 @@ impl HashedPostState { self } + /// Returns `true` if the hashed state is empty. + pub fn is_empty(&self) -> bool { + self.accounts.is_empty() && self.storages.is_empty() + } + + /// Construct [`TriePrefixSetsMut`] from hashed post state. + /// The prefix sets contain the hashed account and storage keys that have been changed in the + /// post state. + pub fn construct_prefix_sets(&self) -> TriePrefixSetsMut { + // Populate account prefix set. + let mut account_prefix_set = PrefixSetMut::with_capacity(self.accounts.len()); + let mut destroyed_accounts = HashSet::default(); + for (hashed_address, account) in &self.accounts { + account_prefix_set.insert(Nibbles::unpack(hashed_address)); + + if account.is_none() { + destroyed_accounts.insert(*hashed_address); + } + } + + // Populate storage prefix sets. + let mut storage_prefix_sets = HashMap::with_capacity(self.storages.len()); + for (hashed_address, hashed_storage) in &self.storages { + account_prefix_set.insert(Nibbles::unpack(hashed_address)); + storage_prefix_sets.insert(*hashed_address, hashed_storage.construct_prefix_set()); + } + + TriePrefixSetsMut { account_prefix_set, storage_prefix_sets, destroyed_accounts } + } + /// Extend this hashed post state with contents of another. /// Entries in the second hashed post state take precedence. pub fn extend(&mut self, other: Self) { @@ -166,31 +196,6 @@ impl HashedPostState { HashedPostStateSorted { accounts, storages } } - - /// Construct [`TriePrefixSetsMut`] from hashed post state. 
- /// The prefix sets contain the hashed account and storage keys that have been changed in the - /// post state. - pub fn construct_prefix_sets(&self) -> TriePrefixSetsMut { - // Populate account prefix set. - let mut account_prefix_set = PrefixSetMut::with_capacity(self.accounts.len()); - let mut destroyed_accounts = HashSet::default(); - for (hashed_address, account) in &self.accounts { - account_prefix_set.insert(Nibbles::unpack(hashed_address)); - - if account.is_none() { - destroyed_accounts.insert(*hashed_address); - } - } - - // Populate storage prefix sets. - let mut storage_prefix_sets = HashMap::with_capacity(self.storages.len()); - for (hashed_address, hashed_storage) in &self.storages { - account_prefix_set.insert(Nibbles::unpack(hashed_address)); - storage_prefix_sets.insert(*hashed_address, hashed_storage.construct_prefix_set()); - } - - TriePrefixSetsMut { account_prefix_set, storage_prefix_sets, destroyed_accounts } - } } /// Representation of in-memory hashed storage. diff --git a/crates/trie/trie/src/witness.rs b/crates/trie/trie/src/witness.rs index 1f521ca7db5f4..972afc10c3424 100644 --- a/crates/trie/trie/src/witness.rs +++ b/crates/trie/trie/src/witness.rs @@ -1,8 +1,11 @@ use std::collections::BTreeMap; use crate::{ - hashed_cursor::HashedCursorFactory, prefix_set::TriePrefixSetsMut, proof::Proof, - trie_cursor::TrieCursorFactory, HashedPostState, + hashed_cursor::HashedCursorFactory, + prefix_set::TriePrefixSetsMut, + proof::{Proof, StorageProof}, + trie_cursor::TrieCursorFactory, + HashedPostState, }; use alloy_primitives::{ keccak256, @@ -10,8 +13,8 @@ use alloy_primitives::{ Bytes, B256, }; use alloy_rlp::{BufMut, Decodable, Encodable}; -use itertools::Either; -use reth_execution_errors::{StateProofError, TrieWitnessError}; +use itertools::{Either, Itertools}; +use reth_execution_errors::TrieWitnessError; use reth_primitives::constants::EMPTY_ROOT_HASH; use reth_trie_common::{ BranchNode, HashBuilder, Nibbles, TrieAccount, TrieNode, CHILD_INDEX_RANGE, @@ -83,6 +86,10 @@ where mut self, state: HashedPostState, ) -> Result, TrieWitnessError> { + if state.is_empty() { + return Ok(self.witness) + } + let proof_targets = HashMap::from_iter( state .accounts @@ -92,11 +99,11 @@ where (*hashed_address, storage.storage.keys().copied().collect()) })), ); + let mut account_multiproof = Proof::new(self.trie_cursor_factory.clone(), self.hashed_cursor_factory.clone()) .with_prefix_sets_mut(self.prefix_sets.clone()) - .with_targets(proof_targets.clone()) - .multiproof()?; + .multiproof(proof_targets.clone())?; // Attempt to compute state root from proofs and gather additional // information for the witness. @@ -120,24 +127,36 @@ where None }; let key = Nibbles::unpack(hashed_address); - let proof = account_multiproof.account_subtree.iter().filter(|e| key.starts_with(e.0)); - account_trie_nodes.extend(self.target_nodes(key.clone(), value, proof)?); + account_trie_nodes.extend( + self.target_nodes( + key.clone(), + value, + account_multiproof + .account_subtree + .matching_nodes_iter(&key) + .sorted_by(|a, b| a.0.cmp(b.0)), + )?, + ); // Gather and record storage trie nodes for this account. 
diff --git a/crates/trie/trie/src/witness.rs b/crates/trie/trie/src/witness.rs
index 1f521ca7db5f4..972afc10c3424 100644
--- a/crates/trie/trie/src/witness.rs
+++ b/crates/trie/trie/src/witness.rs
@@ -1,8 +1,11 @@
 use std::collections::BTreeMap;
 
 use crate::{
-    hashed_cursor::HashedCursorFactory, prefix_set::TriePrefixSetsMut, proof::Proof,
-    trie_cursor::TrieCursorFactory, HashedPostState,
+    hashed_cursor::HashedCursorFactory,
+    prefix_set::TriePrefixSetsMut,
+    proof::{Proof, StorageProof},
+    trie_cursor::TrieCursorFactory,
+    HashedPostState,
 };
 use alloy_primitives::{
     keccak256,
@@ -10,8 +13,8 @@ use alloy_primitives::{
     Bytes, B256,
 };
 use alloy_rlp::{BufMut, Decodable, Encodable};
-use itertools::Either;
-use reth_execution_errors::{StateProofError, TrieWitnessError};
+use itertools::{Either, Itertools};
+use reth_execution_errors::TrieWitnessError;
 use reth_primitives::constants::EMPTY_ROOT_HASH;
 use reth_trie_common::{
     BranchNode, HashBuilder, Nibbles, TrieAccount, TrieNode, CHILD_INDEX_RANGE,
@@ -83,6 +86,10 @@ where
         mut self,
         state: HashedPostState,
     ) -> Result<HashMap<B256, Bytes>, TrieWitnessError> {
+        if state.is_empty() {
+            return Ok(self.witness)
+        }
+
         let proof_targets = HashMap::from_iter(
             state
                 .accounts
                 .keys()
                 .map(|hashed_address| (*hashed_address, HashSet::default()))
                 .chain(state.storages.iter().map(|(hashed_address, storage)| {
                     (*hashed_address, storage.storage.keys().copied().collect())
                 })),
         );
+
         let mut account_multiproof =
             Proof::new(self.trie_cursor_factory.clone(), self.hashed_cursor_factory.clone())
                 .with_prefix_sets_mut(self.prefix_sets.clone())
-                .with_targets(proof_targets.clone())
-                .multiproof()?;
+                .multiproof(proof_targets.clone())?;
 
         // Attempt to compute state root from proofs and gather additional
         // information for the witness.
@@ -120,24 +127,36 @@ where
                 None
             };
             let key = Nibbles::unpack(hashed_address);
-            let proof = account_multiproof.account_subtree.iter().filter(|e| key.starts_with(e.0));
-            account_trie_nodes.extend(self.target_nodes(key.clone(), value, proof)?);
+            account_trie_nodes.extend(
+                self.target_nodes(
+                    key.clone(),
+                    value,
+                    account_multiproof
+                        .account_subtree
+                        .matching_nodes_iter(&key)
+                        .sorted_by(|a, b| a.0.cmp(b.0)),
+                )?,
+            );
 
             // Gather and record storage trie nodes for this account.
             let mut storage_trie_nodes = BTreeMap::default();
             let storage = state.storages.get(&hashed_address);
             for hashed_slot in hashed_slots {
-                let slot_key = Nibbles::unpack(hashed_slot);
+                let slot_nibbles = Nibbles::unpack(hashed_slot);
                 let slot_value = storage
                     .and_then(|s| s.storage.get(&hashed_slot))
                     .filter(|v| !v.is_zero())
                     .map(|v| alloy_rlp::encode_fixed_size(v).to_vec());
-                let proof = storage_multiproof.subtree.iter().filter(|e| slot_key.starts_with(e.0));
-                storage_trie_nodes.extend(self.target_nodes(
-                    slot_key.clone(),
-                    slot_value,
-                    proof,
-                )?);
+                storage_trie_nodes.extend(
+                    self.target_nodes(
+                        slot_nibbles.clone(),
+                        slot_value,
+                        storage_multiproof
+                            .subtree
+                            .matching_nodes_iter(&slot_nibbles)
+                            .sorted_by(|a, b| a.0.cmp(b.0)),
+                    )?,
+                );
             }
 
             Self::next_root_from_proofs(storage_trie_nodes, |key: Nibbles| {
@@ -145,19 +164,25 @@ where
                 let mut padded_key = key.pack();
                 padded_key.resize(32, 0);
                 let target_key = B256::from_slice(&padded_key);
-                let mut proof = Proof::new(
+                let storage_prefix_set = self
+                    .prefix_sets
+                    .storage_prefix_sets
+                    .get(&hashed_address)
+                    .cloned()
+                    .unwrap_or_default();
+                let proof = StorageProof::new_hashed(
                     self.trie_cursor_factory.clone(),
                     self.hashed_cursor_factory.clone(),
+                    hashed_address,
                 )
-                .with_prefix_sets_mut(self.prefix_sets.clone())
-                .with_target((hashed_address, HashSet::from_iter([target_key])))
-                .storage_multiproof(hashed_address)?;
+                .with_prefix_set_mut(storage_prefix_set)
+                .storage_proof(HashSet::from_iter([target_key]))?;
 
                 // The subtree only contains the proof for a single target.
                 let node =
-                    proof.subtree.remove(&key).ok_or(TrieWitnessError::MissingTargetNode(key))?;
+                    proof.subtree.get(&key).ok_or(TrieWitnessError::MissingTargetNode(key))?;
                 self.witness.insert(keccak256(node.as_ref()), node.clone()); // record in witness
-                Ok(node)
+                Ok(node.clone())
             })?;
         }
 
@@ -165,19 +190,17 @@ where
             // Right pad the target with 0s.
             let mut padded_key = key.pack();
            padded_key.resize(32, 0);
-            let mut proof =
+            let targets = HashMap::from_iter([(B256::from_slice(&padded_key), HashSet::default())]);
+            let proof =
                 Proof::new(self.trie_cursor_factory.clone(), self.hashed_cursor_factory.clone())
                     .with_prefix_sets_mut(self.prefix_sets.clone())
-                    .with_target((B256::from_slice(&padded_key), HashSet::default()))
-                    .multiproof()?;
+                    .multiproof(targets)?;
 
             // The subtree only contains the proof for a single target.
-            let node = proof
-                .account_subtree
-                .remove(&key)
-                .ok_or(TrieWitnessError::MissingTargetNode(key))?;
+            let node =
+                proof.account_subtree.get(&key).ok_or(TrieWitnessError::MissingTargetNode(key))?;
             self.witness.insert(keccak256(node.as_ref()), node.clone()); // record in witness
-            Ok(node)
+            Ok(node.clone())
         })?;
 
         Ok(self.witness)
@@ -190,7 +213,7 @@ where
         key: Nibbles,
         value: Option<Vec<u8>>,
        proof: impl IntoIterator<Item = (&Nibbles, &Bytes)>,
-    ) -> Result<BTreeMap<Nibbles, Either<B256, Vec<u8>>>, StateProofError> {
+    ) -> Result<BTreeMap<Nibbles, Either<B256, Vec<u8>>>, TrieWitnessError> {
         let mut trie_nodes = BTreeMap::default();
         for (path, encoded) in proof {
             // Record the node in witness.
@@ -216,6 +239,7 @@ where
                     trie_nodes.insert(next_path.clone(), Either::Right(leaf.value.clone()));
                 }
             }
+            TrieNode::EmptyRoot => return Err(TrieWitnessError::UnexpectedEmptyRoot(next_path)),
         };
     }
 
@@ -273,6 +297,9 @@ where
                 TrieNode::Extension(ext) => {
                     path.extend_from_slice(&ext.key);
                 }
+                TrieNode::EmptyRoot => {
+                    return Err(TrieWitnessError::UnexpectedEmptyRoot(path))
+                }
             }
         }
     }
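The two new match arms make node handling exhaustive over `TrieNode::EmptyRoot`, turning a previously unrepresentable case into a typed error. A standalone sketch of that decode-and-classify pattern, assuming `TrieNode`'s `alloy_rlp::Decodable` impl and the variant set re-exported by `reth_trie_common`; the local error type stands in for `TrieWitnessError::UnexpectedEmptyRoot`:

use alloy_rlp::Decodable;
use reth_trie_common::{Nibbles, TrieNode};

/// Illustrative stand-in for `TrieWitnessError::UnexpectedEmptyRoot`.
#[derive(Debug)]
struct UnexpectedEmptyRoot(Nibbles);

fn classify(mut encoded: &[u8], path: &Nibbles) -> Result<&'static str, UnexpectedEmptyRoot> {
    match TrieNode::decode(&mut encoded).expect("valid RLP") {
        TrieNode::Branch(_) => Ok("branch"),
        TrieNode::Extension(_) => Ok("extension"),
        TrieNode::Leaf(_) => Ok("leaf"),
        // An empty root below the trie root means the proof is inconsistent,
        // mirroring the `UnexpectedEmptyRoot` arms added above.
        TrieNode::EmptyRoot => Err(UnexpectedEmptyRoot(path.clone())),
    }
}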
diff --git a/examples/custom-inspector/Cargo.toml b/examples/custom-inspector/Cargo.toml
index a94980951627e..18629556c42fe 100644
--- a/examples/custom-inspector/Cargo.toml
+++ b/examples/custom-inspector/Cargo.toml
@@ -8,7 +8,6 @@ license.workspace = true
 [dependencies]
 reth.workspace = true
 reth-node-ethereum.workspace = true
-reth-rpc-types.workspace = true
 alloy-rpc-types.workspace = true
 clap = { workspace = true, features = ["derive"] }
 futures-util.workspace = true
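With `reth-rpc-types` dropped, the example consumes RPC types from `alloy-rpc-types` directly. An illustrative import swap, assuming the example touches transaction-request types; the exact items it uses are not shown in this diff:

// Before (assumed): use reth_rpc_types::TransactionRequest;
use alloy_rpc_types::TransactionRequest;

fn build_call() -> TransactionRequest {
    TransactionRequest::default()
}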