From b2590bcb37784f7f3540aa10a9cca123e9c66777 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Thu, 12 Dec 2024 09:51:46 +1100 Subject: [PATCH 01/15] Tweak reconstruction batch size (#6668) * Tweak reconstruction batch size * Merge branch 'release-v6.0.1' into reconstruction-batch-size --- beacon_node/beacon_chain/src/migrate.rs | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/beacon_node/beacon_chain/src/migrate.rs b/beacon_node/beacon_chain/src/migrate.rs index 37a2e8917ba..bc4b8e1ed86 100644 --- a/beacon_node/beacon_chain/src/migrate.rs +++ b/beacon_node/beacon_chain/src/migrate.rs @@ -26,8 +26,10 @@ const MIN_COMPACTION_PERIOD_SECONDS: u64 = 7200; const COMPACTION_FINALITY_DISTANCE: u64 = 1024; /// Maximum number of blocks applied in each reconstruction burst. /// -/// This limits the amount of time that the finalization migration is paused for. -const BLOCKS_PER_RECONSTRUCTION: usize = 8192 * 4; +/// This limits the amount of time that the finalization migration is paused for. We set this +/// conservatively because pausing the finalization migration for too long can cause hot state +/// cache misses and excessive disk use. +const BLOCKS_PER_RECONSTRUCTION: usize = 1024; /// Default number of epochs to wait between finalization migrations. pub const DEFAULT_EPOCHS_PER_MIGRATION: u64 = 1; From b7ffcc8229e028bf43ddca5c5924b9ec10bd6931 Mon Sep 17 00:00:00 2001 From: antondlr Date: Thu, 12 Dec 2024 01:24:58 +0100 Subject: [PATCH 02/15] Fix: Docker CI to use org tokens (#6655) * update Dockerhub creds to new scheme * Merge branch 'release-v6.0.1' into fix-docker-ci --- .github/workflows/docker.yml | 4 ++-- .github/workflows/release.yml | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml index cd45bd6d98f..e7682089731 100644 --- a/.github/workflows/docker.yml +++ b/.github/workflows/docker.yml @@ -13,8 +13,8 @@ concurrency: cancel-in-progress: true env: - DOCKER_PASSWORD: ${{ secrets.DOCKER_PASSWORD }} - DOCKER_USERNAME: ${{ secrets.DOCKER_USERNAME }} + DOCKER_PASSWORD: ${{ secrets.DH_KEY }} + DOCKER_USERNAME: ${{ secrets.DH_ORG }} # Enable self-hosted runners for the sigp repo only. SELF_HOSTED_RUNNERS: ${{ github.repository == 'sigp/lighthouse' }} diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index f1ec2e46551..cfba601fad6 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -10,8 +10,8 @@ concurrency: cancel-in-progress: true env: - DOCKER_PASSWORD: ${{ secrets.DOCKER_PASSWORD }} - DOCKER_USERNAME: ${{ secrets.DOCKER_USERNAME }} + DOCKER_PASSWORD: ${{ secrets.DH_KEY }} + DOCKER_USERNAME: ${{ secrets.DH_ORG }} REPO_NAME: ${{ github.repository_owner }}/lighthouse IMAGE_NAME: ${{ github.repository_owner }}/lighthouse # Enable self-hosted runners for the sigp repo only. 
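[Editor's aside on patch 01 above: `BLOCKS_PER_RECONSTRUCTION` caps how many historic blocks each reconstruction burst replays before control returns to the finalization migration. The sketch below is hypothetical and self-contained — none of these names are Lighthouse's real migrator API — and only illustrates why a smaller per-burst cap keeps the pause short.]

```rust
/// Hypothetical sketch of the "reconstruction burst" pattern bounded by
/// `BLOCKS_PER_RECONSTRUCTION` in patch 01. Names and types are illustrative.
const BLOCKS_PER_RECONSTRUCTION: usize = 1024;

fn reconstruction_burst(pending_blocks: &mut Vec<u64>) -> usize {
    // Replay at most one burst of historic blocks per migrator iteration.
    let take = BLOCKS_PER_RECONSTRUCTION.min(pending_blocks.len());
    for _block_slot in pending_blocks.drain(..take) {
        // State reconstruction for one block would happen here (elided).
    }
    // Returning lets the caller run the finalization migration before the next
    // burst, so the pause stays short and hot-state cache entries stay fresh.
    take
}

fn main() {
    let mut pending: Vec<u64> = (0..3_000).collect();
    while !pending.is_empty() {
        let done = reconstruction_burst(&mut pending);
        println!("replayed {done} blocks, {} remaining", pending.len());
        // The finalization migration would run here, between bursts.
    }
}
```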
From fc0e0ae613a479a21e931b200f88b6e4ff9e6681 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Thu, 12 Dec 2024 12:58:41 +1100 Subject: [PATCH 03/15] Prevent reconstruction starting prematurely (#6669) * Prevent reconstruction starting prematurely * Simplify condition * Merge remote-tracking branch 'origin/release-v6.0.1' into dont-start-reconstruction-early --- beacon_node/beacon_chain/src/builder.rs | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/beacon_node/beacon_chain/src/builder.rs b/beacon_node/beacon_chain/src/builder.rs index 589db0af501..9d99ff9d8e0 100644 --- a/beacon_node/beacon_chain/src/builder.rs +++ b/beacon_node/beacon_chain/src/builder.rs @@ -1037,7 +1037,9 @@ where ); // Check for states to reconstruct (in the background). - if beacon_chain.config.reconstruct_historic_states { + if beacon_chain.config.reconstruct_historic_states + && beacon_chain.store.get_oldest_block_slot() == 0 + { beacon_chain.store_migrator.process_reconstruction(); } From 494634399027b94f31759ba5bb4d3a5d2aaff503 Mon Sep 17 00:00:00 2001 From: Povilas Liubauskas Date: Thu, 12 Dec 2024 10:36:34 +0200 Subject: [PATCH 04/15] Fix subscribing to attestation subnets for aggregating (#6681) (#6682) * Fix subscribing to attestation subnets for aggregating (#6681) * Prevent scheduled subnet subscriptions from being overwritten by other subscriptions from same subnet with additional scoping by slot --- beacon_node/network/src/subnet_service/mod.rs | 9 ++- .../network/src/subnet_service/tests/mod.rs | 55 +++++++++++++++++-- 2 files changed, 57 insertions(+), 7 deletions(-) diff --git a/beacon_node/network/src/subnet_service/mod.rs b/beacon_node/network/src/subnet_service/mod.rs index ec6f3b10a38..da1f220f042 100644 --- a/beacon_node/network/src/subnet_service/mod.rs +++ b/beacon_node/network/src/subnet_service/mod.rs @@ -86,7 +86,7 @@ pub struct SubnetService { subscriptions: HashSetDelay, /// Subscriptions that need to be executed in the future. - scheduled_subscriptions: HashSetDelay, + scheduled_subscriptions: HashSetDelay, /// A list of permanent subnets that this node is subscribed to. // TODO: Shift this to a dynamic bitfield @@ -484,8 +484,10 @@ impl SubnetService { self.subscribe_to_subnet_immediately(subnet, slot + 1)?; } else { // This is a future slot, schedule subscribing. + // We need to include the slot to make the key unique to prevent overwriting the entry + // for the same subnet. self.scheduled_subscriptions - .insert_at(subnet, time_to_subscription_start); + .insert_at(ExactSubnet { subnet, slot }, time_to_subscription_start); } Ok(()) @@ -626,7 +628,8 @@ impl Stream for SubnetService { // Process scheduled subscriptions that might be ready, since those can extend a soon to // expire subscription. match self.scheduled_subscriptions.poll_next_unpin(cx) { - Poll::Ready(Some(Ok(subnet))) => { + Poll::Ready(Some(Ok(exact_subnet))) => { + let ExactSubnet { subnet, .. 
} = exact_subnet; let current_slot = self.beacon_chain.slot_clock.now().unwrap_or_default(); if let Err(e) = self.subscribe_to_subnet_immediately(subnet, current_slot + 1) { debug!(self.log, "Failed to subscribe to short lived subnet"; "subnet" => ?subnet, "err" => e); diff --git a/beacon_node/network/src/subnet_service/tests/mod.rs b/beacon_node/network/src/subnet_service/tests/mod.rs index 91e4841b264..7283b4af314 100644 --- a/beacon_node/network/src/subnet_service/tests/mod.rs +++ b/beacon_node/network/src/subnet_service/tests/mod.rs @@ -500,12 +500,15 @@ mod test { // subscription config let committee_count = 1; - // Makes 2 validator subscriptions to the same subnet but at different slots. - // There should be just 1 unsubscription event for the later slot subscription (subscription_slot2). + // Makes 3 validator subscriptions to the same subnet but at different slots. + // There should be just 1 unsubscription event for each of the later slots subscriptions + // (subscription_slot2 and subscription_slot3). let subscription_slot1 = 0; let subscription_slot2 = MIN_PEER_DISCOVERY_SLOT_LOOK_AHEAD + 4; + let subscription_slot3 = subscription_slot2 * 2; let com1 = MIN_PEER_DISCOVERY_SLOT_LOOK_AHEAD + 4; let com2 = 0; + let com3 = CHAIN.chain.spec.attestation_subnet_count - com1; // create the attestation service and subscriptions let mut subnet_service = get_subnet_service(); @@ -532,6 +535,13 @@ mod test { true, ); + let sub3 = get_subscription( + com3, + current_slot + Slot::new(subscription_slot3), + committee_count, + true, + ); + let subnet_id1 = SubnetId::compute_subnet::( current_slot + Slot::new(subscription_slot1), com1, @@ -548,12 +558,23 @@ mod test { ) .unwrap(); + let subnet_id3 = SubnetId::compute_subnet::( + current_slot + Slot::new(subscription_slot3), + com3, + committee_count, + &subnet_service.beacon_chain.spec, + ) + .unwrap(); + // Assert that subscriptions are different but their subnet is the same assert_ne!(sub1, sub2); + assert_ne!(sub1, sub3); + assert_ne!(sub2, sub3); assert_eq!(subnet_id1, subnet_id2); + assert_eq!(subnet_id1, subnet_id3); // submit the subscriptions - subnet_service.validator_subscriptions(vec![sub1, sub2].into_iter()); + subnet_service.validator_subscriptions(vec![sub1, sub2, sub3].into_iter()); // Unsubscription event should happen at the end of the slot. // We wait for 2 slots, to avoid timeout issues @@ -590,10 +611,36 @@ mod test { // If the permanent and short lived subnets are different, we should get an unsubscription event. if !subnet_service.is_subscribed(&Subnet::Attestation(subnet_id1)) { assert_eq!( - [expected_subscription, expected_unsubscription], + [ + expected_subscription.clone(), + expected_unsubscription.clone(), + ], second_subscribe_event[..] ); } + + let subscription_slot = current_slot + subscription_slot3 - 1; + + let wait_slots = subnet_service + .beacon_chain + .slot_clock + .duration_to_slot(subscription_slot) + .unwrap() + .as_millis() as u64 + / SLOT_DURATION_MILLIS; + + let no_events = dbg!(get_events(&mut subnet_service, None, wait_slots as u32).await); + + assert_eq!(no_events, []); + + let third_subscribe_event = get_events(&mut subnet_service, None, 2).await; + + if !subnet_service.is_subscribed(&Subnet::Attestation(subnet_id1)) { + assert_eq!( + [expected_subscription, expected_unsubscription], + third_subscribe_event[..] 
+ ); + } } #[tokio::test] From 775fa6730b2ddd60b87344761cccf7a05b2a72d4 Mon Sep 17 00:00:00 2001 From: Lion - dapplion <35266934+dapplion@users.noreply.github.com> Date: Fri, 13 Dec 2024 13:02:10 +0800 Subject: [PATCH 05/15] Stuck lookup v6 (#6658) * Fix stuck lookups if no peers on v6 * Merge branch 'release-v6.0.1' into stuck-lookup-v6 --- .../network/src/sync/block_lookups/single_block_lookup.rs | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/beacon_node/network/src/sync/block_lookups/single_block_lookup.rs b/beacon_node/network/src/sync/block_lookups/single_block_lookup.rs index d701cbbb8d3..9bbd2bf295b 100644 --- a/beacon_node/network/src/sync/block_lookups/single_block_lookup.rs +++ b/beacon_node/network/src/sync/block_lookups/single_block_lookup.rs @@ -171,7 +171,10 @@ impl SingleBlockLookup { self.awaiting_parent.is_some() || self.block_request_state.state.is_awaiting_event() || match &self.component_requests { - ComponentRequests::WaitingForBlock => true, + // If components are waiting for the block request to complete, here we should + // check if the`block_request_state.state.is_awaiting_event(). However we already + // checked that above, so `WaitingForBlock => false` is equivalent. + ComponentRequests::WaitingForBlock => false, ComponentRequests::ActiveBlobRequest(request, _) => { request.state.is_awaiting_event() } From f3b78889e50752f40e6d371621764b49bca4090f Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Sat, 14 Dec 2024 19:43:00 +1100 Subject: [PATCH 06/15] Compact more when pruning states (#6667) * Compact more when pruning states * Merge branch 'release-v6.0.1' into compact-more --- .../src/schema_change/migration_schema_v22.rs | 2 +- beacon_node/store/src/hot_cold_store.rs | 42 ++++++++++++++++++- 2 files changed, 42 insertions(+), 2 deletions(-) diff --git a/beacon_node/beacon_chain/src/schema_change/migration_schema_v22.rs b/beacon_node/beacon_chain/src/schema_change/migration_schema_v22.rs index f532c0e6728..c34512ededb 100644 --- a/beacon_node/beacon_chain/src/schema_change/migration_schema_v22.rs +++ b/beacon_node/beacon_chain/src/schema_change/migration_schema_v22.rs @@ -152,7 +152,7 @@ pub fn delete_old_schema_freezer_data( db.cold_db.do_atomically(cold_ops)?; // In order to reclaim space, we need to compact the freezer DB as well. - db.cold_db.compact()?; + db.compact_freezer()?; Ok(()) } diff --git a/beacon_node/store/src/hot_cold_store.rs b/beacon_node/store/src/hot_cold_store.rs index 4942b148810..da3e6d4ebcb 100644 --- a/beacon_node/store/src/hot_cold_store.rs +++ b/beacon_node/store/src/hot_cold_store.rs @@ -2484,6 +2484,45 @@ impl, Cold: ItemStore> HotColdDB Ok(()) } + /// Run a compaction pass on the freezer DB to free up space used by deleted states. + pub fn compact_freezer(&self) -> Result<(), Error> { + let current_schema_columns = vec![ + DBColumn::BeaconColdStateSummary, + DBColumn::BeaconStateSnapshot, + DBColumn::BeaconStateDiff, + DBColumn::BeaconStateRoots, + ]; + + // We can remove this once schema V21 has been gone for a while. 
+ let previous_schema_columns = vec![ + DBColumn::BeaconState, + DBColumn::BeaconStateSummary, + DBColumn::BeaconBlockRootsChunked, + DBColumn::BeaconStateRootsChunked, + DBColumn::BeaconRestorePoint, + DBColumn::BeaconHistoricalRoots, + DBColumn::BeaconRandaoMixes, + DBColumn::BeaconHistoricalSummaries, + ]; + let mut columns = current_schema_columns; + columns.extend(previous_schema_columns); + + for column in columns { + info!( + self.log, + "Starting compaction"; + "column" => ?column + ); + self.cold_db.compact_column(column)?; + info!( + self.log, + "Finishing compaction"; + "column" => ?column + ); + } + Ok(()) + } + /// Return `true` if compaction on finalization/pruning is enabled. pub fn compact_on_prune(&self) -> bool { self.config.compact_on_prune @@ -2875,6 +2914,7 @@ impl, Cold: ItemStore> HotColdDB // // We can remove this once schema V21 has been gone for a while. let previous_schema_columns = vec![ + DBColumn::BeaconState, DBColumn::BeaconStateSummary, DBColumn::BeaconBlockRootsChunked, DBColumn::BeaconStateRootsChunked, @@ -2916,7 +2956,7 @@ impl, Cold: ItemStore> HotColdDB self.cold_db.do_atomically(cold_ops)?; // In order to reclaim space, we need to compact the freezer DB as well. - self.cold_db.compact()?; + self.compact_freezer()?; Ok(()) } From c3a0757ad2c0d70bb0686463e6d5c4a2041114a3 Mon Sep 17 00:00:00 2001 From: Age Manning Date: Mon, 16 Dec 2024 10:16:53 +1100 Subject: [PATCH 07/15] Correct `/nat` API for libp2p (#6677) * Fix nat API --- .../lighthouse_network/src/peer_manager/network_behaviour.rs | 4 ---- common/system_health/src/lib.rs | 4 ++-- 2 files changed, 2 insertions(+), 6 deletions(-) diff --git a/beacon_node/lighthouse_network/src/peer_manager/network_behaviour.rs b/beacon_node/lighthouse_network/src/peer_manager/network_behaviour.rs index 11676f9a01f..9fd059df857 100644 --- a/beacon_node/lighthouse_network/src/peer_manager/network_behaviour.rs +++ b/beacon_node/lighthouse_network/src/peer_manager/network_behaviour.rs @@ -141,10 +141,6 @@ impl NetworkBehaviour for PeerManager { debug!(self.log, "Failed to dial peer"; "peer_id"=> ?peer_id, "error" => %ClearDialError(error)); self.on_dial_failure(peer_id); } - FromSwarm::ExternalAddrConfirmed(_) => { - // We have an external address confirmed, means we are able to do NAT traversal. 
- metrics::set_gauge_vec(&metrics::NAT_OPEN, &["libp2p"], 1); - } _ => { // NOTE: FromSwarm is a non exhaustive enum so updates should be based on release // notes more than compiler feedback diff --git a/common/system_health/src/lib.rs b/common/system_health/src/lib.rs index 34311898420..9f351e943bb 100644 --- a/common/system_health/src/lib.rs +++ b/common/system_health/src/lib.rs @@ -235,14 +235,14 @@ pub fn observe_nat() -> NatState { let libp2p_ipv4 = lighthouse_network::metrics::get_int_gauge( &lighthouse_network::metrics::NAT_OPEN, - &["libp2p"], + &["libp2p_ipv4"], ) .map(|g| g.get() == 1) .unwrap_or_default(); let libp2p_ipv6 = lighthouse_network::metrics::get_int_gauge( &lighthouse_network::metrics::NAT_OPEN, - &["libp2p"], + &["libp2p_ipv6"], ) .map(|g| g.get() == 1) .unwrap_or_default(); From 0d90135047519f4c2ee586d50e560f7bb2ff9b10 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Mon, 16 Dec 2024 14:03:22 +1100 Subject: [PATCH 08/15] Release v6.0.1 (#6659) * Release v6.0.1 --- Cargo.lock | 8 ++++---- beacon_node/Cargo.toml | 2 +- boot_node/Cargo.toml | 2 +- common/lighthouse_version/src/lib.rs | 4 ++-- lcli/Cargo.toml | 2 +- lighthouse/Cargo.toml | 2 +- 6 files changed, 10 insertions(+), 10 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 1ddeecf7116..c9744f500d6 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -833,7 +833,7 @@ dependencies = [ [[package]] name = "beacon_node" -version = "6.0.0" +version = "6.0.1" dependencies = [ "account_utils", "beacon_chain", @@ -1078,7 +1078,7 @@ dependencies = [ [[package]] name = "boot_node" -version = "6.0.0" +version = "6.0.1" dependencies = [ "beacon_node", "bytes", @@ -4674,7 +4674,7 @@ checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" [[package]] name = "lcli" -version = "6.0.0" +version = "6.0.1" dependencies = [ "account_utils", "beacon_chain", @@ -5244,7 +5244,7 @@ dependencies = [ [[package]] name = "lighthouse" -version = "6.0.0" +version = "6.0.1" dependencies = [ "account_manager", "account_utils", diff --git a/beacon_node/Cargo.toml b/beacon_node/Cargo.toml index fd4f0f6d4a8..15cdf15dc5d 100644 --- a/beacon_node/Cargo.toml +++ b/beacon_node/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "beacon_node" -version = "6.0.0" +version = "6.0.1" authors = [ "Paul Hauner ", "Age Manning "] edition = { workspace = true } diff --git a/common/lighthouse_version/src/lib.rs b/common/lighthouse_version/src/lib.rs index 07e51597e37..0751bdadff5 100644 --- a/common/lighthouse_version/src/lib.rs +++ b/common/lighthouse_version/src/lib.rs @@ -17,8 +17,8 @@ pub const VERSION: &str = git_version!( // NOTE: using --match instead of --exclude for compatibility with old Git "--match=thiswillnevermatchlol" ], - prefix = "Lighthouse/v6.0.0-", - fallback = "Lighthouse/v6.0.0" + prefix = "Lighthouse/v6.0.1-", + fallback = "Lighthouse/v6.0.1" ); /// Returns the first eight characters of the latest commit hash for this build. 
diff --git a/lcli/Cargo.toml b/lcli/Cargo.toml index 88daddd8aab..9612bded475 100644 --- a/lcli/Cargo.toml +++ b/lcli/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "lcli" description = "Lighthouse CLI (modeled after zcli)" -version = "6.0.0" +version = "6.0.1" authors = ["Paul Hauner "] edition = { workspace = true } diff --git a/lighthouse/Cargo.toml b/lighthouse/Cargo.toml index 329519fb54f..fa426daffaa 100644 --- a/lighthouse/Cargo.toml +++ b/lighthouse/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "lighthouse" -version = "6.0.0" +version = "6.0.1" authors = ["Sigma Prime "] edition = { workspace = true } autotests = false From c92c07ff498721d9eea60db8a5acfde399f47eea Mon Sep 17 00:00:00 2001 From: Lion - dapplion <35266934+dapplion@users.noreply.github.com> Date: Mon, 16 Dec 2024 12:33:33 +0800 Subject: [PATCH 09/15] Track beacon processor import result metrics (#6541) * Track beacon processor import result metrics * Update metric name --- .../beacon_chain/src/block_verification.rs | 3 +- beacon_node/network/src/metrics.rs | 62 +++++++++++++++- .../gossip_methods.rs | 70 +++++++++---------- .../network_beacon_processor/sync_methods.rs | 7 +- 4 files changed, 99 insertions(+), 43 deletions(-) diff --git a/beacon_node/beacon_chain/src/block_verification.rs b/beacon_node/beacon_chain/src/block_verification.rs index 4c5f53248f7..ddb7bb614a3 100644 --- a/beacon_node/beacon_chain/src/block_verification.rs +++ b/beacon_node/beacon_chain/src/block_verification.rs @@ -92,6 +92,7 @@ use std::fs; use std::io::Write; use std::sync::Arc; use store::{Error as DBError, HotStateSummary, KeyValueStore, StoreOp}; +use strum::AsRefStr; use task_executor::JoinHandle; use types::{ data_column_sidecar::DataColumnSidecarError, BeaconBlockRef, BeaconState, BeaconStateError, @@ -137,7 +138,7 @@ const WRITE_BLOCK_PROCESSING_SSZ: bool = cfg!(feature = "write_ssz_files"); /// /// - The block is malformed/invalid (indicated by all results other than `BeaconChainError`. /// - We encountered an error whilst trying to verify the block (a `BeaconChainError`). -#[derive(Debug)] +#[derive(Debug, AsRefStr)] pub enum BlockError { /// The parent block was unknown. 
/// diff --git a/beacon_node/network/src/metrics.rs b/beacon_node/network/src/metrics.rs index 4b7e8a50a36..154a59eade7 100644 --- a/beacon_node/network/src/metrics.rs +++ b/beacon_node/network/src/metrics.rs @@ -2,7 +2,8 @@ use beacon_chain::{ attestation_verification::Error as AttnError, light_client_finality_update_verification::Error as LightClientFinalityUpdateError, light_client_optimistic_update_verification::Error as LightClientOptimisticUpdateError, - sync_committee_verification::Error as SyncCommitteeError, + sync_committee_verification::Error as SyncCommitteeError, AvailabilityProcessingStatus, + BlockError, }; use fnv::FnvHashMap; use lighthouse_network::{ @@ -11,12 +12,19 @@ use lighthouse_network::{ }; pub use metrics::*; use std::sync::{Arc, LazyLock}; +use strum::AsRefStr; use strum::IntoEnumIterator; use types::EthSpec; pub const SUCCESS: &str = "SUCCESS"; pub const FAILURE: &str = "FAILURE"; +#[derive(Debug, AsRefStr)] +pub(crate) enum BlockSource { + Gossip, + Rpc, +} + pub static BEACON_BLOCK_MESH_PEERS_PER_CLIENT: LazyLock> = LazyLock::new(|| { try_create_int_gauge_vec( @@ -59,6 +67,27 @@ pub static SYNC_COMMITTEE_SUBSCRIPTION_REQUESTS: LazyLock> = ) }); +/* + * Beacon processor + */ +pub static BEACON_PROCESSOR_MISSING_COMPONENTS: LazyLock> = LazyLock::new( + || { + try_create_int_counter_vec( + "beacon_processor_missing_components_total", + "Total number of imported individual block components that resulted in missing components", + &["source", "component"], + ) + }, +); +pub static BEACON_PROCESSOR_IMPORT_ERRORS_PER_TYPE: LazyLock> = + LazyLock::new(|| { + try_create_int_counter_vec( + "beacon_processor_import_errors_total", + "Total number of block components that were not verified", + &["source", "component", "type"], + ) + }); + /* * Gossip processor */ @@ -606,6 +635,37 @@ pub fn register_sync_committee_error(error: &SyncCommitteeError) { inc_counter_vec(&GOSSIP_SYNC_COMMITTEE_ERRORS_PER_TYPE, &[error.as_ref()]); } +pub(crate) fn register_process_result_metrics( + result: &std::result::Result, + source: BlockSource, + block_component: &'static str, +) { + match result { + Ok(status) => match status { + AvailabilityProcessingStatus::Imported { .. } => match source { + BlockSource::Gossip => { + inc_counter(&BEACON_PROCESSOR_GOSSIP_BLOCK_IMPORTED_TOTAL); + } + BlockSource::Rpc => { + inc_counter(&BEACON_PROCESSOR_RPC_BLOCK_IMPORTED_TOTAL); + } + }, + AvailabilityProcessingStatus::MissingComponents { .. 
} => { + inc_counter_vec( + &BEACON_PROCESSOR_MISSING_COMPONENTS, + &[source.as_ref(), block_component], + ); + } + }, + Err(error) => { + inc_counter_vec( + &BEACON_PROCESSOR_IMPORT_ERRORS_PER_TYPE, + &[source.as_ref(), block_component, error.as_ref()], + ); + } + } +} + pub fn from_result(result: &std::result::Result) -> &str { match result { Ok(_) => SUCCESS, diff --git a/beacon_node/network/src/network_beacon_processor/gossip_methods.rs b/beacon_node/network/src/network_beacon_processor/gossip_methods.rs index 317bfb104bc..4fc83b09230 100644 --- a/beacon_node/network/src/network_beacon_processor/gossip_methods.rs +++ b/beacon_node/network/src/network_beacon_processor/gossip_methods.rs @@ -1,5 +1,5 @@ use crate::{ - metrics, + metrics::{self, register_process_result_metrics}, network_beacon_processor::{InvalidBlockStorage, NetworkBeaconProcessor}, service::NetworkMessage, sync::SyncMessage, @@ -915,12 +915,11 @@ impl NetworkBeaconProcessor { let blob_index = verified_blob.id().index; let result = self.chain.process_gossip_blob(verified_blob).await; + register_process_result_metrics(&result, metrics::BlockSource::Gossip, "blob"); match &result { Ok(AvailabilityProcessingStatus::Imported(block_root)) => { - // Note: Reusing block imported metric here - metrics::inc_counter(&metrics::BEACON_PROCESSOR_GOSSIP_BLOCK_IMPORTED_TOTAL); - debug!( + info!( self.log, "Gossipsub blob processed - imported fully available block"; "block_root" => %block_root @@ -989,43 +988,39 @@ impl NetworkBeaconProcessor { let data_column_slot = verified_data_column.slot(); let data_column_index = verified_data_column.id().index; - match self + let result = self .chain .process_gossip_data_columns(vec![verified_data_column], || Ok(())) - .await - { - Ok(availability) => { - match availability { - AvailabilityProcessingStatus::Imported(block_root) => { - // Note: Reusing block imported metric here - metrics::inc_counter( - &metrics::BEACON_PROCESSOR_GOSSIP_BLOCK_IMPORTED_TOTAL, - ); - info!( - self.log, - "Gossipsub data column processed, imported fully available block"; - "block_root" => %block_root - ); - self.chain.recompute_head_at_current_slot().await; + .await; + register_process_result_metrics(&result, metrics::BlockSource::Gossip, "data_column"); - metrics::set_gauge( - &metrics::BEACON_BLOB_DELAY_FULL_VERIFICATION, - processing_start_time.elapsed().as_millis() as i64, - ); - } - AvailabilityProcessingStatus::MissingComponents(slot, block_root) => { - trace!( - self.log, - "Processed data column, waiting for other components"; - "slot" => %slot, - "data_column_index" => %data_column_index, - "block_root" => %block_root, - ); + match result { + Ok(availability) => match availability { + AvailabilityProcessingStatus::Imported(block_root) => { + info!( + self.log, + "Gossipsub data column processed, imported fully available block"; + "block_root" => %block_root + ); + self.chain.recompute_head_at_current_slot().await; - self.attempt_data_column_reconstruction(block_root).await; - } + metrics::set_gauge( + &metrics::BEACON_BLOB_DELAY_FULL_VERIFICATION, + processing_start_time.elapsed().as_millis() as i64, + ); } - } + AvailabilityProcessingStatus::MissingComponents(slot, block_root) => { + trace!( + self.log, + "Processed data column, waiting for other components"; + "slot" => %slot, + "data_column_index" => %data_column_index, + "block_root" => %block_root, + ); + + self.attempt_data_column_reconstruction(block_root).await; + } + }, Err(BlockError::DuplicateFullyImported(_)) => { debug!( self.log, @@ -1467,11 
+1462,10 @@ impl NetworkBeaconProcessor { NotifyExecutionLayer::Yes, ) .await; + register_process_result_metrics(&result, metrics::BlockSource::Gossip, "block"); match &result { Ok(AvailabilityProcessingStatus::Imported(block_root)) => { - metrics::inc_counter(&metrics::BEACON_PROCESSOR_GOSSIP_BLOCK_IMPORTED_TOTAL); - if reprocess_tx .try_send(ReprocessQueueMessage::BlockImported { block_root: *block_root, diff --git a/beacon_node/network/src/network_beacon_processor/sync_methods.rs b/beacon_node/network/src/network_beacon_processor/sync_methods.rs index 6c6bb26ee09..817e6b64409 100644 --- a/beacon_node/network/src/network_beacon_processor/sync_methods.rs +++ b/beacon_node/network/src/network_beacon_processor/sync_methods.rs @@ -1,4 +1,4 @@ -use crate::metrics; +use crate::metrics::{self, register_process_result_metrics}; use crate::network_beacon_processor::{NetworkBeaconProcessor, FUTURE_SLOT_TOLERANCE}; use crate::sync::BatchProcessResult; use crate::sync::{ @@ -163,8 +163,7 @@ impl NetworkBeaconProcessor { NotifyExecutionLayer::Yes, ) .await; - - metrics::inc_counter(&metrics::BEACON_PROCESSOR_RPC_BLOCK_IMPORTED_TOTAL); + register_process_result_metrics(&result, metrics::BlockSource::Rpc, "block"); // RPC block imported, regardless of process type match result.as_ref() { @@ -286,6 +285,7 @@ impl NetworkBeaconProcessor { } let result = self.chain.process_rpc_blobs(slot, block_root, blobs).await; + register_process_result_metrics(&result, metrics::BlockSource::Rpc, "blobs"); match &result { Ok(AvailabilityProcessingStatus::Imported(hash)) => { @@ -343,6 +343,7 @@ impl NetworkBeaconProcessor { .chain .process_rpc_custody_columns(custody_columns) .await; + register_process_result_metrics(&result, metrics::BlockSource::Rpc, "custody_columns"); match &result { Ok(availability) => match availability { From 11e1d5bf148784d1ccbaf8b1023e26b3d0fb4cd1 Mon Sep 17 00:00:00 2001 From: Jun Song <87601811+syjn99@users.noreply.github.com> Date: Mon, 16 Dec 2024 14:43:54 +0900 Subject: [PATCH 10/15] Add CLI flag for HTTP API token path (VC) (#6577) * Add cli flag for HTTP API token path (VC) * Add http_token_path_flag test * Add pre-check for directory case & Fix test utils * Update docs * Apply review: move http_token_path into validator_http_api config * Lint * Make diff lesser to replace PK_FILENAME * Merge branch 'unstable' into feature/cli-token-path * Applt review: help_vc.md Co-authored-by: chonghe <44791194+chong-he@users.noreply.github.com> * Fix help for cli * Fix issues on ci * Merge branch 'unstable' into feature/cli-token-path * Merge branch 'unstable' into feature/cli-token-path * Merge branch 'unstable' into feature/cli-token-path * Merge branch 'unstable' into feature/cli-token-path --- Cargo.lock | 2 ++ book/src/api-vc-auth-header.md | 3 +- book/src/api-vc-endpoints.md | 2 +- book/src/help_vc.md | 4 +++ lighthouse/tests/validator_client.rs | 15 +++++++++ validator_client/http_api/Cargo.toml | 2 ++ validator_client/http_api/src/api_secret.rs | 37 ++++++++++++++++----- validator_client/http_api/src/lib.rs | 10 ++++++ validator_client/http_api/src/test_utils.rs | 9 +++-- validator_client/http_api/src/tests.rs | 8 +++-- validator_client/src/cli.rs | 12 +++++++ validator_client/src/config.rs | 8 ++++- validator_client/src/lib.rs | 2 +- 13 files changed, 96 insertions(+), 18 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 9d0d38c1ae5..2978a3a19f6 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -9552,6 +9552,8 @@ dependencies = [ "beacon_node_fallback", "bls", "deposit_contract", + 
"directory", + "dirs", "doppelganger_service", "eth2", "eth2_keystore", diff --git a/book/src/api-vc-auth-header.md b/book/src/api-vc-auth-header.md index adde78270a6..feb93724c06 100644 --- a/book/src/api-vc-auth-header.md +++ b/book/src/api-vc-auth-header.md @@ -18,7 +18,8 @@ Authorization: Bearer hGut6B8uEujufDXSmZsT0thnxvdvKFBvh ## Obtaining the API token The API token is stored as a file in the `validators` directory. For most users -this is `~/.lighthouse/{network}/validators/api-token.txt`. Here's an +this is `~/.lighthouse/{network}/validators/api-token.txt`, unless overridden using the +`--http-token-path` CLI parameter. Here's an example using the `cat` command to print the token to the terminal, but any text editor will suffice: diff --git a/book/src/api-vc-endpoints.md b/book/src/api-vc-endpoints.md index 80eba7a0590..98605a3dcd0 100644 --- a/book/src/api-vc-endpoints.md +++ b/book/src/api-vc-endpoints.md @@ -53,7 +53,7 @@ Example Response Body: } ``` -> Note: The command provided in this documentation links to the API token file. In this documentation, it is assumed that the API token file is located in `/var/lib/lighthouse/validators/api-token.txt`. If your database is saved in another directory, modify the `DATADIR` accordingly. If you are having permission issue with accessing the API token file, you can modify the header to become `-H "Authorization: Bearer $(sudo cat ${DATADIR}/validators/api-token.txt)"`. +> Note: The command provided in this documentation links to the API token file. In this documentation, it is assumed that the API token file is located in `/var/lib/lighthouse/validators/api-token.txt`. If your database is saved in another directory, modify the `DATADIR` accordingly. If you've specified a custom token path using `--http-token-path`, use that path instead. If you are having permission issue with accessing the API token file, you can modify the header to become `-H "Authorization: Bearer $(sudo cat ${DATADIR}/validators/api-token.txt)"`. > As an alternative, you can also provide the API token directly, for example, `-H "Authorization: Bearer hGut6B8uEujufDXSmZsT0thnxvdvKFBvh`. In this case, you obtain the token from the file `api-token.txt` and the command becomes: diff --git a/book/src/help_vc.md b/book/src/help_vc.md index 2cfbfbc857a..71e21d68c91 100644 --- a/book/src/help_vc.md +++ b/book/src/help_vc.md @@ -69,6 +69,10 @@ Options: this server (e.g., http://localhost:5062). --http-port Set the listen TCP port for the RESTful HTTP API server. + --http-token-path + Path to file containing the HTTP API token for validator client + authentication. If not specified, defaults to + {validators-dir}/api-token.txt. --log-format Specifies the log format used when emitting logs to the terminal. 
[possible values: JSON] diff --git a/lighthouse/tests/validator_client.rs b/lighthouse/tests/validator_client.rs index 34fe04cc452..587001f77bd 100644 --- a/lighthouse/tests/validator_client.rs +++ b/lighthouse/tests/validator_client.rs @@ -344,6 +344,21 @@ fn http_store_keystore_passwords_in_secrets_dir_present() { .with_config(|config| assert!(config.http_api.store_passwords_in_secrets_dir)); } +#[test] +fn http_token_path_flag() { + let dir = TempDir::new().expect("Unable to create temporary directory"); + CommandLineTest::new() + .flag("http", None) + .flag("http-token-path", dir.path().join("api-token.txt").to_str()) + .run() + .with_config(|config| { + assert_eq!( + config.http_api.http_token_path, + dir.path().join("api-token.txt") + ); + }); +} + // Tests for Metrics flags. #[test] fn metrics_flag() { diff --git a/validator_client/http_api/Cargo.toml b/validator_client/http_api/Cargo.toml index 18e0604ad51..96c836f6f3a 100644 --- a/validator_client/http_api/Cargo.toml +++ b/validator_client/http_api/Cargo.toml @@ -13,7 +13,9 @@ account_utils = { workspace = true } bls = { workspace = true } beacon_node_fallback = { workspace = true } deposit_contract = { workspace = true } +directory = { workspace = true } doppelganger_service = { workspace = true } +dirs = { workspace = true } graffiti_file = { workspace = true } eth2 = { workspace = true } eth2_keystore = { workspace = true } diff --git a/validator_client/http_api/src/api_secret.rs b/validator_client/http_api/src/api_secret.rs index afcac477ecb..bac54dc8b24 100644 --- a/validator_client/http_api/src/api_secret.rs +++ b/validator_client/http_api/src/api_secret.rs @@ -5,7 +5,7 @@ use std::fs; use std::path::{Path, PathBuf}; use warp::Filter; -/// The name of the file which stores the API token. +/// The default name of the file which stores the API token. pub const PK_FILENAME: &str = "api-token.txt"; pub const PK_LEN: usize = 33; @@ -31,14 +31,32 @@ pub struct ApiSecret { impl ApiSecret { /// If the public key is already on-disk, use it. /// - /// The provided `dir` is a directory containing `PK_FILENAME`. + /// The provided `pk_path` is a path containing API token. /// /// If the public key file is missing on disk, create a new key and /// write it to disk (over-writing any existing files). - pub fn create_or_open>(dir: P) -> Result { - let pk_path = dir.as_ref().join(PK_FILENAME); + pub fn create_or_open>(pk_path: P) -> Result { + let pk_path = pk_path.as_ref(); + + // Check if the path is a directory + if pk_path.is_dir() { + return Err(format!( + "API token path {:?} is a directory, not a file", + pk_path + )); + } if !pk_path.exists() { + // Create parent directories if they don't exist + if let Some(parent) = pk_path.parent() { + std::fs::create_dir_all(parent).map_err(|e| { + format!( + "Unable to create parent directories for {:?}: {:?}", + pk_path, e + ) + })?; + } + let length = PK_LEN; let pk: String = thread_rng() .sample_iter(&Alphanumeric) @@ -47,7 +65,7 @@ impl ApiSecret { .collect(); // Create and write the public key to file with appropriate permissions - create_with_600_perms(&pk_path, pk.to_string().as_bytes()).map_err(|e| { + create_with_600_perms(pk_path, pk.to_string().as_bytes()).map_err(|e| { format!( "Unable to create file with permissions for {:?}: {:?}", pk_path, e @@ -55,13 +73,16 @@ impl ApiSecret { })?; } - let pk = fs::read(&pk_path) - .map_err(|e| format!("cannot read {}: {}", PK_FILENAME, e))? + let pk = fs::read(pk_path) + .map_err(|e| format!("cannot read {}: {}", pk_path.display(), e))? 
.iter() .map(|&c| char::from(c)) .collect(); - Ok(Self { pk, pk_path }) + Ok(Self { + pk, + pk_path: pk_path.to_path_buf(), + }) } /// Returns the API token. diff --git a/validator_client/http_api/src/lib.rs b/validator_client/http_api/src/lib.rs index b58c7ccec02..f3dab3780c0 100644 --- a/validator_client/http_api/src/lib.rs +++ b/validator_client/http_api/src/lib.rs @@ -7,6 +7,7 @@ mod remotekeys; mod tests; pub mod test_utils; +pub use api_secret::PK_FILENAME; use graffiti::{delete_graffiti, get_graffiti, set_graffiti}; @@ -23,6 +24,7 @@ use beacon_node_fallback::CandidateInfo; use create_validator::{ create_validators_mnemonic, create_validators_web3signer, get_voting_password_storage, }; +use directory::{DEFAULT_HARDCODED_NETWORK, DEFAULT_ROOT_DIR, DEFAULT_VALIDATOR_DIR}; use eth2::lighthouse_vc::{ std_types::{AuthResponse, GetFeeRecipientResponse, GetGasLimitResponse}, types::{ @@ -99,10 +101,17 @@ pub struct Config { pub allow_origin: Option, pub allow_keystore_export: bool, pub store_passwords_in_secrets_dir: bool, + pub http_token_path: PathBuf, } impl Default for Config { fn default() -> Self { + let http_token_path = dirs::home_dir() + .unwrap_or_else(|| PathBuf::from(".")) + .join(DEFAULT_ROOT_DIR) + .join(DEFAULT_HARDCODED_NETWORK) + .join(DEFAULT_VALIDATOR_DIR) + .join(PK_FILENAME); Self { enabled: false, listen_addr: IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), @@ -110,6 +119,7 @@ impl Default for Config { allow_origin: None, allow_keystore_export: false, store_passwords_in_secrets_dir: false, + http_token_path, } } } diff --git a/validator_client/http_api/src/test_utils.rs b/validator_client/http_api/src/test_utils.rs index d033fdbf2d9..390095eec73 100644 --- a/validator_client/http_api/src/test_utils.rs +++ b/validator_client/http_api/src/test_utils.rs @@ -1,3 +1,4 @@ +use crate::api_secret::PK_FILENAME; use crate::{ApiSecret, Config as HttpConfig, Context}; use account_utils::validator_definitions::ValidatorDefinitions; use account_utils::{ @@ -73,6 +74,7 @@ impl ApiTester { let validator_dir = tempdir().unwrap(); let secrets_dir = tempdir().unwrap(); + let token_path = tempdir().unwrap().path().join(PK_FILENAME); let validator_defs = ValidatorDefinitions::open_or_create(validator_dir.path()).unwrap(); @@ -85,7 +87,7 @@ impl ApiTester { .await .unwrap(); - let api_secret = ApiSecret::create_or_open(validator_dir.path()).unwrap(); + let api_secret = ApiSecret::create_or_open(token_path).unwrap(); let api_pubkey = api_secret.api_token(); let config = ValidatorStoreConfig { @@ -177,6 +179,7 @@ impl ApiTester { allow_origin: None, allow_keystore_export: true, store_passwords_in_secrets_dir: false, + http_token_path: tempdir().unwrap().path().join(PK_FILENAME), } } @@ -199,8 +202,8 @@ impl ApiTester { } pub fn invalid_token_client(&self) -> ValidatorClientHttpClient { - let tmp = tempdir().unwrap(); - let api_secret = ApiSecret::create_or_open(tmp.path()).unwrap(); + let tmp = tempdir().unwrap().path().join("invalid-token.txt"); + let api_secret = ApiSecret::create_or_open(tmp).unwrap(); let invalid_pubkey = api_secret.api_token(); ValidatorClientHttpClient::new(self.url.clone(), invalid_pubkey).unwrap() } diff --git a/validator_client/http_api/src/tests.rs b/validator_client/http_api/src/tests.rs index 262bb64e69d..027b10e2464 100644 --- a/validator_client/http_api/src/tests.rs +++ b/validator_client/http_api/src/tests.rs @@ -63,6 +63,7 @@ impl ApiTester { let validator_dir = tempdir().unwrap(); let secrets_dir = tempdir().unwrap(); + let token_path = 
tempdir().unwrap().path().join("api-token.txt"); let validator_defs = ValidatorDefinitions::open_or_create(validator_dir.path()).unwrap(); @@ -75,7 +76,7 @@ impl ApiTester { .await .unwrap(); - let api_secret = ApiSecret::create_or_open(validator_dir.path()).unwrap(); + let api_secret = ApiSecret::create_or_open(&token_path).unwrap(); let api_pubkey = api_secret.api_token(); let spec = Arc::new(E::default_spec()); @@ -127,6 +128,7 @@ impl ApiTester { allow_origin: None, allow_keystore_export: true, store_passwords_in_secrets_dir: false, + http_token_path: token_path, }, sse_logging_components: None, log, @@ -161,8 +163,8 @@ impl ApiTester { } pub fn invalid_token_client(&self) -> ValidatorClientHttpClient { - let tmp = tempdir().unwrap(); - let api_secret = ApiSecret::create_or_open(tmp.path()).unwrap(); + let tmp = tempdir().unwrap().path().join("invalid-token.txt"); + let api_secret = ApiSecret::create_or_open(tmp).unwrap(); let invalid_pubkey = api_secret.api_token(); ValidatorClientHttpClient::new(self.url.clone(), invalid_pubkey.clone()).unwrap() } diff --git a/validator_client/src/cli.rs b/validator_client/src/cli.rs index 209876f07b0..b2d1ebb3c25 100644 --- a/validator_client/src/cli.rs +++ b/validator_client/src/cli.rs @@ -247,6 +247,18 @@ pub fn cli_app() -> Command { .help_heading(FLAG_HEADER) .display_order(0) ) + .arg( + Arg::new("http-token-path") + .long("http-token-path") + .requires("http") + .value_name("HTTP_TOKEN_PATH") + .help( + "Path to file containing the HTTP API token for validator client authentication. \ + If not specified, defaults to {validators-dir}/api-token.txt." + ) + .action(ArgAction::Set) + .display_order(0) + ) /* Prometheus metrics HTTP server related arguments */ .arg( Arg::new("metrics") diff --git a/validator_client/src/config.rs b/validator_client/src/config.rs index abdadeb393b..0fecb5202d1 100644 --- a/validator_client/src/config.rs +++ b/validator_client/src/config.rs @@ -17,7 +17,7 @@ use std::path::PathBuf; use std::str::FromStr; use std::time::Duration; use types::{Address, GRAFFITI_BYTES_LEN}; -use validator_http_api; +use validator_http_api::{self, PK_FILENAME}; use validator_http_metrics; use validator_store::Config as ValidatorStoreConfig; @@ -314,6 +314,12 @@ impl Config { config.http_api.store_passwords_in_secrets_dir = true; } + if cli_args.get_one::("http-token-path").is_some() { + config.http_api.http_token_path = parse_required(cli_args, "http-token-path") + // For backward compatibility, default to the path under the validator dir if not provided. 
+ .unwrap_or_else(|_| config.validator_dir.join(PK_FILENAME)); + } + /* * Prometheus metrics HTTP server */ diff --git a/validator_client/src/lib.rs b/validator_client/src/lib.rs index 2cc22357fbc..8ebfe98b15e 100644 --- a/validator_client/src/lib.rs +++ b/validator_client/src/lib.rs @@ -551,7 +551,7 @@ impl ProductionValidatorClient { let (block_service_tx, block_service_rx) = mpsc::channel(channel_capacity); let log = self.context.log(); - let api_secret = ApiSecret::create_or_open(&self.config.validator_dir)?; + let api_secret = ApiSecret::create_or_open(&self.config.http_api.http_token_path)?; self.http_api_listen_addr = if self.config.http_api.enabled { let ctx = Arc::new(validator_http_api::Context { From 86891e6d0f111c318660aaea63ed39c58dd716a5 Mon Sep 17 00:00:00 2001 From: ethDreamer <37123614+ethDreamer@users.noreply.github.com> Date: Sun, 15 Dec 2024 21:43:58 -0800 Subject: [PATCH 11/15] builder gas limit & some refactoring (#6583) * Cache gas_limit * Payload Parameters Refactor * Enforce Proposer Gas Limit * Fixed and Added New Tests * Fix Beacon Chain Tests --- .../beacon_chain/src/execution_payload.rs | 24 +- .../tests/payload_invalidation.rs | 22 +- beacon_node/execution_layer/src/lib.rs | 284 +++++++++++------- .../test_utils/execution_block_generator.rs | 35 ++- .../src/test_utils/mock_builder.rs | 63 +++- .../src/test_utils/mock_execution_layer.rs | 29 +- .../execution_layer/src/test_utils/mod.rs | 5 +- beacon_node/http_api/src/lib.rs | 27 +- .../http_api/tests/interactive_tests.rs | 13 +- beacon_node/http_api/tests/tests.rs | 234 +++++++++++---- consensus/types/src/chain_spec.rs | 27 ++ consensus/types/src/payload.rs | 33 ++ testing/ef_tests/src/cases/fork_choice.rs | 11 +- .../src/test_rig.rs | 34 ++- 14 files changed, 598 insertions(+), 243 deletions(-) diff --git a/beacon_node/beacon_chain/src/execution_payload.rs b/beacon_node/beacon_chain/src/execution_payload.rs index f2420eea0d2..5e13f0624da 100644 --- a/beacon_node/beacon_chain/src/execution_payload.rs +++ b/beacon_node/beacon_chain/src/execution_payload.rs @@ -14,7 +14,7 @@ use crate::{ }; use execution_layer::{ BlockProposalContents, BlockProposalContentsType, BuilderParams, NewPayloadRequest, - PayloadAttributes, PayloadStatus, + PayloadAttributes, PayloadParameters, PayloadStatus, }; use fork_choice::{InvalidationOperation, PayloadVerificationStatus}; use proto_array::{Block as ProtoBlock, ExecutionStatus}; @@ -375,8 +375,9 @@ pub fn get_execution_payload( let timestamp = compute_timestamp_at_slot(state, state.slot(), spec).map_err(BeaconStateError::from)?; let random = *state.get_randao_mix(current_epoch)?; - let latest_execution_payload_header_block_hash = - state.latest_execution_payload_header()?.block_hash(); + let latest_execution_payload_header = state.latest_execution_payload_header()?; + let latest_execution_payload_header_block_hash = latest_execution_payload_header.block_hash(); + let latest_execution_payload_header_gas_limit = latest_execution_payload_header.gas_limit(); let withdrawals = match state { &BeaconState::Capella(_) | &BeaconState::Deneb(_) | &BeaconState::Electra(_) => { Some(get_expected_withdrawals(state, spec)?.0.into()) @@ -406,6 +407,7 @@ pub fn get_execution_payload( random, proposer_index, latest_execution_payload_header_block_hash, + latest_execution_payload_header_gas_limit, builder_params, withdrawals, parent_beacon_block_root, @@ -443,6 +445,7 @@ pub async fn prepare_execution_payload( random: Hash256, proposer_index: u64, latest_execution_payload_header_block_hash: 
ExecutionBlockHash, + latest_execution_payload_header_gas_limit: u64, builder_params: BuilderParams, withdrawals: Option>, parent_beacon_block_root: Option, @@ -526,13 +529,20 @@ where parent_beacon_block_root, ); + let target_gas_limit = execution_layer.get_proposer_gas_limit(proposer_index).await; + let payload_parameters = PayloadParameters { + parent_hash, + parent_gas_limit: latest_execution_payload_header_gas_limit, + proposer_gas_limit: target_gas_limit, + payload_attributes: &payload_attributes, + forkchoice_update_params: &forkchoice_update_params, + current_fork: fork, + }; + let block_contents = execution_layer .get_payload( - parent_hash, - &payload_attributes, - forkchoice_update_params, + payload_parameters, builder_params, - fork, &chain.spec, builder_boost_factor, block_production_version, diff --git a/beacon_node/beacon_chain/tests/payload_invalidation.rs b/beacon_node/beacon_chain/tests/payload_invalidation.rs index 1325875a275..729d88450f4 100644 --- a/beacon_node/beacon_chain/tests/payload_invalidation.rs +++ b/beacon_node/beacon_chain/tests/payload_invalidation.rs @@ -986,10 +986,13 @@ async fn payload_preparation() { // Provide preparation data to the EL for `proposer`. el.update_proposer_preparation( Epoch::new(1), - &[ProposerPreparationData { - validator_index: proposer as u64, - fee_recipient, - }], + [( + &ProposerPreparationData { + validator_index: proposer as u64, + fee_recipient, + }, + &None, + )], ) .await; @@ -1119,10 +1122,13 @@ async fn payload_preparation_before_transition_block() { // Provide preparation data to the EL for `proposer`. el.update_proposer_preparation( Epoch::new(0), - &[ProposerPreparationData { - validator_index: proposer as u64, - fee_recipient, - }], + [( + &ProposerPreparationData { + validator_index: proposer as u64, + fee_recipient, + }, + &None, + )], ) .await; diff --git a/beacon_node/execution_layer/src/lib.rs b/beacon_node/execution_layer/src/lib.rs index 08a00d7bf8d..ae0dca9833f 100644 --- a/beacon_node/execution_layer/src/lib.rs +++ b/beacon_node/execution_layer/src/lib.rs @@ -28,7 +28,7 @@ use sensitive_url::SensitiveUrl; use serde::{Deserialize, Serialize}; use slog::{crit, debug, error, info, warn, Logger}; use slot_clock::SlotClock; -use std::collections::HashMap; +use std::collections::{hash_map::Entry, HashMap}; use std::fmt; use std::future::Future; use std::io::Write; @@ -319,10 +319,52 @@ impl> BlockProposalContents { + pub parent_hash: ExecutionBlockHash, + pub parent_gas_limit: u64, + pub proposer_gas_limit: Option, + pub payload_attributes: &'a PayloadAttributes, + pub forkchoice_update_params: &'a ForkchoiceUpdateParameters, + pub current_fork: ForkName, +} + #[derive(Clone, PartialEq)] pub struct ProposerPreparationDataEntry { update_epoch: Epoch, preparation_data: ProposerPreparationData, + gas_limit: Option, +} + +impl ProposerPreparationDataEntry { + pub fn update(&mut self, updated: Self) -> bool { + let mut changed = false; + // Update `gas_limit` if `updated.gas_limit` is `Some` and: + // - `self.gas_limit` is `None`, or + // - both are `Some` but the values differ. 
+ if let Some(updated_gas_limit) = updated.gas_limit { + if self.gas_limit != Some(updated_gas_limit) { + self.gas_limit = Some(updated_gas_limit); + changed = true; + } + } + + // Update `update_epoch` if it differs + if self.update_epoch != updated.update_epoch { + self.update_epoch = updated.update_epoch; + changed = true; + } + + // Update `preparation_data` if it differs + if self.preparation_data != updated.preparation_data { + self.preparation_data = updated.preparation_data; + changed = true; + } + + changed + } } #[derive(Hash, PartialEq, Eq)] @@ -711,23 +753,29 @@ impl ExecutionLayer { } /// Updates the proposer preparation data provided by validators - pub async fn update_proposer_preparation( - &self, - update_epoch: Epoch, - preparation_data: &[ProposerPreparationData], - ) { + pub async fn update_proposer_preparation<'a, I>(&self, update_epoch: Epoch, proposer_data: I) + where + I: IntoIterator)>, + { let mut proposer_preparation_data = self.proposer_preparation_data().await; - for preparation_entry in preparation_data { + + for (preparation_entry, gas_limit) in proposer_data { let new = ProposerPreparationDataEntry { update_epoch, preparation_data: preparation_entry.clone(), + gas_limit: *gas_limit, }; - let existing = - proposer_preparation_data.insert(preparation_entry.validator_index, new.clone()); - - if existing != Some(new) { - metrics::inc_counter(&metrics::EXECUTION_LAYER_PROPOSER_DATA_UPDATED); + match proposer_preparation_data.entry(preparation_entry.validator_index) { + Entry::Occupied(mut entry) => { + if entry.get_mut().update(new) { + metrics::inc_counter(&metrics::EXECUTION_LAYER_PROPOSER_DATA_UPDATED); + } + } + Entry::Vacant(entry) => { + entry.insert(new); + metrics::inc_counter(&metrics::EXECUTION_LAYER_PROPOSER_DATA_UPDATED); + } } } } @@ -809,6 +857,13 @@ impl ExecutionLayer { } } + pub async fn get_proposer_gas_limit(&self, proposer_index: u64) -> Option { + self.proposer_preparation_data() + .await + .get(&proposer_index) + .and_then(|entry| entry.gas_limit) + } + /// Maps to the `engine_getPayload` JSON-RPC call. /// /// However, it will attempt to call `self.prepare_payload` if it cannot find an existing @@ -818,14 +873,10 @@ impl ExecutionLayer { /// /// The result will be returned from the first node that returns successfully. No more nodes /// will be contacted. - #[allow(clippy::too_many_arguments)] pub async fn get_payload( &self, - parent_hash: ExecutionBlockHash, - payload_attributes: &PayloadAttributes, - forkchoice_update_params: ForkchoiceUpdateParameters, + payload_parameters: PayloadParameters<'_>, builder_params: BuilderParams, - current_fork: ForkName, spec: &ChainSpec, builder_boost_factor: Option, block_production_version: BlockProductionVersion, @@ -833,11 +884,8 @@ impl ExecutionLayer { let payload_result_type = match block_production_version { BlockProductionVersion::V3 => match self .determine_and_fetch_payload( - parent_hash, - payload_attributes, - forkchoice_update_params, + payload_parameters, builder_params, - current_fork, builder_boost_factor, spec, ) @@ -857,25 +905,11 @@ impl ExecutionLayer { &metrics::EXECUTION_LAYER_REQUEST_TIMES, &[metrics::GET_BLINDED_PAYLOAD], ); - self.determine_and_fetch_payload( - parent_hash, - payload_attributes, - forkchoice_update_params, - builder_params, - current_fork, - None, - spec, - ) - .await? + self.determine_and_fetch_payload(payload_parameters, builder_params, None, spec) + .await? 
} BlockProductionVersion::FullV2 => self - .get_full_payload_with( - parent_hash, - payload_attributes, - forkchoice_update_params, - current_fork, - noop, - ) + .get_full_payload_with(payload_parameters, noop) .await .and_then(GetPayloadResponseType::try_into) .map(ProvenancedPayload::Local)?, @@ -922,17 +956,15 @@ impl ExecutionLayer { async fn fetch_builder_and_local_payloads( &self, builder: &BuilderHttpClient, - parent_hash: ExecutionBlockHash, builder_params: &BuilderParams, - payload_attributes: &PayloadAttributes, - forkchoice_update_params: ForkchoiceUpdateParameters, - current_fork: ForkName, + payload_parameters: PayloadParameters<'_>, ) -> ( Result>>, builder_client::Error>, Result, Error>, ) { let slot = builder_params.slot; let pubkey = &builder_params.pubkey; + let parent_hash = payload_parameters.parent_hash; info!( self.log(), @@ -950,17 +982,12 @@ impl ExecutionLayer { .await }), timed_future(metrics::GET_BLINDED_PAYLOAD_LOCAL, async { - self.get_full_payload_caching( - parent_hash, - payload_attributes, - forkchoice_update_params, - current_fork, - ) - .await - .and_then(|local_result_type| match local_result_type { - GetPayloadResponseType::Full(payload) => Ok(payload), - GetPayloadResponseType::Blinded(_) => Err(Error::PayloadTypeMismatch), - }) + self.get_full_payload_caching(payload_parameters) + .await + .and_then(|local_result_type| match local_result_type { + GetPayloadResponseType::Full(payload) => Ok(payload), + GetPayloadResponseType::Blinded(_) => Err(Error::PayloadTypeMismatch), + }) }) ); @@ -984,26 +1011,17 @@ impl ExecutionLayer { (relay_result, local_result) } - #[allow(clippy::too_many_arguments)] async fn determine_and_fetch_payload( &self, - parent_hash: ExecutionBlockHash, - payload_attributes: &PayloadAttributes, - forkchoice_update_params: ForkchoiceUpdateParameters, + payload_parameters: PayloadParameters<'_>, builder_params: BuilderParams, - current_fork: ForkName, builder_boost_factor: Option, spec: &ChainSpec, ) -> Result>, Error> { let Some(builder) = self.builder() else { // no builder.. 
return local payload return self - .get_full_payload_caching( - parent_hash, - payload_attributes, - forkchoice_update_params, - current_fork, - ) + .get_full_payload_caching(payload_parameters) .await .and_then(GetPayloadResponseType::try_into) .map(ProvenancedPayload::Local); @@ -1034,26 +1052,15 @@ impl ExecutionLayer { ), } return self - .get_full_payload_caching( - parent_hash, - payload_attributes, - forkchoice_update_params, - current_fork, - ) + .get_full_payload_caching(payload_parameters) .await .and_then(GetPayloadResponseType::try_into) .map(ProvenancedPayload::Local); } + let parent_hash = payload_parameters.parent_hash; let (relay_result, local_result) = self - .fetch_builder_and_local_payloads( - builder.as_ref(), - parent_hash, - &builder_params, - payload_attributes, - forkchoice_update_params, - current_fork, - ) + .fetch_builder_and_local_payloads(builder.as_ref(), &builder_params, payload_parameters) .await; match (relay_result, local_result) { @@ -1118,14 +1125,9 @@ impl ExecutionLayer { ); // check relay payload validity - if let Err(reason) = verify_builder_bid( - &relay, - parent_hash, - payload_attributes, - Some(local.block_number()), - current_fork, - spec, - ) { + if let Err(reason) = + verify_builder_bid(&relay, payload_parameters, Some(local.block_number()), spec) + { // relay payload invalid -> return local metrics::inc_counter_vec( &metrics::EXECUTION_LAYER_GET_PAYLOAD_BUILDER_REJECTIONS, @@ -1202,14 +1204,7 @@ impl ExecutionLayer { "parent_hash" => ?parent_hash, ); - match verify_builder_bid( - &relay, - parent_hash, - payload_attributes, - None, - current_fork, - spec, - ) { + match verify_builder_bid(&relay, payload_parameters, None, spec) { Ok(()) => Ok(ProvenancedPayload::try_from(relay.data.message)?), Err(reason) => { metrics::inc_counter_vec( @@ -1234,32 +1229,28 @@ impl ExecutionLayer { /// Get a full payload and cache its result in the execution layer's payload cache. async fn get_full_payload_caching( &self, - parent_hash: ExecutionBlockHash, - payload_attributes: &PayloadAttributes, - forkchoice_update_params: ForkchoiceUpdateParameters, - current_fork: ForkName, + payload_parameters: PayloadParameters<'_>, ) -> Result, Error> { - self.get_full_payload_with( - parent_hash, - payload_attributes, - forkchoice_update_params, - current_fork, - Self::cache_payload, - ) - .await + self.get_full_payload_with(payload_parameters, Self::cache_payload) + .await } async fn get_full_payload_with( &self, - parent_hash: ExecutionBlockHash, - payload_attributes: &PayloadAttributes, - forkchoice_update_params: ForkchoiceUpdateParameters, - current_fork: ForkName, + payload_parameters: PayloadParameters<'_>, cache_fn: fn( &ExecutionLayer, PayloadContentsRefTuple, ) -> Option>, ) -> Result, Error> { + let PayloadParameters { + parent_hash, + payload_attributes, + forkchoice_update_params, + current_fork, + .. + } = payload_parameters; + self.engine() .request(move |engine| async move { let payload_id = if let Some(id) = engine @@ -1984,6 +1975,10 @@ enum InvalidBuilderPayload { payload: Option, expected: Option, }, + GasLimitMismatch { + payload: u64, + expected: u64, + }, } impl fmt::Display for InvalidBuilderPayload { @@ -2022,19 +2017,51 @@ impl fmt::Display for InvalidBuilderPayload { opt_string(expected) ) } + InvalidBuilderPayload::GasLimitMismatch { payload, expected } => { + write!(f, "payload gas limit was {} not {}", payload, expected) + } } } } +/// Calculate the expected gas limit for a block. 
+pub fn expected_gas_limit( + parent_gas_limit: u64, + target_gas_limit: u64, + spec: &ChainSpec, +) -> Option { + // Calculate the maximum gas limit difference allowed safely + let max_gas_limit_difference = parent_gas_limit + .checked_div(spec.gas_limit_adjustment_factor) + .and_then(|result| result.checked_sub(1)) + .unwrap_or(0); + + // Adjust the gas limit safely + if target_gas_limit > parent_gas_limit { + let gas_diff = target_gas_limit.saturating_sub(parent_gas_limit); + parent_gas_limit.checked_add(std::cmp::min(gas_diff, max_gas_limit_difference)) + } else { + let gas_diff = parent_gas_limit.saturating_sub(target_gas_limit); + parent_gas_limit.checked_sub(std::cmp::min(gas_diff, max_gas_limit_difference)) + } +} + /// Perform some cursory, non-exhaustive validation of the bid returned from the builder. fn verify_builder_bid( bid: &ForkVersionedResponse>, - parent_hash: ExecutionBlockHash, - payload_attributes: &PayloadAttributes, + payload_parameters: PayloadParameters<'_>, block_number: Option, - current_fork: ForkName, spec: &ChainSpec, ) -> Result<(), Box> { + let PayloadParameters { + parent_hash, + payload_attributes, + current_fork, + parent_gas_limit, + proposer_gas_limit, + .. + } = payload_parameters; + let is_signature_valid = bid.data.verify_signature(spec); let header = &bid.data.message.header(); @@ -2050,6 +2077,8 @@ fn verify_builder_bid( .cloned() .map(|withdrawals| Withdrawals::::from(withdrawals).tree_hash_root()); let payload_withdrawals_root = header.withdrawals_root().ok(); + let expected_gas_limit = proposer_gas_limit + .and_then(|target_gas_limit| expected_gas_limit(parent_gas_limit, target_gas_limit, spec)); if header.parent_hash() != parent_hash { Err(Box::new(InvalidBuilderPayload::ParentHash { @@ -2086,6 +2115,14 @@ fn verify_builder_bid( payload: payload_withdrawals_root, expected: expected_withdrawals_root, })) + } else if expected_gas_limit + .map(|gas_limit| header.gas_limit() != gas_limit) + .unwrap_or(false) + { + Err(Box::new(InvalidBuilderPayload::GasLimitMismatch { + payload: header.gas_limit(), + expected: expected_gas_limit.unwrap_or(0), + })) } else { Ok(()) } @@ -2138,6 +2175,27 @@ mod test { .await; } + #[tokio::test] + async fn test_expected_gas_limit() { + let spec = ChainSpec::mainnet(); + assert_eq!( + expected_gas_limit(30_000_000, 30_000_000, &spec), + Some(30_000_000) + ); + assert_eq!( + expected_gas_limit(30_000_000, 40_000_000, &spec), + Some(30_029_295) + ); + assert_eq!( + expected_gas_limit(30_029_295, 40_000_000, &spec), + Some(30_058_619) + ); + assert_eq!( + expected_gas_limit(30_058_619, 30_000_000, &spec), + Some(30_029_266) + ); + } + #[tokio::test] async fn test_forked_terminal_block() { let runtime = TestRuntime::default(); diff --git a/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs b/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs index 4deb91e0567..4fab7150ce3 100644 --- a/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs +++ b/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs @@ -28,8 +28,8 @@ use super::DEFAULT_TERMINAL_BLOCK; const TEST_BLOB_BUNDLE: &[u8] = include_bytes!("fixtures/mainnet/test_blobs_bundle.ssz"); -const GAS_LIMIT: u64 = 16384; -const GAS_USED: u64 = GAS_LIMIT - 1; +pub const DEFAULT_GAS_LIMIT: u64 = 30_000_000; +const GAS_USED: u64 = DEFAULT_GAS_LIMIT - 1; #[derive(Clone, Debug, PartialEq)] #[allow(clippy::large_enum_variant)] // This struct is only for testing. 
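// A minimal worked example of the gas limit adjustment rule introduced above in
// `expected_gas_limit`, assuming the `gas_limit_adjustment_factor` of 1024 configured
// by this patch series; the values mirror the `test_expected_gas_limit` cases and are
// illustrative only, not part of the diff hunks themselves.
fn gas_limit_adjustment_example() {
    let parent: u64 = 30_000_000;
    let target: u64 = 40_000_000;
    // Each block may move the gas limit by at most parent / 1024 - 1.
    let max_step = parent / 1024 - 1; // 29_295
    // A proposer targeting a higher limit therefore converges in ~29k-gas steps.
    let next = parent + max_step.min(target - parent);
    assert_eq!(next, 30_029_295);
}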
@@ -38,6 +38,10 @@ pub enum Block { PoS(ExecutionPayload), } +pub fn mock_el_extra_data() -> types::VariableList { + "block gen was here".as_bytes().to_vec().into() +} + impl Block { pub fn block_number(&self) -> u64 { match self { @@ -67,6 +71,13 @@ impl Block { } } + pub fn gas_limit(&self) -> u64 { + match self { + Block::PoW(_) => DEFAULT_GAS_LIMIT, + Block::PoS(payload) => payload.gas_limit(), + } + } + pub fn as_execution_block(&self, total_difficulty: Uint256) -> ExecutionBlock { match self { Block::PoW(block) => ExecutionBlock { @@ -570,10 +581,10 @@ impl ExecutionBlockGenerator { logs_bloom: vec![0; 256].into(), prev_randao: pa.prev_randao, block_number: parent.block_number() + 1, - gas_limit: GAS_LIMIT, + gas_limit: DEFAULT_GAS_LIMIT, gas_used: GAS_USED, timestamp: pa.timestamp, - extra_data: "block gen was here".as_bytes().to_vec().into(), + extra_data: mock_el_extra_data::(), base_fee_per_gas: Uint256::from(1u64), block_hash: ExecutionBlockHash::zero(), transactions: vec![].into(), @@ -587,10 +598,10 @@ impl ExecutionBlockGenerator { logs_bloom: vec![0; 256].into(), prev_randao: pa.prev_randao, block_number: parent.block_number() + 1, - gas_limit: GAS_LIMIT, + gas_limit: DEFAULT_GAS_LIMIT, gas_used: GAS_USED, timestamp: pa.timestamp, - extra_data: "block gen was here".as_bytes().to_vec().into(), + extra_data: mock_el_extra_data::(), base_fee_per_gas: Uint256::from(1u64), block_hash: ExecutionBlockHash::zero(), transactions: vec![].into(), @@ -603,10 +614,10 @@ impl ExecutionBlockGenerator { logs_bloom: vec![0; 256].into(), prev_randao: pa.prev_randao, block_number: parent.block_number() + 1, - gas_limit: GAS_LIMIT, + gas_limit: DEFAULT_GAS_LIMIT, gas_used: GAS_USED, timestamp: pa.timestamp, - extra_data: "block gen was here".as_bytes().to_vec().into(), + extra_data: mock_el_extra_data::(), base_fee_per_gas: Uint256::from(1u64), block_hash: ExecutionBlockHash::zero(), transactions: vec![].into(), @@ -623,10 +634,10 @@ impl ExecutionBlockGenerator { logs_bloom: vec![0; 256].into(), prev_randao: pa.prev_randao, block_number: parent.block_number() + 1, - gas_limit: GAS_LIMIT, + gas_limit: DEFAULT_GAS_LIMIT, gas_used: GAS_USED, timestamp: pa.timestamp, - extra_data: "block gen was here".as_bytes().to_vec().into(), + extra_data: mock_el_extra_data::(), base_fee_per_gas: Uint256::from(1u64), block_hash: ExecutionBlockHash::zero(), transactions: vec![].into(), @@ -642,10 +653,10 @@ impl ExecutionBlockGenerator { logs_bloom: vec![0; 256].into(), prev_randao: pa.prev_randao, block_number: parent.block_number() + 1, - gas_limit: GAS_LIMIT, + gas_limit: DEFAULT_GAS_LIMIT, gas_used: GAS_USED, timestamp: pa.timestamp, - extra_data: "block gen was here".as_bytes().to_vec().into(), + extra_data: mock_el_extra_data::(), base_fee_per_gas: Uint256::from(1u64), block_hash: ExecutionBlockHash::zero(), transactions: vec![].into(), diff --git a/beacon_node/execution_layer/src/test_utils/mock_builder.rs b/beacon_node/execution_layer/src/test_utils/mock_builder.rs index 341daedbc8d..879b54eb075 100644 --- a/beacon_node/execution_layer/src/test_utils/mock_builder.rs +++ b/beacon_node/execution_layer/src/test_utils/mock_builder.rs @@ -1,5 +1,5 @@ use crate::test_utils::{DEFAULT_BUILDER_PAYLOAD_VALUE_WEI, DEFAULT_JWT_SECRET}; -use crate::{Config, ExecutionLayer, PayloadAttributes}; +use crate::{Config, ExecutionLayer, PayloadAttributes, PayloadParameters}; use eth2::types::{BlobsBundle, BlockId, StateId, ValidatorId}; use eth2::{BeaconNodeHttpClient, Timeouts, CONSENSUS_VERSION_HEADER}; use 
fork_choice::ForkchoiceUpdateParameters; @@ -54,6 +54,10 @@ impl Operation { } } +pub fn mock_builder_extra_data() -> types::VariableList { + "mock_builder".as_bytes().to_vec().into() +} + #[derive(Debug)] // We don't use the string value directly, but it's used in the Debug impl which is required by `warp::reject::Reject`. struct Custom(#[allow(dead_code)] String); @@ -72,6 +76,8 @@ pub trait BidStuff { fn set_withdrawals_root(&mut self, withdrawals_root: Hash256); fn sign_builder_message(&mut self, sk: &SecretKey, spec: &ChainSpec) -> Signature; + + fn stamp_payload(&mut self); } impl BidStuff for BuilderBid { @@ -203,6 +209,29 @@ impl BidStuff for BuilderBid { let message = self.signing_root(domain); sk.sign(message) } + + // this helps differentiate a builder block from a regular block + fn stamp_payload(&mut self) { + let extra_data = mock_builder_extra_data::(); + match self.to_mut().header_mut() { + ExecutionPayloadHeaderRefMut::Bellatrix(header) => { + header.extra_data = extra_data; + header.block_hash = ExecutionBlockHash::from_root(header.tree_hash_root()); + } + ExecutionPayloadHeaderRefMut::Capella(header) => { + header.extra_data = extra_data; + header.block_hash = ExecutionBlockHash::from_root(header.tree_hash_root()); + } + ExecutionPayloadHeaderRefMut::Deneb(header) => { + header.extra_data = extra_data; + header.block_hash = ExecutionBlockHash::from_root(header.tree_hash_root()); + } + ExecutionPayloadHeaderRefMut::Electra(header) => { + header.extra_data = extra_data; + header.block_hash = ExecutionBlockHash::from_root(header.tree_hash_root()); + } + } + } } #[derive(Clone)] @@ -286,6 +315,7 @@ impl MockBuilder { while let Some(op) = guard.pop() { op.apply(bid); } + bid.stamp_payload(); } } @@ -413,11 +443,12 @@ pub fn serve( let block = head.data.message(); let head_block_root = block.tree_hash_root(); - let head_execution_hash = block + let head_execution_payload = block .body() .execution_payload() - .map_err(|_| reject("pre-merge block"))? 
- .block_hash(); + .map_err(|_| reject("pre-merge block"))?; + let head_execution_hash = head_execution_payload.block_hash(); + let head_gas_limit = head_execution_payload.gas_limit(); if head_execution_hash != parent_hash { return Err(reject("head mismatch")); } @@ -529,14 +560,24 @@ pub fn serve( finalized_hash: Some(finalized_execution_hash), }; + let proposer_gas_limit = builder + .val_registration_cache + .read() + .get(&pubkey) + .map(|v| v.message.gas_limit); + + let payload_parameters = PayloadParameters { + parent_hash: head_execution_hash, + parent_gas_limit: head_gas_limit, + proposer_gas_limit, + payload_attributes: &payload_attributes, + forkchoice_update_params: &forkchoice_update_params, + current_fork: fork, + }; + let payload_response_type = builder .el - .get_full_payload_caching( - head_execution_hash, - &payload_attributes, - forkchoice_update_params, - fork, - ) + .get_full_payload_caching(payload_parameters) .await .map_err(|_| reject("couldn't get payload"))?; @@ -648,8 +689,6 @@ pub fn serve( } }; - message.set_gas_limit(cached_data.gas_limit); - builder.apply_operations(&mut message); let mut signature = diff --git a/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs b/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs index a9f1313e462..48372a39be1 100644 --- a/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs +++ b/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs @@ -90,6 +90,7 @@ impl MockExecutionLayer { }; let parent_hash = latest_execution_block.block_hash(); + let parent_gas_limit = latest_execution_block.gas_limit(); let block_number = latest_execution_block.block_number() + 1; let timestamp = block_number; let prev_randao = Hash256::from_low_u64_be(block_number); @@ -131,14 +132,20 @@ impl MockExecutionLayer { let payload_attributes = PayloadAttributes::new(timestamp, prev_randao, suggested_fee_recipient, None, None); + let payload_parameters = PayloadParameters { + parent_hash, + parent_gas_limit, + proposer_gas_limit: None, + payload_attributes: &payload_attributes, + forkchoice_update_params: &forkchoice_update_params, + current_fork: ForkName::Bellatrix, + }; + let block_proposal_content_type = self .el .get_payload( - parent_hash, - &payload_attributes, - forkchoice_update_params, + payload_parameters, builder_params, - ForkName::Bellatrix, &self.spec, None, BlockProductionVersion::FullV2, @@ -171,14 +178,20 @@ impl MockExecutionLayer { let payload_attributes = PayloadAttributes::new(timestamp, prev_randao, suggested_fee_recipient, None, None); + let payload_parameters = PayloadParameters { + parent_hash, + parent_gas_limit, + proposer_gas_limit: None, + payload_attributes: &payload_attributes, + forkchoice_update_params: &forkchoice_update_params, + current_fork: ForkName::Bellatrix, + }; + let block_proposal_content_type = self .el .get_payload( - parent_hash, - &payload_attributes, - forkchoice_update_params, + payload_parameters, builder_params, - ForkName::Bellatrix, &self.spec, None, BlockProductionVersion::BlindedV2, diff --git a/beacon_node/execution_layer/src/test_utils/mod.rs b/beacon_node/execution_layer/src/test_utils/mod.rs index 1e71fde2551..faf6d4ef0b6 100644 --- a/beacon_node/execution_layer/src/test_utils/mod.rs +++ b/beacon_node/execution_layer/src/test_utils/mod.rs @@ -25,12 +25,13 @@ use types::{EthSpec, ExecutionBlockHash, Uint256}; use warp::{http::StatusCode, Filter, Rejection}; use crate::EngineCapabilities; +pub use execution_block_generator::DEFAULT_GAS_LIMIT; 
pub use execution_block_generator::{ generate_blobs, generate_genesis_block, generate_genesis_header, generate_pow_block, - static_valid_tx, Block, ExecutionBlockGenerator, + mock_el_extra_data, static_valid_tx, Block, ExecutionBlockGenerator, }; pub use hook::Hook; -pub use mock_builder::{MockBuilder, Operation}; +pub use mock_builder::{mock_builder_extra_data, MockBuilder, Operation}; pub use mock_execution_layer::MockExecutionLayer; pub const DEFAULT_TERMINAL_DIFFICULTY: u64 = 6400; diff --git a/beacon_node/http_api/src/lib.rs b/beacon_node/http_api/src/lib.rs index fe05f55a01a..23d177da785 100644 --- a/beacon_node/http_api/src/lib.rs +++ b/beacon_node/http_api/src/lib.rs @@ -3704,7 +3704,10 @@ pub fn serve( ); execution_layer - .update_proposer_preparation(current_epoch, &preparation_data) + .update_proposer_preparation( + current_epoch, + preparation_data.iter().map(|data| (data, &None)), + ) .await; chain @@ -3762,7 +3765,7 @@ pub fn serve( let spec = &chain.spec; let (preparation_data, filtered_registration_data): ( - Vec, + Vec<(ProposerPreparationData, Option)>, Vec, ) = register_val_data .into_iter() @@ -3792,12 +3795,15 @@ pub fn serve( // Filter out validators who are not 'active' or 'pending'. is_active_or_pending.then_some({ ( - ProposerPreparationData { - validator_index: validator_index as u64, - fee_recipient: register_data - .message - .fee_recipient, - }, + ( + ProposerPreparationData { + validator_index: validator_index as u64, + fee_recipient: register_data + .message + .fee_recipient, + }, + Some(register_data.message.gas_limit), + ), register_data, ) }) @@ -3807,7 +3813,10 @@ pub fn serve( // Update the prepare beacon proposer cache based on this request. execution_layer - .update_proposer_preparation(current_epoch, &preparation_data) + .update_proposer_preparation( + current_epoch, + preparation_data.iter().map(|(data, limit)| (data, limit)), + ) .await; // Call prepare beacon proposer blocking with the latest update in order to make diff --git a/beacon_node/http_api/tests/interactive_tests.rs b/beacon_node/http_api/tests/interactive_tests.rs index c3ed3347821..627b0d0b179 100644 --- a/beacon_node/http_api/tests/interactive_tests.rs +++ b/beacon_node/http_api/tests/interactive_tests.rs @@ -447,9 +447,14 @@ pub async fn proposer_boost_re_org_test( // Send proposer preparation data for all validators. 
let proposer_preparation_data = all_validators .iter() - .map(|i| ProposerPreparationData { - validator_index: *i as u64, - fee_recipient: Address::from_low_u64_be(*i as u64), + .map(|i| { + ( + ProposerPreparationData { + validator_index: *i as u64, + fee_recipient: Address::from_low_u64_be(*i as u64), + }, + None, + ) }) .collect::>(); harness @@ -459,7 +464,7 @@ pub async fn proposer_boost_re_org_test( .unwrap() .update_proposer_preparation( head_slot.epoch(E::slots_per_epoch()) + 1, - &proposer_preparation_data, + proposer_preparation_data.iter().map(|(a, b)| (a, b)), ) .await; diff --git a/beacon_node/http_api/tests/tests.rs b/beacon_node/http_api/tests/tests.rs index 940f3ae9c0c..080a393b4d0 100644 --- a/beacon_node/http_api/tests/tests.rs +++ b/beacon_node/http_api/tests/tests.rs @@ -13,8 +13,10 @@ use eth2::{ Error::ServerMessage, StatusCode, Timeouts, }; +use execution_layer::expected_gas_limit; use execution_layer::test_utils::{ - MockBuilder, Operation, DEFAULT_BUILDER_PAYLOAD_VALUE_WEI, DEFAULT_MOCK_EL_PAYLOAD_VALUE_WEI, + mock_builder_extra_data, mock_el_extra_data, MockBuilder, Operation, + DEFAULT_BUILDER_PAYLOAD_VALUE_WEI, DEFAULT_GAS_LIMIT, DEFAULT_MOCK_EL_PAYLOAD_VALUE_WEI, }; use futures::stream::{Stream, StreamExt}; use futures::FutureExt; @@ -348,7 +350,6 @@ impl ApiTester { let bls_to_execution_change = harness.make_bls_to_execution_change(4, Address::zero()); let chain = harness.chain.clone(); - let log = test_logger(); let ApiServer { @@ -3755,7 +3756,11 @@ impl ApiTester { self } - pub async fn test_post_validator_register_validator(self) -> Self { + async fn generate_validator_registration_data( + &self, + fee_recipient_generator: impl Fn(usize) -> Address, + gas_limit: u64, + ) -> (Vec, Vec
) { let mut registrations = vec![]; let mut fee_recipients = vec![]; @@ -3766,15 +3771,13 @@ impl ApiTester { epoch: genesis_epoch, }; - let expected_gas_limit = 11_111_111; - for (val_index, keypair) in self.validator_keypairs().iter().enumerate() { let pubkey = keypair.pk.compress(); - let fee_recipient = Address::from_low_u64_be(val_index as u64); + let fee_recipient = fee_recipient_generator(val_index); let data = ValidatorRegistrationData { fee_recipient, - gas_limit: expected_gas_limit, + gas_limit, timestamp: 0, pubkey, }; @@ -3797,6 +3800,17 @@ impl ApiTester { registrations.push(signed); } + (registrations, fee_recipients) + } + + pub async fn test_post_validator_register_validator(self) -> Self { + let (registrations, fee_recipients) = self + .generate_validator_registration_data( + |val_index| Address::from_low_u64_be(val_index as u64), + DEFAULT_GAS_LIMIT, + ) + .await; + self.client .post_validator_register_validator(®istrations) .await @@ -3811,14 +3825,22 @@ impl ApiTester { .zip(fee_recipients.into_iter()) .enumerate() { - let actual = self + let actual_fee_recipient = self .chain .execution_layer .as_ref() .unwrap() .get_suggested_fee_recipient(val_index as u64) .await; - assert_eq!(actual, fee_recipient); + let actual_gas_limit = self + .chain + .execution_layer + .as_ref() + .unwrap() + .get_proposer_gas_limit(val_index as u64) + .await; + assert_eq!(actual_fee_recipient, fee_recipient); + assert_eq!(actual_gas_limit, Some(DEFAULT_GAS_LIMIT)); } self @@ -3839,46 +3861,12 @@ impl ApiTester { ) .await; - let mut registrations = vec![]; - let mut fee_recipients = vec![]; - - let genesis_epoch = self.chain.spec.genesis_slot.epoch(E::slots_per_epoch()); - let fork = Fork { - current_version: self.chain.spec.genesis_fork_version, - previous_version: self.chain.spec.genesis_fork_version, - epoch: genesis_epoch, - }; - - let expected_gas_limit = 11_111_111; - - for (val_index, keypair) in self.validator_keypairs().iter().enumerate() { - let pubkey = keypair.pk.compress(); - let fee_recipient = Address::from_low_u64_be(val_index as u64); - - let data = ValidatorRegistrationData { - fee_recipient, - gas_limit: expected_gas_limit, - timestamp: 0, - pubkey, - }; - - let domain = self.chain.spec.get_domain( - genesis_epoch, - Domain::ApplicationMask(ApplicationDomain::Builder), - &fork, - Hash256::zero(), - ); - let message = data.signing_root(domain); - let signature = keypair.sk.sign(message); - - let signed = SignedValidatorRegistrationData { - message: data, - signature, - }; - - fee_recipients.push(fee_recipient); - registrations.push(signed); - } + let (registrations, fee_recipients) = self + .generate_validator_registration_data( + |val_index| Address::from_low_u64_be(val_index as u64), + DEFAULT_GAS_LIMIT, + ) + .await; self.client .post_validator_register_validator(®istrations) @@ -3911,6 +3899,47 @@ impl ApiTester { self } + pub async fn test_post_validator_register_validator_higher_gas_limit(&self) { + let (registrations, fee_recipients) = self + .generate_validator_registration_data( + |val_index| Address::from_low_u64_be(val_index as u64), + DEFAULT_GAS_LIMIT + 10_000_000, + ) + .await; + + self.client + .post_validator_register_validator(®istrations) + .await + .unwrap(); + + for (val_index, (_, fee_recipient)) in self + .chain + .head_snapshot() + .beacon_state + .validators() + .into_iter() + .zip(fee_recipients.into_iter()) + .enumerate() + { + let actual_fee_recipient = self + .chain + .execution_layer + .as_ref() + .unwrap() + .get_suggested_fee_recipient(val_index 
as u64) + .await; + let actual_gas_limit = self + .chain + .execution_layer + .as_ref() + .unwrap() + .get_proposer_gas_limit(val_index as u64) + .await; + assert_eq!(actual_fee_recipient, fee_recipient); + assert_eq!(actual_gas_limit, Some(DEFAULT_GAS_LIMIT + 10_000_000)); + } + } + pub async fn test_post_validator_liveness_epoch(self) -> Self { let epoch = self.chain.epoch().unwrap(); let head_state = self.chain.head_beacon_state_cloned(); @@ -4031,7 +4060,7 @@ impl ApiTester { let expected_fee_recipient = Address::from_low_u64_be(proposer_index as u64); assert_eq!(payload.fee_recipient(), expected_fee_recipient); - assert_eq!(payload.gas_limit(), 11_111_111); + assert_eq!(payload.gas_limit(), DEFAULT_GAS_LIMIT); self } @@ -4058,7 +4087,8 @@ impl ApiTester { let expected_fee_recipient = Address::from_low_u64_be(proposer_index as u64); assert_eq!(payload.fee_recipient(), expected_fee_recipient); - assert_eq!(payload.gas_limit(), 16_384); + // This is the graffiti of the mock execution layer, not the builder. + assert_eq!(payload.extra_data(), mock_el_extra_data::()); self } @@ -4085,7 +4115,7 @@ impl ApiTester { let expected_fee_recipient = Address::from_low_u64_be(proposer_index as u64); assert_eq!(payload.fee_recipient(), expected_fee_recipient); - assert_eq!(payload.gas_limit(), 11_111_111); + assert_eq!(payload.gas_limit(), DEFAULT_GAS_LIMIT); self } @@ -4109,7 +4139,7 @@ impl ApiTester { let expected_fee_recipient = Address::from_low_u64_be(proposer_index as u64); assert_eq!(payload.fee_recipient(), expected_fee_recipient); - assert_eq!(payload.gas_limit(), 11_111_111); + assert_eq!(payload.gas_limit(), DEFAULT_GAS_LIMIT); // If this cache is empty, it indicates fallback was not used, so the payload came from the // mock builder. @@ -4126,10 +4156,16 @@ impl ApiTester { pub async fn test_payload_accepts_mutated_gas_limit(self) -> Self { // Mutate gas limit. + let builder_limit = expected_gas_limit( + DEFAULT_GAS_LIMIT, + DEFAULT_GAS_LIMIT + 10_000_000, + self.chain.spec.as_ref(), + ) + .expect("calculate expected gas limit"); self.mock_builder .as_ref() .unwrap() - .add_operation(Operation::GasLimit(30_000_000)); + .add_operation(Operation::GasLimit(builder_limit as usize)); let slot = self.chain.slot().unwrap(); let epoch = self.chain.epoch().unwrap(); @@ -4149,7 +4185,7 @@ impl ApiTester { let expected_fee_recipient = Address::from_low_u64_be(proposer_index as u64); assert_eq!(payload.fee_recipient(), expected_fee_recipient); - assert_eq!(payload.gas_limit(), 30_000_000); + assert_eq!(payload.gas_limit(), builder_limit); // This cache should not be populated because fallback should not have been used. assert!(self @@ -4159,6 +4195,49 @@ impl ApiTester { .unwrap() .get_payload_by_root(&payload.tree_hash_root()) .is_none()); + // Another way is to check for the extra data of the mock builder + assert_eq!(payload.extra_data(), mock_builder_extra_data::()); + + self + } + + pub async fn test_builder_payload_rejected_when_gas_limit_incorrect(self) -> Self { + self.test_post_validator_register_validator_higher_gas_limit() + .await; + + // Mutate gas limit. 
+ self.mock_builder + .as_ref() + .unwrap() + .add_operation(Operation::GasLimit(1)); + + let slot = self.chain.slot().unwrap(); + let epoch = self.chain.epoch().unwrap(); + + let (_, randao_reveal) = self.get_test_randao(slot, epoch).await; + + let payload: BlindedPayload = self + .client + .get_validator_blinded_blocks::(slot, &randao_reveal, None) + .await + .unwrap() + .data + .body() + .execution_payload() + .unwrap() + .into(); + + // If this cache is populated, it indicates fallback to the local EE was correctly used. + assert!(self + .chain + .execution_layer + .as_ref() + .unwrap() + .get_payload_by_root(&payload.tree_hash_root()) + .is_some()); + // another way is to check for the extra data of the local EE + assert_eq!(payload.extra_data(), mock_el_extra_data::()); + self } @@ -4232,6 +4311,9 @@ impl ApiTester { .unwrap() .get_payload_by_root(&payload.tree_hash_root()) .is_none()); + // Another way is to check for the extra data of the mock builder + assert_eq!(payload.extra_data(), mock_builder_extra_data::()); + self } @@ -4315,6 +4397,9 @@ impl ApiTester { .unwrap() .get_payload_by_root(&payload.tree_hash_root()) .is_some()); + // another way is to check for the extra data of the local EE + assert_eq!(payload.extra_data(), mock_el_extra_data::()); + self } @@ -4404,6 +4489,9 @@ impl ApiTester { .unwrap() .get_payload_by_root(&payload.tree_hash_root()) .is_some()); + // another way is to check for the extra data of the local EE + assert_eq!(payload.extra_data(), mock_el_extra_data::()); + self } @@ -4491,6 +4579,9 @@ impl ApiTester { .unwrap() .get_payload_by_root(&payload.tree_hash_root()) .is_some()); + // another way is to check for the extra data of the local EE + assert_eq!(payload.extra_data(), mock_el_extra_data::()); + self } @@ -4577,6 +4668,9 @@ impl ApiTester { .unwrap() .get_payload_by_root(&payload.tree_hash_root()) .is_some()); + // another way is to check for the extra data of the local EE + assert_eq!(payload.extra_data(), mock_el_extra_data::()); + self } @@ -4647,6 +4741,9 @@ impl ApiTester { .unwrap() .get_payload_by_root(&payload.tree_hash_root()) .is_some()); + // another way is to check for the extra data of the local EE + assert_eq!(payload.extra_data(), mock_el_extra_data::()); + self } @@ -4707,6 +4804,9 @@ impl ApiTester { .unwrap() .get_payload_by_root(&payload.tree_hash_root()) .is_some()); + // another way is to check for the extra data of the local EE + assert_eq!(payload.extra_data(), mock_el_extra_data::()); + self } @@ -4780,6 +4880,8 @@ impl ApiTester { .unwrap() .get_payload_by_root(&payload.tree_hash_root()) .is_none()); + // Another way is to check for the extra data of the mock builder + assert_eq!(payload.extra_data(), mock_builder_extra_data::()); // Without proposing, advance into the next slot, this should make us cross the threshold // number of skips, causing us to use the fallback. @@ -4809,6 +4911,8 @@ impl ApiTester { .unwrap() .get_payload_by_root(&payload.tree_hash_root()) .is_some()); + // another way is to check for the extra data of the local EE + assert_eq!(payload.extra_data(), mock_el_extra_data::()); self } @@ -4915,6 +5019,8 @@ impl ApiTester { .unwrap() .get_payload_by_root(&payload.tree_hash_root()) .is_some()); + // another way is to check for the extra data of the local EE + assert_eq!(payload.extra_data(), mock_el_extra_data::()); // Fill another epoch with blocks, should be enough to finalize. (Sneaky plus 1 because this // scenario starts at an epoch boundary). 
@@ -4954,6 +5060,8 @@ impl ApiTester { .unwrap() .get_payload_by_root(&payload.tree_hash_root()) .is_none()); + // Another way is to check for the extra data of the mock builder + assert_eq!(payload.extra_data(), mock_builder_extra_data::()); self } @@ -5072,6 +5180,8 @@ impl ApiTester { .unwrap() .get_payload_by_root(&payload.tree_hash_root()) .is_some()); + // another way is to check for the extra data of the local EE + assert_eq!(payload.extra_data(), mock_el_extra_data::()); self } @@ -5149,6 +5259,9 @@ impl ApiTester { .unwrap() .get_payload_by_root(&payload.tree_hash_root()) .is_none()); + // Another way is to check for the extra data of the mock builder + assert_eq!(payload.extra_data(), mock_builder_extra_data::()); + self } @@ -5214,6 +5327,9 @@ impl ApiTester { .unwrap() .get_payload_by_root(&payload.tree_hash_root()) .is_some()); + // another way is to check for the extra data of the local EE + assert_eq!(payload.extra_data(), mock_el_extra_data::()); + self } @@ -5279,6 +5395,9 @@ impl ApiTester { .unwrap() .get_payload_by_root(&payload.tree_hash_root()) .is_some()); + // another way is to check for the extra data of the local EE + assert_eq!(payload.extra_data(), mock_el_extra_data::()); + self } @@ -5343,6 +5462,9 @@ impl ApiTester { .unwrap() .get_payload_by_root(&payload.tree_hash_root()) .is_none()); + // Another way is to check for the extra data of the mock builder + assert_eq!(payload.extra_data(), mock_builder_extra_data::()); + self } @@ -6682,6 +6804,8 @@ async fn post_validator_register_valid_v3() { #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn post_validator_register_gas_limit_mutation() { ApiTester::new_mev_tester() + .await + .test_builder_payload_rejected_when_gas_limit_incorrect() .await .test_payload_accepts_mutated_gas_limit() .await; diff --git a/consensus/types/src/chain_spec.rs b/consensus/types/src/chain_spec.rs index 79dcc65ea3c..0b33a76ff19 100644 --- a/consensus/types/src/chain_spec.rs +++ b/consensus/types/src/chain_spec.rs @@ -127,6 +127,11 @@ pub struct ChainSpec { pub deposit_network_id: u64, pub deposit_contract_address: Address, + /* + * Execution Specs + */ + pub gas_limit_adjustment_factor: u64, + /* * Altair hard fork params */ @@ -715,6 +720,11 @@ impl ChainSpec { .parse() .expect("chain spec deposit contract address"), + /* + * Execution Specs + */ + gas_limit_adjustment_factor: 1024, + /* * Altair hard fork params */ @@ -1029,6 +1039,11 @@ impl ChainSpec { .parse() .expect("chain spec deposit contract address"), + /* + * Execution Specs + */ + gas_limit_adjustment_factor: 1024, + /* * Altair hard fork params */ @@ -1285,6 +1300,10 @@ pub struct Config { #[serde(with = "serde_utils::address_hex")] deposit_contract_address: Address, + #[serde(default = "default_gas_limit_adjustment_factor")] + #[serde(with = "serde_utils::quoted_u64")] + gas_limit_adjustment_factor: u64, + #[serde(default = "default_gossip_max_size")] #[serde(with = "serde_utils::quoted_u64")] gossip_max_size: u64, @@ -1407,6 +1426,10 @@ const fn default_max_per_epoch_activation_churn_limit() -> u64 { 8 } +const fn default_gas_limit_adjustment_factor() -> u64 { + 1024 +} + const fn default_gossip_max_size() -> u64 { 10485760 } @@ -1659,6 +1682,8 @@ impl Config { deposit_network_id: spec.deposit_network_id, deposit_contract_address: spec.deposit_contract_address, + gas_limit_adjustment_factor: spec.gas_limit_adjustment_factor, + gossip_max_size: spec.gossip_max_size, max_request_blocks: spec.max_request_blocks, min_epochs_for_block_requests: 
spec.min_epochs_for_block_requests, @@ -1733,6 +1758,7 @@ impl Config { deposit_chain_id, deposit_network_id, deposit_contract_address, + gas_limit_adjustment_factor, gossip_max_size, min_epochs_for_block_requests, max_chunk_size, @@ -1794,6 +1820,7 @@ impl Config { deposit_chain_id, deposit_network_id, deposit_contract_address, + gas_limit_adjustment_factor, terminal_total_difficulty, terminal_block_hash, terminal_block_hash_activation_epoch, diff --git a/consensus/types/src/payload.rs b/consensus/types/src/payload.rs index b82a897da5d..e68801840af 100644 --- a/consensus/types/src/payload.rs +++ b/consensus/types/src/payload.rs @@ -32,6 +32,7 @@ pub trait ExecPayload: Debug + Clone + PartialEq + Hash + TreeHash + fn prev_randao(&self) -> Hash256; fn block_number(&self) -> u64; fn timestamp(&self) -> u64; + fn extra_data(&self) -> VariableList; fn block_hash(&self) -> ExecutionBlockHash; fn fee_recipient(&self) -> Address; fn gas_limit(&self) -> u64; @@ -225,6 +226,13 @@ impl ExecPayload for FullPayload { }) } + fn extra_data<'a>(&'a self) -> VariableList { + map_full_payload_ref!(&'a _, self.to_ref(), move |payload, cons| { + cons(payload); + payload.execution_payload.extra_data.clone() + }) + } + fn block_hash<'a>(&'a self) -> ExecutionBlockHash { map_full_payload_ref!(&'a _, self.to_ref(), move |payload, cons| { cons(payload); @@ -357,6 +365,13 @@ impl ExecPayload for FullPayloadRef<'_, E> { }) } + fn extra_data<'a>(&'a self) -> VariableList { + map_full_payload_ref!(&'a _, self, move |payload, cons| { + cons(payload); + payload.execution_payload.extra_data.clone() + }) + } + fn block_hash<'a>(&'a self) -> ExecutionBlockHash { map_full_payload_ref!(&'a _, self, move |payload, cons| { cons(payload); @@ -542,6 +557,13 @@ impl ExecPayload for BlindedPayload { }) } + fn extra_data<'a>(&'a self) -> VariableList::MaxExtraDataBytes> { + map_blinded_payload_ref!(&'a _, self.to_ref(), move |payload, cons| { + cons(payload); + payload.execution_payload_header.extra_data.clone() + }) + } + fn block_hash<'a>(&'a self) -> ExecutionBlockHash { map_blinded_payload_ref!(&'a _, self.to_ref(), move |payload, cons| { cons(payload); @@ -643,6 +665,13 @@ impl<'b, E: EthSpec> ExecPayload for BlindedPayloadRef<'b, E> { }) } + fn extra_data<'a>(&'a self) -> VariableList::MaxExtraDataBytes> { + map_blinded_payload_ref!(&'a _, self, move |payload, cons| { + cons(payload); + payload.execution_payload_header.extra_data.clone() + }) + } + fn block_hash<'a>(&'a self) -> ExecutionBlockHash { map_blinded_payload_ref!(&'a _, self, move |payload, cons| { cons(payload); @@ -745,6 +774,10 @@ macro_rules! 
impl_exec_payload_common { self.$wrapped_field.timestamp } + fn extra_data(&self) -> VariableList { + self.$wrapped_field.extra_data.clone() + } + fn block_hash(&self) -> ExecutionBlockHash { self.$wrapped_field.block_hash } diff --git a/testing/ef_tests/src/cases/fork_choice.rs b/testing/ef_tests/src/cases/fork_choice.rs index 7d4d229fef7..427bcf5e9c5 100644 --- a/testing/ef_tests/src/cases/fork_choice.rs +++ b/testing/ef_tests/src/cases/fork_choice.rs @@ -809,10 +809,13 @@ impl Tester { if expected_should_override_fcu.validator_is_connected { el.update_proposer_preparation( next_slot_epoch, - &[ProposerPreparationData { - validator_index: dbg!(proposer_index) as u64, - fee_recipient: Default::default(), - }], + [( + &ProposerPreparationData { + validator_index: dbg!(proposer_index) as u64, + fee_recipient: Default::default(), + }, + &None, + )], ) .await; } else { diff --git a/testing/execution_engine_integration/src/test_rig.rs b/testing/execution_engine_integration/src/test_rig.rs index 0289fd4206b..f6645093049 100644 --- a/testing/execution_engine_integration/src/test_rig.rs +++ b/testing/execution_engine_integration/src/test_rig.rs @@ -3,9 +3,10 @@ use crate::execution_engine::{ }; use crate::transactions::transactions; use ethers_providers::Middleware; +use execution_layer::test_utils::DEFAULT_GAS_LIMIT; use execution_layer::{ BlockProposalContentsType, BuilderParams, ChainHealth, ExecutionLayer, PayloadAttributes, - PayloadStatus, + PayloadParameters, PayloadStatus, }; use fork_choice::ForkchoiceUpdateParameters; use reqwest::{header::CONTENT_TYPE, Client}; @@ -251,6 +252,7 @@ impl TestRig { */ let parent_hash = terminal_pow_block_hash; + let parent_gas_limit = DEFAULT_GAS_LIMIT; let timestamp = timestamp_now(); let prev_randao = Hash256::zero(); let head_root = Hash256::zero(); @@ -324,15 +326,22 @@ impl TestRig { Some(vec![]), None, ); + + let payload_parameters = PayloadParameters { + parent_hash, + parent_gas_limit, + proposer_gas_limit: None, + payload_attributes: &payload_attributes, + forkchoice_update_params: &forkchoice_update_params, + current_fork: TEST_FORK, + }; + let block_proposal_content_type = self .ee_a .execution_layer .get_payload( - parent_hash, - &payload_attributes, - forkchoice_update_params, + payload_parameters, builder_params, - TEST_FORK, &self.spec, None, BlockProductionVersion::FullV2, @@ -476,15 +485,22 @@ impl TestRig { Some(vec![]), None, ); + + let payload_parameters = PayloadParameters { + parent_hash, + parent_gas_limit, + proposer_gas_limit: None, + payload_attributes: &payload_attributes, + forkchoice_update_params: &forkchoice_update_params, + current_fork: TEST_FORK, + }; + let block_proposal_content_type = self .ee_a .execution_layer .get_payload( - parent_hash, - &payload_attributes, - forkchoice_update_params, + payload_parameters, builder_params, - TEST_FORK, &self.spec, None, BlockProductionVersion::FullV2, From 8e891a8bfd139dde3e63a5ed70bc8b76eea896bf Mon Sep 17 00:00:00 2001 From: Akihito Nakano Date: Mon, 16 Dec 2024 14:44:02 +0900 Subject: [PATCH 12/15] Fix web3signer test fails on macOS (#6588) * Add lighthouse/key_legacy.p12 for macOS * Specify `-days 825` to meet Apple's requirements for TLS server certificates * Remove `-aes256` as it's ignored on exporting The following warning will appear: Warning: output encryption option -aes256 ignored with -export * Update certificates and keys --- testing/web3signer_tests/src/lib.rs | 6 +- testing/web3signer_tests/tls/generate.sh | 21 +++- .../web3signer_tests/tls/lighthouse/cert.pem | 58 
+++++----- .../web3signer_tests/tls/lighthouse/key.key | 100 +++++++++--------- .../web3signer_tests/tls/lighthouse/key.p12 | Bin 4371 -> 4387 bytes .../tls/lighthouse/key_legacy.p12 | Bin 0 -> 4221 bytes .../tls/lighthouse/web3signer.pem | 58 +++++----- .../web3signer_tests/tls/web3signer/cert.pem | 58 +++++----- .../web3signer_tests/tls/web3signer/key.key | 100 +++++++++--------- .../web3signer_tests/tls/web3signer/key.p12 | Bin 4371 -> 4387 bytes .../tls/web3signer/known_clients.txt | 2 +- 11 files changed, 210 insertions(+), 193 deletions(-) create mode 100644 testing/web3signer_tests/tls/lighthouse/key_legacy.p12 diff --git a/testing/web3signer_tests/src/lib.rs b/testing/web3signer_tests/src/lib.rs index a58dcb5fa08..bebc8fa13be 100644 --- a/testing/web3signer_tests/src/lib.rs +++ b/testing/web3signer_tests/src/lib.rs @@ -130,7 +130,11 @@ mod tests { } fn client_identity_path() -> PathBuf { - tls_dir().join("lighthouse").join("key.p12") + if cfg!(target_os = "macos") { + tls_dir().join("lighthouse").join("key_legacy.p12") + } else { + tls_dir().join("lighthouse").join("key.p12") + } } fn client_identity_password() -> String { diff --git a/testing/web3signer_tests/tls/generate.sh b/testing/web3signer_tests/tls/generate.sh index f918e87cf82..3b14dbddba3 100755 --- a/testing/web3signer_tests/tls/generate.sh +++ b/testing/web3signer_tests/tls/generate.sh @@ -1,7 +1,20 @@ #!/bin/bash -openssl req -x509 -sha256 -nodes -days 36500 -newkey rsa:4096 -keyout web3signer/key.key -out web3signer/cert.pem -config web3signer/config && -openssl pkcs12 -export -aes256 -out web3signer/key.p12 -inkey web3signer/key.key -in web3signer/cert.pem -password pass:$(cat web3signer/password.txt) && + +# The lighthouse/key_legacy.p12 file is generated specifically for macOS because the default `openssl pkcs12` encoding +# algorithm in OpenSSL v3 is not compatible with the PKCS algorithm used by the Apple Security Framework. The client +# side (using the reqwest crate) relies on the Apple Security Framework to parse PKCS files. +# We don't need to generate web3signer/key_legacy.p12 because the compatibility issue doesn't occur on the web3signer +# side. It seems that web3signer (Java) uses its own implementation to parse PKCS files. +# See https://github.com/sigp/lighthouse/issues/6442#issuecomment-2469252651 + +# We specify `-days 825` when generating the certificate files because Apple requires TLS server certificates to have a +# validity period of 825 days or fewer. 
+# See https://github.com/sigp/lighthouse/issues/6442#issuecomment-2474979183 + +openssl req -x509 -sha256 -nodes -days 825 -newkey rsa:4096 -keyout web3signer/key.key -out web3signer/cert.pem -config web3signer/config && +openssl pkcs12 -export -out web3signer/key.p12 -inkey web3signer/key.key -in web3signer/cert.pem -password pass:$(cat web3signer/password.txt) && cp web3signer/cert.pem lighthouse/web3signer.pem && -openssl req -x509 -sha256 -nodes -days 36500 -newkey rsa:4096 -keyout lighthouse/key.key -out lighthouse/cert.pem -config lighthouse/config && -openssl pkcs12 -export -aes256 -out lighthouse/key.p12 -inkey lighthouse/key.key -in lighthouse/cert.pem -password pass:$(cat lighthouse/password.txt) && +openssl req -x509 -sha256 -nodes -days 825 -newkey rsa:4096 -keyout lighthouse/key.key -out lighthouse/cert.pem -config lighthouse/config && +openssl pkcs12 -export -out lighthouse/key.p12 -inkey lighthouse/key.key -in lighthouse/cert.pem -password pass:$(cat lighthouse/password.txt) && +openssl pkcs12 -export -legacy -out lighthouse/key_legacy.p12 -inkey lighthouse/key.key -in lighthouse/cert.pem -password pass:$(cat lighthouse/password.txt) && openssl x509 -noout -fingerprint -sha256 -inform pem -in lighthouse/cert.pem | cut -b 20-| sed "s/^/lighthouse /" > web3signer/known_clients.txt diff --git a/testing/web3signer_tests/tls/lighthouse/cert.pem b/testing/web3signer_tests/tls/lighthouse/cert.pem index 24b0a2e5c0e..4aaf66b747d 100644 --- a/testing/web3signer_tests/tls/lighthouse/cert.pem +++ b/testing/web3signer_tests/tls/lighthouse/cert.pem @@ -1,33 +1,33 @@ -----BEGIN CERTIFICATE----- -MIIFujCCA6KgAwIBAgIUXZijYo8W4/9dAq58ocFEbZDxohwwDQYJKoZIhvcNAQEL +MIIFuDCCA6CgAwIBAgIUa3O7icWD4W7c5yRMjG/EX422ODUwDQYJKoZIhvcNAQEL BQAwazELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAlZBMREwDwYDVQQHDAhTb21lQ2l0 eTESMBAGA1UECgwJTXlDb21wYW55MRMwEQYDVQQLDApNeURpdmlzaW9uMRMwEQYD -VQQDDApsaWdodGhvdXNlMCAXDTIzMDkyMDAyNTYzNloYDzIxMjMwODI3MDI1NjM2 -WjBrMQswCQYDVQQGEwJVUzELMAkGA1UECAwCVkExETAPBgNVBAcMCFNvbWVDaXR5 -MRIwEAYDVQQKDAlNeUNvbXBhbnkxEzARBgNVBAsMCk15RGl2aXNpb24xEzARBgNV -BAMMCmxpZ2h0aG91c2UwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQC1 -R1M9NnRwUsqFvJzNWPKuY1PW7llwRRWCixiWNvcxukGTa6AMLZDrYO1Y7qlw5m52 -aHSA2fs2KyeA61yajG/BsLn1vmTtJMZXgLsG0MIqvhgOoh+ZZbl8biO0gQJSRSDE -jf0ogUVM9TCEt6ydbGnzgs8EESqvyXcreaXfmLI7jiX/BkwCdf+Ru+H3MF96QgAw -Oz1d8/fxYJvIpT/DOx4NuMZouSAcUVXgwcVb6JXeTg0xVcL33lluquhYDR0gD5Fe -V0fPth+e9XMAH7udim8E5wn2Ep8CAVoeVq6K9mBM3NqP7+2YmU//jLbkd6UvKPaI -0vps1zF9Bo8QewiRbM0IRse99ikCVZcjOcZSitw3kwTg59NjZ0Vk9R/2YQt/gGWM -VcR//EtbOZGqzGrLPFKOcWO85Ggz746Saj15N+bqT20hXHyiwYL8DLgJkMR2W9Nr -67Vyi9SWSM6rdRQlezlHq/yNEh+JuY7eoC3VeVw9K1ZXP+OKAwbpcnvd3uLwV91f -kpT6kjc6d2h4bK8fhvF16Em42JypQCl0xMhgg/8MFO+6ZLy5otWAdsSYyO5k9CAa -3zLeqd89dS7HNLdLZ0Y5SFWm6y5Kqu89ErIENafX5DxupHWsruiBV7zhDHNPaGcf -TPFe8xuDYsi155veOfEiDh4g+X1qjL8x8OEDjgsM3QIDAQABo1QwUjALBgNVHQ8E -BAMCBDAwEwYDVR0lBAwwCgYIKwYBBQUHAwEwDwYDVR0RBAgwBocEfwAAATAdBgNV -HQ4EFgQU6r7QHkcEsWhEZHpcMpGxwKXQL9swDQYJKoZIhvcNAQELBQADggIBACyO -8xzqotye1J6xhDQCQnQF3dXaPTqfT31Ypg8UeU25V9N+bZO04CJKlOblukuvkedE -x1RDeqG3A81D4JOgTGFmFVoEF4iTk3NBrsHuMzph6ImHTd3TD+5iG5a3GL0i9PAI -dHTT6z6t2wlayjmHotqQ+N4A4msx8IPBRULcCmId319gpSDHsvt2wYbLdh+d9E2h -vI0VleJpJ7eoy05842VTkFJebriSpi75yFphKUnyAKlONiMN3o6eg90wpWdI+1rQ -js5lfm+pxYw8H6eSf+rl30m+amrxUlooqrSCHNVSO2c4+W5m/r3JfOiRqVUTxaO8 -0f/xYXo6SdRxdvJV18LEzOHURvkbqBjLoEfHbCC2EApevWAeCdjhvCBPl1IJZtFP -sYDpYtHhw69JmZ7Nj75cQyRtJMQ5S4GsJ/haYXNZPgRL1XBo1ntuc8K1cLZ2MucQ -1170+2pi3IvwmST+/+7+2fyms1AwF7rj2dVxNfPIvOxi6E9lHmPVxvpbuOYOEhex 
-XqTum/MjI17Qf6eoipk81ppCFtO9s3qNe9SBSjzYEYnsytaMdZSSjsOhE/IyYPHI -SICMjWE13du03Z5xWwK9i3UiFq+hIPhBHFPGkNFMmkQtcyS9lj9R0tKUmWdFPNa8 -nuhxn5kLUMriv3zsdhMPUC4NwM5XsopdWcuSxfnt +VQQDDApsaWdodGhvdXNlMB4XDTI0MTExNjIyMTI0NloXDTI3MDIxOTIyMTI0Nlow +azELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAlZBMREwDwYDVQQHDAhTb21lQ2l0eTES +MBAGA1UECgwJTXlDb21wYW55MRMwEQYDVQQLDApNeURpdmlzaW9uMRMwEQYDVQQD +DApsaWdodGhvdXNlMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAsAg4 +CkW51XFC0ZlcLXOzAHHD3e1y2tCkvQLCC5YG4QGVnXtva4puSGprs5H2r46TM+92 +7EXqNls+UWARLJE8+cw6Jz2Ibpjyv9TwdHUYqlRjSsAJ1E9kFKWnQuzWSPUilY22 +KfkxkEfauAvL5qXBAX9C31E9t/QWWgFtiGetwk+MuVoqLFCifw2iKfKrKod/t0Ua +ykxm3PUi1LIjZq3yZIg6beiVIGNQ/FWcNK3NeR6LP7ZDvSWl1vJAQ/6EBTcNTYKb +B3rEiHmme20Vpl6QQMvzlZ+e+ZaU0JsycvEfKrBACvPXX1Bi1GVFFstb5XQ4a/f4 +p7LUQ9rJwOkm5mRLgrSkNzq4Nk1lPOIam5QFpdW4GBfeIUL0Q4K9io/fYsxF1DXh +fxCW1N6E6+RKhVG2cEdtnAmQxg9d8vIEMvFtuVMFMYjQ+qkJ5V0Ye11V/9lMo4Vf +H2ialSTLTKxoEjmYfCHXKu7JCba04uGEv9gzaX7Zk+uK9gN1FIMvDT3UIHZTDwtr +cm2kjn3wsuRiK3P974pAVAome+60jmH9M0IsBxLXilCI6aIcYwvHkfoSNwXQr1AI +6rBBA4o8df0OFvMp2/r1Ll9nLDTT7AxtjHu7C2HU46Fy9U01+oRiqW+UCY9+daMD +tQJMTkjfPwOU6b9KUOPKpraDnPubwNU6CXs6ySMCAwEAAaNUMFIwCwYDVR0PBAQD +AgQwMBMGA1UdJQQMMAoGCCsGAQUFBwMBMA8GA1UdEQQIMAaHBH8AAAEwHQYDVR0O +BBYEFKbpk6hZNzlzv/AdKtsl6x+dgBo+MA0GCSqGSIb3DQEBCwUAA4ICAQCmICqz +X5WOhwUm6LJJwMvKgFoVkav6ZcG/bEPiLe4waM2BubTpa1KPke8kMSmd/eLRxOiU +o1Z4Wi+bDw/ZGZHhnj/bJBZei9O+uRV4RbHCBh/LutRjY5zrublXMTtmjxCIjjHK +nQnoFFqKelyUGdaOw1ttooRT2FSDriZ6LKJ9vrTx0eCPBPA0EyaxuaxX3e/qYfE6 +sdrseEZSsouAmNCQ6jHnrQlzjeGAE6tlSTC3NVWbDlDbnX6cdRF07kV5PxnfcoyO +HGM3hdrIk5mhLpXrNKZp1nI4Ecd6UKiMCLgVxfexRKVJn00IR1URotRXZ2H9hQnh +xT5CnEBM+9dXoiwIvU+QYpnxo7mc47I6VkvoBI05rnS10bliwAk20yZuqc8iYC7R +r+ISRnhAcSb0otnKvxQQqzRH4Fi13g4mIoxbPJq+xTrNomKe/ywUe5q1Dt8QMhEg +7Sv8yg4ErKEvWIk5N0JOe1PaysobWXkv5n+xH9eJneyuBHGdi8qXe+2JLkK7ZfKB +uuLZyQcbUxb0/FSOhvtYu+2hPUb7nCOFvheAafHJu1P0pOkP8NNpM9X+tNw8Orum +VVFO8rvOh4+pH8sXRZ4tUQ33mbQS96ZSuiMJYCQf6EDkqmtRkOHCAvKkEtRLm2yV +4IRAZKHZaeKYr1UXwaqzpwES+8ZZLjURkvqvnQ== -----END CERTIFICATE----- diff --git a/testing/web3signer_tests/tls/lighthouse/key.key b/testing/web3signer_tests/tls/lighthouse/key.key index d00b6c21229..2b510c6b6db 100644 --- a/testing/web3signer_tests/tls/lighthouse/key.key +++ b/testing/web3signer_tests/tls/lighthouse/key.key @@ -1,52 +1,52 @@ -----BEGIN PRIVATE KEY----- -MIIJQgIBADANBgkqhkiG9w0BAQEFAASCCSwwggkoAgEAAoICAQC1R1M9NnRwUsqF -vJzNWPKuY1PW7llwRRWCixiWNvcxukGTa6AMLZDrYO1Y7qlw5m52aHSA2fs2KyeA -61yajG/BsLn1vmTtJMZXgLsG0MIqvhgOoh+ZZbl8biO0gQJSRSDEjf0ogUVM9TCE -t6ydbGnzgs8EESqvyXcreaXfmLI7jiX/BkwCdf+Ru+H3MF96QgAwOz1d8/fxYJvI -pT/DOx4NuMZouSAcUVXgwcVb6JXeTg0xVcL33lluquhYDR0gD5FeV0fPth+e9XMA -H7udim8E5wn2Ep8CAVoeVq6K9mBM3NqP7+2YmU//jLbkd6UvKPaI0vps1zF9Bo8Q -ewiRbM0IRse99ikCVZcjOcZSitw3kwTg59NjZ0Vk9R/2YQt/gGWMVcR//EtbOZGq -zGrLPFKOcWO85Ggz746Saj15N+bqT20hXHyiwYL8DLgJkMR2W9Nr67Vyi9SWSM6r -dRQlezlHq/yNEh+JuY7eoC3VeVw9K1ZXP+OKAwbpcnvd3uLwV91fkpT6kjc6d2h4 -bK8fhvF16Em42JypQCl0xMhgg/8MFO+6ZLy5otWAdsSYyO5k9CAa3zLeqd89dS7H -NLdLZ0Y5SFWm6y5Kqu89ErIENafX5DxupHWsruiBV7zhDHNPaGcfTPFe8xuDYsi1 -55veOfEiDh4g+X1qjL8x8OEDjgsM3QIDAQABAoICAEP5a1KMPUwzF0Lfr1Jm1JUk -pLb26C2rkf3B56XIFZgddeJwHHMEkQ9Z6JYM5Bd0KJ6Y23rHgiXVN7plRvOiznMs -MAbgblroC8GbAUZ0eCJr5nxyOXQdS1jHufbA21x7FGbvsSqDkrdhR2C0uPLMyMvp -VHP7dey1mEyCkHrP+KFRU5kVxOG1WnBMqdY1Ws/uuMBdLk0xItttdOzfXhH4dHQD -wc5aAJrtusyNDFLC25Og49yIgpPMWe+gAYCm5jFz9PgRtVlDOwcxlX5J5+GSm7+U -XM1bPSmU1TSEH233JbQcqo4HkynB71ftbVUtMhEFhLBYoFO4u5Ncpr+wys0xJY4f -3aJRV5+gtlmAmsKN66GoMA10KNlLp2z7XMlx1EXegOHthcKfgf5D6LKRz8qZhknm 
-FFgAOg9Bak1mt1DighhPUJ0vLYU6K+u0ZXwysYygOkBJ/yj63ApuPCSTQb7U0JlL -JMgesy1om3rVdN0Oc7hNaxq7VwswkzUTUKS2ZvGozF3MmdPHNm5weJTb3NsWv8Qo -HiK1I88tY9oZ5r91SC82hMErmG4ElXFLxic1B29h3fsIe/l+WjmZRXixD9ugV0gj -CvNa8QD9K3hljlNrR6eSXeO2QOyxAEUr2N1MBlxrnAWZCzXKiTvTx1aKDYhJT0DY -zae/etTLHVjzgdH6GS33AoIBAQDaaWYHa9wkJIJPX4siVCatwWKGTjVfDb5Q9upf -twkxCf58pmbzUOXW3dbaz6S0npR0V6Wqh3S8HW7xaHgDZDMLJ1WxLJrgqDKU3Pqc -k7xnA/krWqoRVSOOGkPnSrnZo6AVc6FR+iwJjfuUu0rFDwiyuqvuXpwNsVwvAOoL -xIbaEbGUHiFsZamm2YkoxrEjXGFkZxQX9+n9f+IAiMxMQc0wezRREc8e61/mTovJ -QJ7ZDd7zLUR7Yeqciy59NOsD57cGtnp1K28I2eKLA4taghgd5bJjPkUaHg9j5Xf6 -nsxU2QCp9kpwXvtMxN7pERKWFsnmu8tfJOiUWCpp8SLbIl6nAoIBAQDUefKKjRLa -6quNW0rOGn2kx0K6sG7T45OhwvWXVjnPAjX3/2mAMALT1wc3t0iKDvpIEfMadW2S -O8x2FwyifdJXmkz943EZ/J5Tq1H0wr4NeClX4UlPIAx3CdFlCphqH6QfKtrpQ+Hf -+e8XzjVvdg8Y/RcbWgPgBtOh2oKT5QHDh13/994nH7GhVM7PjLUVvZVmNWaC77zr -bXcvJFF/81PAPWC2JoV6TL/CXvda2tG2clxbSfykfUBPBpeyEijMoxC4UMuCHhbp -NpLfKJQp9XNqbBG2K4jgLQ8Ipk6Vtia/hktLgORf/pbQ4PxEv7OP5e1AOreDg/CW -RnQtBb+/8czbAoIBABfDA8Cm8WpVNoAgKujvMs4QjgGCnLfcrOnuEw2awjs9lRxG -lki+cmLv+6IOmSK1Zf1KU9G7ru2QXjORZA0qZ4s9GkuOSMNMSUR8zh8ey46Bligr -UvlTw+x/2wdcz99nt9DdpZ1flE7tzYMe5UGPIykeufnS/TNYKmlKtivVk75B0ooE -xSof3Vczr4JqK3dnY4ki1cLNy/0yXookV+Wr+wDdRpHTWC9K+EH8JaUdjKqcobbf -I+Ywfu/NDJ++lBr2qKjoTWZV9VyHJ+hr2Etef/Uwujml2qq+vnnlyynPAPfyK+pR -y0NycfCmMoI0w0rk685YfAW75DnPZb3k6B/jG10CggEBAMxf2DoI5EAKRaUcUOHa -fUxIFhl4p8HMPy7zVkORPt2tZLf8xz/z7mRRirG+7FlPetJj4ZBrr09fkZVtKkwJ -9o8o7jGv2hSC9s/IFHb38tMF586N9nPTgenmWbF09ZHuiXEpSZPiJZvIzn/5a1Ch -IHiKyPUYKm4MYvhmM/+J4Z5v0KzrgJXlWHi0GJFu6KfWyaOcbdQ4QWG6009XAcWv -Cbn5z9KlTvKKbFDMA+UyYVG6wrdUfVzC1V6uGq+/49qiZuzDWlz4EFWWlsNsRsft -Pmz5Mjglu+zVqoZJYYGDydWjmT0w53qmae7U2hJOyqr5ILINSIOKH5qMfiboRr6c -GM0CggEAJTQD/jWjHDIZFRO4SmurNLoyY7bSXJsYAhl77j9Cw/G4vcE+erZYAhp3 -LYu2nrnA8498T9F3H1oKWnK7u4YXO8ViyQd73ql7iKrMjE98CjfGcTPCXwOcPAts -ZpM8ykgFTsJpXEFvIR5cyZ6XFSw2m/Z7CRDpmwQ8es4LpNnYA7V5Yu/zDE4h2/2T -NmftCiZvkxwgj6VyKumOxXBnGK6lB+b6YMTltRrgD/35zmJoKRdqyLb1szPJtQuh -HjRTa/BVPgA66xYFWhifRUiYKpc0bARTYofHeoDgu6yPzcHMuM70NQQGF+WWJySg -vc3Za4ClKSLmb3ZA9giTswYMev+3BQ== +MIIJQQIBADANBgkqhkiG9w0BAQEFAASCCSswggknAgEAAoICAQCwCDgKRbnVcULR +mVwtc7MAccPd7XLa0KS9AsILlgbhAZWde29rim5IamuzkfavjpMz73bsReo2Wz5R +YBEskTz5zDonPYhumPK/1PB0dRiqVGNKwAnUT2QUpadC7NZI9SKVjbYp+TGQR9q4 +C8vmpcEBf0LfUT239BZaAW2IZ63CT4y5WiosUKJ/DaIp8qsqh3+3RRrKTGbc9SLU +siNmrfJkiDpt6JUgY1D8VZw0rc15Hos/tkO9JaXW8kBD/oQFNw1NgpsHesSIeaZ7 +bRWmXpBAy/OVn575lpTQmzJy8R8qsEAK89dfUGLUZUUWy1vldDhr9/instRD2snA +6SbmZEuCtKQ3Org2TWU84hqblAWl1bgYF94hQvRDgr2Kj99izEXUNeF/EJbU3oTr +5EqFUbZwR22cCZDGD13y8gQy8W25UwUxiND6qQnlXRh7XVX/2UyjhV8faJqVJMtM +rGgSOZh8Idcq7skJtrTi4YS/2DNpftmT64r2A3UUgy8NPdQgdlMPC2tybaSOffCy +5GIrc/3vikBUCiZ77rSOYf0zQiwHEteKUIjpohxjC8eR+hI3BdCvUAjqsEEDijx1 +/Q4W8ynb+vUuX2csNNPsDG2Me7sLYdTjoXL1TTX6hGKpb5QJj351owO1AkxOSN8/ +A5Tpv0pQ48qmtoOc+5vA1ToJezrJIwIDAQABAoICAAav4teBDpSTjBZD3Slc28/u +6NUYnORZe+iYnwZ4DIrZPij29D40ym7pAm5jFrWHyDYqddOqVEHJKMGuniuZpaQk +cSqy2IJbDRDi5fK5zNYSBQBlJMc/IzryXNUOA8kbU6HN+fDEpqPBSjqNOCtRRwoa +uE+dDNspsPx6UWh9IWMTfCUOZ8u6XguCWRN+3g6F8M2yS/I9AZG81898qBueczbR +qTNdQoAyEnS2sj7ODqArQniJIMmh3he5D15SrNefeVt+1D5uGEkwiQ9NqL58ZfGp +zcPa7HWB/H7Wmac3W0rwpxfDa5fgIq3Id93Sm9fh/yka1Z28c8cGgknxxKiIs6Jg +F7CKZIBJ3XxjcgytB223El/R8faHLpMJSPadDZ7uuU3yD/Qvp/JhRrdgkpE5bbzC +rWL92eVL86cbI/Hamup7VZMMfQpvjJg7FXPUr6ACKBetNkvXH0rqAkxHR8ZgfTeM +EwrpSWS0aktxxeMjzPq4DUaKKVGiN2KMDhbHEd5h2ovWMzyr14isohW81Z8w5R68 +F+2jq3IlVTLe06vmTRXAhOpwecj8UpraZjM1qyFpBd/lAolTjjMxzKJ2DcHlWI8Q +7e9LMvt1fj3bbzJVubdrITjdeom5CnDrmDGcErX9xzom8m3auYLszUENp/sfIHru 
+0DP+LKb2W4BOmXKs3VABAoIBAQDm4HNpOA7X7Jw7oowS4MoZOeeTjzcldT2AP9O7 +jFf2I2t5Ig0mIIrIrEJCL1X+A3i3RblV7lhU3Dpag8dhZUrXhydgnXKEMH/zz3gx +daCY1NO1fxAx5Y4J8VlCMIA7FpZI6sgRPjLBOFdkD34HcKHsUu/r3KQ1A1xZGLOU +o1kxF2WyORGBwn83kWzhzK9RIwFIdx67m7ZLzwoD6nQul4A6qq1EE+QI5x4UYpBx +ZvQsWUtj0EujIKJFszJczivwGQ86Aj0MB7EaHg+bWtYET1kUmDmc/72sksQJVcsK +wYtkv/MsznAvuWfHVjYJo47+Qs1zpuDKEUC1cu768LtlKpljAoIBAQDDL/T2KilF +qK8IW2u7nyWY8ksN/xJOVC79ozx2cGlR/zbeht051NiaLP8YMwVKl618Bw5L+aHG +a1xA0AeuTvuo5TK/ObrWzMAY6A35gXPMd8msN6SJzIKHZSZrcg2GXTSFkn7iCRJp +vl58VX4FubfrNIXy3NGbgF2muz3Rwvk7bj5Ur3NxX574RLSuftw01rDt2fnfYGKD +NfLXzoR3rJ/E+wmS7sjBJbltvmySDZOyjDDJwAgMrn45Xbh9rVT5w62BbAJ78OTY +O3CBf9t40FmeSBlelqwSY6tUmf02+B8FhMTJzxlaCup2qIPn5z0RHIZ43bnqZ/X1 +nkNSs8ko0f1BAoIBABCw9WcL+Ha/0mO1Uq8itTmxp/5RAkmg+jtFYgdTFCDlWqW9 +QnoZLC9p1Lh4N51PnvCRB98ghh5MdaOJl2aBLjH6wWwItfi8kOONgkEBIgUqjcu3 +TfJtiCFL44oXe43KCj9nSeOFPaIecqL3Q8NB71LohBPnNa/neEuwr3r1fENCT8Xc +vllFOHFKADcq1xnkj/kvM3eYwEsmwrCZyKB9r3WOVUxwq7HBE7mhjpPEP67dHcgv +jOhUOacUV3XCKgcHqMQm2Ub/X1xmA/bVUFerbONCRhgFnS7WxXlvTGiQqYU1I11/ +5zhsDQaqQunbe0ECj1vnGqVBLg5wKrrVoJalx8UCggEAE8438wqQKYtWR2jPY7hg +XkanqwHo353XLtFzfykk5rcY4DebFxUr7WkHcXMr5EfDyMQGhVsNOU8Hi2QQg3Vs +P9UR8yludgFMtLpHQLwL/gFhq2HyBjGERSzUWy61hJ7Mh4k36sO05Jn2iHM8WGRh +7zHjLaOOeVLrLdHuEezQ0WD8Xid3dVeYj+SY2OPygEIQrfHiUvI6zMmanJ9N/b68 +b4ZxkEE+iarESAh8h81s4T8sbCxaJL9H+5Yw9D+0UauzXWCSV/U3o2FUpy9MG9Q4 +Y8E5Icn0J+GJLwp5ESzYKP0x4rBrCCH3bJbo240xOx1D39vP06M85/FpL2kizkuQ +gQKCAQBTmQd/wT+0hH2JoEA2yCtB3ylDSmarZr9yZ83j3hy7oJOL48FhzhMTGjNR +BqmwbV3/2Vky85FYXYwcOIHbwI8twKtI4OxOiXLnLkYZ4nNXLm65ckR1SfJhRyrM +8K/alI2l3AxY/RkZiUnnRGEAmjG8hwzka1Y6j9zT7KhFTTBlg0YR5TOD8bsd9/rX +yVR+XkgyxIshgcI6w7MnwdGt+aAGokGjZv+k09vTOnaFF4rcJgOCZ9t4ymnG3m+v +Ac4I2b8BA46WCxA6zeNn5IeKZL0Ibgv1NGbTW3vEzu2D9VNU3pqTm9Pq3QpMAp85 +UyUzHP+SV/CL1Otbg/HjN6JGIcgY -----END PRIVATE KEY----- diff --git a/testing/web3signer_tests/tls/lighthouse/key.p12 b/testing/web3signer_tests/tls/lighthouse/key.p12 index 73468fa084b6f5f1b036afd643967e361d004fc4..f2ef6d20e27199c5f1d39d25763776470f5a9dd0 100644 GIT binary patch literal 4387 zcmai&Ra6v=)`l4vx@7>7lx~Jba_DXtnxR8t=*|I#ZUg~I=@O9c2BjO7mhM(i@Oak$ zukX7&7klls-}hqgyXRdSj3A~(1)zZuM2i^M>_{c#4}1V7pb$Yc0z?oE{FQsa2#l(K zEsR10M($rJ3l)I!x262+0Kl++26TL|4w(1f5f97=Bx;sTUkt6I4HZ%Xm!U5v4WpuB zp@8u)u>Nl=DmFR_m>L7y0;vSBMFRrZfkZ!^_2N#+k1W@GxmYst$>a}1V9s}_QVf7TXLm8vRiqjmr5fqMq6vgT&F0?JILUTbe;WAc{ZIMSSMvm^W zl(@H@opDWnR3BLAXmSHr3tQ(>EW3Fg_aEHje>zh8*p{^z0CUFu>Kpx3q_9Gr#n?TZ z>`)8g&Kt(aO>cNY@6yl7S5Q)*L^2(a+Oj*1*6ZNUl0f^Y2g)9o!KtAJ*>xTHZJ(!3 zPhoP&Kx?7N{^Re5k~WhQ=}M+0kKYG%GSBAhGKlVF3nYpc1_Wo$>^R?xkJ5aiFgE@i zN2bJ5C3^$rKjofQ6;kxE`($Q?S=&QkGw=Zs`nd|Im-f|JlWPI}DipV}!B(QG{HXh@ zExIR2UnnbpF-FZuuO_hCcfL+26jt?7^!HcIhvGmUa`S$&x-n*{scr#e9BQ^yRqx_@ zzghpwMXedf^Nq?FMg-0wR7G8&l_L=l(#mMij3Y#UQO(NwDe(W9F&U*oq z5=ujZP@(2dJo{rTVMwmuQ`5fih(#+=>7a3(?pp?h99nYPQOBGL02~DTq3ZupDCVP+L?`s+V><3-@oXF|!QqW^-pzCRG2^Dg`mH94TAP zAY)GwbtK@!*qW7)YXk?OGs9z3s}eGYWv%RAcd)yyuM0>U>6pCr zU7c(5QDkLgHO1md&I0dw4G-oJuDb0QDY<7{>^yo5Q)`q~)1?&P?cEJ+rUwugos2uo z%Ggbf`CBb2-2Xmt&f4VK)J50VfRcoK+uX{(C^R5C@Ef@~*eZeU7gR_Q_Hp@rW6T~W zah0KEA(%zP7{7gDKy7j9c-9WAT7i?hyx2;*>c&0A6T|r-TLP~h5;+@OJ|&3MjNDZ`xH* z-F_E3T29J?E};;-a_BJ2gg^gQf4umfLX0g>FXV&C{G@A}etJ84!|&@$?m>g@ifzXZ zOc-;H!NBQ8N$R|U=7i9j%4=nX%FDY8AhKXU%;v|-4){#e_@`Pr8Zqwnf!^a9GktR$ z<1n4aMcu9MfCS&8YT|5*FZ91Sf|&#krH4-%dcBoehPhH*e&4^oou%R>9}IeNquxy` zIJ_~7RaYUCTtDLXp75*Q{c{U>%fRX0qJE;J6nZ&{{G&lxvI)Gn^7fNCc@8TPHw^|` zdBNUg589)xU+0YHEDV%iQwJ5kzRb=c4!QW)2th(;0!FZR^mS{&vTM1>x_+p|%z{gC zff{=p>frb~^ZtEk=f-#YGpb@T){xKa*^55u5To|5o7^Ow?(s?k( 
z2YO#SXlw(lnU!cXe0g6VisYJL@1_-*DZQ4sJ4JLoOGN(-mHj>~uW13$n%InvkH zlepX(HC*jhHiW$t(^w~M7~jsF%eled!eSCgmoiM^jJ(LBa;Q@p3g9!%IjF5d_pbR; znkY==c$?PSOW7^Z3+>d=mn4NedyeKyv!kt1#dO`bI~h3kwA_h4hO3pWwMVDXcinx> z^N2D=8AXsa=$L8UlBG@hy*X@R%hMwOhB7u6`)dRt%xl;0%PBO$IblRNe+x4-kmpbe ziU-mth5lv~Qo^uP-B&lIoxpJ@CzM1`c0_mA#_WQ?jI38`rZ9zQVX45@jOxfgf<{+Z zH~pL(>AYACQ7k&z%2E!1p`FB|lCK6|Tdl%E;?zZs@AD}9gLp}dQ0NQ`&?PkH*9U)} z5e=%TLV6$ONfplm$&R)kFjHe$*A3x(*g5WW9Uzy^>G^tLHFsgwFG0_vWm7u>)GDcr zd5+v{!3Z3@f4K8skYPUp5!m-&1orLUy2QZ!f5zeA08sy$?SIAQ{{^mz+kOrX!`W$E zzCe;gp-hSY2`-Qy<9hb6}Nvrg-vNtvKqP!_xkS33KEJRO423$3F z%Gcg6d_v$RaK*Y%v*Omh#XM+DZ&#t=u{!7m$-s}!mLg+DpYcp6Nj#B&oh%eo{(c0O z`tda6brnL`|Bw6E5nu%(M1|oGW|daG}BHFXKl_=Zx6WzCV$B&~OvqAD!rWfnN1^Y6}zj zsHd^0p5;gg03#WLpfX%{?`wIQkyVo=mrPU%?Iv1}WwF7+a4NL&8yc;}4c$R68Qiw> zlqo?-GOUfbne|in@S1beI4}>7KLW)bV*=ay#(>R-kW`S(zh7%YM)ZuDnevH?qU;GvszHj`lf`VZ>hLGdMHZnX`gEer#;>*H_6^mT-@n61yuN3SWAysz>|wcslXP`8LfMbp zcR_jD0UBptLOa^E-#p%Tn;kI#Q>RwK@RFLtCQ$k?EI1IT;}eYSqrA4^HMGPK9_0-g zKk?1|c2k{}tW#eklFskBns_k$FkAOW$ZCqQOvSLu8ZFiA2e)O9^XE^WxLRI9*D1Z5 z3mA8uv9uyzbcTBz^-s5Gvms)!z=1@$PbBqjD+_&rczI}UZ2s{qxoKHEKUXdY8QDvv z0zXeYhhELs6{OuXO>73YEuT}!lQPKJ-ixFVuE^V15=`_E#z&j#>j8K09c?r&#je^t zo17fTlASawGh4e|!MJ9&J99{9Ht^9~sV5l>t+4Y1$$L=?Rl~cbK{;2PD5>Wzi%9;f z(097CIHWN*#o=F!`3iVbecMSORGTVI)dCg2yO>=o&1ojt(vKy#rvg?v!Q?zG9H(vI z+wnX|(!yL-;F{fY(T|@O-F1sH36U&h#N*@58nKw%_GEN-sK>tyr>&vGX3y@%toHz3XZ&2i zj=m@H9`qk)^7Ox^-|uxaRXzjZZRK*yJezrHW3|k=Xj@d40-;q=+|pf{Hp~kNqIj!k zHXcA9XfCrA^7%Jmqt>AoAJoPL4|y^sDW5yqMKqm$R;ON2-XKP83rkjqS1 zc6HQ5EihTUei&AXHJWbP*hSXx?DiHBr#?8s(>xfR@5e(MKXv-_M-cbo3_GkuP@{_n z3Pe;}$5{9t27qZc-l)O-c-h|*n`o{nLFD)8LQ1VMPUy|&Y9j8aJU~KAkGUj~S6-(6 z!Q2ixzc-1h@xVk3PRm#gOZw9zohvSCK{NNma)54Qj($o&7&|?QNE8 z)V}PrezuMzc%stn4WTU){uI&qJ->psFQ)wQqOS+hl1Dc9}K8dyo z%a1{HE9(zSlPORw`xb^IQ&%d=8|5mLc1Ir%+SRDPJdr3J-)d-XlozNOwG$s^<;C2z z_2{Eav*0&712*ihsIcmBcM+|U`r?x`-VmG5os7P_hK_dE9Fx1iNEd?d<;a{#!e8@L z79Zx;J9%w;&F_+aSPnJgx9zmL+qr`}amliPAXk<)&6N5Id*9>kahi|boZ+Zw@fBLO z6K?b89rnLOZk6<1V#C=Y-_L$Q9AD9wRa&4O*L4uw*RPgCf=b%#HZs*S0X7o6G{O$y z(XXRa$u*jWKZIu|3)`?ws$r5_6XX9;bz14Xj@?Ku=%H*UNQsYedKI8q^X~r5M(C?& z9fY~nd}P^GOQm;^kUVmrsdiG_xpI(eG9~lLRrN??NW)&L_3rE`!Eop+eWhe(nsF`) zPlAexlg2Nl2czl6@D8V)AzO|9>+0F{9xMT^)-!4k4R5v<#FSOL$CV)PTa}MrBv?>+ zKKk2>SBk__d>3hi)C?Vta1no==)ADXy`8EH{Tj8mP6>n@K-5S%{%>DbiujgCjnQ;5 zWstbm?*8sqTv^clKJ*H9(|K3jS9bwUIQk!)R>EONvUG3OK-OLetw+m26Q2;^;&>jof+j8Q5a)>3K;&es`ZUIeBC?Q$~5- zRCBPMzVnm2j>NN|Cbz1Sy%yxq?xDM${}6DkFc{_U7za}3yTF*AG(bh_QAydku6<@` z^Vsplx*;iFipenTUDfJmcUq~AwB^*TSrLUomb5pew5Hj(=p_}w)v>p3QYMP#Yml@~ zOQ}Z9+nB$nPHcywwuUJIPTg4X#MkA0n?qeN3_m{76Nh$u680Q}c#++4J#`q? 
zesW&?>0`~nX+VrwQK>5%uL!;gb|G6g6oN(&kb+f%1%q-VQ?GL!Ry=+t&i|~+?*b(x zymRPMj<6vFIEy2v&&)AMUbrP|@O{Q(oLQ7*si)bUxt)ET6uN8g3SKfAI%1m2)1gUN zUbTcr+SpRpuMOM3ouRB00@bULbn`NS8U6`+*rdEk{*@Ae0F1vY>R$%{HvH$nCIYL0x&A!~!HigVr3A+yIKx5^5Qv8XCd9$} zzce5S8w1RQ1G0*E0kFly0&ruIGO#mbW;JP_AiQPX&$c~M4u;~eEaD#B)6&$NAyv~x zekn?*qez(oij&tt*X|$b+tSW!+`Ty{JoMnIlLu0XTzaf>g^V0R%GlKWMyLkPyLNFz zc>!BzGKl6DZ#hj$CziZDgS$IgqA7*jqkC_~sb?fXpgVmrZoEcXJp0m_eO~obN+99T z9+&;lIeG2)`RkO&=9J4%WE+?-bt?2)aB3Y5@>=YrWwNn`83$O zLVnXl3qwO3tlS{HPGTBHWFaV%QKbq7>^4bbnDtSQ)gDa6ntBM&K5%{0F?nu{Hwk%j zhYf_VV{-U%xxKVd0%^N|p)f}E(mz2wEj*BPZ`%bmir{35e#VVhw0!$UimMz)O4(5Y zRgdm2CS&V`cY&E^jk@7Y7-dzqxv6`Gs+ciGQn zZP2vQ4rlKc#3Dp>1WvBnxEAf!f9Wh5{oIbeQBG%%)&;&1obaAWDuXdkQ~kDzn{$e` zXc0=*Yd8Q&@_WW;z0pMSW1}NQuU4jeAroC*zMXL450O?guSRerC(6}C(aN)Y=}F~R z>Nu4OPk|^rDR9AI{GW2P&=Q6tK|}B2oYHPz&gyF%%A^%T+&64Gz~N#(MS(O8^J9|^ z%uViXYewTR=Qkxi0%}jQcqR$~Rn11lBk75UZgPgN#d2g=KccZ24!&-ajXPTiW8rR# zk^d&nHkPd}UuMgk*X?iXGAmXTjxfra95qM@viQxF{3NBbTfFq97n!(q_XuE;gr22f z(Fc{$g%e{rTy@#cu~NRuK56XeH`c8}#e$aYC|ldD?}EZLUDEKcQ~AX9$7VG(FCkwP zzMEDcsfS%f(0T&8B1=z1r_?vdbgC}g00GQnD~bNuR8Nz)BKT#L70RSCwg-+lV_Mi_ z#ua4AGAj&egV!U$56&I+6EDn$Q$jy_o}6e0V?c5fY6P|$+Fr?+3g10(;P>A1QWd+) z#^XKHJzF*!*^}#6D@cs9XlN=_pn;N1&Ui-x(~X^q{O@VQtQ3?Vd|7(zo9Bc?qWzdK zUBV6Ha550hPO;aQyzqg#R7Ocaj0uSyQ^w)a(V{xvweoe3QCPYB z*sGz#NbcFBz&G$En_|`e?yJ4HiU!8r>~?GnzGxGChCxqcNEigt*!yq}5@F?14fT;a zk;^KJUZU+Z5ACGaFKckspVoko{fv?Jw_{p9_6p~CR3%R*+wqE9DlXOqMV~wNkujB+ zMA_Mxe&we6VOZ`*_rz5{<_CthrO&(<_{hS%$+aHN@`2RGCC%Rv^3b{K42E1e$@{`n(p0@;O2E&nn-I%-pC+&9h!;4Rl1Fo0T&o`I^-r z_bl5~YNdD3nso9OTTP+S;-8IZEw#4?jerMVUGU2^(6W9H3IX6sJuSgZL`P*cI&IA?pNevtJwDMFQzTR$27$EH1$MM; ziCg10?~uy{`u4{yw+C4}m(we&lywtg8*PC(9kuu~ zks-NgHy9Lcrr(l9u1kN(wenCOBRLIu!bMyh)>x2t94tk_Q(JXn3V-Ss`VkDp$NGmi z{{NkF(I|9mQW%;gD}gshKKAhS)brKyniw-;4e+4Tpun3$cGuD@|; zTC-;cFEf;<5X||l``cHKBCj*|6P)l0`UOt+2~XRWa6P~}zQZs0t^K(LXy1obkJ|`7 zmI5zeu>tvD?Q%TVl&2s1_9(&%Ot!x`^E}&hJROzfQ5{eb$_{_C>p!pF6Y85mHGG1X z>f-tb(-Q_GU{s7TQn&r|b1B!fR64?x*9YJfHr`-Sd_n;q=VTU(v~3_*XYXfR%M-WE zU>e!3mhvQnrcO~S8pRwuhny-HvG&?AzxbEiw?yWhiuavyBI1|cXWn8)uKv&Z~ zV)p#{sMU85b9m31H1j5Cd@YO2JSZ8=@NE3QsuPGhBx({q4vbBxF`lyQTQi6V!5(dJ z%Mka*k%(^&htSOthiWaH8~&+<1r>@8F4ggaTo~b{#D3;K^xk&Yz(qpp7+PD%wdqnq z-(jyH3eGXZq6lbz<_i_ye5I7GA$bS(F%`XB%5eFJml6s9ID`&{lkH^WZMI%xyL5T=nx0u%B^YI zCuGx2i@W$RZW5~_Krwo!y|p4GE`&j~d>^u)Mn^MoT^cr$db!*mcwFOAsa7?gbJ|el z+9yiD-43*pWz{1EE{5Q|huo&b=3FBTQSGbZ9^&A3uQvbJ8f90Y#dM$C>rJ5ty0ynr z@d&Q&XEw!3MIZ-?l2c-SzHG1^J7T0tMO(RyTJe2y3~XhE)WNL`n2;7$t~86pwcv=~x{}2ab_!ML+Ytl6h4Pi$Uapjp!keS+2@O-?xeDu5;&F`EAcCNROgtgl}OOg3^<0KAzE%^ zld``uD%it}ilU~va<9XoaT42*I&31>FJiCbI=ymzn6p2@Pr=_qg=_N@J_X*+XUxi3 zs!)8yoK!?o*?7`wJI?Z46NP2zI2pLF3jN+VA#V3?A^zsp;2+2W!y4bH$bqhr{3LX* zYqoUhs{$uYDh*0Q`piz&7xupm$(T~!ECe}SvF4gtOAl4Kr#PVDJ}wv7OB=7Ok8R|c zoBhJo8_QV&WHEpj)d1*n?oRjW8Yu^NHve%l(Hy68Kieax^)V)QD8M5!KwgoPSOtpe zDyT0{@FF29srhv~JAhWz5Vck=F&VC$`z)~QgA=XFT#qyxJ+wJYK#AH*vcI@xP>$p- zgT{Jm0C-_rJVz;Y{X8^a&4y+5xLXW?!vb5!`$}h;)dFc>!)NqYrI%zrcv;V7_JqYr zYQd%^FF$-rm-8^z;?sM9jgoFy98v*ynDN6E4OiEC9X|K+GS(_AcKrm_W>9+6o!i6f zR5F;#n(Cw5HMF~VDya5(pugCvy@mZ%-wxYy6bdm<+lAi&78axL-n?80;C9A;zxFaU zoj%Wk>`fy2d#F{7PcWQ|96etiyVO$N{FID+(RN%S;wz3*zX&--8KM`web2J= zBMRjwutdYMIi$Mgt3Ey0%C|@x-FbWq7fAD6ZUyOZT?$8sQuubiijJiO#Do6i6MhHu#LMc47M6mJlLIV$e)wG`og z26*qk89lO`Tva}y5T%uzqne{y^}1X=(CwiUbR?04IR8qyEioX*?;KXh|Gw46aTQDa&mkReeV$LZajU0OJBsSY>qt@pR~nKCHBc4IxIBJ0%l66& zd{Yby2{K~d6fdUczY)H#K&}c!xN1f3Mi_Dz;_z=&RpRzhemUQryEa8?D~?ow(Xw9E z_f`HEru4UBpC(dcy(d~^x8B}XWV-!0A(6ll#D42J-^9$@6;2^W-%Uj$(qbTng`9Fk 
zFvJ9o=`*8eT*%#jn-nHtv;$*&>-!jPe`a07E3ScE$)RSrRW2U(KK^+Wrc(8Ab8EFJ!kNFAe{hjEHLF>VVOn3D!Np zWNv-LB%uEix{K?N5#DYo)<^cC^dP^f)jV^d$6CeCk8f|b-g zD)?k1wx|4e%!YUcOo_Ms5gukwtc-e~^rj;|eb^s~v}*4zWb&TW$cUf8HvVunk5s~= ztw{QEYR&ehw?iF@FPcR%v1j2_Mh~A$8{bYDo42#}j5XJ@lc=9izw+ShTTK2bY^Nmk z#iQM0@DUCoZN|4FPwm3H!9Y|iS%At6>rUR8U@~yyacriPcT~ZKnt;?Hk$6nrk>63% zSIKO`2()srxhn;!`@t9z;v)AHAsDl>e^dne{E7Mpi;X_L&%t=G*@@jnedfDBYPCjt zxae8^uI{+tEAz|jO~@q+p%t8&Vs)+h)+rpDHjv26WgKYH71hvMl~DOOn;iLlK$jgUV2#vX_DKm@#~> zohbDDsRukN~OeX z{(!YDo|dJuyxu}Z_%f4HL`b(yQnOQdAPiAdDDz|M=jj%3A=GURru|mZqnw$HW(@F| znT1#%D*P3ZG~AZwK`G~MERTeT+d>q#jK2?Xjdf$b*+;u8#Vb1owdLtM&DpwzI1gHr zS(UzE{pj0Cn#h&VdpjMZ+{L3F%CjH7_5B;%?^dtq7@Ln**=OaET z>QeuZ@=4Rb@+Pd71Es65T1Y0Zgbhlm2hfeMW(DYP`^CM&ABeceSe5JG`|KP7#k~1ruS@wWehjf9i{#@Vq+u+Zg(3mb z%YtlR)_)2fh)R$ei_*_qRlgj&k;=0gVZ%FVUT}o3eg#$l3xbLMc_#n?*cezWn^q~a uJd}-fxwW7H*w&c=YBk6}Udc*44EFeYeHJSl79JV3Sd*hn^}~Ow-hTkP>l@wx diff --git a/testing/web3signer_tests/tls/lighthouse/key_legacy.p12 b/testing/web3signer_tests/tls/lighthouse/key_legacy.p12 new file mode 100644 index 0000000000000000000000000000000000000000..c3394fae9af893142c035e087fa752c5225eefde GIT binary patch literal 4221 zcmV-@5Q6V8f)IHE0Ru3C5I+V9Duzgg_YDCD0ic2qFa&}SEHHu)C@_KsUj_*(hDe6@ z4FLxRpn?WaFoFh50s#Opf(Atf2`Yw2hW8Bt2LUh~1_~;MNQU0s;sCfPw}XdgI(fT@^x}(`3PBdm*Ho@y&3CpRAjR6oM;$OHD}Ua=k?g4zRjN z&8&g~)8unA{ALdXZg=g;)g_slEs!#9S%wqAuQ9vt%lJl@;8lFZ&6UT;pa$K>LMD^cWp>u;r*zC(?(!;$|*(0aLS$qhYu zTUgS;*sYz^t`G*GC+Skgl#0|qKv?N;A-OurZSTalB{C-jKhOb8Qg*%==NrW?1Ka?H1<^kC zue}11j^=3pMz#^>B3~l50cex-pzF2+8OR2@G&0Edt)CF?UCE4=0f9|Q6R|uFcjbvM zR_LAYTuTbh`C7H_|DomHrEwW4E4D+eWVkla@DoY2_pUjGAy@4)Wi%b9fsYk0!Q#Ag zY<>;7{uPXBt`%R8`x5>p|K;X@8^Rc`>tj)HetCd0L19U_dJK%G!*TP$Rc_!1sIN>T zD5(N@bD{y~f!>+3S!MN?vZQ@{tddtUB;b1g{(;S!qSd!|$DYcR2M?9{XtT$tV)Xx> ziB3zY%M`h=peo>SQ5)#hy0v#Q>P#sspt2IwAF4o8o0G$oUs=EXZ=%4@2z8kl*j3Ec zVc^gkVWRzuvmsz?%lMR%3EYOkZL5XnR`$D?Q$Iw%O%XAb2Pg_R1;HCIjHZj)1x&*_ zRCC%P)vVbEGfWIw_!6XWXc;afqfFAy#1U49mjnYUj5N^HDS+2r(i==k-YVmjQ z2|m4l^rSSC4Dl=99viQsIxr+UH3))Ww$a>d|C6V$6eU4gHE;j(_ z>p?L&Nm!zGY2vN-?C^B5QJ^1ptFYZ^C*w1t@!WXSjpMOgpN`JhxJ8yBpKV}mSPbyf z*BL79>gSq2=V z#>e_HOKlAOHY4@PUzNxM_T+@VMne>uwus>crfR^LJG1&|88u@r(^Y+;rBf`C!Raid z%PAU;+_Dr^a;*tycT)F-%8$-m4)+8*U>Xe1;k%#E1!jAwmFmBA zvRML7E9?a5oYvr$`|<%EbZcw1cmDKrH>Z_%pGCyVTqTl%V+Mi)FJ6mwVYTq#DFy69 z<*zy*L3T@(W)O#RGwFEY<%jLD&Mt605;axuN*{9n?9dVRy@?15J3JZv&;$bY=%mU%PDmr9;p+I{m9RV z*ZkNIEGA^t-xv%`w+RS?LiL3ZdNODCDyWG>qPtyuF@H7p`*#PPU^2$yLbMe~L=Qsh zgJu`z;rx8@cK_p?vYqz0a7|#ggYFRef;#0BM2J_+A7xU7JmgbKPt}FNP-XT5QMja& zpiqA@fBcb)mmr;xJ@nnOis5axzEay6(F3MqNK#cOYOkB&AlX0E?%f{&LNQU zF1^c{btRsP&MF&>=zph=`XWp8fEfg=+m`t)B+ABMTxUG<7s2BDQ0V!*x#yP1iaSIs zc+rect$YCR%0>qT!|-m&=~mCQB_gjzMgj0& zX4g-Jf>)mb!19Ol=-zK!>{>FjwM1IM>H7{iqu{o58m zMnod1G!^M~+Aw79%$N+1^2P1h4X~Z( ze|&U{tSs#*?3_BwL0bb2>@ARsbz%ODij;z(!*qcp`*8`4ZyJ(F2WSfRGIb@VT`?a6 zt1|Zkg8&LUb)mevsp*NNi$YAfs8xWZq9_uka7i%Z0f7gOa(l^y=*=!Lt1{T~h53 zInjFdRjd}OV}cIA?%5G>Z$F1IVERB+10gvkDvMz3S*>fVKj3Do!JN66CoZAWIaFrl9v}ZyQWt+49R25d1-JHHj+;%k@X&6=g zTB>|F$VQu6hmVK%+mig=b&E#6Ip6t!=Qj8Mxl5ok*_TFLB1Pbl$P^{h^W(^f)eO5oMQaO6L^k&a_}Q2MW}@RY*~$&hvYfAUU$Rx zf;_Z(^dFLOck_sPz+&m~_uBZAG3ors#kvI%8+(Q4I?FjC4F@dg~rgUcf6S~9F z9(_y0Og2|&c_SpwcWg2)grz{gIdV^VhRhse@FG(jU(aKnI?96f$Y zS)5{lTAslO6pn-wN!s4QH$K;Wp{L^)Y^jLu9llms#mBDeJOsM$3B_qVp^@e9dZF&a>wvUpi@of-t^o`(Ncyx#TF{xU zFsmR96e>r;b8X~z8E+)@b&W051yrqQ#C8q2R^xR{7K~2lsfQ zxjKbD?wTJcg?KRmSyP1){X#Q;(jZ;kE8`|4WSXpngK)*xZ~iYd|cMgj<<8RfF6(dK~JmgZT)C5D?}TJBVTDoJWctVL|@vJUM$` z1mSp`juk*69)bLSOfw`4PYJ@iul70hXgGPFguz&@bC^E zjXF?JciTq4MSk<D;-@i=an 
zH@6H;+Upk=n~$`;(^Q}i7zQ&ws|B*;p(h3iE-+a1>AJ97y<# zQ?B)Li|pKA>!!{JNR-BBQ4>`;q?i7iSnE~W>2KC8-qQaY7+DyIW6w*L9BGU!0B$%< z@DE?9zJ$n%fkRjqW{AreqUz*#E0}FLUCex6{EufX^SR?71_b3&_<`{M^`(E2xQ+f( z;yJNon9073Qx zd!}}`DT*|3Voz3YC8icW=ltaa*o&v#vi>Yy#1>g}`nPftsq&d&bVeuAjT!h1spKl( z;Y`R))OaPA9_h!3M~_NHq^lwYwiSzC!2*XjyxM7(I}jy>xeP=Uof~7+hvPs3lV}M7 z3s10xq2OjRG?LfqM9)!Fe3&DDuzgg_YDCF6)_eB6u|?x z6Cx|aZ@(C)=e31t!hKSr3NSG+AutIB1uG5%0vZJX1Qdy85~ufG97a}O7z;%xFch3+ Tn{osQX`OMNnGfDI}dfD?^~y!Iye1#ezWUtIFu?T1BWC=7$apNJxf zSjO=#Bh3LWhSW7=Yn_r~l^iCbSQ)_wdjnk)Bg8A5n;5uk;UPIyyuJR~?ln1Vfydb@ zyWC9Urc$0t{$(6dPZXeURYPXRM@tqV8jz0?PNkpEgMu|FWwrA4^;pO$a`8pK1xk(0 zdaFa{T?!0ko23SUJ~{b%4g0?otnJ@}#;8U0d^qo)E>8e)Sz2XL?sZ0C~@T&O&yjIevq_ zdMQ$}KH1@wKBC~~fL{eG-ebX_yEOG!dYq~VfW9o~vs^VeienSH?FT&T-uZo_Z-%t5 zEF`Mn#^K?U^S>u8yjG*-%(WBHzoTOOSP_XWRa!+$QGBbxPNC?=*bzX8D?tI=3n{f-Cj13c*+T;;MHU%2%&x3QA9=39D1AJxWSqvr zBxZDWPco92=kfvGVB#$%DMPZ>_nIi5sH)Q%nFb|E^(fjIhG;xJ53~E29iyI;q;+wC zC4Jw?be0{e~Mtt?%LpB ztuIL?b{W=}w5&8TsAhji*vMKnw#`spsWE#oc489)<-E-5NXf0)BPsih`jj`am{27n zv4~V0wt++eC?pIY-YP1ulRWBO0a?UusrxsEb;@x6)^@ljqEm8CD%U8$nh5VQzqL6r zNX6>grLY+3s-#R0pWT?cAsh~qHvM2zrTWr~Wtpy#ofq0KeL-RBfFEy6y9Y{{Q*wCk zX4?3hC20dh;4n2N>>PI&Pa;k_T}m&o71>cgOph+cIHTFBz1*Dh)g!rVZZKpNo@WI% zYb8>7-CtXyzU6ml8b4ItlQ*t+C1_O_GP)g=u_9?`nPiA1lqsLznhvUl%!%iL1vm8z zF>`B5lqrhu%tIRo$dvsSET}BpLH5)LWC#;fPHnSA>bPOF-OJGrY1_kRH2#>1O(po# zhQ7}>DvQ{9F{uUL0Ist$*>}>Ts^_lETTO$M;OpUIA(0d!8?!gW&MdHxF$dnK91WY} z(bPX?ZYn$!n7$TT4`R`&bvoU&aoQG0kU4&t**7n2?Z}sGTLYXrk|Xp=vGTxrRj(3n ztIjj=Lp<=^dLV8^Td={-c!uQmXVN#MA99iWku97Ck1aJ>!4ouyCFo5AN zpu2B{-c%Ru?4=h9^{ZR7+{-=1c{5Aeg{(!wHKclyqPjs*{0Gr*gS%<4 zz<^M@y>{`@kzVHanS4B}9?pdWu?kV_87B6Z4O$zk>Q)fI3(v5yAyr~szLZ1#zg*FJ zczE;Zk=%%E&T<37V)W5v56XI+kL(yFT|BB` zU?@J$k7y*O%9oURhWNlwR@@cQ9%Wg*y2L_lk1uI7Ny>aw7v(3_HRrfbrL98lm`)`b z%b%UQCoHjM&gIg*z7Zdo3zJx%E90--iG=rGcXu%lg5GRL6t4R)`Iit&O2}Dm);uOK z4+w3DbWW&gjR-2q(nrg) zn4zZce+Rf_iAUa1?FhpAUiE&pA>&zU^aUClVK7#JKb>2|uoV`*E)XBi&;Tpg18nMM zmDZUdFdV#pxbt6-VLzh5u#PQOR%qn!iLCNHta>0*V^XU?{Q?M&Wh5p>gYS2Cc9$L~AOZu)rxg@mm%>bJktlEIzz2Z`PQ#VTZU> z`ir{S7-R)QOj%@QcaYD;?lf@-R*Ez2^M)YDs{1ZDnAvfDJ<_I(@g@R?3cz(>W#i^W zsGA2K{T1T|4q;E(UN%xu2uB!7eD)DY4$-NHCrI*obPaN@7A;VB{Y<(M{sEbXzd`PZ zzjCZub-kD7$0Xwr4e3=I^%Py(BN%CPfKmf_imqqJBZXUd3h;^ z-ZHNp-gH$w`C7TLWV#@k+XNiMn%_qAV@0y8;V)d#L; z2Bjk{1<3x@%ScIVDGwJCLl$HEbk^%p1*e&jdCXjqItd~vLss!MJ7vbp4x_SO(cebg z56y`)Hu=*S6KkogvU+fAe81|LM*T+S-wB-`@Jx)cEfE}^;;(cb2A?h$+!V>mIEE8k z_Da((8zjfbXAWVWZjqt{>W0_sg`e692D9bD&ZxF<$5!aYYqV+#BL_h(O9xJ+r(WUI zZaq&4W^ig}A z5;qZ8pm;bjZi3iBQhsB;{l$;Fb@9NV7%+gZOfmrDF#_+fno3Z*% zEb?V}go~hoZ-a)`12kFFfv>lh9x7w=3hSy(E9|mvUb-!)P{LLG#{W){KQgfW5}6AD z&uuQ8xm-dqHu0K^(qd0I%mbO0apS}~p0{02a$?IHCtx8jMA!kP4NC-vNg*8zY{!z_ zG`MUr(>^0VGhE9DJ5bEz?=zF_pYV5(>0T@PsG}wFxus?~9{iT9%rz!L7rI|No)0r&3&B(%T+aETkIUbBiR^a0 z6OM+xx0qtu4P{lIpd@R@Vd)ONW7-VB8RTE}j<|T!u&=|p@3P3l-%&^eXv8I1sOV6! 
zrSj)`7Ho{a76K@^D7gIeE+R6LVG;=_eIXwtNAcwfEvZPb=yfBKc1DmA0oFz)p?nxF zrF+9uC!7*GX@PjB{pQ5?;GW#b>z2GY1S^;l8)CU?!%aw=tYDffi#TCS$~iWk-Z2}{ zQAi!A#Z0k{uQ9h?X6F28!x-3*-VNJPLzz0Rsufnhyo4ih4q2Q7SgDjW)po_{OxeQ_ z3RKkl15~B(8GD-Y0vV6^S{u;@t=7YGKo}-=fS|B`MIWbZxKOZNXHb?wOO4w-Z~Gc^ zpF4thx7pr>t^8gpPEf=m++7i5wK2`KFm;K(CAlz-b zI@iQo+vtj+()zS;WiaXq8jGrKLY`&FzqBIoj}cKpCIxnN7-Y`)GE?<*7*NSt9J$bQ z!WzxPkcfm&zd{A#_QPW8L<)BU_m^@8>-diS0KB+oSub>y_}RZmKEyf=eW#uk3Ez)p z2(YioQPNLrPW^CAuFW^;sEcW;S=KV$Rb^sY;5030voc7oC0)(GtKsc6IX_DDSGnLS z<@2xnXBI-&w{u=7V?2T_43qe>;dc4njoWKqKeL#5E&cR_qnqS`6zNimMcJT}Mqoq8 z`sc^KlWi%w(YtIc>+j(-r{qv~n3vYEJM$o2maGRMzXmUknY5eE^zWvn)xOX;2mXSs z%K<+lErMuF7d9tm(#_!fNVGYMK~ByGg)IdD*qiaZ{<^DPsNUl9w{(ua8ws^tOA{Dx zdLZdh78FCxtYv1Rv(v`J=zH7DD(z{mD%-ZOh+~sT`kQ|D&6WxO#-G7G>!dBV`e9yH z4>2y7ltAAYBF0y@G)aQw0Pcdc98cPv<21@QBmNA1Q3$l&#{5s`hL?G~Urr9DX9X=t zdz>`zFp~6>G9yuAeRLQXggv;7#Rkj4+uqw2Tz|14wWMaYnL-5#gb)kv;}A1 zcPk7~k&XWtV>V@abE|GIW_&LMxgZD8fpUNKM-LPIE|Pnk3L37c^X*GttgS>#CQNK3 zuHd$C52mp6l<}^(qnBb7bM{yDjXX((Y_p+a%U~lrX_u}e_%s^>Y4x7J{w({!+_&f9 z`7)4*mTS+dGc`^3FpW}`f)yHPkUIfV?rP~7B+ff<1hxLs54?_{7DX_|qfK3en;kF$ zO?X?C20!&GW|M0R0VysP3k0M4uS2ts?a9o>OEO%huq{y|DY?A-XxiGGXvS^CCN}jKjUI+uM&K%qf#p43NJ|x; z2lT8`wvNJ@?lVUk0-Mu)VZgHaNrrCUHkN>Iwt zsS*PE`akb`p8v=9!+q{^U)P80OwmcmSHd z{;ymEL6evM*CH=Qlc)ccQb>r2{;t^nI>eBN{~lztkoyqP|DH4uFn}^(W(TVSN~9qn zp(KLPkW>C|8VQh$2*O1Uw28h){EQSpEC!(WHfHUr{Yo8l2IkBa&i51yN0aktnNzp3 zw(MVY{sSNC8TNnG&zZN5t{G#qcEqbR4;eO&zdu6Xac+&{>u!ibC}4MSx|8p|y}{Xo z55gZrnW>@6NjZ3ADHQkhY?q@gg|m-ZM#WWNGk78Q`=VJ74$c*i3f2*2o|@qwGICEq zXzmjpI&65(D$=>yz)9z>=AD1Z~i&vS8thCWaBo{!pE;uy_T^aFoBrN59} zkL;UVf-0##2<;L{lrWmOJHh&Cl!*CMn5dxkp{58dnzMSl$^Q3y^XwsL% z@mOk2v7}#?LXv~XQg7o+0Il6FUva9t4?iF1kBy?madH>L&d`7irwqWPFAlhw_cBG(oI*Sl#NXgnpyY z9TcX!5|)|S<`tr8tF12f%^LR>=bS}bjgecDIAetZRD7Prv9-QJclX0@)GV-zz=Q{X zOjc!-YP&~~_U8&$aX*M8S$2L5#qxc2?y4^`g%f{bmwC&BYpDx|Vw*=O|C#YfPIk0f z>K_6;XJR&LwPClfPiDN5edFI^ThoO!v9oqyD=E5_UmiZcB7IA(=IZ>@%1-2$SF;m< zrx;OI$@oY^C#zqC$+6kgCPq~Okac8;as8y;dIZ!wehOQ>V}`8ZS7rWX978LleZy{h zj7E_!ZR<)S34McAfd-02G%`)nDa^CvS%wXu{dgnLAjv3IQ*b#0GMfi!&d%AR} z%lCwIRfCP!^wRS?!PC~_dF+Z&yR&K@&Nd9f$Xlc2!gT4ywC{4MWr5DpJ@9-*RUv6i zMwmS}Ua^I9yuWe@Y9ne_AZ;-?y;zXM+y2&i&wBC#F>brGDe{S3Cd7yeDo+Pgc1}m`VC=J{d)nf?2w}x^Cd(>%`lg`<4Bb6oQN4G3gy7S*EGVdegR)LEmH&RJ=9u}wA zb?CYX`S2|=!=`lQ$GV27XgPTr$F4b#3R$ZHb^XlLgP-sYy#?6_#fdMJh$9Wd`JrT? 
z94=j2Po;=F-5;n+4)7Tieesekr5iTU62tC2-wr=9FZQaRw$!e5A84jDzonF_PL)-+ z28|q^9Z@{$uk_7wcI)U3VwyO}gs<7cFy5*!l9K&U&%NXT7wgR9lA25L>z==SdGG2D zs@ueJYqu}*R}_d?o|zWIv=Gps%PB!S5C|uaI?|hn(8=>0iMk#_A`a?>ZXADD1+3LR zPDy_F+pi~kgZGAeZ7z(kuo&lMF;4xcXWjm*Y^+O`F>4e}(u0R)aqu_$J2UCvGOsKj zf?sog(1OvlAhfyQdxz5S%@l>+6**k`c)*(}qAqA&f}s3T(0c{DkkeaffMQrSG#-4h zSUxe3#uCzS|AcXjuw(WfHt=xdPm6Fkd64`G+7Z|MV(|0xb($gL} zeD|F8xgSy_X=M$_+=TAP$1VC{%9uLNP$CrT%k4VQ+_4C{Lb_Ry!EL%Uj>e}i)KtSN zqMk@o`uR-SaEN#t(G=>>7kYEuQI>+@y&zZHWBPA8=B|Jn39<6^?LRlUS);|kSLEV{ zA)~vFz8%$62F|eAaB1=BO$XGqSG@(p*U9orZvi%bZ56BU5C-wnX8l&>;Z;Nb4r3Ku zc4yvcU0qeGUQTL#kdZ(iewH>W{6)^=SUwnKUfC3dl+Pn|HwHFND(}fpN88_tJ6G<& zlia8x=(eaQk2@2=xhrXb6KYP_-OBp|-I>D$ykFLeUsg882Q4G2ZVtmX^eB<~>bQ(2 z2QMelaN=7dMx_MxkBgTCk=x*g78Z?RiMi-W|y{{S@G-Me`{$F5$ zGQyM{MMv7sjJ&x+~d% zGyCj15_~5_+>n&LV@NrlNcR4to;M7oI&Ixqt?WF#q0ugU=~-Cu)0R)T>WByTP!7^m zVPx7MA0)UwU;Cxk*^%Z*YFsE<`12_O6Gnkb_xMRL74G7k2~kmBhSK(=k5Wkp$l!0L9e6kMFdzK%Jmqyf0zi2 zJN!eKI2eTP>SQVTUFojMkgeVS@qU)X^?|uk)~qUB*1_J`;q&@G`&tYo-3Tvj3~Ksk z*Ug2x2Pi|n!H*a*@N0B6H3NM!st*ii;h5)<2*efk&qBTwhYIXouDHO^pQlj__(ZaJ z?yP*{1{W)345y${QrqI1LUJa24m74!d@0iTgxHGt?Np&%@BS|nX0IbSQBS_gi#rkW zA3Z|mJx}KZ>mYAPJ)X~VefU-&!0WZt$IO9L7KRKxOD4BW@D_=dt5*L=jQ(mMg3RZm zi{!l#V6SuY=&aarI%i>*kS){qrHdtdgaFk4UOl6dc<#y*v-<~Qg{+vE?rgRsuIHme z|Eb9>TOL=G&H)eQP4~U1Im_@G!Cq6vTkE0b__030V-*+Ny|{Nom zDn8X;=hc}0?`Nh54l7Fgy1BmMzdZM6;(rV~Bltk>{0v=|+n!tB3c}8;^ip?O=9pjD zC{Ug3_aM1eGRwyLUIeJ;4rlXKbXe_zL26{(6Vlvb)YeHQv$cyoqWGpRcu{F&I83j7-r_k=Ti5wHc18%`yWln`;T*O#_ zN`8FadnB5Q0l#>ZfGoA<* zGY0=EI^}W0taMnbXVh7SjoC0no=0GFgvu`&Qau`3m_N52`4s^GseVLle&6l;sm27x zQhlPeKqAzi5>|y&1Q3+>EL6^&P=Rsb@gytl?iydYt!IBaH9;i>nGZ~9g2zL-nu6}? z4^|EGdq3KnApv9TxUfpYLWcgz#XJNH=(lQR+Lxpn5?kW9MXP-lR{jHnQmG!l=}`Mx z-Ln=I>!*wnm#PvIX)DX+;k}dew|Sk34V-W<&wV)qHcgo!8I6iwrDVt=e8Ha>k9a;E zg;th;wM5a}=n+HYm12r`p2(-=FOMjIjMi6bA$8+?+R#}HWIAA@u=H1G8c|D>Qd~0K z6phXaR0GAHvF?etGzGI+^{dj-!`ZCpwv3LJhV1+K6&tT4139bCUUp3YIpQ*{(dn~zPet#7XYlXh3kVYc^_t)9^wM=|i0a%pYX zZg*cqvzR9a>zT>cEFdI?-+O(KTXF&jHI-WzE1O83x|!X`ADX zPvfF23aj7gbeID*b1})kIqIapPp5EUZ{quTwi&c1focPAio#M}V@lDEu47KZ_WTYD zJV^>d<#6CBQvFcJ^ZWgT#criBgpGm#`n9!R{y00Wr@x(`D|I;=bs2NCMK)eg9`87G zWQ$ujVSAD4=5qPnK}jJ%IdSLRX#STde&4a{d7rP9=Dt6CYYn%&dl{vf5(M2deLpaJ zX(hh)bqjTbddLklh|gccHD@fvu2E4)9=6ZVWF}^8vQdyjoS(+yS z-hxrHOjFY}c%bm12MoaXt9x!e^7JNG<)pUK@E|JF=|kc)gYOOPZ$WvnAmTU;9Xk!` zSmY*>tphIb(zor6?d9H%R?KqC<{)Y`bYT_1+j+sGAXk)}Rw}WmOk4GSO$4!{`Sa-x zJehoK_CPTo;-aB#cAke1OGObVb=cfAPe9!fzZY9lVYFSmC-JGN;u01M6HrIOy688vS% zM9R)==q(vr5eJ<)ml2xSHVM1#&fji_w!CuA7NWDksYb>GUWnUVz+%$UtcSeJ79* zlMw-U){3&tr(1eRxAK;84*AW`D{Z!CD+VSI*D*2V1J2k@0Oh)S8TT2fquPI}-v0n$ CpCCy9 diff --git a/testing/web3signer_tests/tls/web3signer/known_clients.txt b/testing/web3signer_tests/tls/web3signer/known_clients.txt index c4722fe5876..86d61fba75f 100644 --- a/testing/web3signer_tests/tls/web3signer/known_clients.txt +++ b/testing/web3signer_tests/tls/web3signer/known_clients.txt @@ -1 +1 @@ -lighthouse 02:D0:A8:C0:6A:59:90:40:54:67:D4:BD:AE:5A:D4:F5:14:A9:79:38:98:E0:62:93:C1:77:13:FC:B4:60:65:CE +lighthouse 49:99:C9:A4:05:4C:EC:BE:FD:0B:C3:C3:C1:2F:A4:D3:AB:70:96:47:51:F5:5B:3B:37:65:31:56:18:B7:B8:AD From 75d90795be0fe3ddbcb78402d35aab345dc88e2c Mon Sep 17 00:00:00 2001 From: Akihito Nakano Date: Mon, 16 Dec 2024 14:44:06 +0900 Subject: [PATCH 13/15] Remove req_id from CustodyId (#6589) * Remove req_id from CustodyId because it's not used --- beacon_node/lighthouse_network/src/service/api_types.rs | 1 - beacon_node/network/src/sync/network_context.rs | 3 +-- 2 files changed, 1 insertion(+), 3 deletions(-) diff --git 
a/beacon_node/lighthouse_network/src/service/api_types.rs b/beacon_node/lighthouse_network/src/service/api_types.rs index cb228153908..85fabbb0c3c 100644 --- a/beacon_node/lighthouse_network/src/service/api_types.rs +++ b/beacon_node/lighthouse_network/src/service/api_types.rs @@ -67,7 +67,6 @@ pub struct SamplingRequestId(pub usize); #[derive(Debug, Hash, PartialEq, Eq, Clone, Copy)] pub struct CustodyId { pub requester: CustodyRequester, - pub req_id: Id, } /// Downstream components that perform custody by root requests. diff --git a/beacon_node/network/src/sync/network_context.rs b/beacon_node/network/src/sync/network_context.rs index c4d987e8582..b6b7b315f3f 100644 --- a/beacon_node/network/src/sync/network_context.rs +++ b/beacon_node/network/src/sync/network_context.rs @@ -763,8 +763,7 @@ impl SyncNetworkContext { let requester = CustodyRequester(id); let mut request = ActiveCustodyRequest::new( block_root, - // TODO(das): req_id is duplicated here, also present in id - CustodyId { requester, req_id }, + CustodyId { requester }, &custody_indexes_to_fetch, self.log.clone(), ); From 1c5be34def7ea46297524180d3b5a1fd2b4c1ac7 Mon Sep 17 00:00:00 2001 From: Lion - dapplion <35266934+dapplion@users.noreply.github.com> Date: Mon, 16 Dec 2024 13:44:10 +0800 Subject: [PATCH 14/15] Write range sync tests in external event-driven form (#6618) * Write range sync tests in external event-driven form * Fix remaining test * Drop unused generics * Merge branch 'unstable' into range-sync-tests * Add reference to test author * Use async await * Fix failing test. Not sure how it was passing before without an EL. --- beacon_node/network/src/sync/manager.rs | 10 + .../src/sync/range_sync/block_storage.rs | 13 - .../src/sync/range_sync/chain_collection.rs | 21 +- .../network/src/sync/range_sync/mod.rs | 3 +- .../network/src/sync/range_sync/range.rs | 482 +----------------- .../network/src/sync/range_sync/sync_type.rs | 9 +- beacon_node/network/src/sync/tests/lookups.rs | 30 +- beacon_node/network/src/sync/tests/range.rs | 272 ++++++++++ 8 files changed, 328 insertions(+), 512 deletions(-) delete mode 100644 beacon_node/network/src/sync/range_sync/block_storage.rs diff --git a/beacon_node/network/src/sync/manager.rs b/beacon_node/network/src/sync/manager.rs index 344e91711c4..5d02be2b4c1 100644 --- a/beacon_node/network/src/sync/manager.rs +++ b/beacon_node/network/src/sync/manager.rs @@ -362,6 +362,16 @@ impl SyncManager { self.sampling.get_request_status(block_root, index) } + #[cfg(test)] + pub(crate) fn range_sync_state(&self) -> super::range_sync::SyncChainStatus { + self.range_sync.state() + } + + #[cfg(test)] + pub(crate) fn update_execution_engine_state(&mut self, state: EngineState) { + self.handle_new_execution_engine_state(state); + } + fn network_globals(&self) -> &NetworkGlobals { self.network.network_globals() } diff --git a/beacon_node/network/src/sync/range_sync/block_storage.rs b/beacon_node/network/src/sync/range_sync/block_storage.rs deleted file mode 100644 index df49543a6b6..00000000000 --- a/beacon_node/network/src/sync/range_sync/block_storage.rs +++ /dev/null @@ -1,13 +0,0 @@ -use beacon_chain::{BeaconChain, BeaconChainTypes}; -use types::Hash256; - -/// Trait that helps maintain RangeSync's implementation split from the BeaconChain -pub trait BlockStorage { - fn is_block_known(&self, block_root: &Hash256) -> bool; -} - -impl BlockStorage for BeaconChain { - fn is_block_known(&self, block_root: &Hash256) -> bool { - self.block_is_known_to_fork_choice(block_root) - } -} diff --git 
a/beacon_node/network/src/sync/range_sync/chain_collection.rs b/beacon_node/network/src/sync/range_sync/chain_collection.rs index 1217fbf8fed..c030d0a19e8 100644 --- a/beacon_node/network/src/sync/range_sync/chain_collection.rs +++ b/beacon_node/network/src/sync/range_sync/chain_collection.rs @@ -3,12 +3,11 @@ //! Each chain type is stored in it's own map. A variety of helper functions are given along with //! this struct to simplify the logic of the other layers of sync. -use super::block_storage::BlockStorage; use super::chain::{ChainId, ProcessingResult, RemoveChain, SyncingChain}; use super::sync_type::RangeSyncType; use crate::metrics; use crate::sync::network_context::SyncNetworkContext; -use beacon_chain::BeaconChainTypes; +use beacon_chain::{BeaconChain, BeaconChainTypes}; use fnv::FnvHashMap; use lighthouse_network::PeerId; use lighthouse_network::SyncInfo; @@ -37,10 +36,13 @@ pub enum RangeSyncState { Idle, } +pub type SyncChainStatus = + Result, &'static str>; + /// A collection of finalized and head chains currently being processed. -pub struct ChainCollection { +pub struct ChainCollection { /// The beacon chain for processing. - beacon_chain: Arc, + beacon_chain: Arc>, /// The set of finalized chains being synced. finalized_chains: FnvHashMap>, /// The set of head chains being synced. @@ -51,8 +53,8 @@ pub struct ChainCollection { log: slog::Logger, } -impl ChainCollection { - pub fn new(beacon_chain: Arc, log: slog::Logger) -> Self { +impl ChainCollection { + pub fn new(beacon_chain: Arc>, log: slog::Logger) -> Self { ChainCollection { beacon_chain, finalized_chains: FnvHashMap::default(), @@ -213,9 +215,7 @@ impl ChainCollection { } } - pub fn state( - &self, - ) -> Result, &'static str> { + pub fn state(&self) -> SyncChainStatus { match self.state { RangeSyncState::Finalized(ref syncing_id) => { let chain = self @@ -409,7 +409,8 @@ impl ChainCollection { let log_ref = &self.log; let is_outdated = |target_slot: &Slot, target_root: &Hash256| { - target_slot <= &local_finalized_slot || beacon_chain.is_block_known(target_root) + target_slot <= &local_finalized_slot + || beacon_chain.block_is_known_to_fork_choice(target_root) }; // Retain only head peers that remain relevant diff --git a/beacon_node/network/src/sync/range_sync/mod.rs b/beacon_node/network/src/sync/range_sync/mod.rs index d0f2f9217eb..8f881fba90f 100644 --- a/beacon_node/network/src/sync/range_sync/mod.rs +++ b/beacon_node/network/src/sync/range_sync/mod.rs @@ -2,7 +2,6 @@ //! peers. mod batch; -mod block_storage; mod chain; mod chain_collection; mod range; @@ -13,5 +12,7 @@ pub use batch::{ ByRangeRequestType, }; pub use chain::{BatchId, ChainId, EPOCHS_PER_BATCH}; +#[cfg(test)] +pub use chain_collection::SyncChainStatus; pub use range::RangeSync; pub use sync_type::RangeSyncType; diff --git a/beacon_node/network/src/sync/range_sync/range.rs b/beacon_node/network/src/sync/range_sync/range.rs index 0ef99838dee..78679403bb4 100644 --- a/beacon_node/network/src/sync/range_sync/range.rs +++ b/beacon_node/network/src/sync/range_sync/range.rs @@ -39,9 +39,8 @@ //! Each chain is downloaded in batches of blocks. The batched blocks are processed sequentially //! and further batches are requested as current blocks are being processed. 
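// [Editor's note -- illustrative sketch, not part of the patch.] The `SyncChainStatus`
// alias introduced in chain_collection.rs above names the nested `Result<Option<..>>`
// previously spelled out by `state()`, and it is what the new `range_sync_state()`
// helper in manager.rs and the new tests consume. A hedged sketch of how a caller
// reads it, assuming `SyncChainStatus` and `RangeSyncType` are in scope (as they are
// in this file after the `use` changes below); `log_range_state` is a hypothetical
// helper and trailing tuple fields are elided with `..`:
fn log_range_state(status: SyncChainStatus) {
    match status {
        Err(reason) => eprintln!("range sync state unavailable: {reason}"),
        Ok(None) => println!("range sync is idle"),
        // The first tuple field is the `RangeSyncType` the tests assert on.
        Ok(Some((sync_type, ..))) => println!("range sync active: {sync_type:?}"),
    }
}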
-use super::block_storage::BlockStorage; use super::chain::{BatchId, ChainId, RemoveChain, SyncingChain}; -use super::chain_collection::ChainCollection; +use super::chain_collection::{ChainCollection, SyncChainStatus}; use super::sync_type::RangeSyncType; use crate::metrics; use crate::status::ToStatusMessage; @@ -56,7 +55,7 @@ use lru_cache::LRUTimeCache; use slog::{crit, debug, trace, warn}; use std::collections::HashMap; use std::sync::Arc; -use types::{Epoch, EthSpec, Hash256, Slot}; +use types::{Epoch, EthSpec, Hash256}; /// For how long we store failed finalized chains to prevent retries. const FAILED_CHAINS_EXPIRY_SECONDS: u64 = 30; @@ -64,27 +63,26 @@ const FAILED_CHAINS_EXPIRY_SECONDS: u64 = 30; /// The primary object dealing with long range/batch syncing. This contains all the active and /// non-active chains that need to be processed before the syncing is considered complete. This /// holds the current state of the long range sync. -pub struct RangeSync> { +pub struct RangeSync { /// The beacon chain for processing. - beacon_chain: Arc, + beacon_chain: Arc>, /// Last known sync info of our useful connected peers. We use this information to create Head /// chains after all finalized chains have ended. awaiting_head_peers: HashMap, /// A collection of chains that need to be downloaded. This stores any head or finalized chains /// that need to be downloaded. - chains: ChainCollection, + chains: ChainCollection, /// Chains that have failed and are stored to prevent being retried. failed_chains: LRUTimeCache, /// The syncing logger. log: slog::Logger, } -impl RangeSync +impl RangeSync where - C: BlockStorage + ToStatusMessage, T: BeaconChainTypes, { - pub fn new(beacon_chain: Arc, log: slog::Logger) -> Self { + pub fn new(beacon_chain: Arc>, log: slog::Logger) -> Self { RangeSync { beacon_chain: beacon_chain.clone(), chains: ChainCollection::new(beacon_chain, log.clone()), @@ -96,9 +94,7 @@ where } } - pub fn state( - &self, - ) -> Result, &'static str> { + pub fn state(&self) -> SyncChainStatus { self.chains.state() } @@ -382,465 +378,3 @@ where } } } - -#[cfg(test)] -mod tests { - use crate::network_beacon_processor::NetworkBeaconProcessor; - use crate::sync::SyncMessage; - use crate::NetworkMessage; - - use super::*; - use crate::sync::network_context::{BlockOrBlob, RangeRequestId}; - use beacon_chain::builder::Witness; - use beacon_chain::eth1_chain::CachingEth1Backend; - use beacon_chain::parking_lot::RwLock; - use beacon_chain::test_utils::{BeaconChainHarness, EphemeralHarnessType}; - use beacon_chain::EngineState; - use beacon_processor::WorkEvent as BeaconWorkEvent; - use lighthouse_network::service::api_types::SyncRequestId; - use lighthouse_network::{ - rpc::StatusMessage, service::api_types::AppRequestId, NetworkConfig, NetworkGlobals, - }; - use slog::{o, Drain}; - use slot_clock::TestingSlotClock; - use std::collections::HashSet; - use store::MemoryStore; - use tokio::sync::mpsc; - use types::{FixedBytesExtended, ForkName, MinimalEthSpec as E}; - - #[derive(Debug)] - struct FakeStorage { - known_blocks: RwLock>, - status: RwLock, - } - - impl Default for FakeStorage { - fn default() -> Self { - FakeStorage { - known_blocks: RwLock::new(HashSet::new()), - status: RwLock::new(StatusMessage { - fork_digest: [0; 4], - finalized_root: Hash256::zero(), - finalized_epoch: 0usize.into(), - head_root: Hash256::zero(), - head_slot: 0usize.into(), - }), - } - } - } - - impl FakeStorage { - fn remember_block(&self, block_root: Hash256) { - 
self.known_blocks.write().insert(block_root); - } - - #[allow(dead_code)] - fn forget_block(&self, block_root: &Hash256) { - self.known_blocks.write().remove(block_root); - } - } - - impl BlockStorage for FakeStorage { - fn is_block_known(&self, block_root: &store::Hash256) -> bool { - self.known_blocks.read().contains(block_root) - } - } - - impl ToStatusMessage for FakeStorage { - fn status_message(&self) -> StatusMessage { - self.status.read().clone() - } - } - - type TestBeaconChainType = - Witness, E, MemoryStore, MemoryStore>; - - fn build_log(level: slog::Level, enabled: bool) -> slog::Logger { - let decorator = slog_term::TermDecorator::new().build(); - let drain = slog_term::FullFormat::new(decorator).build().fuse(); - let drain = slog_async::Async::new(drain).build().fuse(); - - if enabled { - slog::Logger::root(drain.filter_level(level).fuse(), o!()) - } else { - slog::Logger::root(drain.filter(|_| false).fuse(), o!()) - } - } - - #[allow(unused)] - struct TestRig { - log: slog::Logger, - /// To check what does sync send to the beacon processor. - beacon_processor_rx: mpsc::Receiver>, - /// To set up different scenarios where sync is told about known/unknown blocks. - chain: Arc, - /// Needed by range to handle communication with the network. - cx: SyncNetworkContext, - /// To check what the network receives from Range. - network_rx: mpsc::UnboundedReceiver>, - /// To modify what the network declares about various global variables, in particular about - /// the sync state of a peer. - globals: Arc>, - } - - impl RangeSync { - fn assert_state(&self, expected_state: RangeSyncType) { - assert_eq!( - self.state() - .expect("State is ok") - .expect("Range is syncing") - .0, - expected_state - ) - } - - #[allow(dead_code)] - fn assert_not_syncing(&self) { - assert!( - self.state().expect("State is ok").is_none(), - "Range should not be syncing." - ); - } - } - - impl TestRig { - fn local_info(&self) -> SyncInfo { - let StatusMessage { - fork_digest: _, - finalized_root, - finalized_epoch, - head_root, - head_slot, - } = self.chain.status.read().clone(); - SyncInfo { - head_slot, - head_root, - finalized_epoch, - finalized_root, - } - } - - /// Reads an BlocksByRange request to a given peer from the network receiver channel. 
- #[track_caller] - fn grab_request( - &mut self, - expected_peer: &PeerId, - fork_name: ForkName, - ) -> (AppRequestId, Option) { - let block_req_id = if let Ok(NetworkMessage::SendRequest { - peer_id, - request: _, - request_id, - }) = self.network_rx.try_recv() - { - assert_eq!(&peer_id, expected_peer); - request_id - } else { - panic!("Should have sent a batch request to the peer") - }; - let blob_req_id = if fork_name.deneb_enabled() { - if let Ok(NetworkMessage::SendRequest { - peer_id, - request: _, - request_id, - }) = self.network_rx.try_recv() - { - assert_eq!(&peer_id, expected_peer); - Some(request_id) - } else { - panic!("Should have sent a batch request to the peer") - } - } else { - None - }; - (block_req_id, blob_req_id) - } - - fn complete_range_block_and_blobs_response( - &mut self, - block_req: AppRequestId, - blob_req_opt: Option, - ) -> (ChainId, BatchId, Id) { - if blob_req_opt.is_some() { - match block_req { - AppRequestId::Sync(SyncRequestId::RangeBlockAndBlobs { id }) => { - let _ = self - .cx - .range_block_and_blob_response(id, BlockOrBlob::Block(None)); - let response = self - .cx - .range_block_and_blob_response(id, BlockOrBlob::Blob(None)) - .unwrap(); - let (chain_id, batch_id) = - TestRig::unwrap_range_request_id(response.sender_id); - (chain_id, batch_id, id) - } - other => panic!("unexpected request {:?}", other), - } - } else { - match block_req { - AppRequestId::Sync(SyncRequestId::RangeBlockAndBlobs { id }) => { - let response = self - .cx - .range_block_and_blob_response(id, BlockOrBlob::Block(None)) - .unwrap(); - let (chain_id, batch_id) = - TestRig::unwrap_range_request_id(response.sender_id); - (chain_id, batch_id, id) - } - other => panic!("unexpected request {:?}", other), - } - } - } - - fn unwrap_range_request_id(sender_id: RangeRequestId) -> (ChainId, BatchId) { - if let RangeRequestId::RangeSync { chain_id, batch_id } = sender_id { - (chain_id, batch_id) - } else { - panic!("expected RangeSync request: {:?}", sender_id) - } - } - - /// Produce a head peer - fn head_peer( - &self, - ) -> ( - PeerId, - SyncInfo, /* Local info */ - SyncInfo, /* Remote info */ - ) { - let local_info = self.local_info(); - - // Get a peer with an advanced head - let head_root = Hash256::random(); - let head_slot = local_info.head_slot + 1; - let remote_info = SyncInfo { - head_root, - head_slot, - ..local_info - }; - let peer_id = PeerId::random(); - (peer_id, local_info, remote_info) - } - - fn finalized_peer( - &self, - ) -> ( - PeerId, - SyncInfo, /* Local info */ - SyncInfo, /* Remote info */ - ) { - let local_info = self.local_info(); - - let finalized_root = Hash256::random(); - let finalized_epoch = local_info.finalized_epoch + 2; - let head_slot = finalized_epoch.start_slot(E::slots_per_epoch()); - let head_root = Hash256::random(); - let remote_info = SyncInfo { - finalized_epoch, - finalized_root, - head_slot, - head_root, - }; - - let peer_id = PeerId::random(); - (peer_id, local_info, remote_info) - } - - #[track_caller] - fn expect_empty_processor(&mut self) { - match self.beacon_processor_rx.try_recv() { - Ok(work) => { - panic!( - "Expected empty processor. 
Instead got {}", - work.work_type_str() - ); - } - Err(e) => match e { - mpsc::error::TryRecvError::Empty => {} - mpsc::error::TryRecvError::Disconnected => unreachable!("bad coded test?"), - }, - } - } - - #[track_caller] - fn expect_chain_segment(&mut self) { - match self.beacon_processor_rx.try_recv() { - Ok(work) => { - assert_eq!(work.work_type(), beacon_processor::WorkType::ChainSegment); - } - other => panic!("Expected chain segment process, found {:?}", other), - } - } - } - - fn range(log_enabled: bool) -> (TestRig, RangeSync) { - let log = build_log(slog::Level::Trace, log_enabled); - // Initialise a new beacon chain - let harness = BeaconChainHarness::>::builder(E) - .default_spec() - .logger(log.clone()) - .deterministic_keypairs(1) - .fresh_ephemeral_store() - .build(); - let chain = harness.chain; - - let fake_store = Arc::new(FakeStorage::default()); - let range_sync = RangeSync::::new( - fake_store.clone(), - log.new(o!("component" => "range")), - ); - let (network_tx, network_rx) = mpsc::unbounded_channel(); - let (sync_tx, _sync_rx) = mpsc::unbounded_channel::>(); - let network_config = Arc::new(NetworkConfig::default()); - let globals = Arc::new(NetworkGlobals::new_test_globals( - Vec::new(), - &log, - network_config, - chain.spec.clone(), - )); - let (network_beacon_processor, beacon_processor_rx) = - NetworkBeaconProcessor::null_for_testing( - globals.clone(), - sync_tx, - chain.clone(), - harness.runtime.task_executor.clone(), - log.clone(), - ); - let cx = SyncNetworkContext::new( - network_tx, - Arc::new(network_beacon_processor), - chain, - log.new(o!("component" => "network_context")), - ); - let test_rig = TestRig { - log, - beacon_processor_rx, - chain: fake_store, - cx, - network_rx, - globals, - }; - (test_rig, range_sync) - } - - #[test] - fn head_chain_removed_while_finalized_syncing() { - // NOTE: this is a regression test. - let (mut rig, mut range) = range(false); - - // Get a peer with an advanced head - let (head_peer, local_info, remote_info) = rig.head_peer(); - range.add_peer(&mut rig.cx, local_info, head_peer, remote_info); - range.assert_state(RangeSyncType::Head); - - let fork = rig - .cx - .chain - .spec - .fork_name_at_epoch(rig.cx.chain.epoch().unwrap()); - - // Sync should have requested a batch, grab the request. - let _ = rig.grab_request(&head_peer, fork); - - // Now get a peer with an advanced finalized epoch. - let (finalized_peer, local_info, remote_info) = rig.finalized_peer(); - range.add_peer(&mut rig.cx, local_info, finalized_peer, remote_info); - range.assert_state(RangeSyncType::Finalized); - - // Sync should have requested a batch, grab the request - let _ = rig.grab_request(&finalized_peer, fork); - - // Fail the head chain by disconnecting the peer. - range.remove_peer(&mut rig.cx, &head_peer); - range.assert_state(RangeSyncType::Finalized); - } - - #[test] - fn state_update_while_purging() { - // NOTE: this is a regression test. - let (mut rig, mut range) = range(true); - - // Get a peer with an advanced head - let (head_peer, local_info, head_info) = rig.head_peer(); - let head_peer_root = head_info.head_root; - range.add_peer(&mut rig.cx, local_info, head_peer, head_info); - range.assert_state(RangeSyncType::Head); - - let fork = rig - .cx - .chain - .spec - .fork_name_at_epoch(rig.cx.chain.epoch().unwrap()); - - // Sync should have requested a batch, grab the request. - let _ = rig.grab_request(&head_peer, fork); - - // Now get a peer with an advanced finalized epoch. 
- let (finalized_peer, local_info, remote_info) = rig.finalized_peer(); - let finalized_peer_root = remote_info.finalized_root; - range.add_peer(&mut rig.cx, local_info, finalized_peer, remote_info); - range.assert_state(RangeSyncType::Finalized); - - // Sync should have requested a batch, grab the request - let _ = rig.grab_request(&finalized_peer, fork); - - // Now the chain knows both chains target roots. - rig.chain.remember_block(head_peer_root); - rig.chain.remember_block(finalized_peer_root); - - // Add an additional peer to the second chain to make range update it's status - let (finalized_peer, local_info, remote_info) = rig.finalized_peer(); - range.add_peer(&mut rig.cx, local_info, finalized_peer, remote_info); - } - - #[test] - fn pause_and_resume_on_ee_offline() { - let (mut rig, mut range) = range(true); - let fork = rig - .cx - .chain - .spec - .fork_name_at_epoch(rig.cx.chain.epoch().unwrap()); - - // add some peers - let (peer1, local_info, head_info) = rig.head_peer(); - range.add_peer(&mut rig.cx, local_info, peer1, head_info); - let (block_req, blob_req_opt) = rig.grab_request(&peer1, fork); - - let (chain1, batch1, id1) = - rig.complete_range_block_and_blobs_response(block_req, blob_req_opt); - - // make the ee offline - rig.cx.update_execution_engine_state(EngineState::Offline); - - // send the response to the request - range.blocks_by_range_response(&mut rig.cx, peer1, chain1, batch1, id1, vec![]); - - // the beacon processor shouldn't have received any work - rig.expect_empty_processor(); - - // while the ee is offline, more peers might arrive. Add a new finalized peer. - let (peer2, local_info, finalized_info) = rig.finalized_peer(); - range.add_peer(&mut rig.cx, local_info, peer2, finalized_info); - let (block_req, blob_req_opt) = rig.grab_request(&peer2, fork); - - let (chain2, batch2, id2) = - rig.complete_range_block_and_blobs_response(block_req, blob_req_opt); - - // send the response to the request - range.blocks_by_range_response(&mut rig.cx, peer2, chain2, batch2, id2, vec![]); - - // the beacon processor shouldn't have received any work - rig.expect_empty_processor(); - - // make the beacon processor available again. - rig.cx.update_execution_engine_state(EngineState::Online); - - // now resume range, we should have two processing requests in the beacon processor. - range.resume(&mut rig.cx); - - rig.expect_chain_segment(); - rig.expect_chain_segment(); - } -} diff --git a/beacon_node/network/src/sync/range_sync/sync_type.rs b/beacon_node/network/src/sync/range_sync/sync_type.rs index d6ffd4a5dfb..4ff7e393101 100644 --- a/beacon_node/network/src/sync/range_sync/sync_type.rs +++ b/beacon_node/network/src/sync/range_sync/sync_type.rs @@ -1,10 +1,9 @@ //! Contains logic about identifying which Sync to perform given PeerSyncInfo of ourselves and //! of a remote. +use beacon_chain::{BeaconChain, BeaconChainTypes}; use lighthouse_network::SyncInfo; -use super::block_storage::BlockStorage; - /// The type of Range sync that should be done relative to our current state. #[derive(Debug, Clone, Copy, PartialEq, Eq)] pub enum RangeSyncType { @@ -17,8 +16,8 @@ pub enum RangeSyncType { impl RangeSyncType { /// Determines the type of sync given our local `PeerSyncInfo` and the remote's /// `PeerSyncInfo`. - pub fn new( - chain: &C, + pub fn new( + chain: &BeaconChain, local_info: &SyncInfo, remote_info: &SyncInfo, ) -> RangeSyncType { @@ -29,7 +28,7 @@ impl RangeSyncType { // not seen the finalized hash before. 
if remote_info.finalized_epoch > local_info.finalized_epoch - && !chain.is_block_known(&remote_info.finalized_root) + && !chain.block_is_known_to_fork_choice(&remote_info.finalized_root) { RangeSyncType::Finalized } else { diff --git a/beacon_node/network/src/sync/tests/lookups.rs b/beacon_node/network/src/sync/tests/lookups.rs index 9f2c9ef66f0..94aacad3e81 100644 --- a/beacon_node/network/src/sync/tests/lookups.rs +++ b/beacon_node/network/src/sync/tests/lookups.rs @@ -83,6 +83,7 @@ impl TestRig { .logger(log.clone()) .deterministic_keypairs(1) .fresh_ephemeral_store() + .mock_execution_layer() .testing_slot_clock(TestingSlotClock::new( Slot::new(0), Duration::from_secs(0), @@ -144,7 +145,7 @@ impl TestRig { } } - fn test_setup() -> Self { + pub fn test_setup() -> Self { Self::test_setup_with_config(None) } @@ -168,11 +169,11 @@ impl TestRig { } } - fn log(&self, msg: &str) { + pub fn log(&self, msg: &str) { info!(self.log, "TEST_RIG"; "msg" => msg); } - fn after_deneb(&self) -> bool { + pub fn after_deneb(&self) -> bool { matches!(self.fork_name, ForkName::Deneb | ForkName::Electra) } @@ -238,7 +239,7 @@ impl TestRig { (parent, block, parent_root, block_root) } - fn send_sync_message(&mut self, sync_message: SyncMessage) { + pub fn send_sync_message(&mut self, sync_message: SyncMessage) { self.sync_manager.handle_message(sync_message); } @@ -369,7 +370,7 @@ impl TestRig { self.expect_empty_network(); } - fn new_connected_peer(&mut self) -> PeerId { + pub fn new_connected_peer(&mut self) -> PeerId { self.network_globals .peers .write() @@ -811,7 +812,7 @@ impl TestRig { } } - fn peer_disconnected(&mut self, peer_id: PeerId) { + pub fn peer_disconnected(&mut self, peer_id: PeerId) { self.send_sync_message(SyncMessage::Disconnect(peer_id)); } @@ -827,7 +828,7 @@ impl TestRig { } } - fn pop_received_network_event) -> Option>( + pub fn pop_received_network_event) -> Option>( &mut self, predicate_transform: F, ) -> Result { @@ -847,7 +848,7 @@ impl TestRig { } } - fn pop_received_processor_event) -> Option>( + pub fn pop_received_processor_event) -> Option>( &mut self, predicate_transform: F, ) -> Result { @@ -871,6 +872,16 @@ impl TestRig { } } + pub fn expect_empty_processor(&mut self) { + self.drain_processor_rx(); + if !self.beacon_processor_rx_queue.is_empty() { + panic!( + "Expected processor to be empty, but has events: {:?}", + self.beacon_processor_rx_queue + ); + } + } + fn find_block_lookup_request( &mut self, for_block: Hash256, @@ -2173,7 +2184,8 @@ fn custody_lookup_happy_path() { mod deneb_only { use super::*; use beacon_chain::{ - block_verification_types::RpcBlock, data_availability_checker::AvailabilityCheckError, + block_verification_types::{AsBlock, RpcBlock}, + data_availability_checker::AvailabilityCheckError, }; use ssz_types::VariableList; use std::collections::VecDeque; diff --git a/beacon_node/network/src/sync/tests/range.rs b/beacon_node/network/src/sync/tests/range.rs index 8b137891791..6faa8b72472 100644 --- a/beacon_node/network/src/sync/tests/range.rs +++ b/beacon_node/network/src/sync/tests/range.rs @@ -1 +1,273 @@ +use super::*; +use crate::status::ToStatusMessage; +use crate::sync::manager::SLOT_IMPORT_TOLERANCE; +use crate::sync::range_sync::RangeSyncType; +use crate::sync::SyncMessage; +use beacon_chain::test_utils::{AttestationStrategy, BlockStrategy}; +use beacon_chain::EngineState; +use lighthouse_network::rpc::{RequestType, StatusMessage}; +use lighthouse_network::service::api_types::{AppRequestId, Id, SyncRequestId}; +use lighthouse_network::{PeerId, 
SyncInfo}; +use std::time::Duration; +use types::{EthSpec, Hash256, MinimalEthSpec as E, SignedBeaconBlock, Slot}; +const D: Duration = Duration::new(0, 0); + +impl TestRig { + /// Produce a head peer with an advanced head + fn add_head_peer(&mut self) -> PeerId { + self.add_head_peer_with_root(Hash256::random()) + } + + /// Produce a head peer with an advanced head + fn add_head_peer_with_root(&mut self, head_root: Hash256) -> PeerId { + let local_info = self.local_info(); + self.add_peer(SyncInfo { + head_root, + head_slot: local_info.head_slot + 1 + Slot::new(SLOT_IMPORT_TOLERANCE as u64), + ..local_info + }) + } + + // Produce a finalized peer with an advanced finalized epoch + fn add_finalized_peer(&mut self) -> PeerId { + self.add_finalized_peer_with_root(Hash256::random()) + } + + // Produce a finalized peer with an advanced finalized epoch + fn add_finalized_peer_with_root(&mut self, finalized_root: Hash256) -> PeerId { + let local_info = self.local_info(); + let finalized_epoch = local_info.finalized_epoch + 2; + self.add_peer(SyncInfo { + finalized_epoch, + finalized_root, + head_slot: finalized_epoch.start_slot(E::slots_per_epoch()), + head_root: Hash256::random(), + }) + } + + fn local_info(&self) -> SyncInfo { + let StatusMessage { + fork_digest: _, + finalized_root, + finalized_epoch, + head_root, + head_slot, + } = self.harness.chain.status_message(); + SyncInfo { + head_slot, + head_root, + finalized_epoch, + finalized_root, + } + } + + fn add_peer(&mut self, remote_info: SyncInfo) -> PeerId { + // Create valid peer known to network globals + let peer_id = self.new_connected_peer(); + // Send peer to sync + self.send_sync_message(SyncMessage::AddPeer(peer_id, remote_info.clone())); + peer_id + } + + fn assert_state(&self, state: RangeSyncType) { + assert_eq!( + self.sync_manager + .range_sync_state() + .expect("State is ok") + .expect("Range should be syncing") + .0, + state, + "not expected range sync state" + ); + } + + #[track_caller] + fn expect_chain_segment(&mut self) { + self.pop_received_processor_event(|ev| { + (ev.work_type() == beacon_processor::WorkType::ChainSegment).then_some(()) + }) + .unwrap_or_else(|e| panic!("Expect ChainSegment work event: {e:?}")); + } + + fn update_execution_engine_state(&mut self, state: EngineState) { + self.log(&format!("execution engine state updated: {state:?}")); + self.sync_manager.update_execution_engine_state(state); + } + + fn find_blocks_by_range_request(&mut self, target_peer_id: &PeerId) -> (Id, Option) { + let block_req_id = self + .pop_received_network_event(|ev| match ev { + NetworkMessage::SendRequest { + peer_id, + request: RequestType::BlocksByRange(_), + request_id: AppRequestId::Sync(SyncRequestId::RangeBlockAndBlobs { id }), + } if peer_id == target_peer_id => Some(*id), + _ => None, + }) + .expect("Should have a blocks by range request"); + + let blob_req_id = if self.after_deneb() { + Some( + self.pop_received_network_event(|ev| match ev { + NetworkMessage::SendRequest { + peer_id, + request: RequestType::BlobsByRange(_), + request_id: AppRequestId::Sync(SyncRequestId::RangeBlockAndBlobs { id }), + } if peer_id == target_peer_id => Some(*id), + _ => None, + }) + .expect("Should have a blobs by range request"), + ) + } else { + None + }; + + (block_req_id, blob_req_id) + } + + fn find_and_complete_blocks_by_range_request(&mut self, target_peer_id: PeerId) { + let (blocks_req_id, blobs_req_id) = self.find_blocks_by_range_request(&target_peer_id); + + // Complete the request with a single stream termination + 
self.log(&format!( + "Completing BlocksByRange request {blocks_req_id} with empty stream" + )); + self.send_sync_message(SyncMessage::RpcBlock { + request_id: SyncRequestId::RangeBlockAndBlobs { id: blocks_req_id }, + peer_id: target_peer_id, + beacon_block: None, + seen_timestamp: D, + }); + + if let Some(blobs_req_id) = blobs_req_id { + // Complete the request with a single stream termination + self.log(&format!( + "Completing BlobsByRange request {blobs_req_id} with empty stream" + )); + self.send_sync_message(SyncMessage::RpcBlob { + request_id: SyncRequestId::RangeBlockAndBlobs { id: blobs_req_id }, + peer_id: target_peer_id, + blob_sidecar: None, + seen_timestamp: D, + }); + } + } + + async fn create_canonical_block(&mut self) -> SignedBeaconBlock { + self.harness.advance_slot(); + + let block_root = self + .harness + .extend_chain( + 1, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; + self.harness + .chain + .store + .get_full_block(&block_root) + .unwrap() + .unwrap() + } + + async fn remember_block(&mut self, block: SignedBeaconBlock) { + self.harness + .process_block(block.slot(), block.canonical_root(), (block.into(), None)) + .await + .unwrap(); + } +} + +#[test] +fn head_chain_removed_while_finalized_syncing() { + // NOTE: this is a regression test. + // Added in PR https://github.com/sigp/lighthouse/pull/2821 + let mut rig = TestRig::test_setup(); + + // Get a peer with an advanced head + let head_peer = rig.add_head_peer(); + rig.assert_state(RangeSyncType::Head); + + // Sync should have requested a batch, grab the request. + let _ = rig.find_blocks_by_range_request(&head_peer); + + // Now get a peer with an advanced finalized epoch. + let finalized_peer = rig.add_finalized_peer(); + rig.assert_state(RangeSyncType::Finalized); + + // Sync should have requested a batch, grab the request + let _ = rig.find_blocks_by_range_request(&finalized_peer); + + // Fail the head chain by disconnecting the peer. + rig.peer_disconnected(head_peer); + rig.assert_state(RangeSyncType::Finalized); +} + +#[tokio::test] +async fn state_update_while_purging() { + // NOTE: this is a regression test. + // Added in PR https://github.com/sigp/lighthouse/pull/2827 + let mut rig = TestRig::test_setup(); + + // Create blocks on a separate harness + let mut rig_2 = TestRig::test_setup(); + // Need to create blocks that can be inserted into the fork-choice and fit the "known + // conditions" below. + let head_peer_block = rig_2.create_canonical_block().await; + let head_peer_root = head_peer_block.canonical_root(); + let finalized_peer_block = rig_2.create_canonical_block().await; + let finalized_peer_root = finalized_peer_block.canonical_root(); + + // Get a peer with an advanced head + let head_peer = rig.add_head_peer_with_root(head_peer_root); + rig.assert_state(RangeSyncType::Head); + + // Sync should have requested a batch, grab the request. + let _ = rig.find_blocks_by_range_request(&head_peer); + + // Now get a peer with an advanced finalized epoch. + let finalized_peer = rig.add_finalized_peer_with_root(finalized_peer_root); + rig.assert_state(RangeSyncType::Finalized); + + // Sync should have requested a batch, grab the request + let _ = rig.find_blocks_by_range_request(&finalized_peer); + + // Now the chain knows both chains target roots. 
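// [Editor's note, not part of the patch] Remembering both target roots before adding
// another peer is the crux of this regression test: the next `add_peer` call forces a
// range status update while chains whose targets are now known are being purged,
// which is the code path the linked PR #2827 addressed.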
+ rig.remember_block(head_peer_block).await; + rig.remember_block(finalized_peer_block).await; + + // Add an additional peer to the second chain to make range update it's status + rig.add_finalized_peer(); +} + +#[test] +fn pause_and_resume_on_ee_offline() { + let mut rig = TestRig::test_setup(); + + // add some peers + let peer1 = rig.add_head_peer(); + // make the ee offline + rig.update_execution_engine_state(EngineState::Offline); + // send the response to the request + rig.find_and_complete_blocks_by_range_request(peer1); + // the beacon processor shouldn't have received any work + rig.expect_empty_processor(); + + // while the ee is offline, more peers might arrive. Add a new finalized peer. + let peer2 = rig.add_finalized_peer(); + + // send the response to the request + rig.find_and_complete_blocks_by_range_request(peer2); + // the beacon processor shouldn't have received any work + rig.expect_empty_processor(); + // make the beacon processor available again. + // update_execution_engine_state implicitly calls resume + // now resume range, we should have two processing requests in the beacon processor. + rig.update_execution_engine_state(EngineState::Online); + + rig.expect_chain_segment(); + rig.expect_chain_segment(); +} From 847c8019c7867e3eaf65168e5259ea33e7e0eb5a Mon Sep 17 00:00:00 2001 From: Jimmy Chen Date: Mon, 16 Dec 2024 16:44:14 +1100 Subject: [PATCH 15/15] Fix peer down-scoring behaviour when gossip blobs/columns are received after `getBlobs` or reconstruction (#6686) * Fix peer disconnection when gossip blobs/columns are received after they are recieved from the EL or available via column reconstruction. --- .../gossip_methods.rs | 26 +++++++++++++++++-- 1 file changed, 24 insertions(+), 2 deletions(-) diff --git a/beacon_node/network/src/network_beacon_processor/gossip_methods.rs b/beacon_node/network/src/network_beacon_processor/gossip_methods.rs index 4fc83b09230..f3c48e42f0b 100644 --- a/beacon_node/network/src/network_beacon_processor/gossip_methods.rs +++ b/beacon_node/network/src/network_beacon_processor/gossip_methods.rs @@ -710,8 +710,19 @@ impl NetworkBeaconProcessor { MessageAcceptance::Reject, ); } + GossipDataColumnError::PriorKnown { .. } => { + // Data column is available via either the EL or reconstruction. + // Do not penalise the peer. + // Gossip filter should filter any duplicates received after this. + debug!( + self.log, + "Received already available column sidecar. Ignoring the column sidecar"; + "slot" => %slot, + "block_root" => %block_root, + "index" => %index, + ) + } GossipDataColumnError::FutureSlot { .. } - | GossipDataColumnError::PriorKnown { .. } | GossipDataColumnError::PastFinalizedSlot { .. } => { debug!( self.log, @@ -852,7 +863,18 @@ impl NetworkBeaconProcessor { MessageAcceptance::Reject, ); } - GossipBlobError::FutureSlot { .. } | GossipBlobError::RepeatBlob { .. } => { + GossipBlobError::RepeatBlob { .. } => { + // We may have received the blob from the EL. Do not penalise the peer. + // Gossip filter should filter any duplicates received after this. + debug!( + self.log, + "Received already available blob sidecar. Ignoring the blob sidecar"; + "slot" => %slot, + "root" => %root, + "index" => %index, + ) + } + GossipBlobError::FutureSlot { .. } => { debug!( self.log, "Could not verify blob sidecar for gossip. Ignoring the blob sidecar";
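// [Editor's note -- illustrative sketch, not part of the patch.] Patch 15 boils down
// to a policy split: "already have it" outcomes (RepeatBlob / PriorKnown, e.g. because
// the blob or column became available via the EL's getBlobs or via reconstruction) are
// now logged and ignored, while genuinely invalid gossip is still rejected with a peer
// penalty. A minimal, self-contained sketch of that pattern; `SketchGossipError`,
// `SketchAction`, and `classify` are hypothetical stand-ins, not Lighthouse types.
#[derive(Debug)]
enum SketchGossipError {
    // Analogous to GossipBlobError::RepeatBlob / GossipDataColumnError::PriorKnown.
    AlreadyAvailable,
    // Analogous to real validation failures (bad proposer signature, bad inclusion proof, ...).
    Invalid(&'static str),
}

#[derive(Debug, PartialEq)]
enum SketchAction {
    IgnoreOnly,
    RejectAndPenalize,
}

fn classify(err: &SketchGossipError) -> SketchAction {
    match err {
        // Duplicates are expected once data can arrive from the EL or reconstruction
        // before gossip does; do not punish the sender.
        SketchGossipError::AlreadyAvailable => SketchAction::IgnoreOnly,
        // Anything actually invalid still results in a reject plus a peer penalty.
        SketchGossipError::Invalid(_) => SketchAction::RejectAndPenalize,
    }
}

fn main() {
    assert_eq!(
        classify(&SketchGossipError::AlreadyAvailable),
        SketchAction::IgnoreOnly
    );
    assert_eq!(
        classify(&SketchGossipError::Invalid("bad signature")),
        SketchAction::RejectAndPenalize
    );
}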