diff --git a/.github/mergify.yml b/.github/mergify.yml
index ae01e3ffd2d..4c4046cf67a 100644
--- a/.github/mergify.yml
+++ b/.github/mergify.yml
@@ -4,6 +4,12 @@ queue_rules:
     batch_max_wait_time: 60 s
     checks_timeout: 10800 s
     merge_method: squash
+    commit_message_template: |
+      {{ title }} (#{{ number }})
+
+      {% for commit in commits %}
+      * {{ commit.commit_message }}
+      {% endfor %}
     queue_conditions:
       - "#approved-reviews-by >= 1"
       - "check-success=license/cla"
diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml
index 007070dbb5b..bdd7b626532 100644
--- a/.github/workflows/docker.yml
+++ b/.github/workflows/docker.yml
@@ -152,10 +152,12 @@ jobs:
       - name: Dockerhub login
         run: |
           echo "${DOCKER_PASSWORD}" | docker login --username ${DOCKER_USERNAME} --password-stdin
-      - name: Build lcli dockerfile (with push)
-        run: |
-          docker build \
-            --build-arg PORTABLE=true \
-            --tag ${LCLI_IMAGE_NAME}:${VERSION}${VERSION_SUFFIX} \
-            --file ./lcli/Dockerfile .
-          docker push ${LCLI_IMAGE_NAME}:${VERSION}${VERSION_SUFFIX}
+      - name: Build lcli and push
+        uses: docker/build-push-action@v5
+        with:
+          build-args: |
+            FEATURES=portable
+          context: .
+          push: true
+          file: ./lcli/Dockerfile
+          tags: ${{ env.LCLI_IMAGE_NAME }}:${{ env.VERSION }}${{ env.VERSION_SUFFIX }}
diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs
index a8a639fb063..2f0da3abc53 100644
--- a/beacon_node/beacon_chain/src/beacon_chain.rs
+++ b/beacon_node/beacon_chain/src/beacon_chain.rs
@@ -4023,7 +4023,7 @@ impl BeaconChain {
             .task_executor
             .spawn_blocking_handle(
                 move || chain.load_state_for_block_production(slot),
-                "produce_partial_beacon_block",
+                "load_state_for_block_production",
             )
             .ok_or(BlockProductionError::ShuttingDown)?
             .await
diff --git a/beacon_node/beacon_chain/src/data_availability_checker.rs b/beacon_node/beacon_chain/src/data_availability_checker.rs
index 21cac9a2649..48d505e9e7b 100644
--- a/beacon_node/beacon_chain/src/data_availability_checker.rs
+++ b/beacon_node/beacon_chain/src/data_availability_checker.rs
@@ -545,6 +545,18 @@ pub struct AvailableBlock {
 }
 
 impl AvailableBlock {
+    pub fn __new_for_testing(
+        block_root: Hash256,
+        block: Arc>,
+        blobs: Option>,
+    ) -> Self {
+        Self {
+            block_root,
+            block,
+            blobs,
+        }
+    }
+
     pub fn block(&self) -> &SignedBeaconBlock {
         &self.block
     }
diff --git a/beacon_node/beacon_chain/src/historical_blocks.rs b/beacon_node/beacon_chain/src/historical_blocks.rs
index 83f092611ba..a2377d6a2e0 100644
--- a/beacon_node/beacon_chain/src/historical_blocks.rs
+++ b/beacon_node/beacon_chain/src/historical_blocks.rs
@@ -141,13 +141,14 @@ impl BeaconChain {
 
             prev_block_slot = block.slot();
             expected_block_root = block.message().parent_root();
+            signed_blocks.push(block);
 
             // If we've reached genesis, add the genesis block root to the batch for all slots
             // between 0 and the first block slot, and set the anchor slot to 0 to indicate
             // completion.
             if expected_block_root == self.genesis_block_root {
                 let genesis_slot = self.spec.genesis_slot;
-                for slot in genesis_slot.as_u64()..block.slot().as_u64() {
+                for slot in genesis_slot.as_u64()..prev_block_slot.as_u64() {
                     cold_batch.push(KeyValueStoreOp::PutKeyValue(
                         get_key_for_col(DBColumn::BeaconBlockRoots.into(), &slot.to_be_bytes()),
                         self.genesis_block_root.as_bytes().to_vec(),
@@ -157,7 +158,6 @@ impl BeaconChain {
                 expected_block_root = Hash256::zero();
                 break;
             }
-            signed_blocks.push(block);
         }
         // these were pushed in reverse order so we reverse again
         signed_blocks.reverse();
diff --git a/beacon_node/beacon_chain/src/observed_aggregates.rs b/beacon_node/beacon_chain/src/observed_aggregates.rs
index 18a761e29e8..aa513da547d 100644
--- a/beacon_node/beacon_chain/src/observed_aggregates.rs
+++ b/beacon_node/beacon_chain/src/observed_aggregates.rs
@@ -43,7 +43,7 @@ impl Consts for Attestation {
 
     /// We need to keep attestations for each slot of the current epoch.
     fn max_slot_capacity() -> usize {
-        T::slots_per_epoch() as usize
+        2 * T::slots_per_epoch() as usize
     }
 
     /// As a DoS protection measure, the maximum number of distinct `Attestations` or
diff --git a/beacon_node/beacon_chain/src/observed_attesters.rs b/beacon_node/beacon_chain/src/observed_attesters.rs
index 605a1343210..a1c6adc3e07 100644
--- a/beacon_node/beacon_chain/src/observed_attesters.rs
+++ b/beacon_node/beacon_chain/src/observed_attesters.rs
@@ -24,18 +24,16 @@ use types::{Epoch, EthSpec, Hash256, Slot, Unsigned};
 
 /// The maximum capacity of the `AutoPruningEpochContainer`.
 ///
-/// Fits the next, current and previous epochs. We require the next epoch due to the
-/// `MAXIMUM_GOSSIP_CLOCK_DISPARITY`. We require the previous epoch since the specification
-/// declares:
+/// If the current epoch is N, this fits epoch N + 1, N, N - 1, and N - 2. We require the next epoch due
+/// to the `MAXIMUM_GOSSIP_CLOCK_DISPARITY`. We require the N - 2 epoch since the specification declares:
 ///
 /// ```ignore
-/// aggregate.data.slot + ATTESTATION_PROPAGATION_SLOT_RANGE
-/// >= current_slot >= aggregate.data.slot
+/// the epoch of `aggregate.data.slot` is either the current or previous epoch
 /// ```
 ///
-/// This means that during the current epoch we will always accept an attestation
-/// from at least one slot in the previous epoch.
-pub const MAX_CACHED_EPOCHS: u64 = 3;
+/// This means that during the current epoch we will always accept an attestation from
+/// at least one slot in the epoch prior to the previous epoch.
+pub const MAX_CACHED_EPOCHS: u64 = 4;
 
 pub type ObservedAttesters = AutoPruningEpochContainer;
 pub type ObservedSyncContributors =
diff --git a/beacon_node/beacon_chain/tests/store_tests.rs b/beacon_node/beacon_chain/tests/store_tests.rs
index 827d4e66de2..5bf33d5a968 100644
--- a/beacon_node/beacon_chain/tests/store_tests.rs
+++ b/beacon_node/beacon_chain/tests/store_tests.rs
@@ -3,6 +3,7 @@
 use beacon_chain::attestation_verification::Error as AttnError;
 use beacon_chain::block_verification_types::RpcBlock;
 use beacon_chain::builder::BeaconChainBuilder;
+use beacon_chain::data_availability_checker::AvailableBlock;
 use beacon_chain::test_utils::{
     mock_execution_layer_from_parts, test_spec, AttestationStrategy, BeaconChainHarness,
     BlockStrategy, DiskHarnessType,
@@ -2305,6 +2306,25 @@ async fn weak_subjectivity_sync_test(slots: Vec, checkpoint_slot: Slot) {
         }
     }
 
+    // Corrupt the signature on the 1st block to ensure that the backfill processor is checking
+    // signatures correctly. Regression test for https://github.com/sigp/lighthouse/pull/5120.
+    let mut batch_with_invalid_first_block = available_blocks.clone();
+    batch_with_invalid_first_block[0] = {
+        let (block_root, block, blobs) = available_blocks[0].clone().deconstruct();
+        let mut corrupt_block = (*block).clone();
+        *corrupt_block.signature_mut() = Signature::empty();
+        AvailableBlock::__new_for_testing(block_root, Arc::new(corrupt_block), blobs)
+    };
+
+    // Importing the invalid batch should error.
+    assert!(matches!(
+        beacon_chain
+            .import_historical_block_batch(batch_with_invalid_first_block)
+            .unwrap_err(),
+        BeaconChainError::HistoricalBlockError(HistoricalBlockError::InvalidSignature)
+    ));
+
+    // Importing the batch with valid signatures should succeed.
     beacon_chain
         .import_historical_block_batch(available_blocks.clone())
         .unwrap();
diff --git a/beacon_node/http_api/src/lib.rs b/beacon_node/http_api/src/lib.rs
index d25c7be524d..4655ada9454 100644
--- a/beacon_node/http_api/src/lib.rs
+++ b/beacon_node/http_api/src/lib.rs
@@ -4265,36 +4265,6 @@ pub fn serve(
             },
         );
 
-    // GET lighthouse/beacon/states/{state_id}/ssz
-    let get_lighthouse_beacon_states_ssz = warp::path("lighthouse")
-        .and(warp::path("beacon"))
-        .and(warp::path("states"))
-        .and(warp::path::param::())
-        .and(warp::path("ssz"))
-        .and(warp::path::end())
-        .and(task_spawner_filter.clone())
-        .and(chain_filter.clone())
-        .then(
-            |state_id: StateId,
-             task_spawner: TaskSpawner,
-             chain: Arc>| {
-                task_spawner.blocking_response_task(Priority::P1, move || {
-                    // This debug endpoint provides no indication of optimistic status.
-                    let (state, _execution_optimistic, _finalized) = state_id.state(&chain)?;
-                    Response::builder()
-                        .status(200)
-                        .body(state.as_ssz_bytes().into())
-                        .map(|res: Response| add_ssz_content_type_header(res))
-                        .map_err(|e| {
-                            warp_utils::reject::custom_server_error(format!(
-                                "failed to create response: {}",
-                                e
-                            ))
-                        })
-                })
-            },
-        );
-
     // GET lighthouse/staking
     let get_lighthouse_staking = warp::path("lighthouse")
         .and(warp::path("staking"))
@@ -4629,7 +4599,6 @@ pub fn serve(
                 .uor(get_lighthouse_eth1_syncing)
                 .uor(get_lighthouse_eth1_block_cache)
                 .uor(get_lighthouse_eth1_deposit_cache)
-                .uor(get_lighthouse_beacon_states_ssz)
                 .uor(get_lighthouse_staking)
                 .uor(get_lighthouse_database_info)
                 .uor(get_lighthouse_block_rewards)
diff --git a/beacon_node/http_api/tests/tests.rs b/beacon_node/http_api/tests/tests.rs
index d559a310df2..462752324be 100644
--- a/beacon_node/http_api/tests/tests.rs
+++ b/beacon_node/http_api/tests/tests.rs
@@ -5057,26 +5057,6 @@ impl ApiTester {
         self
     }
 
-    pub async fn test_get_lighthouse_beacon_states_ssz(self) -> Self {
-        for state_id in self.interesting_state_ids() {
-            let result = self
-                .client
-                .get_lighthouse_beacon_states_ssz(&state_id.0, &self.chain.spec)
-                .await
-                .unwrap();
-
-            let mut expected = state_id
-                .state(&self.chain)
-                .ok()
-                .map(|(state, _execution_optimistic, _finalized)| state);
-            expected.as_mut().map(|state| state.drop_all_caches());
-
-            assert_eq!(result, expected, "{:?}", state_id);
-        }
-
-        self
-    }
-
     pub async fn test_get_lighthouse_staking(self) -> Self {
         let result = self.client.get_lighthouse_staking().await.unwrap();
 
@@ -6373,8 +6353,6 @@ async fn lighthouse_endpoints() {
         .await
         .test_get_lighthouse_eth1_deposit_cache()
         .await
-        .test_get_lighthouse_beacon_states_ssz()
-        .await
         .test_get_lighthouse_staking()
         .await
         .test_get_lighthouse_database_info()
diff --git a/book/src/api-lighthouse.md b/book/src/api-lighthouse.md
index 5bc13d39748..1a3decef96b 100644
--- a/book/src/api-lighthouse.md
+++ b/book/src/api-lighthouse.md
@@ -463,20 +463,6 @@ curl -X GET "http://localhost:5052/lighthouse/eth1/deposit_cache" -H "accept: a
 }
 ```
 
-### `/lighthouse/beacon/states/{state_id}/ssz`
-
-Obtains a `BeaconState` in SSZ bytes. Useful for obtaining a genesis state.
-
-The `state_id` parameter is identical to that used in the [Standard Beacon Node API
-`beacon/state`
-routes](https://ethereum.github.io/beacon-APIs/#/Beacon/getStateRoot).
-
-```bash
-curl -X GET "http://localhost:5052/lighthouse/beacon/states/0/ssz" | jq
-```
-
-*Example omitted for brevity, the body simply contains SSZ bytes.*
-
 ### `/lighthouse/liveness`
 
 POST request that checks if any of the given validators have attested in the given epoch. Returns a list
@@ -515,7 +501,7 @@ curl "http://localhost:5052/lighthouse/database/info" | jq
 
 ```json
 {
-  "schema_version": 16,
+  "schema_version": 18,
   "config": {
     "slots_per_restore_point": 8192,
     "slots_per_restore_point_set_explicitly": false,
@@ -523,18 +509,26 @@ curl "http://localhost:5052/lighthouse/database/info" | jq
     "historic_state_cache_size": 1,
     "compact_on_init": false,
     "compact_on_prune": true,
-    "prune_payloads": true
+    "prune_payloads": true,
+    "prune_blobs": true,
+    "epochs_per_blob_prune": 1,
+    "blob_prune_margin_epochs": 0
   },
   "split": {
-    "slot": "5485952",
-    "state_root": "0xcfe5d41e6ab5a9dab0de00d89d97ae55ecaeed3b08e4acda836e69b2bef698b4"
+    "slot": "7454656",
+    "state_root": "0xbecfb1c8ee209854c611ebc967daa77da25b27f1a8ef51402fdbe060587d7653",
+    "block_root": "0x8730e946901b0a406313d36b3363a1b7091604e1346a3410c1a7edce93239a68"
   },
   "anchor": {
-    "anchor_slot": "5414688",
-    "oldest_block_slot": "0",
-    "oldest_block_parent": "0x0000000000000000000000000000000000000000000000000000000000000000",
-    "state_upper_limit": "5414912",
-    "state_lower_limit": "8192"
+    "anchor_slot": "7451168",
+    "oldest_block_slot": "3962593",
+    "oldest_block_parent": "0x4a39f21367b3b9cc272744d1e38817bda5daf38d190dc23dc091f09fb54acd97",
+    "state_upper_limit": "7454720",
+    "state_lower_limit": "0"
+  },
+  "blob_info": {
+    "oldest_blob_slot": "7413769",
+    "blobs_db": true
   }
 }
 ```
diff --git a/book/src/builders.md b/book/src/builders.md
index 014e432117e..930d330d994 100644
--- a/book/src/builders.md
+++ b/book/src/builders.md
@@ -41,7 +41,7 @@ With the `--prefer-builder-proposals` flag, the validator client will always pre
 lighthouse vc --builder-boost-factor
 ```
 
 With the `--builder-boost-factor` flag, a percentage multiplier is applied to the builder's payload value when choosing between a
-builder payload header and payload from the paired execution node.
+builder payload header and payload from the paired execution node. For example, `--builder-boost-factor 50` will only use the builder payload if it is 2x more profitable than the local payload.
 
 In order to configure whether a validator queries for blinded blocks check out [this section.](#validator-client-configuration)
 
@@ -157,6 +157,7 @@ You can also directly configure these fields in the `validator_definitions.yml`
   suggested_fee_recipient: "0x6cc8dcbca744a6e4ffedb98e1d0df903b10abd21"
   gas_limit: 30000001
   builder_proposals: true
+  builder_boost_factor: 50
 - enabled: false
   voting_public_key: "0xa5566f9ec3c6e1fdf362634ebec9ef7aceb0e460e5079714808388e5d48f4ae1e12897fed1bea951c17fa389d511e477"
   type: local_keystore
   voting_keystore_path: /home/paul/.lighthouse/validators/0xa5566f9ec3c6e1fdf362634ebec9ef7aceb0e460e5079714808388e5d48f4ae1e12897fed1bea951c17fa389d511e477/voting-keystore.json
@@ -164,6 +165,7 @@ You can also directly configure these fields in the `validator_definitions.yml`
   suggested_fee_recipient: "0xa2e334e71511686bcfe38bb3ee1ad8f6babcc03d"
   gas_limit: 33333333
   builder_proposals: true
+  prefer_builder_proposals: true
 ```
 
 ## Circuit breaker conditions
diff --git a/book/src/checkpoint-sync.md b/book/src/checkpoint-sync.md
index 0c375a5f009..00afea15675 100644
--- a/book/src/checkpoint-sync.md
+++ b/book/src/checkpoint-sync.md
@@ -1,9 +1,8 @@
 # Checkpoint Sync
 
-Since version 2.0.0 Lighthouse supports syncing from a recent finalized checkpoint. This is
-substantially faster than syncing from genesis, while still providing all the same features.
+Lighthouse supports syncing from a recent finalized checkpoint. This is substantially faster than syncing from genesis, while still providing all the same features. Checkpoint sync is also safer as it protects the node from long-range attacks. Since 4.6.0, checkpoint sync is required by default and genesis sync will no longer work without the use of `--allow-insecure-genesis-sync`.
 
-If you would like to quickly get started with checkpoint sync, read the sections below on:
+To quickly get started with checkpoint sync, read the sections below on:
 
 1. [Automatic Checkpoint Sync](#automatic-checkpoint-sync)
 2. [Backfilling Blocks](#backfilling-blocks)
diff --git a/book/src/contributing.md b/book/src/contributing.md
index 6b84843a694..5b0ab48e866 100644
--- a/book/src/contributing.md
+++ b/book/src/contributing.md
@@ -49,7 +49,7 @@ into the canonical spec.
 ## Rust
 
 Lighthouse adheres to Rust code conventions as outlined in the [**Rust
-Styleguide**](https://github.com/rust-dev-tools/fmt-rfcs/blob/master/guide/guide.md).
+Styleguide**](https://doc.rust-lang.org/nightly/style-guide/).
 
 Please use [clippy](https://github.com/rust-lang/rust-clippy) and
 [rustfmt](https://github.com/rust-lang/rustfmt) to detect common mistakes and
diff --git a/book/src/merge-migration.md b/book/src/merge-migration.md
index bab520b569b..e2dab9652fa 100644
--- a/book/src/merge-migration.md
+++ b/book/src/merge-migration.md
@@ -207,6 +207,6 @@ guidance for specific setups.
 
 - [Ethereum.org: The Merge](https://ethereum.org/en/upgrades/merge/)
 - [Ethereum Staking Launchpad: Merge Readiness](https://launchpad.ethereum.org/en/merge-readiness).
-- [CoinCashew: Ethereum Merge Upgrade Checklist](https://www.coincashew.com/coins/overview-eth/ethereum-merge-upgrade-checklist-for-home-stakers-and-validators)
+- [CoinCashew: Ethereum Merge Upgrade Checklist](https://www.coincashew.com/coins/overview-eth/archived-guides/ethereum-merge-upgrade-checklist-for-home-stakers-and-validators)
 - [EthDocker: Merge Preparation](https://eth-docker.net/About/MergePrep/)
 - [Remy Roy: How to join the Goerli/Prater merge testnet](https://github.com/remyroy/ethstaker/blob/main/merge-goerli-prater.md)
diff --git a/common/eth2/src/lighthouse.rs b/common/eth2/src/lighthouse.rs
index 11706f30946..538f1a42d1c 100644
--- a/common/eth2/src/lighthouse.rs
+++ b/common/eth2/src/lighthouse.rs
@@ -8,15 +8,12 @@ mod standard_block_rewards;
 mod sync_committee_rewards;
 
 use crate::{
-    ok_or_error,
     types::{
-        BeaconState, ChainSpec, DepositTreeSnapshot, Epoch, EthSpec, FinalizedExecutionBlock,
-        GenericResponse, ValidatorId,
+        DepositTreeSnapshot, Epoch, EthSpec, FinalizedExecutionBlock, GenericResponse, ValidatorId,
     },
-    BeaconNodeHttpClient, DepositData, Error, Eth1Data, Hash256, Slot, StateId, StatusCode,
+    BeaconNodeHttpClient, DepositData, Error, Eth1Data, Hash256, Slot,
 };
 use proto_array::core::ProtoArray;
-use reqwest::IntoUrl;
 use serde::{Deserialize, Serialize};
 use ssz::four_byte_option_impl;
 use ssz_derive::{Decode, Encode};
@@ -371,27 +368,6 @@ pub struct DatabaseInfo {
 }
 
 impl BeaconNodeHttpClient {
-    /// Perform a HTTP GET request, returning `None` on a 404 error.
-    async fn get_bytes_opt(&self, url: U) -> Result>, Error> {
-        let response = self.client.get(url).send().await.map_err(Error::from)?;
-        match ok_or_error(response).await {
-            Ok(resp) => Ok(Some(
-                resp.bytes()
-                    .await
-                    .map_err(Error::from)?
-                    .into_iter()
-                    .collect::>(),
-            )),
-            Err(err) => {
-                if err.status() == Some(StatusCode::NOT_FOUND) {
-                    Ok(None)
-                } else {
-                    Err(err)
-                }
-            }
-        }
-    }
-
     /// `GET lighthouse/health`
     pub async fn get_lighthouse_health(&self) -> Result, Error> {
         let mut path = self.server.full.clone();
@@ -516,28 +492,6 @@ impl BeaconNodeHttpClient {
         self.get(path).await
     }
 
-    /// `GET lighthouse/beacon/states/{state_id}/ssz`
-    pub async fn get_lighthouse_beacon_states_ssz(
-        &self,
-        state_id: &StateId,
-        spec: &ChainSpec,
-    ) -> Result>, Error> {
-        let mut path = self.server.full.clone();
-
-        path.path_segments_mut()
-            .map_err(|()| Error::InvalidUrl(self.server.clone()))?
-            .push("lighthouse")
-            .push("beacon")
-            .push("states")
-            .push(&state_id.to_string())
-            .push("ssz");
-
-        self.get_bytes_opt(path)
-            .await?
-            .map(|bytes| BeaconState::from_ssz_bytes(&bytes, spec).map_err(Error::InvalidSsz))
-            .transpose()
-    }
-
     /// `GET lighthouse/staking`
     pub async fn get_lighthouse_staking(&self) -> Result {
         let mut path = self.server.full.clone();
diff --git a/lcli/Dockerfile b/lcli/Dockerfile
index 2ff4706a919..aed3628cf32 100644
--- a/lcli/Dockerfile
+++ b/lcli/Dockerfile
@@ -4,10 +4,10 @@
 FROM rust:1.73.0-bullseye AS builder
 RUN apt-get update && apt-get -y upgrade && apt-get install -y cmake libclang-dev
 COPY . lighthouse
-ARG PORTABLE
-ENV PORTABLE $PORTABLE
+ARG FEATURES
+ENV FEATURES $FEATURES
 RUN cd lighthouse && make install-lcli
 
 FROM ubuntu:22.04
 RUN apt-get update && apt-get -y upgrade && apt-get clean && rm -rf /var/lib/apt/lists/*
-COPY --from=builder /usr/local/cargo/bin/lcli /usr/local/bin/lcli
\ No newline at end of file
+COPY --from=builder /usr/local/cargo/bin/lcli /usr/local/bin/lcli
diff --git a/scripts/cli.sh b/scripts/cli.sh
index 768ec7b3018..7ba98d08bac 100755
--- a/scripts/cli.sh
+++ b/scripts/cli.sh
@@ -90,7 +90,7 @@ rm -f help_general.md help_bn.md help_vc.md help_am.md help_vm.md help_vm_create
 
 # only exit at the very end
 if [[ $changes == true ]]; then
-    echo "Exiting with error to indicate changes occurred. To fix, run `make cli-local` or `make cli` and commit the changes."
+    echo "Exiting with error to indicate changes occurred. To fix, run 'make cli-local' or 'make cli' and commit the changes."
     exit 1
 else
     echo "CLI help texts are up to date."
diff --git a/scripts/local_testnet/README.md b/scripts/local_testnet/README.md
index 2862fde0759..74dc4739b4e 100644
--- a/scripts/local_testnet/README.md
+++ b/scripts/local_testnet/README.md
@@ -193,3 +193,9 @@ Update the genesis time to now using:
    ./start_local_testnet.sh -p genesis.json
    ```
 4. Block production using builder flow will start at epoch 4.
+
+### Testing sending a transaction
+
+Some addresses in the local testnet are seeded with testnet ETH, allowing users to carry out transactions. To send a transaction, we first add the address to a wallet, such as [Metamask](https://metamask.io/). The private keys for the addresses are listed [here](https://github.com/sigp/lighthouse/blob/441fc1691b69f9edc4bbdc6665f3efab16265c9b/testing/execution_engine_integration/src/execution_engine.rs#L13-L14).
+
+Next, we add the local testnet to Metamask, a brief guide can be found [here](https://support.metamask.io/hc/en-us/articles/360043227612-How-to-add-a-custom-network-RPC). If you start the local testnet with default settings, the network RPC is: http://localhost:6001 and the `Chain ID` is `4242`, as defined in [`vars.env`](https://github.com/sigp/lighthouse/blob/441fc1691b69f9edc4bbdc6665f3efab16265c9b/scripts/local_testnet/vars.env#L42). Once the network and account are added, you should see that the account contains testnet ETH which allow us to carry out transactions.
\ No newline at end of file
diff --git a/scripts/local_testnet/beacon_node.sh b/scripts/local_testnet/beacon_node.sh
index 3f39f29caa1..813fb47886b 100755
--- a/scripts/local_testnet/beacon_node.sh
+++ b/scripts/local_testnet/beacon_node.sh
@@ -67,5 +67,6 @@ exec $lighthouse_binary \
     --target-peers $((BN_COUNT - 1)) \
     --execution-endpoint $execution_endpoint \
     --execution-jwt $execution_jwt \
+    --http-allow-sync-stalled \
    --unsafe-and-dangerous-mode \
     $BN_ARGS