diff --git a/configs/mainnet.yaml b/configs/mainnet.yaml index ed018aab1b..252f82dbe1 100644 --- a/configs/mainnet.yaml +++ b/configs/mainnet.yaml @@ -87,7 +87,8 @@ EJECTION_BALANCE: 16000000000 MIN_PER_EPOCH_CHURN_LIMIT: 4 # 2**16 (= 65,536) CHURN_LIMIT_QUOTIENT: 65536 - +# [New in Deneb:EIP7514] 2**3 (= 8) +MAX_PER_EPOCH_ACTIVATION_CHURN_LIMIT: 8 # Fork choice # --------------------------------------------------------------- @@ -140,3 +141,9 @@ MAX_REQUEST_BLOB_SIDECARS: 768 MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS: 4096 # `6` BLOB_SIDECAR_SUBNET_COUNT: 6 + +# Whisk +# `Epoch(2**8)` +WHISK_EPOCHS_PER_SHUFFLING_PHASE: 256 +# `Epoch(2)` +WHISK_PROPOSER_SELECTION_GAP: 2 diff --git a/configs/minimal.yaml b/configs/minimal.yaml index 238598b0e0..a3b1a8d5ad 100644 --- a/configs/minimal.yaml +++ b/configs/minimal.yaml @@ -82,10 +82,12 @@ INACTIVITY_SCORE_BIAS: 4 INACTIVITY_SCORE_RECOVERY_RATE: 16 # 2**4 * 10**9 (= 16,000,000,000) Gwei EJECTION_BALANCE: 16000000000 -# 2**2 (= 4) -MIN_PER_EPOCH_CHURN_LIMIT: 4 +# [customized] more easily demonstrate the difference between this value and the activation churn limit +MIN_PER_EPOCH_CHURN_LIMIT: 2 # [customized] scale queue churn at much lower validator counts for testing CHURN_LIMIT_QUOTIENT: 32 +# [New in Deneb:EIP7514] [customized] +MAX_PER_EPOCH_ACTIVATION_CHURN_LIMIT: 4 # Fork choice @@ -141,3 +143,7 @@ MAX_REQUEST_BLOB_SIDECARS: 768 MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS: 4096 # `6` BLOB_SIDECAR_SUBNET_COUNT: 6 + +# Whisk +WHISK_EPOCHS_PER_SHUFFLING_PHASE: 4 +WHISK_PROPOSER_SELECTION_GAP: 1 diff --git a/presets/mainnet/whisk.yaml b/presets/mainnet/whisk.yaml index 3086ff29de..f39b15bd33 100644 --- a/presets/mainnet/whisk.yaml +++ b/presets/mainnet/whisk.yaml @@ -8,12 +8,8 @@ CURDLEPROOFS_N_BLINDERS: 4 WHISK_CANDIDATE_TRACKERS_COUNT: 16384 # `uint64(2**13)` must be < WHISK_CANDIDATE_TRACKERS_COUNT WHISK_PROPOSER_TRACKERS_COUNT: 8192 -# `Epoch(2**8)` -WHISK_EPOCHS_PER_SHUFFLING_PHASE: 256 # `uint64(2**7 - CURDLEPROOFS_N_BLINDERS)` WHISK_VALIDATORS_PER_SHUFFLE: 124 -# `Epoch(2)` -WHISK_PROPOSER_SELECTION_GAP: 2 # `uint64(2**15)` TODO: will be replaced by a fix format once there's a serialized format WHISK_MAX_SHUFFLE_PROOF_SIZE: 32768 # `uint64(2**10)` TODO: will be replaced by a fix format once there's a serialized format diff --git a/presets/minimal/whisk.yaml b/presets/minimal/whisk.yaml index 1a726f79c2..0cb4b3a60c 100644 --- a/presets/minimal/whisk.yaml +++ b/presets/minimal/whisk.yaml @@ -9,11 +9,7 @@ WHISK_CANDIDATE_TRACKERS_COUNT: 32 # [customized] WHISK_PROPOSER_TRACKERS_COUNT: 16 # [customized] -WHISK_EPOCHS_PER_SHUFFLING_PHASE: 4 -# [customized] WHISK_VALIDATORS_PER_SHUFFLE: 4 -# [customized] -WHISK_PROPOSER_SELECTION_GAP: 1 # `uint64(2**15)` TODO: will be replaced by a fix format once there's a serialized format WHISK_MAX_SHUFFLE_PROOF_SIZE: 32768 # `uint64(2**10)` TODO: will be replaced by a fix format once there's a serialized format diff --git a/pysetup/spec_builders/deneb.py b/pysetup/spec_builders/deneb.py index b4e180c2ae..c32bee8305 100644 --- a/pysetup/spec_builders/deneb.py +++ b/pysetup/spec_builders/deneb.py @@ -21,9 +21,9 @@ def preparations(cls): @classmethod def sundry_functions(cls) -> str: return ''' -def retrieve_blobs_and_proofs(beacon_block_root: Root) -> PyUnion[Tuple[Blob, KZGProof], Tuple[str, str]]: +def retrieve_blobs_and_proofs(beacon_block_root: Root) -> Tuple[Sequence[Blob], Sequence[KZGProof]]: # pylint: disable=unused-argument - return ("TEST", "TEST")''' + return [], []''' @classmethod def 
execution_engine_cls(cls) -> str: diff --git a/setup.py b/setup.py index efa57ac2b1..b46423bdc1 100644 --- a/setup.py +++ b/setup.py @@ -519,6 +519,6 @@ def run(self): "lru-dict==1.2.0", MARKO_VERSION, "py_arkworks_bls12381==0.3.4", - "curdleproofs @ git+https://github.com/nalinbhardwaj/curdleproofs.pie@805d06785b6ff35fde7148762277dd1ae678beeb#egg=curdleproofs&subdirectory=curdleproofs", + "curdleproofs==0.1.1", ] ) diff --git a/specs/_features/whisk/beacon-chain.md b/specs/_features/whisk/beacon-chain.md index c955585903..c719dabe1b 100644 --- a/specs/_features/whisk/beacon-chain.md +++ b/specs/_features/whisk/beacon-chain.md @@ -12,6 +12,7 @@ - [Constants](#constants) - [Domain types](#domain-types) - [Preset](#preset) +- [Configuration](#configuration) - [Cryptography](#cryptography) - [BLS](#bls) - [Curdleproofs and opening proofs](#curdleproofs-and-opening-proofs) @@ -52,12 +53,17 @@ This document details the beacon chain additions and changes of to support the W | `CURDLEPROOFS_N_BLINDERS` | `uint64(4)` | number of blinders for curdleproofs | | `WHISK_CANDIDATE_TRACKERS_COUNT` | `uint64(2**14)` (= 16,384) | number of candidate trackers | | `WHISK_PROPOSER_TRACKERS_COUNT` | `uint64(2**13)` (= 8,192) | number of proposer trackers | -| `WHISK_EPOCHS_PER_SHUFFLING_PHASE` | `Epoch(2**8)` (= 256) | epochs per shuffling phase | | `WHISK_VALIDATORS_PER_SHUFFLE` | `uint64(2**7 - 4)` (= 124) | number of validators shuffled per shuffle step | -| `WHISK_PROPOSER_SELECTION_GAP` | `Epoch(2)` | gap between proposer selection and the block proposal phase | | `WHISK_MAX_SHUFFLE_PROOF_SIZE` | `uint64(2**15)` | max size of a shuffle proof | | `WHISK_MAX_OPENING_PROOF_SIZE` | `uint64(2**10)` | max size of a opening proof | +## Configuration + +| Name | Value | Description | +| ---------------------------------- | -------------------------- | ----------------------------------------------------------- | +| `WHISK_EPOCHS_PER_SHUFFLING_PHASE` | `Epoch(2**8)` (= 256) | epochs per shuffling phase | +| `WHISK_PROPOSER_SELECTION_GAP` | `Epoch(2)` | gap between proposer selection and the block proposal phase | + ## Cryptography ### BLS @@ -318,30 +324,25 @@ def get_shuffle_indices(randao_reveal: BLSSignature) -> Sequence[uint64]: ```python def process_shuffled_trackers(state: BeaconState, body: BeaconBlockBody) -> None: - # Check the shuffle proof - shuffle_indices = get_shuffle_indices(body.randao_reveal) - pre_shuffle_trackers = [state.whisk_candidate_trackers[i] for i in shuffle_indices] - shuffle_epoch = get_current_epoch(state) % WHISK_EPOCHS_PER_SHUFFLING_PHASE if shuffle_epoch + WHISK_PROPOSER_SELECTION_GAP + 1 >= WHISK_EPOCHS_PER_SHUFFLING_PHASE: # Require trackers set to zero during cooldown assert body.whisk_post_shuffle_trackers == Vector[WhiskTracker, WHISK_VALIDATORS_PER_SHUFFLE]() assert body.whisk_shuffle_proof_M_commitment == BLSG1Point() assert body.whisk_shuffle_proof == WhiskShuffleProof() - post_shuffle_trackers = pre_shuffle_trackers else: # Require shuffled trackers during shuffle + shuffle_indices = get_shuffle_indices(body.randao_reveal) + pre_shuffle_trackers = [state.whisk_candidate_trackers[i] for i in shuffle_indices] assert IsValidWhiskShuffleProof( pre_shuffle_trackers, body.whisk_post_shuffle_trackers, body.whisk_shuffle_proof_M_commitment, body.whisk_shuffle_proof, ) - post_shuffle_trackers = body.whisk_post_shuffle_trackers - - # Shuffle candidate trackers - for i, shuffle_index in enumerate(shuffle_indices): - state.whisk_candidate_trackers[shuffle_index] = 
post_shuffle_trackers[i] + # Shuffle candidate trackers + for i, shuffle_index in enumerate(shuffle_indices): + state.whisk_candidate_trackers[shuffle_index] = body.whisk_post_shuffle_trackers[i] ``` ```python diff --git a/specs/altair/fork.md b/specs/altair/fork.md index bf8499a219..60b048abda 100644 --- a/specs/altair/fork.md +++ b/specs/altair/fork.md @@ -22,8 +22,6 @@ This document describes the process of the first upgrade of the beacon chain: th ## Configuration -Warning: this configuration is not definitive. - | Name | Value | | - | - | | `ALTAIR_FORK_VERSION` | `Version('0x01000000')` | diff --git a/specs/altair/p2p-interface.md b/specs/altair/p2p-interface.md index 0f278b08c5..fac540fb99 100644 --- a/specs/altair/p2p-interface.md +++ b/specs/altair/p2p-interface.md @@ -13,7 +13,6 @@ Altair adds new messages, topics and data to the Req-Resp, Gossip and Discovery -- [Warning](#warning) - [Modifications in Altair](#modifications-in-altair) - [MetaData](#metadata) - [The gossip domain: gossipsub](#the-gossip-domain-gossipsub) @@ -38,11 +37,6 @@ Altair adds new messages, topics and data to the Req-Resp, Gossip and Discovery -## Warning - -This document is currently illustrative for early Altair testnets and some parts are subject to change. -Refer to the note in the [validator guide](./validator.md) for further details. - ## Modifications in Altair ### MetaData diff --git a/specs/altair/validator.md b/specs/altair/validator.md index 013a516490..3602377acd 100644 --- a/specs/altair/validator.md +++ b/specs/altair/validator.md @@ -10,7 +10,6 @@ This is an accompanying document to [Altair -- The Beacon Chain](./beacon-chain. - [Introduction](#introduction) - [Prerequisites](#prerequisites) -- [Warning](#warning) - [Constants](#constants) - [Misc](#misc) - [Containers](#containers) @@ -63,10 +62,6 @@ Block proposers incorporate the (aggregated) sync committee signatures into each All terminology, constants, functions, and protocol mechanics defined in the [Altair -- The Beacon Chain](./beacon-chain.md) doc are requisite for this document and used throughout. Please see this document before continuing and use as a reference throughout. -## Warning - -This document is currently illustrative for early Altair testnets and some parts are subject to change, especially pending implementation and profiling of Altair testnets. - ## Constants ### Misc diff --git a/specs/bellatrix/fork-choice.md b/specs/bellatrix/fork-choice.md index 68519ff908..c8475195fc 100644 --- a/specs/bellatrix/fork-choice.md +++ b/specs/bellatrix/fork-choice.md @@ -194,7 +194,8 @@ def on_block(store: Store, signed_block: SignedBeaconBlock) -> None: # Add proposer score boost if the block is timely time_into_slot = (store.time - store.genesis_time) % SECONDS_PER_SLOT is_before_attesting_interval = time_into_slot < SECONDS_PER_SLOT // INTERVALS_PER_SLOT - if get_current_slot(store) == block.slot and is_before_attesting_interval: + is_first_block = store.proposer_boost_root == Root() + if get_current_slot(store) == block.slot and is_before_attesting_interval and is_first_block: store.proposer_boost_root = hash_tree_root(block) # Update checkpoints in store if necessary diff --git a/specs/bellatrix/fork.md b/specs/bellatrix/fork.md index a114e5a5fa..569dccdc66 100644 --- a/specs/bellatrix/fork.md +++ b/specs/bellatrix/fork.md @@ -22,8 +22,6 @@ This document describes the process of Bellatrix upgrade. ## Configuration -Warning: this configuration is not definitive. 
- | Name | Value | | - | - | | `BELLATRIX_FORK_VERSION` | `Version('0x02000000')` | diff --git a/specs/bellatrix/p2p-interface.md b/specs/bellatrix/p2p-interface.md index 7d80d40a83..032bc9ebec 100644 --- a/specs/bellatrix/p2p-interface.md +++ b/specs/bellatrix/p2p-interface.md @@ -12,7 +12,6 @@ Readers should understand the Phase 0 and Altair documents and use them as a bas - - [Warning](#warning) - [Modifications in Bellatrix](#modifications-in-bellatrix) - [The gossip domain: gossipsub](#the-gossip-domain-gossipsub) - [Topics and messages](#topics-and-messages) @@ -33,11 +32,6 @@ Readers should understand the Phase 0 and Altair documents and use them as a bas -## Warning - -This document is currently illustrative for early Bellatrix testnets and some parts are subject to change. -Refer to the note in the [validator guide](./validator.md) for further details. - ## Modifications in Bellatrix ### The gossip domain: gossipsub diff --git a/specs/capella/fork-choice.md b/specs/capella/fork-choice.md index 87fec02f8c..a830080c11 100644 --- a/specs/capella/fork-choice.md +++ b/specs/capella/fork-choice.md @@ -106,7 +106,8 @@ def on_block(store: Store, signed_block: SignedBeaconBlock) -> None: # Add proposer score boost if the block is timely time_into_slot = (store.time - store.genesis_time) % SECONDS_PER_SLOT is_before_attesting_interval = time_into_slot < SECONDS_PER_SLOT // INTERVALS_PER_SLOT - if get_current_slot(store) == block.slot and is_before_attesting_interval: + is_first_block = store.proposer_boost_root == Root() + if get_current_slot(store) == block.slot and is_before_attesting_interval and is_first_block: store.proposer_boost_root = hash_tree_root(block) # Update checkpoints in store if necessary diff --git a/specs/capella/fork.md b/specs/capella/fork.md index 95bdf79aee..73d4ba2b71 100644 --- a/specs/capella/fork.md +++ b/specs/capella/fork.md @@ -22,8 +22,6 @@ This document describes the process of the Capella upgrade. ## Configuration -Warning: this configuration is not definitive. - | Name | Value | | - | - | | `CAPELLA_FORK_VERSION` | `Version('0x03000000')` | diff --git a/specs/deneb/beacon-chain.md b/specs/deneb/beacon-chain.md index 2328b20e03..b98ac12592 100644 --- a/specs/deneb/beacon-chain.md +++ b/specs/deneb/beacon-chain.md @@ -16,6 +16,7 @@ - [Preset](#preset) - [Execution](#execution) - [Configuration](#configuration) + - [Validator cycle](#validator-cycle) - [Containers](#containers) - [Extended containers](#extended-containers) - [`BeaconBlockBody`](#beaconblockbody) @@ -26,6 +27,7 @@ - [`kzg_commitment_to_versioned_hash`](#kzg_commitment_to_versioned_hash) - [Beacon state accessors](#beacon-state-accessors) - [Modified `get_attestation_participation_flag_indices`](#modified-get_attestation_participation_flag_indices) + - [New `get_validator_activation_churn_limit`](#new-get_validator_activation_churn_limit) - [Beacon chain state transition function](#beacon-chain-state-transition-function) - [Execution engine](#execution-engine) - [Request data](#request-data) @@ -40,6 +42,8 @@ - [Execution payload](#execution-payload) - [Modified `process_execution_payload`](#modified-process_execution_payload) - [Modified `process_voluntary_exit`](#modified-process_voluntary_exit) + - [Epoch processing](#epoch-processing) + - [Registry updates](#registry-updates) - [Testing](#testing) @@ -50,8 +54,9 @@ Deneb is a consensus-layer upgrade containing a number of features. 
Including: * [EIP-4788](https://eips.ethereum.org/EIPS/eip-4788): Beacon block root in the EVM * [EIP-4844](https://eips.ethereum.org/EIPS/eip-4844): Shard Blob Transactions scale data-availability of Ethereum in a simple, forwards-compatible manner -* [EIP-7044](https://github.com/ethereum/EIPs/pull/7044): Perpetually Valid Signed Voluntary Exits +* [EIP-7044](https://eips.ethereum.org/EIPS/eip-7044): Perpetually Valid Signed Voluntary Exits * [EIP-7045](https://eips.ethereum.org/EIPS/eip-7045): Increase Max Attestation Inclusion Slot +* [EIP-7514](https://eips.ethereum.org/EIPS/eip-7514): Add Max Epoch Churn Limit ## Custom types @@ -89,6 +94,12 @@ and are limited by `MAX_BLOB_GAS_PER_BLOCK // GAS_PER_BLOB`. However the CL limi ## Configuration +### Validator cycle + +| Name | Value | +| - | - | +| `MAX_PER_EPOCH_ACTIVATION_CHURN_LIMIT` | `uint64(2**3)` (= 8) | + ## Containers ### Extended containers @@ -211,6 +222,16 @@ def get_attestation_participation_flag_indices(state: BeaconState, return participation_flag_indices ``` +#### New `get_validator_activation_churn_limit` + +```python +def get_validator_activation_churn_limit(state: BeaconState) -> uint64: + """ + Return the validator activation churn limit for the current epoch. + """ + return min(MAX_PER_EPOCH_ACTIVATION_CHURN_LIMIT, get_validator_churn_limit(state)) +``` + ## Beacon chain state transition function ### Execution engine @@ -415,6 +436,38 @@ def process_voluntary_exit(state: BeaconState, signed_voluntary_exit: SignedVolu initiate_validator_exit(state, voluntary_exit.validator_index) ``` +### Epoch processing + +#### Registry updates + +*Note*: The function `process_registry_updates` is modified to utilize `get_validator_activation_churn_limit()` to rate limit the activation queue for EIP-7514. + +```python +def process_registry_updates(state: BeaconState) -> None: + # Process activation eligibility and ejections + for index, validator in enumerate(state.validators): + if is_eligible_for_activation_queue(validator): + validator.activation_eligibility_epoch = get_current_epoch(state) + 1 + + if ( + is_active_validator(validator, get_current_epoch(state)) + and validator.effective_balance <= EJECTION_BALANCE + ): + initiate_validator_exit(state, ValidatorIndex(index)) + + # Queue validators eligible for activation and not yet dequeued for activation + activation_queue = sorted([ + index for index, validator in enumerate(state.validators) + if is_eligible_for_activation(state, validator) + # Order by the sequence of activation_eligibility_epoch setting and then index + ], key=lambda index: (state.validators[index].activation_eligibility_epoch, index)) + # Dequeued validators for activation up to activation churn limit + # [Modified in Deneb:EIP7514] + for index in activation_queue[:get_validator_activation_churn_limit(state)]: + validator = state.validators[index] + validator.activation_epoch = compute_activation_exit_epoch(get_current_epoch(state)) +``` + ## Testing *Note*: The function `initialize_beacon_state_from_eth1` is modified for pure Deneb testing only. diff --git a/specs/deneb/fork-choice.md b/specs/deneb/fork-choice.md index 23eef436c1..5a700cc7a6 100644 --- a/specs/deneb/fork-choice.md +++ b/specs/deneb/fork-choice.md @@ -55,11 +55,6 @@ def is_data_available(beacon_block_root: Root, blob_kzg_commitments: Sequence[KZ # `MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS` blobs, proofs = retrieve_blobs_and_proofs(beacon_block_root) - # For testing, `retrieve_blobs_and_proofs` returns ("TEST", "TEST"). 
- # TODO: Remove it once we have a way to inject `BlobSidecar` into tests. - if isinstance(blobs, str) or isinstance(proofs, str): - return True - return verify_blob_kzg_proof_batch(blobs, blob_kzg_commitments, proofs) ``` @@ -111,7 +106,8 @@ def on_block(store: Store, signed_block: SignedBeaconBlock) -> None: # Add proposer score boost if the block is timely time_into_slot = (store.time - store.genesis_time) % SECONDS_PER_SLOT is_before_attesting_interval = time_into_slot < SECONDS_PER_SLOT // INTERVALS_PER_SLOT - if get_current_slot(store) == block.slot and is_before_attesting_interval: + is_first_block = store.proposer_boost_root == Root() + if get_current_slot(store) == block.slot and is_before_attesting_interval and is_first_block: store.proposer_boost_root = hash_tree_root(block) # Update checkpoints in store if necessary diff --git a/specs/phase0/fork-choice.md b/specs/phase0/fork-choice.md index be6edca643..8b52186dda 100644 --- a/specs/phase0/fork-choice.md +++ b/specs/phase0/fork-choice.md @@ -539,7 +539,8 @@ def on_block(store: Store, signed_block: SignedBeaconBlock) -> None: # Add proposer score boost if the block is timely time_into_slot = (store.time - store.genesis_time) % SECONDS_PER_SLOT is_before_attesting_interval = time_into_slot < SECONDS_PER_SLOT // INTERVALS_PER_SLOT - if get_current_slot(store) == block.slot and is_before_attesting_interval: + is_first_block = store.proposer_boost_root == Root() + if get_current_slot(store) == block.slot and is_before_attesting_interval and is_first_block: store.proposer_boost_root = hash_tree_root(block) # Update checkpoints in store if necessary diff --git a/specs/phase0/p2p-interface.md b/specs/phase0/p2p-interface.md index bbb4c4d427..a374443b8c 100644 --- a/specs/phase0/p2p-interface.md +++ b/specs/phase0/p2p-interface.md @@ -158,11 +158,11 @@ This applies to transports that are natively incapable of multiplexing (e.g. TCP and is omitted for capable transports (e.g. QUIC). Two multiplexers are commonplace in libp2p implementations: -[mplex](https://github.com/libp2p/specs/tree/master/mplex) and [yamux](https://github.com/hashicorp/yamux/blob/master/spec.md). +[mplex](https://github.com/libp2p/specs/tree/master/mplex) and [yamux](https://github.com/libp2p/specs/blob/master/yamux/README.md). Their protocol IDs are, respectively: `/mplex/6.7.0` and `/yamux/1.0.0`. Clients MUST support [mplex](https://github.com/libp2p/specs/tree/master/mplex) -and MAY support [yamux](https://github.com/hashicorp/yamux/blob/master/spec.md). +and MAY support [yamux](https://github.com/libp2p/specs/blob/master/yamux/README.md). If both are supported by the client, yamux MUST take precedence during negotiation. See the [Rationale](#design-decision-rationale) section below for tradeoffs. diff --git a/specs/phase0/validator.md b/specs/phase0/validator.md index 602df09736..86b230654c 100644 --- a/specs/phase0/validator.md +++ b/specs/phase0/validator.md @@ -606,7 +606,7 @@ def get_aggregate_and_proof_signature(state: BeaconState, "Slashing" is the burning of some amount of validator funds and immediate ejection from the active validator set. In Phase 0, there are two ways in which funds can be slashed: [proposer slashing](#proposer-slashing) and [attester slashing](#attester-slashing). Although being slashed has serious repercussions, it is simple enough to avoid being slashed all together by remaining _consistent_ with respect to the messages a validator has previously signed. -*Note*: Signed data must be within a sequential `Fork` context to conflict. 
Messages cannot be slashed across diverging forks. If the previous fork version is 1 and the chain splits into fork 2 and 102, messages from 1 can slashable against messages in forks 1, 2, and 102. Messages in 2 cannot be slashable against messages in 102, and vice versa. +*Note*: Signed data must be within a sequential `Fork` context to conflict. Messages cannot be slashed across diverging forks. If the previous fork version is 1 and the chain splits into fork 2 and 102, messages from 1 can be slashable against messages in forks 1, 2, and 102. Messages in 2 cannot be slashable against messages in 102, and vice versa. ### Proposer slashing diff --git a/tests/core/pyspec/eth2spec/VERSION.txt b/tests/core/pyspec/eth2spec/VERSION.txt index 1b4859bc51..5a5d178b3a 100644 --- a/tests/core/pyspec/eth2spec/VERSION.txt +++ b/tests/core/pyspec/eth2spec/VERSION.txt @@ -1 +1 @@ -1.4.0-beta.1 +1.4.0-beta.2 diff --git a/tests/core/pyspec/eth2spec/gen_helpers/gen_from_tests/gen.py b/tests/core/pyspec/eth2spec/gen_helpers/gen_from_tests/gen.py index b951a6a85c..17ffe0b468 100644 --- a/tests/core/pyspec/eth2spec/gen_helpers/gen_from_tests/gen.py +++ b/tests/core/pyspec/eth2spec/gen_helpers/gen_from_tests/gen.py @@ -10,6 +10,10 @@ from eth2spec.gen_helpers.gen_base.gen_typing import TestCase, TestProvider +def generate_case_fn(tfn, generator_mode, phase, preset, bls_active): + return lambda: tfn(generator_mode=generator_mode, phase=phase, preset=preset, bls_active=bls_active) + + def generate_from_tests(runner_name: str, handler_name: str, src: Any, fork_name: SpecForkName, preset_name: PresetBaseName, bls_active: bool = True, @@ -52,7 +56,7 @@ def generate_from_tests(runner_name: str, handler_name: str, src: Any, suite_name=getattr(tfn, 'suite_name', 'pyspec_tests'), case_name=case_name, # TODO: with_all_phases and other per-phase tooling, should be replaced with per-fork equivalent. - case_fn=lambda: tfn(generator_mode=True, phase=phase, preset=preset_name, bls_active=bls_active) + case_fn=generate_case_fn(tfn, generator_mode=True, phase=phase, preset=preset_name, bls_active=bls_active) ) diff --git a/tests/core/pyspec/eth2spec/test/context.py b/tests/core/pyspec/eth2spec/test/context.py index 0c9d4a1ec5..7289fdf0fa 100644 --- a/tests/core/pyspec/eth2spec/test/context.py +++ b/tests/core/pyspec/eth2spec/test/context.py @@ -162,14 +162,34 @@ def default_balances(spec: Spec): return [spec.MAX_EFFECTIVE_BALANCE] * num_validators -def scaled_churn_balances(spec: Spec): +def scaled_churn_balances_min_churn_limit(spec: Spec): """ Helper method to create enough validators to scale the churn limit. (This is *firmly* over the churn limit -- thus the +2 instead of just +1) See the second argument of ``max`` in ``get_validator_churn_limit``. - Usage: `@with_custom_state(balances_fn=scaled_churn_balances, ...)` + Usage: `@with_custom_state(balances_fn=scaled_churn_balances_min_churn_limit, ...)` """ - num_validators = spec.config.CHURN_LIMIT_QUOTIENT * (2 + spec.config.MIN_PER_EPOCH_CHURN_LIMIT) + num_validators = spec.config.CHURN_LIMIT_QUOTIENT * (spec.config.MIN_PER_EPOCH_CHURN_LIMIT + 2) + return [spec.MAX_EFFECTIVE_BALANCE] * num_validators + + +def scaled_churn_balances_equal_activation_churn_limit(spec: Spec): + """ + Helper method to create enough validators to scale the churn limit. 
+ (This sets the validator churn limit exactly equal to the activation churn limit -- hence no +2 here) + Usage: `@with_custom_state(balances_fn=scaled_churn_balances_equal_activation_churn_limit, ...)` + """ + num_validators = spec.config.CHURN_LIMIT_QUOTIENT * (spec.config.MAX_PER_EPOCH_ACTIVATION_CHURN_LIMIT) + return [spec.MAX_EFFECTIVE_BALANCE] * num_validators + + +def scaled_churn_balances_exceed_activation_churn_limit(spec: Spec): + """ + Helper method to create enough validators to scale the churn limit. + (This is *firmly* over the churn limit -- thus the +2 instead of just +1) + Usage: `@with_custom_state(balances_fn=scaled_churn_balances_exceed_activation_churn_limit, ...)` + """ + num_validators = spec.config.CHURN_LIMIT_QUOTIENT * (spec.config.MAX_PER_EPOCH_ACTIVATION_CHURN_LIMIT + 2) + return [spec.MAX_EFFECTIVE_BALANCE] * num_validators diff --git a/tests/core/pyspec/eth2spec/test/deneb/epoch_processing/__init__.py b/tests/core/pyspec/eth2spec/test/deneb/epoch_processing/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/tests/core/pyspec/eth2spec/test/deneb/epoch_processing/test_process_registry_updates.py b/tests/core/pyspec/eth2spec/test/deneb/epoch_processing/test_process_registry_updates.py new file mode 100644 index 0000000000..4cbcc1ed5c --- /dev/null +++ b/tests/core/pyspec/eth2spec/test/deneb/epoch_processing/test_process_registry_updates.py @@ -0,0 +1,90 @@ +from eth2spec.test.helpers.keys import pubkeys +from eth2spec.test.helpers.constants import MINIMAL +from eth2spec.test.context import ( + with_deneb_and_later, + spec_test, + spec_state_test, + single_phase, + with_custom_state, + with_presets, + scaled_churn_balances_exceed_activation_churn_limit, + scaled_churn_balances_equal_activation_churn_limit, +) +from eth2spec.test.helpers.epoch_processing import run_epoch_processing_with + + +def run_process_registry_updates(spec, state): + yield from run_epoch_processing_with(spec, state, 'process_registry_updates') + + +def run_test_activation_churn_limit(spec, state): + mock_activations = spec.get_validator_activation_churn_limit(state) * 2 + + validator_count_0 = len(state.validators) + + for i in range(mock_activations): + index = validator_count_0 + i + validator = spec.Validator( + pubkey=pubkeys[index], + withdrawal_credentials=spec.ETH1_ADDRESS_WITHDRAWAL_PREFIX + b'\x00' * 11 + b'\x56' * 20, + activation_eligibility_epoch=0, + activation_epoch=spec.FAR_FUTURE_EPOCH, + exit_epoch=spec.FAR_FUTURE_EPOCH, + withdrawable_epoch=spec.FAR_FUTURE_EPOCH, + effective_balance=spec.MAX_EFFECTIVE_BALANCE, + ) + state.validators.append(validator) + state.balances.append(spec.MAX_EFFECTIVE_BALANCE) + state.previous_epoch_participation.append(spec.ParticipationFlags(0b0000_0000)) + state.current_epoch_participation.append(spec.ParticipationFlags(0b0000_0000)) + state.inactivity_scores.append(0) + state.validators[index].activation_epoch = spec.FAR_FUTURE_EPOCH + + churn_limit_0 = spec.get_validator_activation_churn_limit(state) + + yield from run_process_registry_updates(spec, state) + + # Half should churn in first run of registry update + for i in range(mock_activations): + index = validator_count_0 + i + if index < validator_count_0 + churn_limit_0: + # The eligible validators within the activation churn limit should have been activated + assert state.validators[index].activation_epoch < spec.FAR_FUTURE_EPOCH + else: + assert state.validators[index].activation_epoch == spec.FAR_FUTURE_EPOCH + + +@with_deneb_and_later +@with_presets([MINIMAL], + reason="mainnet config leads 
to larger validator set than limit of public/private keys pre-generated") +@spec_test +@with_custom_state(balances_fn=scaled_churn_balances_exceed_activation_churn_limit, + threshold_fn=lambda spec: spec.config.EJECTION_BALANCE) +@single_phase +def test_activation_churn_limit__greater_than_activation_limit(spec, state): + assert spec.get_validator_activation_churn_limit(state) == spec.config.MAX_PER_EPOCH_ACTIVATION_CHURN_LIMIT + assert spec.get_validator_churn_limit(state) > spec.config.MAX_PER_EPOCH_ACTIVATION_CHURN_LIMIT + yield from run_test_activation_churn_limit(spec, state) + + +@with_deneb_and_later +@with_presets([MINIMAL], + reason="mainnet config leads to larger validator set than limit of public/private keys pre-generated") +@spec_test +@with_custom_state(balances_fn=scaled_churn_balances_equal_activation_churn_limit, + threshold_fn=lambda spec: spec.config.EJECTION_BALANCE) +@single_phase +def test_activation_churn_limit__equal_to_activation_limit(spec, state): + assert spec.get_validator_activation_churn_limit(state) == spec.config.MAX_PER_EPOCH_ACTIVATION_CHURN_LIMIT + assert spec.get_validator_churn_limit(state) == spec.config.MAX_PER_EPOCH_ACTIVATION_CHURN_LIMIT + yield from run_test_activation_churn_limit(spec, state) + + +@with_deneb_and_later +@with_presets([MINIMAL], + reason="mainnet config leads to larger validator set than limit of public/private keys pre-generated") +@spec_state_test +def test_activation_churn_limit__less_than_activation_limit(spec, state): + assert spec.get_validator_activation_churn_limit(state) < spec.config.MAX_PER_EPOCH_ACTIVATION_CHURN_LIMIT + assert spec.get_validator_churn_limit(state) < spec.config.MAX_PER_EPOCH_ACTIVATION_CHURN_LIMIT + yield from run_test_activation_churn_limit(spec, state) diff --git a/tests/core/pyspec/eth2spec/test/deneb/fork_choice/__init__.py b/tests/core/pyspec/eth2spec/test/deneb/fork_choice/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/tests/core/pyspec/eth2spec/test/deneb/fork_choice/test_on_block.py b/tests/core/pyspec/eth2spec/test/deneb/fork_choice/test_on_block.py new file mode 100644 index 0000000000..12451f4ca3 --- /dev/null +++ b/tests/core/pyspec/eth2spec/test/deneb/fork_choice/test_on_block.py @@ -0,0 +1,182 @@ +from random import Random + +from eth2spec.test.context import ( + spec_state_test, + with_deneb_and_later, +) + +from eth2spec.test.helpers.block import ( + build_empty_block_for_next_slot, +) +from eth2spec.test.helpers.execution_payload import ( + compute_el_block_hash, +) +from eth2spec.test.helpers.fork_choice import ( + BlobData, + get_genesis_forkchoice_store_and_block, + on_tick_and_append_step, + tick_and_add_block_with_data, +) +from eth2spec.test.helpers.state import ( + state_transition_and_sign_block, +) +from eth2spec.test.helpers.sharding import ( + get_sample_opaque_tx, +) + + +def get_block_with_blob(spec, state, rng=None): + block = build_empty_block_for_next_slot(spec, state) + opaque_tx, blobs, blob_kzg_commitments, blob_kzg_proofs = get_sample_opaque_tx(spec, blob_count=1, rng=rng) + block.body.execution_payload.transactions = [opaque_tx] + block.body.execution_payload.block_hash = compute_el_block_hash(spec, block.body.execution_payload) + block.body.blob_kzg_commitments = blob_kzg_commitments + return block, blobs, blob_kzg_proofs + + +@with_deneb_and_later +@spec_state_test +def test_simple_blob_data(spec, state): + rng = Random(1234) + + test_steps = [] + # Initialization + store, anchor_block = get_genesis_forkchoice_store_and_block(spec, 
state) + yield 'anchor_state', state + yield 'anchor_block', anchor_block + current_time = state.slot * spec.config.SECONDS_PER_SLOT + store.genesis_time + on_tick_and_append_step(spec, store, current_time, test_steps) + assert store.time == current_time + + # On receiving a block of `GENESIS_SLOT + 1` slot + block, blobs, blob_kzg_proofs = get_block_with_blob(spec, state, rng=rng) + signed_block = state_transition_and_sign_block(spec, state, block) + blob_data = BlobData(blobs, blob_kzg_proofs) + + yield from tick_and_add_block_with_data(spec, store, signed_block, test_steps, blob_data) + + assert spec.get_head(store) == signed_block.message.hash_tree_root() + + # On receiving a block of next epoch + store.time = current_time + spec.config.SECONDS_PER_SLOT * spec.SLOTS_PER_EPOCH + block, blobs, blob_kzg_proofs = get_block_with_blob(spec, state, rng=rng) + signed_block = state_transition_and_sign_block(spec, state, block) + blob_data = BlobData(blobs, blob_kzg_proofs) + + yield from tick_and_add_block_with_data(spec, store, signed_block, test_steps, blob_data) + + assert spec.get_head(store) == signed_block.message.hash_tree_root() + + yield 'steps', test_steps + + +@with_deneb_and_later +@spec_state_test +def test_invalid_incorrect_proof(spec, state): + rng = Random(1234) + + test_steps = [] + # Initialization + store, anchor_block = get_genesis_forkchoice_store_and_block(spec, state) + yield 'anchor_state', state + yield 'anchor_block', anchor_block + current_time = state.slot * spec.config.SECONDS_PER_SLOT + store.genesis_time + on_tick_and_append_step(spec, store, current_time, test_steps) + assert store.time == current_time + + # On receiving a block of `GENESIS_SLOT + 1` slot + block, blobs, _ = get_block_with_blob(spec, state, rng=rng) + signed_block = state_transition_and_sign_block(spec, state, block) + # Insert incorrect proof + blob_kzg_proofs = [b'\xc0' + b'\x00' * 47] + blob_data = BlobData(blobs, blob_kzg_proofs) + + yield from tick_and_add_block_with_data(spec, store, signed_block, test_steps, blob_data, valid=False) + + assert spec.get_head(store) != signed_block.message.hash_tree_root() + + yield 'steps', test_steps + + +@with_deneb_and_later +@spec_state_test +def test_invalid_data_unavailable(spec, state): + rng = Random(1234) + + test_steps = [] + # Initialization + store, anchor_block = get_genesis_forkchoice_store_and_block(spec, state) + yield 'anchor_state', state + yield 'anchor_block', anchor_block + current_time = state.slot * spec.config.SECONDS_PER_SLOT + store.genesis_time + on_tick_and_append_step(spec, store, current_time, test_steps) + assert store.time == current_time + + # On receiving a block of `GENESIS_SLOT + 1` slot + block, _, _ = get_block_with_blob(spec, state, rng=rng) + signed_block = state_transition_and_sign_block(spec, state, block) + + # data unavailable + blob_data = BlobData([], []) + + yield from tick_and_add_block_with_data(spec, store, signed_block, test_steps, blob_data, valid=False) + + assert spec.get_head(store) != signed_block.message.hash_tree_root() + + yield 'steps', test_steps + + +@with_deneb_and_later +@spec_state_test +def test_invalid_wrong_proofs_length(spec, state): + rng = Random(1234) + + test_steps = [] + # Initialization + store, anchor_block = get_genesis_forkchoice_store_and_block(spec, state) + yield 'anchor_state', state + yield 'anchor_block', anchor_block + current_time = state.slot * spec.config.SECONDS_PER_SLOT + store.genesis_time + on_tick_and_append_step(spec, store, current_time, test_steps) + assert 
store.time == current_time + + # On receiving a block of `GENESIS_SLOT + 1` slot + block, blobs, _ = get_block_with_blob(spec, state, rng=rng) + signed_block = state_transition_and_sign_block(spec, state, block) + + # unavailable proofs + blob_data = BlobData(blobs, []) + + yield from tick_and_add_block_with_data(spec, store, signed_block, test_steps, blob_data, valid=False) + + assert spec.get_head(store) != signed_block.message.hash_tree_root() + + yield 'steps', test_steps + + +@with_deneb_and_later +@spec_state_test +def test_invalid_wrong_blobs_length(spec, state): + rng = Random(1234) + + test_steps = [] + # Initialization + store, anchor_block = get_genesis_forkchoice_store_and_block(spec, state) + yield 'anchor_state', state + yield 'anchor_block', anchor_block + current_time = state.slot * spec.config.SECONDS_PER_SLOT + store.genesis_time + on_tick_and_append_step(spec, store, current_time, test_steps) + assert store.time == current_time + + # On receiving a block of `GENESIS_SLOT + 1` slot + block, _, blob_kzg_proofs = get_block_with_blob(spec, state, rng=rng) + signed_block = state_transition_and_sign_block(spec, state, block) + + # unavailable blobs + blob_data = BlobData([], blob_kzg_proofs) + + yield from tick_and_add_block_with_data(spec, store, signed_block, test_steps, blob_data, valid=False) + + assert spec.get_head(store) != signed_block.message.hash_tree_root() + + yield 'steps', test_steps diff --git a/tests/core/pyspec/eth2spec/test/helpers/fork_choice.py b/tests/core/pyspec/eth2spec/test/helpers/fork_choice.py index af231d87ff..e0e3547222 100644 --- a/tests/core/pyspec/eth2spec/test/helpers/fork_choice.py +++ b/tests/core/pyspec/eth2spec/test/helpers/fork_choice.py @@ -1,3 +1,5 @@ +from typing import NamedTuple, Sequence, Any + from eth_utils import encode_hex from eth2spec.test.exceptions import BlockNotFoundException from eth2spec.test.helpers.attestations import ( @@ -7,6 +9,40 @@ ) +class BlobData(NamedTuple): + """ + The return values of ``retrieve_blobs_and_proofs`` helper. + """ + blobs: Sequence[Any] + proofs: Sequence[bytes] + + +def with_blob_data(spec, blob_data, func): + """ + This helper runs the given ``func`` with monkeypatched ``retrieve_blobs_and_proofs`` + that returns ``blob_data.blobs, blob_data.proofs``. 
+ """ + def retrieve_blobs_and_proofs(beacon_block_root): + return blob_data.blobs, blob_data.proofs + + retrieve_blobs_and_proofs_backup = spec.retrieve_blobs_and_proofs + spec.retrieve_blobs_and_proofs = retrieve_blobs_and_proofs + + class AtomicBoolean(): + value = False + is_called = AtomicBoolean() + + def wrap(flag: AtomicBoolean): + yield from func() + flag.value = True + + try: + yield from wrap(is_called) + finally: + spec.retrieve_blobs_and_proofs = retrieve_blobs_and_proofs_backup + assert is_called.value + + def get_anchor_root(spec, state): anchor_block_header = state.latest_block_header.copy() if anchor_block_header.state_root == spec.Bytes32(): @@ -15,7 +51,8 @@ def get_anchor_root(spec, state): def tick_and_add_block(spec, store, signed_block, test_steps, valid=True, - merge_block=False, block_not_found=False, is_optimistic=False): + merge_block=False, block_not_found=False, is_optimistic=False, + blob_data=None): pre_state = store.block_states[signed_block.message.parent_root] if merge_block: assert spec.is_merge_transition_block(pre_state, signed_block.message.body) @@ -30,11 +67,19 @@ def tick_and_add_block(spec, store, signed_block, test_steps, valid=True, valid=valid, block_not_found=block_not_found, is_optimistic=is_optimistic, + blob_data=blob_data, ) return post_state +def tick_and_add_block_with_data(spec, store, signed_block, test_steps, blob_data, valid=True): + def run_func(): + yield from tick_and_add_block(spec, store, signed_block, test_steps, blob_data=blob_data, valid=valid) + + yield from with_blob_data(spec, blob_data, run_func) + + def add_attestation(spec, store, attestation, test_steps, is_from_block=False): spec.on_attestation(store, attestation, is_from_block=is_from_block) yield get_attestation_file_name(attestation), attestation @@ -94,6 +139,13 @@ def get_attester_slashing_file_name(attester_slashing): return f"attester_slashing_{encode_hex(attester_slashing.hash_tree_root())}" +def get_blobs_file_name(blobs=None, blobs_root=None): + if blobs: + return f"blobs_{encode_hex(blobs.hash_tree_root())}" + else: + return f"blobs_{encode_hex(blobs_root)}" + + def on_tick_and_append_step(spec, store, time, test_steps): spec.on_tick(store, time) test_steps.append({'tick': int(time)}) @@ -119,35 +171,52 @@ def add_block(spec, test_steps, valid=True, block_not_found=False, - is_optimistic=False): + is_optimistic=False, + blob_data=None): """ Run on_block and on_attestation """ yield get_block_file_name(signed_block), signed_block - if not valid: - if is_optimistic: - run_on_block(spec, store, signed_block, valid=True) + # Check blob_data + if blob_data is not None: + blobs = spec.List[spec.Blob, spec.MAX_BLOBS_PER_BLOCK](blob_data.blobs) + blobs_root = blobs.hash_tree_root() + yield get_blobs_file_name(blobs_root=blobs_root), blobs + + is_blob_data_test = blob_data is not None + + def _append_step(is_blob_data_test, valid=True): + if is_blob_data_test: test_steps.append({ 'block': get_block_file_name(signed_block), - 'valid': False, + 'blobs': get_blobs_file_name(blobs_root=blobs_root), + 'proofs': [encode_hex(proof) for proof in blob_data.proofs], + 'valid': valid, }) + else: + test_steps.append({ + 'block': get_block_file_name(signed_block), + 'valid': valid, + }) + + if not valid: + if is_optimistic: + run_on_block(spec, store, signed_block, valid=True) + _append_step(is_blob_data_test, valid=False) else: try: run_on_block(spec, store, signed_block, valid=True) except (AssertionError, BlockNotFoundException) as e: if isinstance(e, BlockNotFoundException) 
and not block_not_found: assert False - test_steps.append({ - 'block': get_block_file_name(signed_block), - 'valid': False, - }) + _append_step(is_blob_data_test, valid=False) return else: assert False else: run_on_block(spec, store, signed_block, valid=True) - test_steps.append({'block': get_block_file_name(signed_block)}) + _append_step(is_blob_data_test) # An on_block step implies receiving block's attestations for attestation in signed_block.message.body.attestations: diff --git a/tests/core/pyspec/eth2spec/test/phase0/block_processing/test_process_voluntary_exit.py b/tests/core/pyspec/eth2spec/test/phase0/block_processing/test_process_voluntary_exit.py index 4a7286d523..97208dfcdd 100644 --- a/tests/core/pyspec/eth2spec/test/phase0/block_processing/test_process_voluntary_exit.py +++ b/tests/core/pyspec/eth2spec/test/phase0/block_processing/test_process_voluntary_exit.py @@ -3,7 +3,8 @@ spec_state_test, always_bls, with_all_phases, with_presets, spec_test, single_phase, - with_custom_state, scaled_churn_balances, + with_custom_state, + scaled_churn_balances_min_churn_limit, ) from eth2spec.test.helpers.keys import pubkey_to_privkey from eth2spec.test.helpers.voluntary_exits import ( @@ -102,7 +103,8 @@ def test_success_exit_queue__min_churn(spec, state): @with_presets([MINIMAL], reason="mainnet config leads to larger validator set than limit of public/private keys pre-generated") @spec_test -@with_custom_state(balances_fn=scaled_churn_balances, threshold_fn=lambda spec: spec.config.EJECTION_BALANCE) +@with_custom_state(balances_fn=scaled_churn_balances_min_churn_limit, + threshold_fn=lambda spec: spec.config.EJECTION_BALANCE) @single_phase def test_success_exit_queue__scaled_churn(spec, state): churn_limit = spec.get_validator_churn_limit(state) diff --git a/tests/core/pyspec/eth2spec/test/phase0/epoch_processing/test_process_registry_updates.py b/tests/core/pyspec/eth2spec/test/phase0/epoch_processing/test_process_registry_updates.py index b4c5f81a0d..b7a7be76ab 100644 --- a/tests/core/pyspec/eth2spec/test/phase0/epoch_processing/test_process_registry_updates.py +++ b/tests/core/pyspec/eth2spec/test/phase0/epoch_processing/test_process_registry_updates.py @@ -5,7 +5,7 @@ spec_test, spec_state_test, with_all_phases, single_phase, with_custom_state, with_presets, - scaled_churn_balances, + scaled_churn_balances_min_churn_limit, ) from eth2spec.test.helpers.epoch_processing import run_epoch_processing_with @@ -164,7 +164,8 @@ def test_activation_queue_efficiency_min(spec, state): @with_presets([MINIMAL], reason="mainnet config leads to larger validator set than limit of public/private keys pre-generated") @spec_test -@with_custom_state(balances_fn=scaled_churn_balances, threshold_fn=lambda spec: spec.config.EJECTION_BALANCE) +@with_custom_state(balances_fn=scaled_churn_balances_min_churn_limit, + threshold_fn=lambda spec: spec.config.EJECTION_BALANCE) @single_phase def test_activation_queue_efficiency_scaled(spec, state): assert spec.get_validator_churn_limit(state) > spec.config.MIN_PER_EPOCH_CHURN_LIMIT @@ -227,7 +228,8 @@ def test_ejection_past_churn_limit_min(spec, state): @with_presets([MINIMAL], reason="mainnet config leads to larger validator set than limit of public/private keys pre-generated") @spec_test -@with_custom_state(balances_fn=scaled_churn_balances, threshold_fn=lambda spec: spec.config.EJECTION_BALANCE) +@with_custom_state(balances_fn=scaled_churn_balances_min_churn_limit, + threshold_fn=lambda spec: spec.config.EJECTION_BALANCE) @single_phase def 
test_ejection_past_churn_limit_scaled(spec, state): assert spec.get_validator_churn_limit(state) > spec.config.MIN_PER_EPOCH_CHURN_LIMIT @@ -324,7 +326,8 @@ def test_activation_queue_activation_and_ejection__exceed_churn_limit(spec, stat @with_presets([MINIMAL], reason="mainnet config leads to larger validator set than limit of public/private keys pre-generated") @spec_test -@with_custom_state(balances_fn=scaled_churn_balances, threshold_fn=lambda spec: spec.config.EJECTION_BALANCE) +@with_custom_state(balances_fn=scaled_churn_balances_min_churn_limit, + threshold_fn=lambda spec: spec.config.EJECTION_BALANCE) @single_phase def test_activation_queue_activation_and_ejection__scaled_churn_limit(spec, state): churn_limit = spec.get_validator_churn_limit(state) @@ -336,7 +339,8 @@ def test_activation_queue_activation_and_ejection__scaled_churn_limit(spec, stat @with_presets([MINIMAL], reason="mainnet config leads to larger validator set than limit of public/private keys pre-generated") @spec_test -@with_custom_state(balances_fn=scaled_churn_balances, threshold_fn=lambda spec: spec.config.EJECTION_BALANCE) +@with_custom_state(balances_fn=scaled_churn_balances_min_churn_limit, + threshold_fn=lambda spec: spec.config.EJECTION_BALANCE) @single_phase def test_activation_queue_activation_and_ejection__exceed_scaled_churn_limit(spec, state): churn_limit = spec.get_validator_churn_limit(state) diff --git a/tests/core/pyspec/eth2spec/test/phase0/fork_choice/test_get_head.py b/tests/core/pyspec/eth2spec/test/phase0/fork_choice/test_get_head.py index 30f94b854c..886fcbd209 100644 --- a/tests/core/pyspec/eth2spec/test/phase0/fork_choice/test_get_head.py +++ b/tests/core/pyspec/eth2spec/test/phase0/fork_choice/test_get_head.py @@ -34,9 +34,6 @@ ) -rng = random.Random(1001) - - @with_altair_and_later @spec_state_test def test_genesis(spec, state): @@ -271,6 +268,7 @@ def test_proposer_boost_correct_head(spec, state): next_slots(spec, state_2, 2) block_2 = build_empty_block_for_next_slot(spec, state_2) signed_block_2 = state_transition_and_sign_block(spec, state_2.copy(), block_2) + rng = random.Random(1001) while spec.hash_tree_root(block_1) >= spec.hash_tree_root(block_2): block_2.body.graffiti = spec.Bytes32(hex(rng.getrandbits(8 * 32))[2:].zfill(64)) signed_block_2 = state_transition_and_sign_block(spec, state_2.copy(), block_2) @@ -339,6 +337,7 @@ def test_discard_equivocations_on_attester_slashing(spec, state): next_slots(spec, state_2, 2) block_2 = build_empty_block_for_next_slot(spec, state_2) signed_block_2 = state_transition_and_sign_block(spec, state_2.copy(), block_2) + rng = random.Random(1001) while spec.hash_tree_root(block_1) >= spec.hash_tree_root(block_2): block_2.body.graffiti = spec.Bytes32(hex(rng.getrandbits(8 * 32))[2:].zfill(64)) signed_block_2 = state_transition_and_sign_block(spec, state_2.copy(), block_2) diff --git a/tests/core/pyspec/eth2spec/test/phase0/fork_choice/test_on_block.py b/tests/core/pyspec/eth2spec/test/phase0/fork_choice/test_on_block.py index 840413a364..cd41350496 100644 --- a/tests/core/pyspec/eth2spec/test/phase0/fork_choice/test_on_block.py +++ b/tests/core/pyspec/eth2spec/test/phase0/fork_choice/test_on_block.py @@ -539,6 +539,56 @@ def test_proposer_boost_root_same_slot_untimely_block(spec, state): yield 'steps', test_steps +@with_altair_and_later +@spec_state_test +def test_proposer_boost_is_first_block(spec, state): + test_steps = [] + genesis_state = state.copy() + + # Initialization + store, anchor_block = get_genesis_forkchoice_store_and_block(spec, 
state) + yield 'anchor_state', state + yield 'anchor_block', anchor_block + + # Build block that serves as head ONLY on timely arrival, and ONLY in that slot + state = genesis_state.copy() + next_slots(spec, state, 3) + pre_state = state.copy() + block_a = build_empty_block_for_next_slot(spec, state) + signed_block_a = state_transition_and_sign_block(spec, state, block_a) + + # Process block on timely arrival just before end of boost interval + time = (store.genesis_time + block_a.slot * spec.config.SECONDS_PER_SLOT + + spec.config.SECONDS_PER_SLOT // spec.INTERVALS_PER_SLOT - 1) + on_tick_and_append_step(spec, store, time, test_steps) + yield from add_block(spec, store, signed_block_a, test_steps) + # `proposer_boost_root` is now `block_a` + assert store.proposer_boost_root == spec.hash_tree_root(block_a) + assert spec.get_weight(store, spec.hash_tree_root(block_a)) > 0 + test_steps.append({ + 'checks': { + 'proposer_boost_root': encode_hex(store.proposer_boost_root), + } + }) + + # make a different block at the same slot + state = pre_state.copy() + block_b = block_a.copy() + block_b.body.graffiti = b'\x34' * 32 + signed_block_b = state_transition_and_sign_block(spec, state, block_b) + yield from add_block(spec, store, signed_block_b, test_steps) + # `proposer_boost_root` is still `block_a` + assert store.proposer_boost_root == spec.hash_tree_root(block_a) + assert spec.get_weight(store, spec.hash_tree_root(block_b)) == 0 + test_steps.append({ + 'checks': { + 'proposer_boost_root': encode_hex(store.proposer_boost_root), + } + }) + + yield 'steps', test_steps + + @with_altair_and_later @spec_state_test @with_presets([MINIMAL], reason="too slow") diff --git a/tests/core/pyspec/eth2spec/test/phase0/unittests/validator/test_validator_unittest.py b/tests/core/pyspec/eth2spec/test/phase0/unittests/validator/test_validator_unittest.py index 918ab96e2e..f5f417bbaf 100644 --- a/tests/core/pyspec/eth2spec/test/phase0/unittests/validator/test_validator_unittest.py +++ b/tests/core/pyspec/eth2spec/test/phase0/unittests/validator/test_validator_unittest.py @@ -485,7 +485,7 @@ def test_get_aggregate_and_proof_signature(spec, state): def run_compute_subscribed_subnets_arguments(spec, rng=random.Random(1111)): - node_id = rng.randint(0, 2**40 - 1) # try VALIDATOR_REGISTRY_LIMIT + node_id = rng.randint(0, 2**256 - 1) epoch = rng.randint(0, 2**64 - 1) subnets = spec.compute_subscribed_subnets(node_id, epoch) assert len(subnets) == spec.config.SUBNETS_PER_NODE diff --git a/tests/formats/fork_choice/README.md b/tests/formats/fork_choice/README.md index 3b28837de7..d23de865b3 100644 --- a/tests/formats/fork_choice/README.md +++ b/tests/formats/fork_choice/README.md @@ -2,6 +2,30 @@ The aim of the fork choice tests is to provide test coverage of the various components of the fork choice. 
+## Table of contents + + + +- [Test case format](#test-case-format) + - [`meta.yaml`](#metayaml) + - [`anchor_state.ssz_snappy`](#anchor_statessz_snappy) + - [`anchor_block.ssz_snappy`](#anchor_blockssz_snappy) + - [`steps.yaml`](#stepsyaml) + - [`on_tick` execution step](#on_tick-execution-step) + - [`on_attestation` execution step](#on_attestation-execution-step) + - [`on_block` execution step](#on_block-execution-step) + - [`on_merge_block` execution step](#on_merge_block-execution-step) + - [`on_attester_slashing` execution step](#on_attester_slashing-execution-step) + - [`on_payload_info` execution step](#on_payload_info-execution-step) + - [Checks step](#checks-step) + - [`attestation_<32-byte-root>.ssz_snappy`](#attestation_32-byte-rootssz_snappy) + - [`block_<32-byte-root>.ssz_snappy`](#block_32-byte-rootssz_snappy) +- [Condition](#condition) + + + + ## Test case format ### `meta.yaml` @@ -59,14 +83,20 @@ The parameter that is required for executing `on_block(store, block)`. ```yaml { - block: string -- the name of the `block_<32-byte-root>.ssz_snappy` file. - To execute `on_block(store, block)` with the given attestation. - valid: bool -- optional, default to `true`. - If it's `false`, this execution step is expected to be invalid. + block: string -- the name of the `block_<32-byte-root>.ssz_snappy` file. + To execute `on_block(store, block)` with the given block. + blobs: string -- optional, the name of the `blobs_<32-byte-root>.ssz_snappy` file. + The blobs file content is a `List[Blob, MAX_BLOBS_PER_BLOCK]` SSZ object. + proofs: array of byte48 hex string -- optional, the proofs of blob commitments. + valid: bool -- optional, default to `true`. + If it's `false`, this execution step is expected to be invalid. } ``` + The file is located in the same folder (see below). +`blobs` and `proofs` are new fields introduced in Deneb (EIP-4844). They specify the expected return values of the `retrieve_blobs_and_proofs()` helper used inside `is_data_available()`. If these two fields are not provided, `retrieve_blobs_and_proofs()` returns empty lists. + After this step, the `store` object may have been updated. #### `on_merge_block` execution step diff --git a/tests/formats/kzg/verify_blob_kzg_proof_batch.md b/tests/formats/kzg/verify_blob_kzg_proof_batch.md index 3bcc74d6bb..82e668497d 100644 --- a/tests/formats/kzg/verify_blob_kzg_proof_batch.md +++ b/tests/formats/kzg/verify_blob_kzg_proof_batch.md @@ -1,6 +1,6 @@ # Test format: Verify blob KZG proof batch -Use the blob KZG proofs to verify that the KZG commitments for given `blob`s are correct +Use the blob KZG proofs to verify that the KZG commitments for the given `blobs` are correct ## Test case format @@ -8,13 +8,13 @@ The test data is declared in a `data.yaml` file: ```yaml input: - blob: List[Blob] -- the data blob - commitment: List[KZGCommitment] -- the KZG commitment to the data blob - proof: List[KZGProof] -- The KZG proof + blobs: List[Blob] -- the data blobs + commitments: List[KZGCommitment] -- the KZG commitments to the data blobs + proofs: List[KZGProof] -- the KZG proofs output: bool -- true (all proofs are valid) or false (some proofs incorrect) ``` -- `blob`s here are encoded as a string: hexadecimal encoding of `4096 * 32 = 131072` bytes, prefixed with `0x`. +- `blobs` here are each encoded as a string: hexadecimal encoding of `4096 * 32 = 131072` bytes, prefixed with `0x`. All byte(s) fields are encoded as strings, hexadecimal encoding, prefixed with `0x`. 
diff --git a/tests/generators/bls/main.py b/tests/generators/bls/main.py index 8c187c68c1..35bd2b6a95 100644 --- a/tests/generators/bls/main.py +++ b/tests/generators/bls/main.py @@ -519,7 +519,7 @@ def cases_fn() -> Iterable[gen_typing.TestCase]: preset_name='general', runner_name='bls', handler_name=handler_name, - suite_name='small', + suite_name='bls', case_name=case_name, case_fn=lambda: [('data', 'data', case_content)] ) diff --git a/tests/generators/epoch_processing/main.py b/tests/generators/epoch_processing/main.py index 645c84cb6b..63c2a548fd 100644 --- a/tests/generators/epoch_processing/main.py +++ b/tests/generators/epoch_processing/main.py @@ -32,7 +32,10 @@ ]} capella_mods = combine_mods(_new_capella_mods, bellatrix_mods) - deneb_mods = capella_mods + _new_deneb_mods = {key: 'eth2spec.test.deneb.epoch_processing.test_process_' + key for key in [ + 'registry_updates', + ]} + deneb_mods = combine_mods(_new_deneb_mods, capella_mods) eip6110_mods = deneb_mods diff --git a/tests/generators/fork_choice/main.py b/tests/generators/fork_choice/main.py index b0c9a9bb9d..7ff028cd80 100644 --- a/tests/generators/fork_choice/main.py +++ b/tests/generators/fork_choice/main.py @@ -19,7 +19,13 @@ ]} bellatrix_mods = combine_mods(_new_bellatrix_mods, altair_mods) capella_mods = bellatrix_mods # No additional Capella specific fork choice tests - deneb_mods = capella_mods # No additional Deneb specific fork choice tests + + # Deneb adds `is_data_available` tests + _new_deneb_mods = {key: 'eth2spec.test.deneb.fork_choice.test_' + key for key in [ + 'on_block', + ]} + deneb_mods = combine_mods(_new_deneb_mods, capella_mods) + eip6110_mods = deneb_mods # No additional EIP6110 specific fork choice tests all_mods = { diff --git a/tests/generators/kzg_4844/main.py b/tests/generators/kzg_4844/main.py index 9297e524ac..1c5aefc61f 100644 --- a/tests/generators/kzg_4844/main.py +++ b/tests/generators/kzg_4844/main.py @@ -527,7 +527,7 @@ def cases_fn() -> Iterable[gen_typing.TestCase]: preset_name='general', runner_name='kzg', handler_name=handler_name, - suite_name='small', + suite_name='kzg-mainnet', case_name=case_name, case_fn=lambda: [('data', 'data', case_content)] ) diff --git a/tests/generators/ssz_generic/ssz_container.py b/tests/generators/ssz_generic/ssz_container.py index 1b30d687ac..2c1d37da88 100644 --- a/tests/generators/ssz_generic/ssz_container.py +++ b/tests/generators/ssz_generic/ssz_container.py @@ -46,11 +46,11 @@ class BitsStruct(Container): E: Bitvector[8] -def container_case_fn(rng: Random, mode: RandomizationMode, typ: Type[View]): +def container_case_fn(rng: Random, mode: RandomizationMode, typ: Type[View], chaos: bool=False): return get_random_ssz_object(rng, typ, max_bytes_length=2000, max_list_length=2000, - mode=mode, chaos=False) + mode=mode, chaos=chaos) PRESET_CONTAINERS: Dict[str, Tuple[Type[View], Sequence[int]]] = { @@ -68,17 +68,23 @@ def valid_cases(): for (name, (typ, offsets)) in PRESET_CONTAINERS.items(): for mode in [RandomizationMode.mode_zero, RandomizationMode.mode_max]: yield f'{name}_{mode.to_name()}', valid_test_case(lambda: container_case_fn(rng, mode, typ)) - random_modes = [RandomizationMode.mode_random, RandomizationMode.mode_zero, RandomizationMode.mode_max] - if len(offsets) != 0: - random_modes.extend([RandomizationMode.mode_nil_count, - RandomizationMode.mode_one_count, - RandomizationMode.mode_max_count]) - for mode in random_modes: - for variation in range(10): - yield f'{name}_{mode.to_name()}_{variation}', \ - valid_test_case(lambda: 
container_case_fn(rng, mode, typ)) + + if len(offsets) == 0: + modes = [RandomizationMode.mode_random, RandomizationMode.mode_zero, RandomizationMode.mode_max] + else: + modes = list(RandomizationMode) + + for mode in modes: for variation in range(3): yield f'{name}_{mode.to_name()}_chaos_{variation}', \ + valid_test_case(lambda: container_case_fn(rng, mode, typ, chaos=True)) + # Notes: Below is the second wave of iteration, and only the random mode is selected + # for container without offset since ``RandomizationMode.mode_zero`` and ``RandomizationMode.mode_max`` + # are deterministic. + modes = [RandomizationMode.mode_random] if len(offsets) == 0 else list(RandomizationMode) + for mode in modes: + for variation in range(10): + yield f'{name}_{mode.to_name()}_{variation}', \ valid_test_case(lambda: container_case_fn(rng, mode, typ)) diff --git a/tests/generators/ssz_generic/ssz_uints.py b/tests/generators/ssz_generic/ssz_uints.py index 896443f4cc..abf7fc75b2 100644 --- a/tests/generators/ssz_generic/ssz_uints.py +++ b/tests/generators/ssz_generic/ssz_uints.py @@ -18,13 +18,16 @@ def uint_case_fn(rng: Random, mode: RandomizationMode, typ: Type[BasicView]): def valid_cases(): rng = Random(1234) for uint_type in UINT_TYPES: + mode = RandomizationMode.mode_random byte_len = uint_type.type_byte_length() yield f'uint_{byte_len * 8}_last_byte_empty', \ valid_test_case(lambda: uint_type((2 ** ((byte_len - 1) * 8)) - 1)) for variation in range(5): - for mode in [RandomizationMode.mode_random, RandomizationMode.mode_zero, RandomizationMode.mode_max]: - yield f'uint_{byte_len * 8}_{mode.to_name()}_{variation}', \ - valid_test_case(lambda: uint_case_fn(rng, mode, uint_type)) + yield f'uint_{byte_len * 8}_{mode.to_name()}_{variation}', \ + valid_test_case(lambda: uint_case_fn(rng, mode, uint_type)) + for mode in [RandomizationMode.mode_zero, RandomizationMode.mode_max]: + yield f'uint_{byte_len * 8}_{mode.to_name()}', \ + valid_test_case(lambda: uint_case_fn(rng, mode, uint_type)) def invalid_cases():