diff --git a/.gitignore b/.gitignore
index 3586b356ca..ad62aa177e 100644
--- a/.gitignore
+++ b/.gitignore
@@ -25,6 +25,7 @@ tests/core/pyspec/eth2spec/electra/
tests/core/pyspec/eth2spec/whisk/
tests/core/pyspec/eth2spec/eip7594/
tests/core/pyspec/eth2spec/eip6800/
+tests/core/pyspec/eth2spec/eip7732/
# coverage reports
.htmlcov
diff --git a/Makefile b/Makefile
index bdf4bdde7a..23b6a6035d 100644
--- a/Makefile
+++ b/Makefile
@@ -14,7 +14,6 @@ SOLIDITY_FILE_NAME = deposit_contract.json
DEPOSIT_CONTRACT_TESTER_DIR = ${SOLIDITY_DEPOSIT_CONTRACT_DIR}/web3_tester
CONFIGS_DIR = ./configs
TEST_PRESET_TYPE ?= minimal
-NUMBER_OF_CORES=16
# Collect a list of generator names
GENERATORS = $(sort $(dir $(wildcard $(GENERATOR_DIR)/*/.)))
# Map this list of generator paths to "gen_{generator name}" entries
@@ -35,7 +34,7 @@ MARKDOWN_FILES = $(wildcard $(SPEC_DIR)/*/*.md) \
$(wildcard $(SPEC_DIR)/_features/*/*/*.md) \
$(wildcard $(SSZ_DIR)/*.md)
-ALL_EXECUTABLE_SPEC_NAMES = phase0 altair bellatrix capella deneb electra whisk eip6800
+ALL_EXECUTABLE_SPEC_NAMES = phase0 altair bellatrix capella deneb electra whisk eip6800 eip7732
# The parameters for commands. Use `foreach` to avoid listing specs again.
COVERAGE_SCOPE := $(foreach S,$(ALL_EXECUTABLE_SPEC_NAMES), --cov=eth2spec.$S.$(TEST_PRESET_TYPE))
PYLINT_SCOPE := $(foreach S,$(ALL_EXECUTABLE_SPEC_NAMES), ./eth2spec/$S)
@@ -118,7 +117,7 @@ install_test: preinstallation
# Testing against `minimal` or `mainnet` config by default
test: pyspec
. venv/bin/activate; cd $(PY_SPEC_DIR); \
- python3 -m pytest -n 4 --disable-bls $(COVERAGE_SCOPE) --cov-report="html:$(COV_HTML_OUT)" --cov-branch eth2spec
+ python3 -m pytest -n auto --disable-bls $(COVERAGE_SCOPE) --cov-report="html:$(COV_HTML_OUT)" --cov-branch eth2spec
# Testing against `minimal` or `mainnet` config by default
find_test: pyspec
@@ -129,10 +128,10 @@ citest: pyspec
mkdir -p $(TEST_REPORT_DIR);
ifdef fork
. venv/bin/activate; cd $(PY_SPEC_DIR); \
- python3 -m pytest -n $(NUMBER_OF_CORES) --bls-type=fastest --preset=$(TEST_PRESET_TYPE) --fork=$(fork) --junitxml=test-reports/test_results.xml eth2spec
+ python3 -m pytest -n auto --bls-type=fastest --preset=$(TEST_PRESET_TYPE) --fork=$(fork) --junitxml=test-reports/test_results.xml eth2spec
else
. venv/bin/activate; cd $(PY_SPEC_DIR); \
- python3 -m pytest -n $(NUMBER_OF_CORES) --bls-type=fastest --preset=$(TEST_PRESET_TYPE) --junitxml=test-reports/test_results.xml eth2spec
+ python3 -m pytest -n auto --bls-type=fastest --preset=$(TEST_PRESET_TYPE) --junitxml=test-reports/test_results.xml eth2spec
endif
diff --git a/README.md b/README.md
index 58bff5b9e4..c62a4171dc 100644
--- a/README.md
+++ b/README.md
@@ -26,7 +26,7 @@ Features are researched and developed in parallel, and then consolidated into se
### In-development Specifications
| Code Name or Topic | Specs | Notes |
| - | - | - |
-| Electra | <ul><li>Core</li><ul><li>[Beacon Chain changes](specs/electra/beacon-chain.md)</li><li>[EIP-6110 fork](specs/electra/fork.md)</li></ul><li>Additions</li><ul><li>[Honest validator guide changes](specs/electra/validator.md)</li></ul></ul> |
+| Electra | <ul><li>Core</li><ul><li>[Beacon Chain changes](specs/electra/beacon-chain.md)</li><li>[EIP-6110 fork](specs/electra/fork.md)</li></ul><li>Additions</li><ul><li>[Light client sync protocol changes](specs/electra/light-client/sync-protocol.md) ([fork](specs/electra/light-client/fork.md), [full node](specs/electra/light-client/full-node.md), [networking](specs/electra/light-client/p2p-interface.md))</li><li>[Honest validator guide changes](specs/electra/validator.md)</li></ul></ul> |
| Sharding (outdated) | <ul><li>Core</li><ul><li>[Beacon Chain changes](specs/_features/sharding/beacon-chain.md)</li></ul><li>Additions</li><ul><li>[P2P networking](specs/_features/sharding/p2p-interface.md)</li></ul></ul> |
| Custody Game (outdated) | <ul><li>Core</li><ul><li>[Beacon Chain changes](specs/_features/custody_game/beacon-chain.md)</li></ul><li>Additions</li><ul><li>[Honest validator guide changes](specs/_features/custody_game/validator.md)</li></ul></ul> | Dependent on sharding |
| Data Availability Sampling (outdated) | <ul><li>Core</li><ul><li>[Core types and functions](specs/_features/das/das-core.md)</li><li>[Fork choice changes](specs/_features/das/fork-choice.md)</li></ul><li>Additions</li><ul><li>[P2P Networking](specs/_features/das/p2p-interface.md)</li><li>[Sampling process](specs/_features/das/sampling.md)</li></ul></ul> | <ul><li>Dependent on sharding</li><li>[Technical explainer](https://hackmd.io/@HWeNw8hNRimMm2m2GH56Cw/B1YJPGkpD)</li></ul> |
diff --git a/configs/mainnet.yaml b/configs/mainnet.yaml
index 7051873ce9..1205d7f86f 100644
--- a/configs/mainnet.yaml
+++ b/configs/mainnet.yaml
@@ -59,6 +59,9 @@ EIP7594_FORK_EPOCH: 18446744073709551615
# WHISK
WHISK_FORK_VERSION: 0x08000000 # temporary stub
WHISK_FORK_EPOCH: 18446744073709551615
+# EIP7732
+EIP7732_FORK_VERSION: 0x09000000 # temporary stub
+EIP7732_FORK_EPOCH: 18446744073709551615
# Time parameters
# ---------------------------------------------------------------
@@ -147,6 +150,8 @@ MAX_REQUEST_BLOB_SIDECARS: 768
MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS: 4096
# `6`
BLOB_SIDECAR_SUBNET_COUNT: 6
+# `uint64(6)`
+MAX_BLOBS_PER_BLOCK: 6
# Whisk
# `Epoch(2**8)`
diff --git a/configs/minimal.yaml b/configs/minimal.yaml
index 8e2a222d47..9c2b3812fb 100644
--- a/configs/minimal.yaml
+++ b/configs/minimal.yaml
@@ -58,6 +58,9 @@ EIP7594_FORK_EPOCH: 18446744073709551615
# WHISK
WHISK_FORK_VERSION: 0x08000001
WHISK_FORK_EPOCH: 18446744073709551615
+# EIP7732
+EIP7732_FORK_VERSION: 0x09000001
+EIP7732_FORK_EPOCH: 18446744073709551615
# Time parameters
# ---------------------------------------------------------------
@@ -148,6 +151,8 @@ MAX_REQUEST_BLOB_SIDECARS: 768
MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS: 4096
# `6`
BLOB_SIDECAR_SUBNET_COUNT: 6
+# `uint64(6)`
+MAX_BLOBS_PER_BLOCK: 6
# Whisk
WHISK_EPOCHS_PER_SHUFFLING_PHASE: 4
diff --git a/docker/README.md b/docker/README.md
index 6d5b21e59d..4f0c1ecca9 100644
--- a/docker/README.md
+++ b/docker/README.md
@@ -6,15 +6,15 @@ This dockerfile sets up the dependencies required to run consensus-spec tests. T
Handy commands:
- `docker run -it $IMAGE_NAME /bin/sh` will give you a shell inside the docker container to manually run any tests
-- `docker run $IMAGE_NAME make citest` will run the make citest command inside the docker container
+- `docker run $IMAGE_NAME make citest` will run the make citest command inside the docker container
Ideally manual running of docker containers is for advanced users, we recommend the script based approach described below for most users.
-The `scripts/build_run_docker_tests.sh` script will cover most usecases. The script allows the user to configure the fork(altair/bellatrix/capella..), `$IMAGE_NAME` (specifies the container to use), number of cores, preset type (mainnet/minimal), and test all forks flags. Ideally, this is the main way that users interact with the spec tests instead of running it locally with varying versions of dependencies.
+The `scripts/build_run_docker_tests.sh` script covers most use cases. It allows the user to configure the fork (altair/bellatrix/capella, etc.), the `$IMAGE_NAME` (specifies the container to use), the preset type (mainnet/minimal), and a flag to test all forks. Ideally, this is the main way that users interact with the spec tests instead of running them locally with varying versions of dependencies.
E.g:
-- `./build_run_test.sh --p mainnet --n 16` will run the mainnet preset tests with 16 threads
+- `./build_run_test.sh --p mainnet` will run the mainnet preset tests
- `./build_run_test.sh --a` will run all the tests across all the forks
-- `./build_run_test.sh --f deneb --n 16` will only run deneb tests on 16 threads
+- `./build_run_test.sh --f deneb` will only run deneb tests
Results are always placed in a folder called `./testResults`. The results are `.xml` files and contain the fork they represent and the date/time they were run at.
\ No newline at end of file
diff --git a/fork_choice/safe-block.md b/fork_choice/safe-block.md
index b76285b3a8..d4af9060d0 100644
--- a/fork_choice/safe-block.md
+++ b/fork_choice/safe-block.md
@@ -7,7 +7,7 @@
- [Introduction](#introduction)
- [`get_safe_beacon_block_root`](#get_safe_beacon_block_root)
-- [`get_safe_execution_payload_hash`](#get_safe_execution_payload_hash)
+- [`get_safe_execution_block_hash`](#get_safe_execution_block_hash)
@@ -31,10 +31,10 @@ def get_safe_beacon_block_root(store: Store) -> Root:
*Note*: Currently safe block algorithm simply returns `store.justified_checkpoint.root`
and is meant to be improved in the future.
-## `get_safe_execution_payload_hash`
+## `get_safe_execution_block_hash`
```python
-def get_safe_execution_payload_hash(store: Store) -> Hash32:
+def get_safe_execution_block_hash(store: Store) -> Hash32:
safe_block_root = get_safe_beacon_block_root(store)
safe_block = store.blocks[safe_block_root]
diff --git a/presets/mainnet/deneb.yaml b/presets/mainnet/deneb.yaml
index 0f56b8bdfa..f426d3ae1a 100644
--- a/presets/mainnet/deneb.yaml
+++ b/presets/mainnet/deneb.yaml
@@ -6,7 +6,5 @@
FIELD_ELEMENTS_PER_BLOB: 4096
# `uint64(2**12)` (= 4096)
MAX_BLOB_COMMITMENTS_PER_BLOCK: 4096
-# `uint64(6)`
-MAX_BLOBS_PER_BLOCK: 6
# `floorlog2(get_generalized_index(BeaconBlockBody, 'blob_kzg_commitments')) + 1 + ceillog2(MAX_BLOB_COMMITMENTS_PER_BLOCK)` = 4 + 1 + 12 = 17
KZG_COMMITMENT_INCLUSION_PROOF_DEPTH: 17
diff --git a/presets/mainnet/eip-7732.yaml b/presets/mainnet/eip-7732.yaml
new file mode 100644
index 0000000000..eb43c981a9
--- /dev/null
+++ b/presets/mainnet/eip-7732.yaml
@@ -0,0 +1,9 @@
+# Mainnet preset - EIP7732
+
+# Execution
+# ---------------------------------------------------------------
+# 2**9 (= 512)
+PTC_SIZE: 512
+# 2**2 (= 4)
+MAX_PAYLOAD_ATTESTATIONS: 4
+KZG_COMMITMENT_INCLUSION_PROOF_DEPTH_EIP7732: 13
diff --git a/presets/minimal/deneb.yaml b/presets/minimal/deneb.yaml
index bc4fe4369a..9d0db086b8 100644
--- a/presets/minimal/deneb.yaml
+++ b/presets/minimal/deneb.yaml
@@ -6,7 +6,5 @@
FIELD_ELEMENTS_PER_BLOB: 4096
# [customized]
MAX_BLOB_COMMITMENTS_PER_BLOCK: 16
-# `uint64(6)`
-MAX_BLOBS_PER_BLOCK: 6
# [customized] `floorlog2(get_generalized_index(BeaconBlockBody, 'blob_kzg_commitments')) + 1 + ceillog2(MAX_BLOB_COMMITMENTS_PER_BLOCK)` = 4 + 1 + 4 = 9
KZG_COMMITMENT_INCLUSION_PROOF_DEPTH: 9
diff --git a/presets/minimal/eip-7732.yaml b/presets/minimal/eip-7732.yaml
new file mode 100644
index 0000000000..751c1325a5
--- /dev/null
+++ b/presets/minimal/eip-7732.yaml
@@ -0,0 +1,9 @@
+# Minimal preset - EIP7732
+
+# Execution
+# ---------------------------------------------------------------
+# 2**1 (= 2)
+PTC_SIZE: 2
+# 2**2 (= 4)
+MAX_PAYLOAD_ATTESTATIONS: 4
+KZG_COMMITMENT_INCLUSION_PROOF_DEPTH_EIP7732: 13
diff --git a/pysetup/constants.py b/pysetup/constants.py
index e26efb8e06..6bf22865b2 100644
--- a/pysetup/constants.py
+++ b/pysetup/constants.py
@@ -8,6 +8,7 @@
EIP7594 = 'eip7594'
EIP6800 = 'eip6800'
WHISK = 'whisk'
+EIP7732 = 'eip7732'
# The helper functions that are used when defining constants
diff --git a/pysetup/helpers.py b/pysetup/helpers.py
index 589ae6ab58..212eb98c10 100644
--- a/pysetup/helpers.py
+++ b/pysetup/helpers.py
@@ -124,13 +124,22 @@ def format_constant(name: str, vardef: VariableDefinition) -> str:
# Keep engine from the most recent fork
execution_engine_cls = reduce(lambda txt, builder: builder.execution_engine_cls() or txt, builders, "")
+ # Remove deprecated constants
+ deprecate_constants = reduce(lambda obj, builder: obj.union(builder.deprecate_constants()), builders, set())
+ filtered_ssz_dep_constants = {k: v for k, v in hardcoded_ssz_dep_constants.items() if k not in deprecate_constants}
+ # Remove deprecated presets
+ deprecate_presets = reduce(lambda obj, builder: obj.union(builder.deprecate_presets()), builders, set())
+ filtered_hardcoded_func_dep_presets = {k: v for k, v in hardcoded_func_dep_presets.items() if k not in deprecate_presets}
+
constant_vars_spec = '# Constant vars\n' + '\n'.join(format_constant(k, v) for k, v in spec_object.constant_vars.items())
preset_vars_spec = '# Preset vars\n' + '\n'.join(format_constant(k, v) for k, v in spec_object.preset_vars.items())
ordered_class_objects_spec = '\n\n\n'.join(ordered_class_objects.values())
ssz_dep_constants = '\n'.join(map(lambda x: '%s = %s' % (x, hardcoded_ssz_dep_constants[x]), hardcoded_ssz_dep_constants))
- ssz_dep_constants_verification = '\n'.join(map(lambda x: 'assert %s == %s' % (x, spec_object.ssz_dep_constants[x]), hardcoded_ssz_dep_constants))
+ ssz_dep_constants_verification = '\n'.join(map(lambda x: 'assert %s == %s' % (x, spec_object.ssz_dep_constants[x]), filtered_ssz_dep_constants))
custom_type_dep_constants = '\n'.join(map(lambda x: '%s = %s' % (x, hardcoded_custom_type_dep_constants[x]), hardcoded_custom_type_dep_constants))
- func_dep_presets_verification = '\n'.join(map(lambda x: 'assert %s == %s # noqa: E501' % (x, spec_object.func_dep_presets[x]), hardcoded_func_dep_presets))
+ func_dep_presets_verification = '\n'.join(map(lambda x: 'assert %s == %s # noqa: E501' % (x, spec_object.func_dep_presets[x]), filtered_hardcoded_func_dep_presets))
spec_strs = [
imports,
preparations,
diff --git a/pysetup/md_doc_paths.py b/pysetup/md_doc_paths.py
index 28ebc71379..d99fc122ac 100644
--- a/pysetup/md_doc_paths.py
+++ b/pysetup/md_doc_paths.py
@@ -10,6 +10,7 @@
WHISK,
EIP7594,
EIP6800,
+ EIP7732,
)
@@ -23,6 +24,7 @@
WHISK: CAPELLA,
EIP7594: DENEB,
EIP6800: DENEB,
+ EIP7732: ELECTRA,
}
ALL_FORKS = list(PREVIOUS_FORK_OF.keys())
diff --git a/pysetup/spec_builders/__init__.py b/pysetup/spec_builders/__init__.py
index 922cee18b2..c5bbcf39eb 100644
--- a/pysetup/spec_builders/__init__.py
+++ b/pysetup/spec_builders/__init__.py
@@ -7,12 +7,13 @@
from .whisk import WhiskSpecBuilder
from .eip7594 import EIP7594SpecBuilder
from .eip6800 import EIP6800SpecBuilder
+from .eip7732 import EIP7732SpecBuilder
spec_builders = {
builder.fork: builder
for builder in (
Phase0SpecBuilder, AltairSpecBuilder, BellatrixSpecBuilder, CapellaSpecBuilder, DenebSpecBuilder,
- ElectraSpecBuilder, WhiskSpecBuilder, EIP7594SpecBuilder, EIP6800SpecBuilder,
+ ElectraSpecBuilder, WhiskSpecBuilder, EIP7594SpecBuilder, EIP6800SpecBuilder, EIP7732SpecBuilder,
)
}
diff --git a/pysetup/spec_builders/altair.py b/pysetup/spec_builders/altair.py
index aed5a8a1e9..830f396a9d 100644
--- a/pysetup/spec_builders/altair.py
+++ b/pysetup/spec_builders/altair.py
@@ -35,7 +35,7 @@ def get_generalized_index(ssz_class: Any, *path: PyUnion[int, SSZVariableName])
def compute_merkle_proof(object: SSZObject,
- index: GeneralizedIndex) -> Sequence[Bytes32]:
+ index: GeneralizedIndex) -> list[Bytes32]:
return build_proof(object.get_backing(), index)'''
diff --git a/pysetup/spec_builders/base.py b/pysetup/spec_builders/base.py
index ad9a2cb4c4..a8c648a0f0 100644
--- a/pysetup/spec_builders/base.py
+++ b/pysetup/spec_builders/base.py
@@ -1,5 +1,5 @@
from abc import ABC, abstractmethod
-from typing import Sequence, Dict
+from typing import Sequence, Dict, Set
from pathlib import Path
class BaseSpecBuilder(ABC):
@@ -54,3 +54,11 @@ def hardcoded_func_dep_presets(cls, spec_object) -> Dict[str, str]:
@classmethod
def implement_optimizations(cls, functions: Dict[str, str]) -> Dict[str, str]:
return functions
+
+ @classmethod
+ def deprecate_constants(cls) -> Set[str]:
+ return set()
+
+ @classmethod
+ def deprecate_presets(cls) -> Set[str]:
+ return set()
diff --git a/pysetup/spec_builders/deneb.py b/pysetup/spec_builders/deneb.py
index dc3c175836..436ae70b1d 100644
--- a/pysetup/spec_builders/deneb.py
+++ b/pysetup/spec_builders/deneb.py
@@ -70,7 +70,7 @@ def hardcoded_custom_type_dep_constants(cls, spec_object) -> Dict[str, str]:
return {
'BYTES_PER_FIELD_ELEMENT': spec_object.constant_vars['BYTES_PER_FIELD_ELEMENT'].value,
'FIELD_ELEMENTS_PER_BLOB': spec_object.preset_vars['FIELD_ELEMENTS_PER_BLOB'].value,
- 'MAX_BLOBS_PER_BLOCK': spec_object.preset_vars['MAX_BLOBS_PER_BLOCK'].value,
+ 'MAX_BLOBS_PER_BLOCK': spec_object.config_vars['MAX_BLOBS_PER_BLOCK'].value,
'MAX_BLOB_COMMITMENTS_PER_BLOCK': spec_object.preset_vars['MAX_BLOB_COMMITMENTS_PER_BLOCK'].value,
}
diff --git a/pysetup/spec_builders/eip7732.py b/pysetup/spec_builders/eip7732.py
new file mode 100644
index 0000000000..0c335fe1d9
--- /dev/null
+++ b/pysetup/spec_builders/eip7732.py
@@ -0,0 +1,45 @@
+from typing import Dict, Set
+
+from .base import BaseSpecBuilder
+from ..constants import EIP7732
+
+
+class EIP7732SpecBuilder(BaseSpecBuilder):
+ fork: str = EIP7732
+
+ @classmethod
+ def imports(cls, preset_name: str):
+ return f'''
+from eth2spec.electra import {preset_name} as electra
+'''
+
+ @classmethod
+ def sundry_functions(cls) -> str:
+ return '''
+def concat_generalized_indices(*indices: GeneralizedIndex) -> GeneralizedIndex:
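+    """
+    Combine generalized indices along a path into a single generalized index.
+    At each step, ``bit_floor(i)`` is the leading power of two of ``i``, so the
+    accumulated index is shifted up by the child's depth and the child's offset
+    bits ``i - bit_floor(i)`` are appended.
+    """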
+ o = GeneralizedIndex(1)
+ for i in indices:
+ o = GeneralizedIndex(o * bit_floor(i) + (i - bit_floor(i)))
+ return o'''
+
+ @classmethod
+ def hardcoded_custom_type_dep_constants(cls, spec_object) -> Dict[str, str]:
+ return {
+ 'PTC_SIZE': spec_object.preset_vars['PTC_SIZE'].value,
+ 'MAX_PAYLOAD_ATTESTATIONS': spec_object.preset_vars['MAX_PAYLOAD_ATTESTATIONS'].value,
+ 'KZG_COMMITMENT_INCLUSION_PROOF_DEPTH_EIP7732':
+ spec_object.preset_vars['KZG_COMMITMENT_INCLUSION_PROOF_DEPTH_EIP7732'].value,
+ }
+
+ @classmethod
+ def deprecate_constants(cls) -> Set[str]:
+ return set([
+ 'EXECUTION_PAYLOAD_GINDEX',
+ ])
+
+ @classmethod
+ def deprecate_presets(cls) -> Set[str]:
+ return set([
+ 'KZG_COMMITMENT_INCLUSION_PROOF_DEPTH',
+ ])
diff --git a/pysetup/spec_builders/electra.py b/pysetup/spec_builders/electra.py
index 1f968a817d..ca02ee927c 100644
--- a/pysetup/spec_builders/electra.py
+++ b/pysetup/spec_builders/electra.py
@@ -12,12 +12,10 @@ def imports(cls, preset_name: str):
from eth2spec.deneb import {preset_name} as deneb
'''
-## TODO: deal with changed gindices
-
@classmethod
def hardcoded_ssz_dep_constants(cls) -> Dict[str, str]:
return {
- 'FINALIZED_ROOT_GINDEX': 'GeneralizedIndex(169)',
- 'CURRENT_SYNC_COMMITTEE_GINDEX': 'GeneralizedIndex(86)',
- 'NEXT_SYNC_COMMITTEE_GINDEX': 'GeneralizedIndex(87)',
+ 'FINALIZED_ROOT_GINDEX_ELECTRA': 'GeneralizedIndex(169)',
+ 'CURRENT_SYNC_COMMITTEE_GINDEX_ELECTRA': 'GeneralizedIndex(86)',
+ 'NEXT_SYNC_COMMITTEE_GINDEX_ELECTRA': 'GeneralizedIndex(87)',
}
diff --git a/scripts/build_run_docker_tests.sh b/scripts/build_run_docker_tests.sh
index 8b20cfae62..9d2740ca27 100755
--- a/scripts/build_run_docker_tests.sh
+++ b/scripts/build_run_docker_tests.sh
@@ -13,7 +13,6 @@
ALL_EXECUTABLE_SPECS=("phase0" "altair" "bellatrix" "capella" "deneb" "electra" "whisk")
TEST_PRESET_TYPE=minimal
FORK_TO_TEST=phase0
-NUMBER_OF_CORES=4
WORKDIR="//consensus-specs//tests//core//pyspec"
ETH2SPEC_FOLDER_NAME="eth2spec"
CONTAINER_NAME="consensus-specs-tests"
@@ -21,17 +20,15 @@ DATE=$(date +"%Y%m%d-%H-%M")
# Default flag values
version=$(git log --pretty=format:'%h' -n 1)
IMAGE_NAME="consensus-specs:$version"
-number_of_core=4
# displays the available options
display_help() {
echo "Run 'consensus-specs' tests from a container instance."
echo "Be sure to launch Docker before running this script."
echo
- echo "Syntax: build_run_test.sh [--v TAG | --n NUMBER_OF_CORE | --f FORK_TO_TEST | --p PRESET_TYPE | --a | --h HELP]"
+ echo "Syntax: build_run_test.sh [--v TAG | --f FORK_TO_TEST | --p PRESET_TYPE | --a | --h HELP]"
echo " --f Specify the fork to test"
echo " --i Specify the docker image to use"
- echo " --n Specify the number of cores"
echo " --p Specify the test preset type"
echo " --a Test all forks"
echo " --h Display this help and exit"
@@ -63,7 +60,6 @@ while [[ "$#" -gt 0 ]]; do
case $1 in
--f) FORK_TO_TEST="$2"; shift ;;
--v) IMAGE_NAME="$2"; shift ;;
- --n) NUMBER_OF_CORES="$2"; shift ;;
--p) TEST_PRESET_TYPE="$2"; shift ;;
--a) FORK_TO_TEST="all" ;;
--h) display_help; exit 0 ;;
@@ -90,12 +86,12 @@ fi
if [ "$FORK_TO_TEST" == "all" ]; then
for fork in "${ALL_EXECUTABLE_SPECS[@]}"; do
docker run --name $CONTAINER_NAME $IMAGE_NAME \
- make citest fork=$fork TEST_PRESET_TYPE=$TEST_PRESET_TYPE NUMBER_OF_CORES=$NUMBER_OF_CORES
+ make citest fork=$fork TEST_PRESET_TYPE=$TEST_PRESET_TYPE
copy_test_results $fork
done
else
docker run --name $CONTAINER_NAME $IMAGE_NAME \
- make citest fork=$FORK_TO_TEST TEST_PRESET_TYPE=$TEST_PRESET_TYPE NUMBER_OF_CORES=$NUMBER_OF_CORES
+ make citest fork=$FORK_TO_TEST TEST_PRESET_TYPE=$TEST_PRESET_TYPE
copy_test_results $FORK_TO_TEST
fi
diff --git a/setup.py b/setup.py
index e5c348ada5..539db215b7 100644
--- a/setup.py
+++ b/setup.py
@@ -552,7 +552,7 @@ def run(self):
"pycryptodome==3.15.0",
"py_ecc==6.0.0",
"milagro_bls_binding==1.9.0",
- "remerkleable==0.1.27",
+ "remerkleable==0.1.28",
"trie==2.0.2",
RUAMEL_YAML_VERSION,
"lru-dict==1.2.0",
diff --git a/specs/_features/eip7594/das-core.md b/specs/_features/eip7594/das-core.md
index 9faf77b9ab..97bce0d0eb 100644
--- a/specs/_features/eip7594/das-core.md
+++ b/specs/_features/eip7594/das-core.md
@@ -23,6 +23,7 @@
- [`compute_extended_matrix`](#compute_extended_matrix)
- [`recover_matrix`](#recover_matrix)
- [`get_data_column_sidecars`](#get_data_column_sidecars)
+ - [`get_extended_sample_count`](#get_extended_sample_count)
- [Custody](#custody)
- [Custody requirement](#custody-requirement)
- [Public, deterministic selection](#public-deterministic-selection)
@@ -31,6 +32,8 @@
- [Column gossip](#column-gossip)
- [Parameters](#parameters)
- [Peer sampling](#peer-sampling)
+ - [Sample selection](#sample-selection)
+ - [Sample queries](#sample-queries)
- [Peer scoring](#peer-scoring)
- [Reconstruction and cross-seeding](#reconstruction-and-cross-seeding)
- [DAS providers](#das-providers)
@@ -65,8 +68,8 @@ The following values are (non-configurable) constants used throughout the specif
| Name | Value | Description |
| - | - | - |
-| `NUMBER_OF_COLUMNS` | `uint64(CELLS_PER_EXT_BLOB)` (= 128) | Number of columns in the extended data matrix. |
-| `MAX_CELLS_IN_EXTENDED_MATRIX` | `uint64(MAX_BLOBS_PER_BLOCK * NUMBER_OF_COLUMNS)` (= 768) | The data size of `ExtendedMatrix`. |
+| `NUMBER_OF_COLUMNS` | `uint64(CELLS_PER_EXT_BLOB)` (= 128) | Number of columns in the extended data matrix |
+| `MAX_CELLS_IN_EXTENDED_MATRIX` | `uint64(MAX_BLOBS_PER_BLOCK * NUMBER_OF_COLUMNS)` (= 768) | The data size of `ExtendedMatrix` |
### Networking
@@ -176,9 +179,8 @@ def recover_matrix(partial_matrix: Sequence[MatrixEntry],
for blob_index in range(blob_count):
cell_indices = [e.column_index for e in partial_matrix if e.row_index == blob_index]
cells = [e.cell for e in partial_matrix if e.row_index == blob_index]
- proofs = [e.kzg_proof for e in partial_matrix if e.row_index == blob_index]
- recovered_cells, recovered_proofs = recover_cells_and_kzg_proofs(cell_indices, cells, proofs)
+ recovered_cells, recovered_proofs = recover_cells_and_kzg_proofs(cell_indices, cells)
for cell_index, (cell, proof) in enumerate(zip(recovered_cells, recovered_proofs)):
extended_matrix.append(MatrixEntry(
cell=cell,
@@ -193,27 +195,31 @@ def recover_matrix(partial_matrix: Sequence[MatrixEntry],
```python
def get_data_column_sidecars(signed_block: SignedBeaconBlock,
- blobs: Sequence[Blob]) -> Sequence[DataColumnSidecar]:
+ cells_and_kzg_proofs: Sequence[Tuple[
+ Vector[Cell, CELLS_PER_EXT_BLOB],
+ Vector[KZGProof, CELLS_PER_EXT_BLOB]]]) -> Sequence[DataColumnSidecar]:
+ """
+ Given a signed block and the cells/proofs associated with each blob in the
+ block, assemble the sidecars which can be distributed to peers.
+ """
+ blob_kzg_commitments = signed_block.message.body.blob_kzg_commitments
+ assert len(cells_and_kzg_proofs) == len(blob_kzg_commitments)
signed_block_header = compute_signed_block_header(signed_block)
- block = signed_block.message
kzg_commitments_inclusion_proof = compute_merkle_proof(
- block.body,
+ signed_block.message.body,
get_generalized_index(BeaconBlockBody, 'blob_kzg_commitments'),
)
- cells_and_proofs = [compute_cells_and_kzg_proofs(blob) for blob in blobs]
- blob_count = len(blobs)
- cells = [cells_and_proofs[i][0] for i in range(blob_count)]
- proofs = [cells_and_proofs[i][1] for i in range(blob_count)]
+
sidecars = []
for column_index in range(NUMBER_OF_COLUMNS):
- column_cells = [cells[row_index][column_index]
- for row_index in range(blob_count)]
- column_proofs = [proofs[row_index][column_index]
- for row_index in range(blob_count)]
+ column_cells, column_proofs = [], []
+ for cells, proofs in cells_and_kzg_proofs:
+ column_cells.append(cells[column_index])
+ column_proofs.append(proofs[column_index])
sidecars.append(DataColumnSidecar(
index=column_index,
column=column_cells,
- kzg_commitments=block.body.blob_kzg_commitments,
+ kzg_commitments=blob_kzg_commitments,
kzg_proofs=column_proofs,
signed_block_header=signed_block_header,
kzg_commitments_inclusion_proof=kzg_commitments_inclusion_proof,
@@ -221,6 +227,48 @@ def get_data_column_sidecars(signed_block: SignedBeaconBlock,
return sidecars
```
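+
+For illustration, a minimal sketch of producing sidecars for a signed block, assuming `blobs` is the list of blobs matching the block's `blob_kzg_commitments` and that `compute_cells_and_kzg_proofs` from the polynomial commitments sampling document is in scope:
+
+```python
+cells_and_kzg_proofs = [compute_cells_and_kzg_proofs(blob) for blob in blobs]
+sidecars = get_data_column_sidecars(signed_block, cells_and_kzg_proofs)
+# One sidecar per column of the extended matrix.
+assert len(sidecars) == NUMBER_OF_COLUMNS
+```
+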
+#### `get_extended_sample_count`
+
+```python
+def get_extended_sample_count(allowed_failures: uint64) -> uint64:
+    """
+    Return the sample count if allowing failures.
+
+    This helper demonstrates how to calculate the number of columns to query per slot when
+    allowing a given number of failures, assuming uniform random selection without replacement.
+    Nested functions are direct replacements of the Python library functions math.comb and
+    scipy.stats.hypergeom.cdf, with the same signatures.
+    """
+    assert 0 <= allowed_failures <= NUMBER_OF_COLUMNS // 2
+
+ def math_comb(n: int, k: int) -> int:
+ if not 0 <= k <= n:
+ return 0
+ r = 1
+ for i in range(min(k, n - k)):
+ r = r * (n - i) // (i + 1)
+ return r
+
+ def hypergeom_cdf(k: uint64, M: uint64, n: uint64, N: uint64) -> float:
+        # NOTE: this function contains floating-point computations.
+ # Convert uint64 to Python integers before computations.
+ k = int(k)
+ M = int(M)
+ n = int(n)
+ N = int(N)
+ return sum([math_comb(n, i) * math_comb(M - n, N - i) / math_comb(M, N)
+ for i in range(k + 1)])
+
+ worst_case_missing = NUMBER_OF_COLUMNS // 2 + 1
+ false_positive_threshold = hypergeom_cdf(0, NUMBER_OF_COLUMNS,
+ worst_case_missing, SAMPLES_PER_SLOT)
+ for sample_count in range(SAMPLES_PER_SLOT, NUMBER_OF_COLUMNS + 1):
+ if hypergeom_cdf(allowed_failures, NUMBER_OF_COLUMNS,
+ worst_case_missing, sample_count) <= false_positive_threshold:
+ break
+ return sample_count
+```
+
## Custody
### Custody requirement
@@ -259,11 +307,33 @@ In this construction, we extend the blobs using a one-dimensional erasure coding
For each column -- use `data_column_sidecar_{subnet_id}` subnets, where `subnet_id` can be computed with the `compute_subnet_for_data_column_sidecar(column_index: ColumnIndex)` helper. The sidecars can be computed with the `get_data_column_sidecars(signed_block: SignedBeaconBlock, blobs: Sequence[Blob])` helper.
-To custody a particular column, a node joins the respective gossip subnet. Verifiable samples from their respective column are gossiped on the assigned subnet.
+Verifiable samples from each column are distributed on its assigned subnet. To custody a particular column, a node joins the respective gossipsub subnet. If a node fails to get a column on the column subnet, it can also utilize the Req/Resp protocol to query the missing column from other peers.
## Peer sampling
-A node SHOULD maintain a diverse set of peers for each column and each slot by verifying responsiveness to sample queries. At each slot, a node makes `SAMPLES_PER_SLOT` queries for samples from their peers via `DataColumnSidecarsByRoot` request. A node utilizes `get_custody_columns` helper to determine which peer(s) to request from. If a node has enough good/honest peers across all rows and columns, this has a high chance of success.
+### Sample selection
+
+At each slot, a node SHOULD select at least `SAMPLES_PER_SLOT` column IDs for sampling. It is recommended to use uniform random selection without replacement based on local randomness. Sampling is considered successful if the node manages to retrieve all selected columns.
+
+Alternatively, a node MAY use a method that selects more than `SAMPLES_PER_SLOT` columns while allowing some missing, respecting the same target false positive threshold (the probability of successful sampling of an unavailable block) as dictated by the `SAMPLES_PER_SLOT` parameter. If using uniform random selection without replacement, a node can use the `get_extended_sample_count(allowed_failures) -> sample_count` helper function to determine the sample count (number of unique column IDs) for any selected number of allowed failures. Sampling is then considered successful if any `sample_count - allowed_failures` columns are retrieved successfully.
+
+For reference, the table below shows the number of samples and the number of allowed missing columns assuming `NUMBER_OF_COLUMNS = 128` and `SAMPLES_PER_SLOT = 16`.
+
+| Allowed missing | 0| 1| 2| 3| 4| 5| 6| 7| 8|
+|-----------------|--|--|--|--|--|--|--|--|--|
+| Sample count |16|20|24|27|29|32|35|37|40|
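+
+As a sanity check, the table above can be reproduced with a short standalone script (a sketch, not part of the spec, using `scipy.stats.hypergeom.cdf` directly in place of the nested helpers in `get_extended_sample_count`):
+
+```python
+from scipy.stats import hypergeom
+
+NUMBER_OF_COLUMNS = 128
+SAMPLES_PER_SLOT = 16
+
+def extended_sample_count(allowed_failures: int) -> int:
+    # Worst case for a false positive: just over half of the columns are missing.
+    worst_case_missing = NUMBER_OF_COLUMNS // 2 + 1
+    # Probability that SAMPLES_PER_SLOT samples all succeed against an unavailable block.
+    false_positive_threshold = hypergeom.cdf(
+        0, NUMBER_OF_COLUMNS, worst_case_missing, SAMPLES_PER_SLOT)
+    for sample_count in range(SAMPLES_PER_SLOT, NUMBER_OF_COLUMNS + 1):
+        if hypergeom.cdf(allowed_failures, NUMBER_OF_COLUMNS,
+                         worst_case_missing, sample_count) <= false_positive_threshold:
+            break
+    return sample_count
+
+# Prints [16, 20, 24, 27, 29, 32, 35, 37, 40], matching the table above.
+print([extended_sample_count(f) for f in range(9)])
+```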
+
+### Sample queries
+
+A node SHOULD maintain a diverse set of peers for each column and each slot by verifying responsiveness to sample queries.
+
+A node SHOULD query for samples from selected peers via a `DataColumnSidecarsByRoot` request. A node utilizes the `get_custody_columns` helper to determine which peer(s) it could request from, identifying a list of candidate peers for each selected column.
+
+If more than one candidate peer is found for a given column, a node SHOULD randomize its peer selection to distribute sample query load in the network. Nodes MAY use peer scoring to tune this selection (for example, by using weighted selection or by using a cut-off threshold). If possible, it is also recommended to avoid requesting many columns from the same peer in order to avoid relying on and exposing the sample selection to a single peer.
+
+If a node already has a column because of custody, it is not required to send out queries for that column.
+
+If a node has enough good/honest peers across all columns, and the data is being made available, the above procedure has a high chance of success.
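+
+For example, a minimal sketch of randomized peer selection (names are hypothetical; `candidates` maps each selected column to the candidate peer IDs derived from `get_custody_columns`):
+
+```python
+import random
+from typing import Dict, List
+
+
+def pick_peers(candidates: Dict[int, List[str]]) -> Dict[int, str]:
+    # For each selected column, choose one candidate peer uniformly at random,
+    # spreading the sample query load across the network.
+    return {column: random.choice(peers) for column, peers in candidates.items()}
+```
+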
## Peer scoring
@@ -271,11 +341,9 @@ Due to the deterministic custody functions, a node knows exactly what a peer sho
## Reconstruction and cross-seeding
-If the node obtains 50%+ of all the columns, they can reconstruct the full data matrix via `recover_matrix` helper.
-
-If a node fails to sample a peer or fails to get a column on the column subnet, a node can utilize the Req/Resp message to query the missing column from other peers.
+If the node obtains 50%+ of all the columns, it SHOULD reconstruct the full data matrix via the `recover_matrix` helper. Nodes MAY delay this reconstruction, allowing time for other columns to arrive over the network. If delaying reconstruction, nodes may use a random delay in order to desynchronize reconstruction among nodes, thus reducing overall CPU load.
-Once the node obtain the column, the node SHOULD send the missing columns to the column subnets.
+Once the node obtains a column through reconstruction, the node MUST expose the new column as if it had received it over the network. If the node is subscribed to the subnet corresponding to the column, it MUST send the reconstructed DataColumnSidecar to its topic mesh neighbors. If instead the node is not subscribed to the corresponding subnet, it SHOULD still expose the availability of the DataColumnSidecar as part of the gossip emission process.
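+
+A minimal sketch of the 50% trigger described above (hypothetical helper; `received_columns` is the set of column indices received for a given block):
+
+```python
+def should_reconstruct(received_columns: Set[ColumnIndex]) -> bool:
+    # recover_matrix requires at least half of all columns to be present.
+    return 2 * len(received_columns) >= NUMBER_OF_COLUMNS
+```
+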
*Note*: A node always maintains a matrix view of the rows and columns they are following, able to cross-reference and cross-seed in either direction.
diff --git a/specs/_features/eip7594/p2p-interface.md b/specs/_features/eip7594/p2p-interface.md
index 582ee65140..1b25c5fc5a 100644
--- a/specs/_features/eip7594/p2p-interface.md
+++ b/specs/_features/eip7594/p2p-interface.md
@@ -17,6 +17,7 @@
- [`verify_data_column_sidecar_kzg_proofs`](#verify_data_column_sidecar_kzg_proofs)
- [`verify_data_column_sidecar_inclusion_proof`](#verify_data_column_sidecar_inclusion_proof)
- [`compute_subnet_for_data_column_sidecar`](#compute_subnet_for_data_column_sidecar)
+ - [MetaData](#metadata)
- [The gossip domain: gossipsub](#the-gossip-domain-gossipsub)
- [Topics and messages](#topics-and-messages)
- [Blob subnets](#blob-subnets)
@@ -26,6 +27,7 @@
- [Messages](#messages)
- [DataColumnSidecarsByRoot v1](#datacolumnsidecarsbyroot-v1)
- [DataColumnSidecarsByRange v1](#datacolumnsidecarsbyrange-v1)
+ - [GetMetaData v3](#getmetadata-v3)
- [The discovery domain: discv5](#the-discovery-domain-discv5)
- [ENR structure](#enr-structure)
- [Custody subnet count](#custody-subnet-count)
@@ -37,19 +39,18 @@
### Preset
-| Name | Value | Description |
-| - | - | - |
+| Name | Value | Description |
+|-----------------------------------------|-------------------------------------------------------------------------------------------|-------------------------------------------------------------------|
| `KZG_COMMITMENTS_INCLUSION_PROOF_DEPTH` | `uint64(floorlog2(get_generalized_index(BeaconBlockBody, 'blob_kzg_commitments')))` (= 4) | Merkle proof index for `blob_kzg_commitments` |
-
### Configuration
-*[New in Deneb:EIP4844]*
+*[New in EIP7594]*
-| Name | Value | Description |
-|------------------------------------------|-----------------------------------|---------------------------------------------------------------------|
-| `MAX_REQUEST_DATA_COLUMN_SIDECARS` | `MAX_REQUEST_BLOCKS_DENEB * NUMBER_OF_COLUMNS` | Maximum number of data column sidecars in a single request |
-| `MIN_EPOCHS_FOR_DATA_COLUMN_SIDECARS_REQUESTS` | `2**12` (= 4096 epochs, ~18 days) | The minimum epoch range over which a node must serve data column sidecars |
+| Name | Value | Description |
+|------------------------------------------------|------------------------------------------------|---------------------------------------------------------------------------|
+| `MAX_REQUEST_DATA_COLUMN_SIDECARS` | `MAX_REQUEST_BLOCKS_DENEB * NUMBER_OF_COLUMNS` | Maximum number of data column sidecars in a single request |
+| `MIN_EPOCHS_FOR_DATA_COLUMN_SIDECARS_REQUESTS` | `2**12` (= 4096 epochs, ~18 days) | The minimum epoch range over which a node must serve data column sidecars |
### Containers
@@ -68,19 +69,18 @@ class DataColumnIdentifier(Container):
```python
def verify_data_column_sidecar_kzg_proofs(sidecar: DataColumnSidecar) -> bool:
"""
- Verify if the proofs are correct
+ Verify if the proofs are correct.
"""
assert sidecar.index < NUMBER_OF_COLUMNS
assert len(sidecar.column) == len(sidecar.kzg_commitments) == len(sidecar.kzg_proofs)
- row_indices = [RowIndex(i) for i in range(len(sidecar.column))]
- column_indices = [sidecar.index] * len(sidecar.column)
+ # The column index also represents the cell index
+ cell_indices = [CellIndex(sidecar.index)] * len(sidecar.column)
- # KZG batch verifies that the cells match the corresponding commitments and proofs
+ # Batch verify that the cells match the corresponding commitments and proofs
return verify_cell_kzg_proof_batch(
- row_commitments_bytes=sidecar.kzg_commitments,
- row_indices=row_indices, # all rows
- column_indices=column_indices, # specific column
+ commitments_bytes=sidecar.kzg_commitments,
+ cell_indices=cell_indices,
cells=sidecar.column,
proofs_bytes=sidecar.kzg_proofs,
)
@@ -110,6 +110,24 @@ def compute_subnet_for_data_column_sidecar(column_index: ColumnIndex) -> SubnetI
return SubnetID(column_index % DATA_COLUMN_SIDECAR_SUBNET_COUNT)
```
+### MetaData
+
+The `MetaData` stored locally by clients is updated with an additional field to communicate the custody subnet count.
+
+```
+(
+ seq_number: uint64
+ attnets: Bitvector[ATTESTATION_SUBNET_COUNT]
+ syncnets: Bitvector[SYNC_COMMITTEE_SUBNET_COUNT]
+ custody_subnet_count: uint64
+)
+```
+
+Where
+
+- `seq_number`, `attnets`, and `syncnets` have the same meaning as defined in the Altair document.
+- `custody_subnet_count` represents the node's custody subnet count. Clients MAY reject peers with a value less than `CUSTODY_REQUIREMENT`.
+
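+For illustration, the extended `MetaData` could be expressed as an SSZ container as follows (a sketch; it assumes the `Container` and `Bitvector` types and the subnet-count constants from the existing pyspec are in scope):
+
+```python
+class MetaData(Container):
+    seq_number: uint64
+    attnets: Bitvector[ATTESTATION_SUBNET_COUNT]
+    syncnets: Bitvector[SYNC_COMMITTEE_SUBNET_COUNT]
+    custody_subnet_count: uint64
+```
+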
### The gossip domain: gossipsub
Some gossip meshes are upgraded in the EIP-7594 fork to support upgraded types.
@@ -161,8 +179,8 @@ The `<context-bytes>` field is calculated as `context = compute_fork_digest(fork
[1]: # (eth2spec: skip)
-| `fork_version` | Chunk SSZ type |
-| - | - |
+| `fork_version` | Chunk SSZ type |
+|------------------------|-----------------------------|
| `EIP7594_FORK_VERSION` | `eip7594.DataColumnSidecar` |
Request Content:
@@ -209,9 +227,9 @@ The `<context-bytes>` field is calculated as `context = compute_fork_digest(fork
[1]: # (eth2spec: skip)
-| `fork_version` | Chunk SSZ type |
-|--------------------------|-------------------------------|
-| `EIP7594_FORK_VERSION` | `eip7594.DataColumnSidecar` |
+| `fork_version` | Chunk SSZ type |
+|------------------------|-----------------------------|
+| `EIP7594_FORK_VERSION` | `eip7594.DataColumnSidecar` |
Request Content:
```
@@ -280,6 +298,22 @@ Clients MUST respond with data column sidecars that are consistent from a single
After the initial data column sidecar, clients MAY stop in the process of responding if their fork choice changes the view of the chain in the context of the request.
+##### GetMetaData v3
+
+**Protocol ID:** `/eth2/beacon_chain/req/metadata/3/`
+
+No Request Content.
+
+Response Content:
+
+```
+(
+ MetaData
+)
+```
+
+Requests the MetaData of a peer, using the new `MetaData` definition given above that is extended from Altair. Other conditions for the `GetMetaData` protocol are unchanged from the Altair p2p networking document.
+
### The discovery domain: discv5
#### ENR structure
@@ -288,6 +322,6 @@ After the initial data column sidecar, clients MAY stop in the process of respon
A new field is added to the ENR under the key `csc` to facilitate custody data column discovery.
-| Key | Value |
-|:------|:-----------------------------------------|
-| `csc` | Custody subnet count, big endian integer |
+| Key | Value |
+|--------|------------------------------------------|
+| `csc` | Custody subnet count, big endian integer |
diff --git a/specs/_features/eip7594/polynomial-commitments-sampling.md b/specs/_features/eip7594/polynomial-commitments-sampling.md
index 7be1a4a059..869f471844 100644
--- a/specs/_features/eip7594/polynomial-commitments-sampling.md
+++ b/specs/_features/eip7594/polynomial-commitments-sampling.md
@@ -9,7 +9,6 @@
- [Introduction](#introduction)
- [Public Methods](#public-methods)
- [Custom types](#custom-types)
-- [Constants](#constants)
- [Preset](#preset)
- [Cells](#cells)
- [Helper functions](#helper-functions)
@@ -22,6 +21,7 @@
- [`_fft_field`](#_fft_field)
- [`fft_field`](#fft_field)
- [`coset_fft_field`](#coset_fft_field)
+ - [`compute_verify_cell_kzg_proof_batch_challenge`](#compute_verify_cell_kzg_proof_batch_challenge)
- [Polynomials in coefficient form](#polynomials-in-coefficient-form)
- [`polynomial_eval_to_coeff`](#polynomial_eval_to_coeff)
- [`add_polynomialcoeff`](#add_polynomialcoeff)
@@ -33,18 +33,19 @@
- [`evaluate_polynomialcoeff`](#evaluate_polynomialcoeff)
- [KZG multiproofs](#kzg-multiproofs)
- [`compute_kzg_proof_multi_impl`](#compute_kzg_proof_multi_impl)
- - [`verify_kzg_proof_multi_impl`](#verify_kzg_proof_multi_impl)
+ - [`verify_cell_kzg_proof_batch_impl`](#verify_cell_kzg_proof_batch_impl)
- [Cell cosets](#cell-cosets)
+ - [`coset_shift_for_cell`](#coset_shift_for_cell)
- [`coset_for_cell`](#coset_for_cell)
- [Cells](#cells-1)
- [Cell computation](#cell-computation)
+ - [`compute_cells_and_kzg_proofs_polynomialcoeff`](#compute_cells_and_kzg_proofs_polynomialcoeff)
- [`compute_cells_and_kzg_proofs`](#compute_cells_and_kzg_proofs)
- [Cell verification](#cell-verification)
- - [`verify_cell_kzg_proof`](#verify_cell_kzg_proof)
- [`verify_cell_kzg_proof_batch`](#verify_cell_kzg_proof_batch)
- [Reconstruction](#reconstruction)
- [`construct_vanishing_polynomial`](#construct_vanishing_polynomial)
- - [`recover_data`](#recover_data)
+ - [`recover_polynomialcoeff`](#recover_polynomialcoeff)
- [`recover_cells_and_kzg_proofs`](#recover_cells_and_kzg_proofs)
@@ -63,7 +64,6 @@ Public functions MUST accept raw bytes as input and perform the required cryptog
The following is a list of the public methods:
- [`compute_cells_and_kzg_proofs`](#compute_cells_and_kzg_proofs)
-- [`verify_cell_kzg_proof`](#verify_cell_kzg_proof)
- [`verify_cell_kzg_proof_batch`](#verify_cell_kzg_proof_batch)
- [`recover_cells_and_kzg_proofs`](#recover_cells_and_kzg_proofs)
@@ -76,11 +76,7 @@ The following is a list of the public methods:
| `CosetEvals` | `Vector[BLSFieldElement, FIELD_ELEMENTS_PER_CELL]` | The internal representation of a cell (the evaluations over its Coset) |
| `Cell` | `ByteVector[BYTES_PER_FIELD_ELEMENT * FIELD_ELEMENTS_PER_CELL]` | The unit of blob data that can come with its own KZG proof |
| `CellIndex` | `uint64` | Validation: `x < CELLS_PER_EXT_BLOB` |
-
-## Constants
-
-| Name | Value | Notes |
-| - | - | - |
+| `CommitmentIndex` | `uint64` | The type which represents the index of an element in the list of commitments |
## Preset
@@ -193,17 +189,17 @@ def coset_fft_field(vals: Sequence[BLSFieldElement],
roots_of_unity: Sequence[BLSFieldElement],
inv: bool=False) -> Sequence[BLSFieldElement]:
"""
- Computes an FFT/IFFT over a coset of the roots of unity.
- This is useful for when one wants to divide by a polynomial which
+ Computes an FFT/IFFT over a coset of the roots of unity.
+ This is useful for when one wants to divide by a polynomial which
vanishes on one or more elements in the domain.
"""
vals = vals.copy()
-
+
def shift_vals(vals: Sequence[BLSFieldElement], factor: BLSFieldElement) -> Sequence[BLSFieldElement]:
- """
- Multiply each entry in `vals` by succeeding powers of `factor`
- i.e., [vals[0] * factor^0, vals[1] * factor^1, ..., vals[n] * factor^n]
- """
+ """
+ Multiply each entry in `vals` by succeeding powers of `factor`
+ i.e., [vals[0] * factor^0, vals[1] * factor^1, ..., vals[n] * factor^n]
+ """
shift = 1
for i in range(len(vals)):
vals[i] = BLSFieldElement((int(vals[i]) * shift) % BLS_MODULUS)
@@ -222,6 +218,36 @@ def coset_fft_field(vals: Sequence[BLSFieldElement],
return fft_field(vals, roots_of_unity, inv)
```
+#### `compute_verify_cell_kzg_proof_batch_challenge`
+
+```python
+def compute_verify_cell_kzg_proof_batch_challenge(commitments: Sequence[KZGCommitment],
+ commitment_indices: Sequence[CommitmentIndex],
+ cell_indices: Sequence[CellIndex],
+ cosets_evals: Sequence[CosetEvals],
+ proofs: Sequence[KZGProof]) -> BLSFieldElement:
+ """
+ Compute a random challenge ``r`` used in the universal verification equation. To compute the
+ challenge, ``RANDOM_CHALLENGE_KZG_CELL_BATCH_DOMAIN`` and all data that can influence the
+ verification is hashed together to deterministically generate a "random" field element via
+ the Fiat-Shamir heuristic.
+ """
+ hashinput = RANDOM_CHALLENGE_KZG_CELL_BATCH_DOMAIN
+ hashinput += int.to_bytes(FIELD_ELEMENTS_PER_BLOB, 8, KZG_ENDIANNESS)
+ hashinput += int.to_bytes(FIELD_ELEMENTS_PER_CELL, 8, KZG_ENDIANNESS)
+ hashinput += int.to_bytes(len(commitments), 8, KZG_ENDIANNESS)
+ hashinput += int.to_bytes(len(cell_indices), 8, KZG_ENDIANNESS)
+ for commitment in commitments:
+ hashinput += commitment
+ for k, coset_evals in enumerate(cosets_evals):
+ hashinput += int.to_bytes(commitment_indices[k], 8, KZG_ENDIANNESS)
+ hashinput += int.to_bytes(cell_indices[k], 8, KZG_ENDIANNESS)
+ for coset_eval in coset_evals:
+ hashinput += bls_field_to_bytes(coset_eval)
+ hashinput += proofs[k]
+ return hash_to_bls_field(hashinput)
+```
+
### Polynomials in coefficient form
#### `polynomial_eval_to_coeff`
@@ -362,12 +388,12 @@ def compute_kzg_proof_multi_impl(
"""
Compute a KZG multi-evaluation proof for a set of `k` points.
- This is done by committing to the following quotient polynomial:
+ This is done by committing to the following quotient polynomial:
Q(X) = f(X) - I(X) / Z(X)
Where:
- I(X) is the degree `k-1` polynomial that agrees with f(x) at all `k` points
- Z(X) is the degree `k` polynomial that evaluates to zero on all `k` points
-
+
We further note that since the degree of I(X) is less than the degree of Z(X),
the computation can be simplified in monomial form to Q(X) = f(X) / Z(X)
"""
@@ -384,53 +410,139 @@ def compute_kzg_proof_multi_impl(
return KZGProof(g1_lincomb(KZG_SETUP_G1_MONOMIAL[:len(quotient_polynomial)], quotient_polynomial)), ys
```
-#### `verify_kzg_proof_multi_impl`
+#### `verify_cell_kzg_proof_batch_impl`
```python
-def verify_kzg_proof_multi_impl(commitment: KZGCommitment,
- zs: Coset,
- ys: CosetEvals,
- proof: KZGProof) -> bool:
- """
- Verify a KZG multi-evaluation proof for a set of `k` points.
-
- This is done by checking if the following equation holds:
- Q(x) Z(x) = f(X) - I(X)
- Where:
- f(X) is the polynomial that we want to verify opens at `k` points to `k` values
- Q(X) is the quotient polynomial computed by the prover
- I(X) is the degree k-1 polynomial that evaluates to `ys` at all `zs`` points
- Z(X) is the polynomial that evaluates to zero on all `k` points
-
- The verifier receives the commitments to Q(X) and f(X), so they check the equation
- holds by using the following pairing equation:
- e([Q(X)]_1, [Z(X)]_2) == e([f(X)]_1 - [I(X)]_1, [1]_2)
- """
-
- assert len(zs) == len(ys)
-
- # Compute [Z(X)]_2
- zero_poly = g2_lincomb(KZG_SETUP_G2_MONOMIAL[:len(zs) + 1], vanishing_polynomialcoeff(zs))
- # Compute [I(X)]_1
- interpolated_poly = g1_lincomb(KZG_SETUP_G1_MONOMIAL[:len(zs)], interpolate_polynomialcoeff(zs, ys))
-
+def verify_cell_kzg_proof_batch_impl(commitments: Sequence[KZGCommitment],
+ commitment_indices: Sequence[CommitmentIndex],
+ cell_indices: Sequence[CellIndex],
+ cosets_evals: Sequence[CosetEvals],
+ proofs: Sequence[KZGProof]) -> bool:
+ """
+ Helper: Verify that a set of cells belong to their corresponding commitment.
+
+ Given a list of ``commitments`` (which contains no duplicates) and four lists representing
+ tuples of (``commitment_index``, ``cell_index``, ``evals``, ``proof``), the function
+ verifies ``proof`` which shows that ``evals`` are the evaluations of the polynomial associated
+ with ``commitments[commitment_index]``, evaluated over the domain specified by ``cell_index``.
+
+ This function is the internal implementation of ``verify_cell_kzg_proof_batch``.
+ """
+ assert len(commitment_indices) == len(cell_indices) == len(cosets_evals) == len(proofs)
+ assert len(commitments) == len(set(commitments))
+ for commitment_index in commitment_indices:
+ assert commitment_index < len(commitments)
+
+ # The verification equation that we will check is pairing (LL, LR) = pairing (RL, [1]), where
+ # LL = sum_k r^k proofs[k],
+ # LR = [s^n]
+ # RL = RLC - RLI + RLP, where
+ # RLC = sum_i weights[i] commitments[i]
+ # RLI = [sum_k r^k interpolation_poly_k(s)]
+ # RLP = sum_k (r^k * h_k^n) proofs[k]
+ #
+ # Here, the variables have the following meaning:
+ # - k < len(cell_indices) is an index iterating over all cells in the input
+ # - r is a random coefficient, derived from hashing all data provided by the prover
+ # - s is the secret embedded in the KZG setup
+ # - n = FIELD_ELEMENTS_PER_CELL is the size of the evaluation domain
+ # - i ranges over all provided commitments
+ # - weights[i] is a weight computed for commitment i
+ # - It depends on r and on which cells are associated with commitment i
+ # - interpolation_poly_k is the interpolation polynomial for the kth cell
+ # - h_k is the coset shift specifying the evaluation domain of the kth cell
+
+ # Preparation
+ num_cells = len(cell_indices)
+ n = FIELD_ELEMENTS_PER_CELL
+ num_commitments = len(commitments)
+
+ # Step 1: Compute a challenge r and its powers r^0, ..., r^{num_cells-1}
+ r = compute_verify_cell_kzg_proof_batch_challenge(
+ commitments,
+ commitment_indices,
+ cell_indices,
+ cosets_evals,
+ proofs
+ )
+ r_powers = compute_powers(r, num_cells)
+
+ # Step 2: Compute LL = sum_k r^k proofs[k]
+ ll = bls.bytes48_to_G1(g1_lincomb(proofs, r_powers))
+
+ # Step 3: Compute LR = [s^n]
+ lr = bls.bytes96_to_G2(KZG_SETUP_G2_MONOMIAL[n])
+
+ # Step 4: Compute RL = RLC - RLI + RLP
+ # Step 4.1: Compute RLC = sum_i weights[i] commitments[i]
+ # Step 4.1a: Compute weights[i]: the sum of all r^k for which cell k is associated with commitment i.
+ # Note: we do that by iterating over all k and updating the correct weights[i] accordingly
+ weights = [0] * num_commitments
+ for k in range(num_cells):
+ i = commitment_indices[k]
+ weights[i] = (weights[i] + int(r_powers[k])) % BLS_MODULUS
+ # Step 4.1b: Linearly combine the weights with the commitments to get RLC
+ rlc = bls.bytes48_to_G1(g1_lincomb(commitments, weights))
+
+ # Step 4.2: Compute RLI = [sum_k r^k interpolation_poly_k(s)]
+ # Note: an efficient implementation would use the IDFT based method explained in the blog post
+ sum_interp_polys_coeff = [0] * n
+ for k in range(num_cells):
+ interp_poly_coeff = interpolate_polynomialcoeff(coset_for_cell(cell_indices[k]), cosets_evals[k])
+ interp_poly_scaled_coeff = multiply_polynomialcoeff([r_powers[k]], interp_poly_coeff)
+ sum_interp_polys_coeff = add_polynomialcoeff(sum_interp_polys_coeff, interp_poly_scaled_coeff)
+ rli = bls.bytes48_to_G1(g1_lincomb(KZG_SETUP_G1_MONOMIAL[:n], sum_interp_polys_coeff))
+
+ # Step 4.3: Compute RLP = sum_k (r^k * h_k^n) proofs[k]
+ weighted_r_powers = []
+ for k in range(num_cells):
+ h_k = int(coset_shift_for_cell(cell_indices[k]))
+ h_k_pow = pow(h_k, n, BLS_MODULUS)
+ wrp = (int(r_powers[k]) * h_k_pow) % BLS_MODULUS
+ weighted_r_powers.append(wrp)
+ rlp = bls.bytes48_to_G1(g1_lincomb(proofs, weighted_r_powers))
+
+ # Step 4.4: Compute RL = RLC - RLI + RLP
+ rl = bls.add(rlc, bls.neg(rli))
+ rl = bls.add(rl, rlp)
+
+ # Step 5: Check pairing (LL, LR) = pairing (RL, [1])
return (bls.pairing_check([
- [bls.bytes48_to_G1(proof), bls.bytes96_to_G2(zero_poly)],
- [
- bls.add(bls.bytes48_to_G1(commitment), bls.neg(bls.bytes48_to_G1(interpolated_poly))),
- bls.neg(bls.bytes96_to_G2(KZG_SETUP_G2_MONOMIAL[0])),
- ],
+ [ll, lr],
+ [rl, bls.neg(bls.bytes96_to_G2(KZG_SETUP_G2_MONOMIAL[0]))],
]))
```
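+
+For reference, the pairing check implemented above can be written compactly as follows, using the same notation as the comments in the code:
+
+$$
+e\Big(\sum_k r^k \pi_k,\ [s^n]_2\Big) = e\Big(\mathrm{RLC} - \mathrm{RLI} + \mathrm{RLP},\ [1]_2\Big)
+$$
+
+where $\pi_k$ is the $k$-th proof, $\mathrm{RLC} = \sum_i \mathrm{weights}[i] \cdot C_i$, $\mathrm{RLI} = \big[\sum_k r^k I_k(s)\big]_1$ for the interpolation polynomials $I_k$, and $\mathrm{RLP} = \sum_k (r^k h_k^n) \pi_k$.
+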
### Cell cosets
+#### `coset_shift_for_cell`
+
+```python
+def coset_shift_for_cell(cell_index: CellIndex) -> BLSFieldElement:
+ """
+ Get the shift that determines the coset for a given ``cell_index``.
+ Precisely, consider the group of roots of unity of order FIELD_ELEMENTS_PER_CELL * CELLS_PER_EXT_BLOB.
+ Let G = {1, g, g^2, ...} denote its subgroup of order FIELD_ELEMENTS_PER_CELL.
+ Then, the coset is defined as h * G = {h, hg, hg^2, ...} for an element h.
+ This function returns h.
+ """
+ assert cell_index < CELLS_PER_EXT_BLOB
+ roots_of_unity_brp = bit_reversal_permutation(
+ compute_roots_of_unity(FIELD_ELEMENTS_PER_EXT_BLOB)
+ )
+ return roots_of_unity_brp[FIELD_ELEMENTS_PER_CELL * cell_index]
+```
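+
+As a sanity check on this definition (assuming this helper and `coset_for_cell`, defined next, are in scope): because `coset_for_cell` returns the slice of `roots_of_unity_brp` starting at `FIELD_ELEMENTS_PER_CELL * cell_index`, the shift is exactly the first element of the cell's coset.
+
+```python
+for index in range(CELLS_PER_EXT_BLOB):
+    # The coset slice begins at the shift element itself.
+    assert coset_for_cell(CellIndex(index))[0] == coset_shift_for_cell(CellIndex(index))
+```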
+
#### `coset_for_cell`
```python
def coset_for_cell(cell_index: CellIndex) -> Coset:
"""
Get the coset for a given ``cell_index``.
+ Precisely, consider the group of roots of unity of order FIELD_ELEMENTS_PER_CELL * CELLS_PER_EXT_BLOB.
+ Let G = {1, g, g^2, ...} denote its subgroup of order FIELD_ELEMENTS_PER_CELL.
+ Then, the coset is defined as h * G = {h, hg, hg^2, ...}.
+    This function returns the coset.
"""
assert cell_index < CELLS_PER_EXT_BLOB
roots_of_unity_brp = bit_reversal_permutation(
@@ -443,110 +555,92 @@ def coset_for_cell(cell_index: CellIndex) -> Coset:
### Cell computation
-#### `compute_cells_and_kzg_proofs`
+#### `compute_cells_and_kzg_proofs_polynomialcoeff`
```python
-def compute_cells_and_kzg_proofs(blob: Blob) -> Tuple[
+def compute_cells_and_kzg_proofs_polynomialcoeff(polynomial_coeff: PolynomialCoeff) -> Tuple[
Vector[Cell, CELLS_PER_EXT_BLOB],
Vector[KZGProof, CELLS_PER_EXT_BLOB]]:
"""
- Compute all the cell proofs for an extended blob. This is an inefficient O(n^2) algorithm,
- for performant implementation the FK20 algorithm that runs in O(n log n) should be
- used instead.
-
- Public method.
+ Helper function which computes cells/proofs for a polynomial in coefficient form.
"""
- assert len(blob) == BYTES_PER_BLOB
-
- polynomial = blob_to_polynomial(blob)
- polynomial_coeff = polynomial_eval_to_coeff(polynomial)
-
- cells = []
- proofs = []
-
+ cells, proofs = [], []
for i in range(CELLS_PER_EXT_BLOB):
coset = coset_for_cell(CellIndex(i))
proof, ys = compute_kzg_proof_multi_impl(polynomial_coeff, coset)
cells.append(coset_evals_to_cell(ys))
proofs.append(proof)
-
return cells, proofs
```
-### Cell verification
-
-#### `verify_cell_kzg_proof`
+#### `compute_cells_and_kzg_proofs`
```python
-def verify_cell_kzg_proof(commitment_bytes: Bytes48,
- cell_index: CellIndex,
- cell: Cell,
- proof_bytes: Bytes48) -> bool:
+def compute_cells_and_kzg_proofs(blob: Blob) -> Tuple[
+ Vector[Cell, CELLS_PER_EXT_BLOB],
+ Vector[KZGProof, CELLS_PER_EXT_BLOB]]:
"""
- Check a cell proof
+    Compute all the cell proofs for an extended blob. This is an inefficient O(n^2) algorithm;
+    for a performant implementation, the FK20 algorithm, which runs in O(n log n), should be
+    used instead.
Public method.
"""
- assert len(commitment_bytes) == BYTES_PER_COMMITMENT
- assert cell_index < CELLS_PER_EXT_BLOB
- assert len(cell) == BYTES_PER_CELL
- assert len(proof_bytes) == BYTES_PER_PROOF
-
- coset = coset_for_cell(cell_index)
-
- return verify_kzg_proof_multi_impl(
- bytes_to_kzg_commitment(commitment_bytes),
- coset,
- cell_to_coset_evals(cell),
- bytes_to_kzg_proof(proof_bytes))
+ assert len(blob) == BYTES_PER_BLOB
+
+ polynomial = blob_to_polynomial(blob)
+ polynomial_coeff = polynomial_eval_to_coeff(polynomial)
+ return compute_cells_and_kzg_proofs_polynomialcoeff(polynomial_coeff)
```
+### Cell verification
+
#### `verify_cell_kzg_proof_batch`
```python
-def verify_cell_kzg_proof_batch(row_commitments_bytes: Sequence[Bytes48],
- row_indices: Sequence[RowIndex],
- column_indices: Sequence[ColumnIndex],
+def verify_cell_kzg_proof_batch(commitments_bytes: Sequence[Bytes48],
+ cell_indices: Sequence[CellIndex],
cells: Sequence[Cell],
proofs_bytes: Sequence[Bytes48]) -> bool:
"""
- Verify a set of cells, given their corresponding proofs and their coordinates (row_index, column_index) in the blob
- matrix. The list of all commitments is also provided in row_commitments_bytes.
+ Verify that a set of cells belong to their corresponding commitments.
- This function implements the naive algorithm of checking every cell
- individually; an efficient algorithm can be found here:
- https://ethresear.ch/t/a-universal-verification-equation-for-data-availability-sampling/13240
+    Given four lists representing tuples of (``commitment``, ``cell_index``, ``cell``, ``proof``),
+    the function verifies each ``proof``, which shows that ``cell`` contains the evaluations of the
+    polynomial associated with ``commitment``, evaluated over the domain specified by ``cell_index``.
- This implementation does not require randomness, but for the algorithm that
- requires it, `RANDOM_CHALLENGE_KZG_CELL_BATCH_DOMAIN` should be used to compute
- the challenge value.
+ This function implements the universal verification equation that has been introduced here:
+ https://ethresear.ch/t/a-universal-verification-equation-for-data-availability-sampling/13240
Public method.
"""
- assert len(cells) == len(proofs_bytes) == len(row_indices) == len(column_indices)
- for commitment_bytes in row_commitments_bytes:
+
+ assert len(commitments_bytes) == len(cells) == len(proofs_bytes) == len(cell_indices)
+ for commitment_bytes in commitments_bytes:
assert len(commitment_bytes) == BYTES_PER_COMMITMENT
- for row_index in row_indices:
- assert row_index < len(row_commitments_bytes)
- for column_index in column_indices:
- assert column_index < CELLS_PER_EXT_BLOB
+ for cell_index in cell_indices:
+ assert cell_index < CELLS_PER_EXT_BLOB
for cell in cells:
assert len(cell) == BYTES_PER_CELL
for proof_bytes in proofs_bytes:
assert len(proof_bytes) == BYTES_PER_PROOF
- # Get commitments via row indices
- commitments_bytes = [row_commitments_bytes[row_index] for row_index in row_indices]
+ # Create the list of deduplicated commitments we are dealing with
+ deduplicated_commitments = [bytes_to_kzg_commitment(commitment_bytes)
+ for commitment_bytes in set(commitments_bytes)]
+ # Create indices list mapping initial commitments (that may contain duplicates) to the deduplicated commitments
+ commitment_indices = [deduplicated_commitments.index(commitment_bytes) for commitment_bytes in commitments_bytes]
- # Get objects from bytes
- commitments = [bytes_to_kzg_commitment(commitment_bytes) for commitment_bytes in commitments_bytes]
cosets_evals = [cell_to_coset_evals(cell) for cell in cells]
proofs = [bytes_to_kzg_proof(proof_bytes) for proof_bytes in proofs_bytes]
- return all(
- verify_kzg_proof_multi_impl(commitment, coset_for_cell(column_index), coset_evals, proof)
- for commitment, column_index, coset_evals, proof in zip(commitments, column_indices, cosets_evals, proofs)
- )
+ # Do the actual verification
+ return verify_cell_kzg_proof_batch_impl(
+ deduplicated_commitments,
+ commitment_indices,
+ cell_indices,
+ cosets_evals,
+ proofs)
```
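+
+The deduplication step above maps each (possibly repeated) commitment to an index into a short list of distinct commitments, so that the batched verification only handles each distinct commitment once. A minimal standalone illustration of that mapping (toy byte strings standing in for commitments, not spec code):
+
+```python
+commitments = [b"c0", b"c1", b"c0", b"c2", b"c1"]
+deduplicated = list(set(commitments))
+indices = [deduplicated.index(c) for c in commitments]
+# Every original commitment is recoverable from the pair (deduplicated, indices)
+assert all(deduplicated[i] == c for i, c in zip(indices, commitments))
+```
+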
## Reconstruction
@@ -558,7 +652,7 @@ def construct_vanishing_polynomial(missing_cell_indices: Sequence[CellIndex]) ->
"""
Given the cells indices that are missing from the data, compute the polynomial that vanishes at every point that
corresponds to a missing field element.
-
+
    This method assumes that not all of the cells are missing. If all cells were missing, the vanishing polynomial
    could simply be computed as Z(x) = x^n - 1, where `n` is FIELD_ELEMENTS_PER_EXT_BLOB.
@@ -582,21 +676,21 @@ def construct_vanishing_polynomial(missing_cell_indices: Sequence[CellIndex]) ->
return zero_poly_coeff
```
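+
+As a toy check of the `Z(x) = x^n - 1` fact mentioned in the docstring (an illustration, not spec code): over GF(17), the 4th roots of unity are the powers of 4, and `x^4 - 1` vanishes at every one of them.
+
+```python
+p = 17
+roots = [pow(4, k, p) for k in range(4)]  # [1, 4, 16, 13], the 4th roots of unity mod 17
+# Z(x) = x^4 - 1 evaluates to zero at every root of unity
+assert all((pow(x, 4, p) - 1) % p == 0 for x in roots)
+```
+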
-### `recover_data`
+### `recover_polynomialcoeff`
```python
-def recover_data(cell_indices: Sequence[CellIndex],
- cells: Sequence[Cell],
- ) -> Sequence[BLSFieldElement]:
+def recover_polynomialcoeff(cell_indices: Sequence[CellIndex],
+ cells: Sequence[Cell]) -> Sequence[BLSFieldElement]:
"""
- Recover the missing evaluations for the extended blob, given at least half of the evaluations.
+ Recover the polynomial in coefficient form that when evaluated at the roots of unity will give the extended blob.
"""
-
# Get the extended domain. This will be referred to as the FFT domain.
roots_of_unity_extended = compute_roots_of_unity(FIELD_ELEMENTS_PER_EXT_BLOB)
- # Flatten the cells into evaluations.
+ # Flatten the cells into evaluations
# If a cell is missing, then its evaluation is zero.
+ # We let E(x) be a polynomial of degree FIELD_ELEMENTS_PER_EXT_BLOB - 1
+ # that interpolates the evaluations including the zeros for missing ones.
extended_evaluation_rbo = [0] * FIELD_ELEMENTS_PER_EXT_BLOB
for cell_index, cell in zip(cell_indices, cells):
start = cell_index * FIELD_ELEMENTS_PER_CELL
@@ -604,8 +698,8 @@ def recover_data(cell_indices: Sequence[CellIndex],
extended_evaluation_rbo[start:end] = cell
extended_evaluation = bit_reversal_permutation(extended_evaluation_rbo)
- # Compute Z(x) in monomial form
- # Z(x) is the polynomial which vanishes on all of the evaluations which are missing
+ # Compute the vanishing polynomial Z(x) in coefficient form.
+ # Z(x) is the polynomial which vanishes on all of the evaluations which are missing.
missing_cell_indices = [CellIndex(cell_index) for cell_index in range(CELLS_PER_EXT_BLOB)
if cell_index not in cell_indices]
zero_poly_coeff = construct_vanishing_polynomial(missing_cell_indices)
@@ -614,43 +708,44 @@ def recover_data(cell_indices: Sequence[CellIndex],
zero_poly_eval = fft_field(zero_poly_coeff, roots_of_unity_extended)
# Compute (E*Z)(x) = E(x) * Z(x) in evaluation form over the FFT domain
+ # Note: over the FFT domain, the polynomials (E*Z)(x) and (P*Z)(x) agree, where
+ # P(x) is the polynomial we want to reconstruct (degree FIELD_ELEMENTS_PER_BLOB - 1).
extended_evaluation_times_zero = [BLSFieldElement(int(a) * int(b) % BLS_MODULUS)
for a, b in zip(zero_poly_eval, extended_evaluation)]
- # Convert (E*Z)(x) to monomial form
+ # We know that (E*Z)(x) and (P*Z)(x) agree over the FFT domain,
+ # and we know that (P*Z)(x) has degree at most FIELD_ELEMENTS_PER_EXT_BLOB - 1.
+ # Thus, an inverse FFT of the evaluations of (E*Z)(x) (= evaluations of (P*Z)(x))
+ # yields the coefficient form of (P*Z)(x).
extended_evaluation_times_zero_coeffs = fft_field(extended_evaluation_times_zero, roots_of_unity_extended, inv=True)
- # Convert (E*Z)(x) to evaluation form over a coset of the FFT domain
+ # Next step is to divide the polynomial (P*Z)(x) by polynomial Z(x) to get P(x).
+ # We do this in evaluation form over a coset of the FFT domain to avoid division by 0.
+
+ # Convert (P*Z)(x) to evaluation form over a coset of the FFT domain
extended_evaluations_over_coset = coset_fft_field(extended_evaluation_times_zero_coeffs, roots_of_unity_extended)
# Convert Z(x) to evaluation form over a coset of the FFT domain
zero_poly_over_coset = coset_fft_field(zero_poly_coeff, roots_of_unity_extended)
- # Compute Q_3(x) = (E*Z)(x) / Z(x) in evaluation form over a coset of the FFT domain
- reconstructed_poly_over_coset = [
- div(a, b)
- for a, b in zip(extended_evaluations_over_coset, zero_poly_over_coset)
- ]
+ # Compute P(x) = (P*Z)(x) / Z(x) in evaluation form over a coset of the FFT domain
+ reconstructed_poly_over_coset = [div(a, b) for a, b in zip(extended_evaluations_over_coset, zero_poly_over_coset)]
- # Convert Q_3(x) to monomial form
+ # Convert P(x) to coefficient form
reconstructed_poly_coeff = coset_fft_field(reconstructed_poly_over_coset, roots_of_unity_extended, inv=True)
- # Convert Q_3(x) to evaluation form over the FFT domain and bit reverse the result
- reconstructed_data = bit_reversal_permutation(fft_field(reconstructed_poly_coeff, roots_of_unity_extended))
-
- return reconstructed_data
+ return reconstructed_poly_coeff[:FIELD_ELEMENTS_PER_BLOB]
```
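+
+The key identity used above can be checked in miniature (a toy sketch, not spec code): multiplying by `Z(x)` zeroes out exactly the positions where `E` and `P` disagree, so `(E*Z)(x)` and `(P*Z)(x)` agree on the whole evaluation domain.
+
+```python
+p = 17
+domain = [1, 4, 16, 13]  # 4th roots of unity mod 17
+x0 = 4                   # the "missing" evaluation point
+P = lambda x: (3 + 5 * x) % p          # the polynomial to reconstruct
+E = lambda x: 0 if x == x0 else P(x)   # evaluations with the missing point zeroed
+Z = lambda x: (x - x0) % p             # vanishing polynomial for the missing point
+assert all(E(x) * Z(x) % p == P(x) * Z(x) % p for x in domain)
+```
+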
### `recover_cells_and_kzg_proofs`
```python
def recover_cells_and_kzg_proofs(cell_indices: Sequence[CellIndex],
- cells: Sequence[Cell],
- proofs_bytes: Sequence[Bytes48]) -> Tuple[
+ cells: Sequence[Cell]) -> Tuple[
Vector[Cell, CELLS_PER_EXT_BLOB],
Vector[KZGProof, CELLS_PER_EXT_BLOB]]:
"""
- Given at least 50% of cells/proofs for a blob, recover all the cells/proofs.
+ Given at least 50% of cells for a blob, recover all the cells/proofs.
    This algorithm uses FFTs to recover cells faster than a Lagrange interpolation
    implementation, as can be seen here:
https://ethresear.ch/t/reed-solomon-erasure-code-recovery-in-n-log-2-n-time-with-ffts/3039
@@ -660,7 +755,8 @@ def recover_cells_and_kzg_proofs(cell_indices: Sequence[CellIndex],
Public method.
"""
- assert len(cell_indices) == len(cells) == len(proofs_bytes)
+ # Check we have the same number of cells and indices
+ assert len(cell_indices) == len(cells)
# Check we have enough cells to be able to perform the reconstruction
assert CELLS_PER_EXT_BLOB / 2 <= len(cell_indices) <= CELLS_PER_EXT_BLOB
# Check for duplicates
@@ -671,35 +767,13 @@ def recover_cells_and_kzg_proofs(cell_indices: Sequence[CellIndex],
# Check that each cell is the correct length
for cell in cells:
assert len(cell) == BYTES_PER_CELL
- # Check that each proof is the correct length
- for proof_bytes in proofs_bytes:
- assert len(proof_bytes) == BYTES_PER_PROOF
- # Convert cells to coset evals
+ # Convert cells to coset evaluations
cosets_evals = [cell_to_coset_evals(cell) for cell in cells]
- reconstructed_data = recover_data(cell_indices, cosets_evals)
+ # Given the coset evaluations, recover the polynomial in coefficient form
+ polynomial_coeff = recover_polynomialcoeff(cell_indices, cosets_evals)
- for cell_index, coset_evals in zip(cell_indices, cosets_evals):
- start = cell_index * FIELD_ELEMENTS_PER_CELL
- end = (cell_index + 1) * FIELD_ELEMENTS_PER_CELL
- assert reconstructed_data[start:end] == coset_evals
-
- recovered_cells = [
- coset_evals_to_cell(reconstructed_data[i * FIELD_ELEMENTS_PER_CELL:(i + 1) * FIELD_ELEMENTS_PER_CELL])
- for i in range(CELLS_PER_EXT_BLOB)]
-
- polynomial_eval = reconstructed_data[:FIELD_ELEMENTS_PER_BLOB]
- polynomial_coeff = polynomial_eval_to_coeff(polynomial_eval)
- recovered_proofs = [None] * CELLS_PER_EXT_BLOB
- for i, cell_index in enumerate(cell_indices):
- recovered_proofs[cell_index] = bytes_to_kzg_proof(proofs_bytes[i])
- for i in range(CELLS_PER_EXT_BLOB):
- if recovered_proofs[i] is None:
- coset = coset_for_cell(CellIndex(i))
- proof, ys = compute_kzg_proof_multi_impl(polynomial_coeff, coset)
- assert coset_evals_to_cell(ys) == recovered_cells[i]
- recovered_proofs[i] = proof
-
- return recovered_cells, recovered_proofs
+ # Recompute all cells/proofs
+ return compute_cells_and_kzg_proofs_polynomialcoeff(polynomial_coeff)
```
diff --git a/specs/_features/eip7732/beacon-chain.md b/specs/_features/eip7732/beacon-chain.md
new file mode 100644
index 0000000000..fdcb2b3008
--- /dev/null
+++ b/specs/_features/eip7732/beacon-chain.md
@@ -0,0 +1,716 @@
+# EIP-7732 -- The Beacon Chain
+
+## Table of contents
+
+
+
+
+
+- [Introduction](#introduction)
+- [Constants](#constants)
+ - [Payload status](#payload-status)
+- [Preset](#preset)
+ - [Misc](#misc)
+ - [Domain types](#domain-types)
+ - [Max operations per block](#max-operations-per-block)
+- [Containers](#containers)
+ - [New containers](#new-containers)
+ - [`PayloadAttestationData`](#payloadattestationdata)
+ - [`PayloadAttestation`](#payloadattestation)
+ - [`PayloadAttestationMessage`](#payloadattestationmessage)
+ - [`IndexedPayloadAttestation`](#indexedpayloadattestation)
+ - [`SignedExecutionPayloadHeader`](#signedexecutionpayloadheader)
+ - [`ExecutionPayloadEnvelope`](#executionpayloadenvelope)
+ - [`SignedExecutionPayloadEnvelope`](#signedexecutionpayloadenvelope)
+ - [Modified containers](#modified-containers)
+ - [`BeaconBlockBody`](#beaconblockbody)
+ - [`ExecutionPayloadHeader`](#executionpayloadheader)
+ - [`BeaconState`](#beaconstate)
+- [Helper functions](#helper-functions)
+ - [Math](#math)
+ - [`bit_floor`](#bit_floor)
+ - [Misc](#misc-1)
+ - [`remove_flag`](#remove_flag)
+ - [Predicates](#predicates)
+ - [`is_valid_indexed_payload_attestation`](#is_valid_indexed_payload_attestation)
+ - [`is_parent_block_full`](#is_parent_block_full)
+ - [Beacon State accessors](#beacon-state-accessors)
+ - [`get_ptc`](#get_ptc)
+ - [Modified `get_attesting_indices`](#modified-get_attesting_indices)
+ - [`get_payload_attesting_indices`](#get_payload_attesting_indices)
+ - [`get_indexed_payload_attestation`](#get_indexed_payload_attestation)
+- [Beacon chain state transition function](#beacon-chain-state-transition-function)
+ - [Block processing](#block-processing)
+ - [Withdrawals](#withdrawals)
+ - [Modified `process_withdrawals`](#modified-process_withdrawals)
+ - [Execution payload header](#execution-payload-header)
+ - [New `verify_execution_payload_header_signature`](#new-verify_execution_payload_header_signature)
+ - [New `process_execution_payload_header`](#new-process_execution_payload_header)
+ - [Operations](#operations)
+ - [Modified `process_operations`](#modified-process_operations)
+ - [Payload Attestations](#payload-attestations)
+ - [`process_payload_attestation`](#process_payload_attestation)
+ - [Modified `process_execution_payload`](#modified-process_execution_payload)
+ - [New `verify_execution_payload_envelope_signature`](#new-verify_execution_payload_envelope_signature)
+ - [Modified `is_merge_transition_complete`](#modified-is_merge_transition_complete)
+ - [Modified `validate_merge_block`](#modified-validate_merge_block)
+
+
+
+
+## Introduction
+
+This is the beacon chain specification of the enshrined proposer builder separation feature.
+
+*Note:* This specification is built upon [Electra](../../electra/beacon-chain.md) and is under active development.
+
+This feature adds new staked consensus participants called *Builders* and new honest validator duties called *payload timeliness attestations*. The slot is divided into **four** intervals. Honest validators gather *signed bids* (a `SignedExecutionPayloadHeader`) from builders and submit their consensus blocks (a `SignedBeaconBlock`) including these bids at the beginning of the slot. At the start of the second interval, honest validators submit attestations just as they did before this feature. At the start of the third interval, aggregators aggregate these attestations and the builder broadcasts either a full payload or a message indicating that they are withholding the payload (a `SignedExecutionPayloadEnvelope`). At the start of the fourth interval, some validators selected to be members of the new **Payload Timeliness Committee** (PTC) attest to the presence and timeliness of the builder's payload.
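+
+As an illustration of this timeline, a minimal sketch (assuming the mainnet value `SECONDS_PER_SLOT = 12`; `INTERVALS_PER_SLOT = 4` is defined in the accompanying fork choice spec):
+
+```python
+SECONDS_PER_SLOT = 12   # assumed mainnet preset value
+INTERVALS_PER_SLOT = 4  # see the EIP-7732 fork choice constants
+# Seconds into the slot at which each interval begins: block proposal,
+# attestations, aggregation + payload reveal, and PTC attestations.
+boundaries = [i * SECONDS_PER_SLOT // INTERVALS_PER_SLOT for i in range(INTERVALS_PER_SLOT)]
+assert boundaries == [0, 3, 6, 9]
+```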
+
+At any given slot, the status of the blockchain's head may be either
+- A block from a previous slot (e.g. the current slot's proposer did not submit its block).
+- An *empty* block from the current slot (e.g. the proposer submitted a timely block, but the builder did not reveal the payload on time).
+- A full block for the current slot (both the proposer and the builder revealed on time).
+
+## Constants
+
+### Payload status
+
+| Name | Value |
+| - | - |
+| `PAYLOAD_ABSENT` | `uint8(0)` |
+| `PAYLOAD_PRESENT` | `uint8(1)` |
+| `PAYLOAD_WITHHELD` | `uint8(2)` |
+| `PAYLOAD_INVALID_STATUS` | `uint8(3)` |
+
+## Preset
+
+### Misc
+
+| Name | Value |
+| - | - |
+| `PTC_SIZE` | `uint64(2**9)` (=512) # (New in EIP-7732) |
+
+### Domain types
+
+| Name | Value |
+| - | - |
+| `DOMAIN_BEACON_BUILDER` | `DomainType('0x1B000000')` # (New in EIP-7732) |
+| `DOMAIN_PTC_ATTESTER` | `DomainType('0x0C000000')` # (New in EIP-7732) |
+
+### Max operations per block
+
+| Name | Value |
+| - | - |
+| `MAX_PAYLOAD_ATTESTATIONS` | `2**2` (= 4) # (New in EIP-7732) |
+
+## Containers
+
+### New containers
+
+#### `PayloadAttestationData`
+
+```python
+class PayloadAttestationData(Container):
+ beacon_block_root: Root
+ slot: Slot
+ payload_status: uint8
+```
+
+#### `PayloadAttestation`
+
+```python
+class PayloadAttestation(Container):
+ aggregation_bits: Bitvector[PTC_SIZE]
+ data: PayloadAttestationData
+ signature: BLSSignature
+```
+
+#### `PayloadAttestationMessage`
+
+```python
+class PayloadAttestationMessage(Container):
+ validator_index: ValidatorIndex
+ data: PayloadAttestationData
+ signature: BLSSignature
+```
+
+#### `IndexedPayloadAttestation`
+
+```python
+class IndexedPayloadAttestation(Container):
+ attesting_indices: List[ValidatorIndex, PTC_SIZE]
+ data: PayloadAttestationData
+ signature: BLSSignature
+```
+
+#### `SignedExecutionPayloadHeader`
+
+```python
+class SignedExecutionPayloadHeader(Container):
+ message: ExecutionPayloadHeader
+ signature: BLSSignature
+```
+
+#### `ExecutionPayloadEnvelope`
+
+```python
+class ExecutionPayloadEnvelope(Container):
+ payload: ExecutionPayload
+ builder_index: ValidatorIndex
+ beacon_block_root: Root
+ blob_kzg_commitments: List[KZGCommitment, MAX_BLOB_COMMITMENTS_PER_BLOCK]
+ payload_withheld: boolean
+ state_root: Root
+```
+
+#### `SignedExecutionPayloadEnvelope`
+
+```python
+class SignedExecutionPayloadEnvelope(Container):
+ message: ExecutionPayloadEnvelope
+ signature: BLSSignature
+```
+
+### Modified containers
+
+#### `BeaconBlockBody`
+
+**Note:** The Beacon Block body is modified to contain a `SignedExecutionPayloadHeader`. The containers `BeaconBlock` and `SignedBeaconBlock` are modified indirectly.
+
+```python
+class BeaconBlockBody(Container):
+ randao_reveal: BLSSignature
+ eth1_data: Eth1Data # Eth1 data vote
+ graffiti: Bytes32 # Arbitrary data
+ # Operations
+ proposer_slashings: List[ProposerSlashing, MAX_PROPOSER_SLASHINGS]
+ attester_slashings: List[AttesterSlashing, MAX_ATTESTER_SLASHINGS]
+ attestations: List[Attestation, MAX_ATTESTATIONS]
+ deposits: List[Deposit, MAX_DEPOSITS]
+ voluntary_exits: List[SignedVoluntaryExit, MAX_VOLUNTARY_EXITS]
+ sync_aggregate: SyncAggregate
+ # Execution
+ # Removed execution_payload [Removed in EIP-7732]
+ # Removed blob_kzg_commitments [Removed in EIP-7732]
+ bls_to_execution_changes: List[SignedBLSToExecutionChange, MAX_BLS_TO_EXECUTION_CHANGES]
+ # PBS
+ signed_execution_payload_header: SignedExecutionPayloadHeader # [New in EIP-7732]
+ payload_attestations: List[PayloadAttestation, MAX_PAYLOAD_ATTESTATIONS] # [New in EIP-7732]
+```
+
+#### `ExecutionPayloadHeader`
+
+**Note:** The `ExecutionPayloadHeader` is modified to only contain the block hash of the committed `ExecutionPayload` in addition to the builder's payment information, gas limit and KZG commitments root to verify the inclusion proofs.
+
+```python
+class ExecutionPayloadHeader(Container):
+ parent_block_hash: Hash32
+ parent_block_root: Root
+ block_hash: Hash32
+ gas_limit: uint64
+ builder_index: ValidatorIndex
+ slot: Slot
+ value: Gwei
+ blob_kzg_commitments_root: Root
+```
+
+#### `BeaconState`
+
+*Note*: The `BeaconState` is modified to track the last withdrawals honored in the CL. The `latest_execution_payload_header` is modified semantically: it no longer refers to a past committed `ExecutionPayload` but instead to the builder's bid for the state's current slot. The state also tracks the last committed block hash and the last slot that was *full*, that is, a slot in which both the consensus block and the execution payload were included.
+
+```python
+class BeaconState(Container):
+ # Versioning
+ genesis_time: uint64
+ genesis_validators_root: Root
+ slot: Slot
+ fork: Fork
+ # History
+ latest_block_header: BeaconBlockHeader
+ block_roots: Vector[Root, SLOTS_PER_HISTORICAL_ROOT]
+ state_roots: Vector[Root, SLOTS_PER_HISTORICAL_ROOT]
+ historical_roots: List[Root, HISTORICAL_ROOTS_LIMIT] # Frozen in Capella, replaced by historical_summaries
+ # Eth1
+ eth1_data: Eth1Data
+ eth1_data_votes: List[Eth1Data, EPOCHS_PER_ETH1_VOTING_PERIOD * SLOTS_PER_EPOCH]
+ eth1_deposit_index: uint64
+ # Registry
+ validators: List[Validator, VALIDATOR_REGISTRY_LIMIT]
+ balances: List[Gwei, VALIDATOR_REGISTRY_LIMIT]
+ # Randomness
+ randao_mixes: Vector[Bytes32, EPOCHS_PER_HISTORICAL_VECTOR]
+ # Slashings
+ slashings: Vector[Gwei, EPOCHS_PER_SLASHINGS_VECTOR] # Per-epoch sums of slashed effective balances
+ # Participation
+ previous_epoch_participation: List[ParticipationFlags, VALIDATOR_REGISTRY_LIMIT]
+ current_epoch_participation: List[ParticipationFlags, VALIDATOR_REGISTRY_LIMIT]
+ # Finality
+ justification_bits: Bitvector[JUSTIFICATION_BITS_LENGTH] # Bit set for every recent justified epoch
+ previous_justified_checkpoint: Checkpoint
+ current_justified_checkpoint: Checkpoint
+ finalized_checkpoint: Checkpoint
+ # Inactivity
+ inactivity_scores: List[uint64, VALIDATOR_REGISTRY_LIMIT]
+ # Sync
+ current_sync_committee: SyncCommittee
+ next_sync_committee: SyncCommittee
+ # Execution
+ latest_execution_payload_header: ExecutionPayloadHeader
+ # Withdrawals
+ next_withdrawal_index: WithdrawalIndex
+ next_withdrawal_validator_index: ValidatorIndex
+ # Deep history valid from Capella onwards
+ historical_summaries: List[HistoricalSummary, HISTORICAL_ROOTS_LIMIT]
+ deposit_requests_start_index: uint64
+ deposit_balance_to_consume: Gwei
+ exit_balance_to_consume: Gwei
+ earliest_exit_epoch: Epoch
+ consolidation_balance_to_consume: Gwei
+ earliest_consolidation_epoch: Epoch
+ pending_balance_deposits: List[PendingBalanceDeposit, PENDING_BALANCE_DEPOSITS_LIMIT]
+ pending_partial_withdrawals: List[PendingPartialWithdrawal, PENDING_PARTIAL_WITHDRAWALS_LIMIT]
+ pending_consolidations: List[PendingConsolidation, PENDING_CONSOLIDATIONS_LIMIT]
+ # PBS
+ latest_block_hash: Hash32 # [New in EIP-7732]
+ latest_full_slot: Slot # [New in EIP-7732]
+ latest_withdrawals_root: Root # [New in EIP-7732]
+```
+
+## Helper functions
+
+### Math
+
+#### `bit_floor`
+
+```python
+def bit_floor(n: uint64) -> uint64:
+ """
+    If ``n`` is not zero, returns the largest power of 2 that is not greater than ``n``; returns 0 otherwise.
+ """
+ if n == 0:
+ return 0
+ return uint64(1) << (n.bit_length() - 1)
+```
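+
+A few illustrative values (not part of the spec):
+
+```python
+assert bit_floor(uint64(0)) == 0    # zero maps to zero
+assert bit_floor(uint64(10)) == 8   # largest power of two not greater than 10
+assert bit_floor(uint64(16)) == 16  # exact powers of two map to themselves
+```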
+
+### Misc
+
+#### `remove_flag`
+
+```python
+def remove_flag(flags: ParticipationFlags, flag_index: int) -> ParticipationFlags:
+ flag = ParticipationFlags(2**flag_index)
+ return flags & ~flag
+```
+
+### Predicates
+
+#### `is_valid_indexed_payload_attestation`
+
+```python
+def is_valid_indexed_payload_attestation(
+ state: BeaconState,
+ indexed_payload_attestation: IndexedPayloadAttestation) -> bool:
+ """
+ Check if ``indexed_payload_attestation`` is not empty, has sorted and unique indices and has
+ a valid aggregate signature.
+ """
+ # Verify the data is valid
+ if indexed_payload_attestation.data.payload_status >= PAYLOAD_INVALID_STATUS:
+ return False
+
+ # Verify indices are sorted and unique
+ indices = indexed_payload_attestation.attesting_indices
+ if len(indices) == 0 or not indices == sorted(set(indices)):
+ return False
+
+ # Verify aggregate signature
+ pubkeys = [state.validators[i].pubkey for i in indices]
+ domain = get_domain(state, DOMAIN_PTC_ATTESTER, None)
+ signing_root = compute_signing_root(indexed_payload_attestation.data, domain)
+ return bls.FastAggregateVerify(pubkeys, signing_root, indexed_payload_attestation.signature)
+```
+
+#### `is_parent_block_full`
+
+This function returns `True` if the last committed payload header was fulfilled with a payload; this can only happen when both the beacon block and the payload were present. It must be called on a beacon state before processing the execution payload header in the block.
+
+```python
+def is_parent_block_full(state: BeaconState) -> bool:
+ return state.latest_execution_payload_header.block_hash == state.latest_block_hash
+```
+
+### Beacon State accessors
+
+#### `get_ptc`
+
+```python
+def get_ptc(state: BeaconState, slot: Slot) -> Vector[ValidatorIndex, PTC_SIZE]:
+ """
+ Get the payload timeliness committee for the given ``slot``
+ """
+ epoch = compute_epoch_at_slot(slot)
+ committees_per_slot = bit_floor(min(get_committee_count_per_slot(state, epoch), PTC_SIZE))
+ members_per_committee = PTC_SIZE // committees_per_slot
+
+ validator_indices: List[ValidatorIndex] = []
+ for idx in range(committees_per_slot):
+ beacon_committee = get_beacon_committee(state, slot, CommitteeIndex(idx))
+ validator_indices += beacon_committee[:members_per_committee]
+ return validator_indices
+```
+
+#### Modified `get_attesting_indices`
+
+`get_attesting_indices` is modified to ignore PTC votes.
+
+```python
+def get_attesting_indices(state: BeaconState, attestation: Attestation) -> Set[ValidatorIndex]:
+ """
+ Return the set of attesting indices corresponding to ``aggregation_bits`` and ``committee_bits``.
+ """
+ output: Set[ValidatorIndex] = set()
+ committee_indices = get_committee_indices(attestation.committee_bits)
+ committee_offset = 0
+ for index in committee_indices:
+ committee = get_beacon_committee(state, attestation.data.slot, index)
+ committee_attesters = set(
+ index for i, index in enumerate(committee) if attestation.aggregation_bits[committee_offset + i])
+ output = output.union(committee_attesters)
+ committee_offset += len(committee)
+
+ if compute_epoch_at_slot(attestation.data.slot) < EIP7732_FORK_EPOCH:
+ return output
+ ptc = get_ptc(state, attestation.data.slot)
+ return set(i for i in output if i not in ptc)
+```
+
+#### `get_payload_attesting_indices`
+
+```python
+def get_payload_attesting_indices(state: BeaconState, slot: Slot,
+ payload_attestation: PayloadAttestation) -> Set[ValidatorIndex]:
+ """
+ Return the set of attesting indices corresponding to ``payload_attestation``.
+ """
+ ptc = get_ptc(state, slot)
+ return set(index for i, index in enumerate(ptc) if payload_attestation.aggregation_bits[i])
+```
+
+#### `get_indexed_payload_attestation`
+
+```python
+def get_indexed_payload_attestation(state: BeaconState, slot: Slot,
+ payload_attestation: PayloadAttestation) -> IndexedPayloadAttestation:
+ """
+ Return the indexed payload attestation corresponding to ``payload_attestation``.
+ """
+ attesting_indices = get_payload_attesting_indices(state, slot, payload_attestation)
+
+ return IndexedPayloadAttestation(
+ attesting_indices=sorted(attesting_indices),
+ data=payload_attestation.data,
+ signature=payload_attestation.signature,
+ )
+```
+
+## Beacon chain state transition function
+
+*Note*: The state transition is fundamentally modified in EIP-7732. The full state transition is broken into two parts: first importing a signed block, and then importing an execution payload.
+
+The post-state corresponding to a pre-state `state` and a signed beacon block `signed_block` is defined as `state_transition(state, signed_block)`. State transitions that trigger an unhandled exception (e.g. a failed `assert` or an out-of-range list access) are considered invalid. State transitions that cause a `uint64` overflow or underflow are also considered invalid.
+
+The post-state corresponding to a pre-state `state` and a signed execution payload envelope `signed_envelope` is defined as `process_execution_payload(state, signed_envelope)`. State transitions that trigger an unhandled exception (e.g. a failed `assert` or an out-of-range list access) are considered invalid. State transitions that cause a `uint64` overflow or underflow are also considered invalid.
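+
+A minimal sketch of this two-part import (illustrative wiring only; `state_transition` and `process_execution_payload` are the spec functions, while the function name and the `EXECUTION_ENGINE` handle are assumptions):
+
+```python
+def import_slot(state: BeaconState,
+                signed_block: SignedBeaconBlock,
+                signed_envelope: SignedExecutionPayloadEnvelope) -> None:
+    # Part 1: import the signed beacon block (consensus objects)
+    state_transition(state, signed_block)
+    # Part 2: import the builder's execution payload envelope
+    process_execution_payload(state, signed_envelope, EXECUTION_ENGINE)
+```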
+
+### Block processing
+
+```python
+def process_block(state: BeaconState, block: BeaconBlock) -> None:
+ process_block_header(state, block)
+ process_withdrawals(state) # [Modified in EIP-7732]
+ process_execution_payload_header(state, block) # [Modified in EIP-7732, removed process_execution_payload]
+ process_randao(state, block.body)
+ process_eth1_data(state, block.body)
+ process_operations(state, block.body) # [Modified in EIP-7732]
+ process_sync_aggregate(state, block.body.sync_aggregate)
+```
+
+#### Withdrawals
+
+##### Modified `process_withdrawals`
+
+**Note:** This function is modified to take only `state` as a parameter. Withdrawals are deterministic given the beacon state; any execution payload that has the corresponding block as its parent beacon block is required to honor these withdrawals in the execution layer. This function must be called before `process_execution_payload_header`, as the latter affects validator balances.
+
+```python
+def process_withdrawals(state: BeaconState) -> None:
+ # return early if the parent block was empty
+ if not is_parent_block_full(state):
+ return
+
+ withdrawals, partial_withdrawals_count = get_expected_withdrawals(state)
+ withdrawals_list = List[Withdrawal, MAX_WITHDRAWALS_PER_PAYLOAD](withdrawals)
+ state.latest_withdrawals_root = hash_tree_root(withdrawals_list)
+ for withdrawal in withdrawals:
+ decrease_balance(state, withdrawal.validator_index, withdrawal.amount)
+
+ # Update pending partial withdrawals
+ state.pending_partial_withdrawals = state.pending_partial_withdrawals[partial_withdrawals_count:]
+
+ # Update the next withdrawal index if this block contained withdrawals
+ if len(withdrawals) != 0:
+ latest_withdrawal = withdrawals[-1]
+ state.next_withdrawal_index = WithdrawalIndex(latest_withdrawal.index + 1)
+
+ # Update the next validator index to start the next withdrawal sweep
+ if len(withdrawals) == MAX_WITHDRAWALS_PER_PAYLOAD:
+ # Next sweep starts after the latest withdrawal's validator index
+ next_validator_index = ValidatorIndex((withdrawals[-1].validator_index + 1) % len(state.validators))
+ state.next_withdrawal_validator_index = next_validator_index
+ else:
+ # Advance sweep by the max length of the sweep if there was not a full set of withdrawals
+ next_index = state.next_withdrawal_validator_index + MAX_VALIDATORS_PER_WITHDRAWALS_SWEEP
+ next_validator_index = ValidatorIndex(next_index % len(state.validators))
+ state.next_withdrawal_validator_index = next_validator_index
+```
+
+#### Execution payload header
+
+##### New `verify_execution_payload_header_signature`
+
+```python
+def verify_execution_payload_header_signature(state: BeaconState,
+ signed_header: SignedExecutionPayloadHeader) -> bool:
+ # Check the signature
+ builder = state.validators[signed_header.message.builder_index]
+ signing_root = compute_signing_root(signed_header.message, get_domain(state, DOMAIN_BEACON_BUILDER))
+ return bls.Verify(builder.pubkey, signing_root, signed_header.signature)
+```
+
+##### New `process_execution_payload_header`
+
+```python
+def process_execution_payload_header(state: BeaconState, block: BeaconBlock) -> None:
+ # Verify the header signature
+ signed_header = block.body.signed_execution_payload_header
+ assert verify_execution_payload_header_signature(state, signed_header)
+
+ # Check that the builder has funds to cover the bid
+ header = signed_header.message
+ builder_index = header.builder_index
+ amount = header.value
+ assert state.balances[builder_index] >= amount
+
+ # Verify that the bid is for the current slot
+ assert header.slot == block.slot
+ # Verify that the bid is for the right parent block
+ assert header.parent_block_hash == state.latest_block_hash
+ assert header.parent_block_root == block.parent_root
+
+ # Transfer the funds from the builder to the proposer
+ decrease_balance(state, builder_index, amount)
+ increase_balance(state, block.proposer_index, amount)
+
+ # Cache the signed execution payload header
+ state.latest_execution_payload_header = header
+```
+
+#### Operations
+
+##### Modified `process_operations`
+
+**Note:** `process_operations` is modified to process PTC attestations.
+
+```python
+def process_operations(state: BeaconState, body: BeaconBlockBody) -> None:
+ # Verify that outstanding deposits are processed up to the maximum number of deposits
+ assert len(body.deposits) == min(MAX_DEPOSITS, state.eth1_data.deposit_count - state.eth1_deposit_index)
+
+ def for_ops(operations: Sequence[Any], fn: Callable[[BeaconState, Any], None]) -> None:
+ for operation in operations:
+ fn(state, operation)
+
+ for_ops(body.proposer_slashings, process_proposer_slashing)
+ for_ops(body.attester_slashings, process_attester_slashing)
+ for_ops(body.attestations, process_attestation)
+ for_ops(body.deposits, process_deposit)
+ for_ops(body.voluntary_exits, process_voluntary_exit)
+ for_ops(body.bls_to_execution_changes, process_bls_to_execution_change)
+ # Removed `process_deposit_request` in EIP-7732
+ # Removed `process_withdrawal_request` in EIP-7732
+ # Removed `process_consolidation_request` in EIP-7732
+ for_ops(body.payload_attestations, process_payload_attestation) # [New in EIP-7732]
+```
+
+##### Payload Attestations
+
+###### `process_payload_attestation`
+
+```python
+def process_payload_attestation(state: BeaconState, payload_attestation: PayloadAttestation) -> None:
+ # Check that the attestation is for the parent beacon block
+ data = payload_attestation.data
+ assert data.beacon_block_root == state.latest_block_header.parent_root
+ # Check that the attestation is for the previous slot
+ assert data.slot + 1 == state.slot
+
+ # Verify signature
+ indexed_payload_attestation = get_indexed_payload_attestation(state, data.slot, payload_attestation)
+ assert is_valid_indexed_payload_attestation(state, indexed_payload_attestation)
+
+ if state.slot % SLOTS_PER_EPOCH == 0:
+ epoch_participation = state.previous_epoch_participation
+ else:
+ epoch_participation = state.current_epoch_participation
+
+ # Return early if the attestation is for the wrong payload status
+ payload_was_present = data.slot == state.latest_full_slot
+ voted_present = data.payload_status == PAYLOAD_PRESENT
+ proposer_reward_denominator = (WEIGHT_DENOMINATOR - PROPOSER_WEIGHT) * WEIGHT_DENOMINATOR // PROPOSER_WEIGHT
+ proposer_index = get_beacon_proposer_index(state)
+ if voted_present != payload_was_present:
+ # Unset the flags in case they were set by an equivocating ptc attestation
+ proposer_penalty_numerator = 0
+ for index in indexed_payload_attestation.attesting_indices:
+ for flag_index, weight in enumerate(PARTICIPATION_FLAG_WEIGHTS):
+ if has_flag(epoch_participation[index], flag_index):
+ epoch_participation[index] = remove_flag(epoch_participation[index], flag_index)
+ proposer_penalty_numerator += get_base_reward(state, index) * weight
+ # Penalize the proposer
+ proposer_penalty = Gwei(2 * proposer_penalty_numerator // proposer_reward_denominator)
+ decrease_balance(state, proposer_index, proposer_penalty)
+ return
+
+ # Reward the proposer and set all the participation flags in case of correct attestations
+ proposer_reward_numerator = 0
+ for index in indexed_payload_attestation.attesting_indices:
+ for flag_index, weight in enumerate(PARTICIPATION_FLAG_WEIGHTS):
+ if not has_flag(epoch_participation[index], flag_index):
+ epoch_participation[index] = add_flag(epoch_participation[index], flag_index)
+ proposer_reward_numerator += get_base_reward(state, index) * weight
+
+ # Reward proposer
+ proposer_reward = Gwei(proposer_reward_numerator // proposer_reward_denominator)
+ increase_balance(state, proposer_index, proposer_reward)
+```
+
+#### Modified `process_execution_payload`
+
+##### New `verify_execution_payload_envelope_signature`
+
+```python
+def verify_execution_payload_envelope_signature(
+ state: BeaconState, signed_envelope: SignedExecutionPayloadEnvelope) -> bool:
+ builder = state.validators[signed_envelope.message.builder_index]
+ signing_root = compute_signing_root(signed_envelope.message, get_domain(state, DOMAIN_BEACON_BUILDER))
+ return bls.Verify(builder.pubkey, signing_root, signed_envelope.signature)
+```
+
+*Note*: `process_execution_payload` is now an independent check in state transition. It is called when importing a signed execution payload proposed by the builder of the current slot.
+
+```python
+def process_execution_payload(state: BeaconState,
+ signed_envelope: SignedExecutionPayloadEnvelope,
+ execution_engine: ExecutionEngine, verify: bool = True) -> None:
+ # Verify signature
+ if verify:
+ assert verify_execution_payload_envelope_signature(state, signed_envelope)
+ envelope = signed_envelope.message
+ payload = envelope.payload
+ # Cache latest block header state root
+ previous_state_root = hash_tree_root(state)
+ if state.latest_block_header.state_root == Root():
+ state.latest_block_header.state_root = previous_state_root
+
+ # Verify consistency with the beacon block
+ assert envelope.beacon_block_root == hash_tree_root(state.latest_block_header)
+
+ # Verify consistency with the committed header
+ committed_header = state.latest_execution_payload_header
+ assert envelope.builder_index == committed_header.builder_index
+ assert committed_header.blob_kzg_commitments_root == hash_tree_root(envelope.blob_kzg_commitments)
+
+ if not envelope.payload_withheld:
+ # Verify the withdrawals root
+ assert hash_tree_root(payload.withdrawals) == state.latest_withdrawals_root
+
+ # Verify the gas_limit
+ assert committed_header.gas_limit == payload.gas_limit
+
+ assert committed_header.block_hash == payload.block_hash
+ # Verify consistency of the parent hash with respect to the previous execution payload
+ assert payload.parent_hash == state.latest_block_hash
+ # Verify prev_randao
+ assert payload.prev_randao == get_randao_mix(state, get_current_epoch(state))
+ # Verify timestamp
+ assert payload.timestamp == compute_timestamp_at_slot(state, state.slot)
+ # Verify commitments are under limit
+ assert len(envelope.blob_kzg_commitments) <= MAX_BLOBS_PER_BLOCK
+ # Verify the execution payload is valid
+ versioned_hashes = [kzg_commitment_to_versioned_hash(commitment)
+ for commitment in envelope.blob_kzg_commitments]
+ assert execution_engine.verify_and_notify_new_payload(
+ NewPayloadRequest(
+ execution_payload=payload,
+ versioned_hashes=versioned_hashes,
+ parent_beacon_block_root=state.latest_block_header.parent_root,
+ )
+ )
+
+ # Process Electra operations
+ def for_ops(operations: Sequence[Any], fn: Callable[[BeaconState, Any], None]) -> None:
+ for operation in operations:
+ fn(state, operation)
+
+ for_ops(payload.deposit_requests, process_deposit_request)
+ for_ops(payload.withdrawal_requests, process_withdrawal_request)
+    for_ops(payload.consolidation_requests, process_consolidation_request)
+
+ # Cache the execution payload header and proposer
+ state.latest_block_hash = payload.block_hash
+ state.latest_full_slot = state.slot
+
+ # Verify the state root
+ if verify:
+ assert envelope.state_root == hash_tree_root(state)
+```
+
+#### Modified `is_merge_transition_complete`
+
+`is_merge_transition_complete` is modified only for testing purposes: the default header it compares against now includes the blob KZG commitments root of an empty commitment list.
+
+```python
+def is_merge_transition_complete(state: BeaconState) -> bool:
+ header = ExecutionPayloadHeader()
+ kzgs = List[KZGCommitment, MAX_BLOB_COMMITMENTS_PER_BLOCK]()
+ header.blob_kzg_commitments_root = kzgs.hash_tree_root()
+
+ return state.latest_execution_payload_header != header
+```
+
+#### Modified `validate_merge_block`
+
+`validate_merge_block` is modified to use the new `signed_execution_payload_header` message in the Beacon Block Body.
+
+```python
+def validate_merge_block(block: BeaconBlock) -> None:
+ """
+ Check the parent PoW block of execution payload is a valid terminal PoW block.
+
+ Note: Unavailable PoW block(s) may later become available,
+ and a client software MAY delay a call to ``validate_merge_block``
+ until the PoW block(s) become available.
+ """
+ if TERMINAL_BLOCK_HASH != Hash32():
+ # If `TERMINAL_BLOCK_HASH` is used as an override, the activation epoch must be reached.
+ assert compute_epoch_at_slot(block.slot) >= TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH
+ assert block.body.signed_execution_payload_header.message.parent_block_hash == TERMINAL_BLOCK_HASH
+ return
+
+ # Modified in EIP-7732
+ pow_block = get_pow_block(block.body.signed_execution_payload_header.message.parent_block_hash)
+ # Check if `pow_block` is available
+ assert pow_block is not None
+ pow_parent = get_pow_block(pow_block.parent_hash)
+ # Check if `pow_parent` is available
+ assert pow_parent is not None
+ # Check if `pow_block` is a valid terminal PoW block
+ assert is_valid_terminal_pow_block(pow_block, pow_parent)
+```
diff --git a/specs/_features/eip7732/builder.md b/specs/_features/eip7732/builder.md
new file mode 100644
index 0000000000..491f625d89
--- /dev/null
+++ b/specs/_features/eip7732/builder.md
@@ -0,0 +1,128 @@
+# EIP-7732 -- Honest Builder
+
+This is an accompanying document which describes the expected actions of a "builder" participating in the Ethereum proof-of-stake protocol.
+
+
+
+
+- [Introduction](#introduction)
+- [Builders attributions](#builders-attributions)
+ - [Constructing the payload bid](#constructing-the-payload-bid)
+ - [Constructing the `BlobSidecar`s](#constructing-the-blobsidecars)
+ - [Constructing the execution payload envelope](#constructing-the-execution-payload-envelope)
+ - [Honest payload withheld messages](#honest-payload-withheld-messages)
+
+
+
+## Introduction
+
+With the EIP-7732 fork, the protocol includes new staked participants called *Builders*. While builders are a subset of the validator set, they have extra attributions that are optional. Validators may opt not to be builders, and so this document collects the guidelines for those validators that want to act as builders.
+
+## Builders attributions
+
+Builders can submit bids to produce execution payloads. They broadcast these bids in the form of `SignedExecutionPayloadHeader` objects; these objects encode a commitment to reveal an execution payload in exchange for a payment. When their bids are chosen by the corresponding proposer, builders are expected to broadcast an accompanying `SignedExecutionPayloadEnvelope` object honoring the commitment.
+
+Thus, builders' tasks are divided in two: submitting bids and submitting payloads.
+
+### Constructing the payload bid
+
+Builders can broadcast a payload bid for the current or the next slot's proposer to include. They produce a `SignedExecutionPayloadHeader` as follows (an illustrative sketch assembling the header appears after the list).
+
+1. Set `header.parent_block_hash` to the current head of the execution chain (this can be obtained from the beacon state as `state.latest_block_hash`).
+2. Set `header.parent_block_root` to be the head of the consensus chain (this can be obtained from the beacon state as `hash_tree_root(state.latest_block_header)`). The `parent_block_root` and `parent_block_hash` must be compatible, in the sense that they both should come from the same `state` by the method described in this and the previous point.
+3. Construct an execution payload. This can be performed with an external execution engine with a call to `engine_getPayloadV4`.
+4. Set `header.block_hash` to be the block hash of the constructed payload, that is `payload.block_hash`.
+5. Set `header.gas_limit` to be the gas limit of the constructed payload, that is `payload.gas_limit`.
+6. Set `header.builder_index` to be the validator index of the builder performing these actions.
+7. Set `header.slot` to be the slot for which this bid is aimed. This slot **MUST** be either the current slot or the next slot.
+8. Set `header.value` to be the value that the builder will pay the proposer if the bid is accepted. The builder **MUST** have balance enough to fulfill this bid.
+9. Set `header.blob_kzg_commitments_root` to be the `hash_tree_root` of the `blobsbundle.commitments` field returned by `engine_getPayloadV4`.
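+
+An illustrative sketch (not part of the spec) assembling the header from the steps above; `payload` and `bundle_commitments` are assumed to come from the `engine_getPayloadV4` response, while `builder_index`, `bid_slot` and `bid_value` are chosen by the builder:
+
+```python
+def build_header(state: BeaconState,
+                 payload: ExecutionPayload,
+                 bundle_commitments: Sequence[KZGCommitment],
+                 builder_index: ValidatorIndex,
+                 bid_slot: Slot,
+                 bid_value: Gwei) -> ExecutionPayloadHeader:
+    commitments = List[KZGCommitment, MAX_BLOB_COMMITMENTS_PER_BLOCK](bundle_commitments)
+    return ExecutionPayloadHeader(
+        parent_block_hash=state.latest_block_hash,                    # step 1
+        parent_block_root=hash_tree_root(state.latest_block_header),  # step 2
+        block_hash=payload.block_hash,                                # step 4
+        gas_limit=payload.gas_limit,                                  # step 5
+        builder_index=builder_index,                                  # step 6
+        slot=bid_slot,                                                # step 7
+        value=bid_value,                                              # step 8
+        blob_kzg_commitments_root=hash_tree_root(commitments),        # step 9
+    )
+```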
+
+After building the `header`, the builder obtains a `signature` of the header by using
+
+```python
+def get_execution_payload_header_signature(
+ state: BeaconState, header: ExecutionPayloadHeader, privkey: int) -> BLSSignature:
+ domain = get_domain(state, DOMAIN_BEACON_BUILDER, compute_epoch_at_slot(header.slot))
+ signing_root = compute_signing_root(header, domain)
+ return bls.Sign(privkey, signing_root)
+```
+
+The builder then assembles `signed_execution_payload_header = SignedExecutionPayloadHeader(message=header, signature=signature)` and broadcasts it on the `execution_payload_header` global gossip topic.
+
+### Constructing the `BlobSidecar`s
+
+[Modified in EIP-7732]
+
+The `BlobSidecar` container is modified indirectly because the constant `KZG_COMMITMENT_INCLUSION_PROOF_DEPTH` is modified. The function `get_blob_sidecars` is modified because the KZG commitments are no longer included in the beacon block but rather in the `ExecutionPayloadEnvelope`; the builder has to pass the commitments as a parameter to this function.
+
+```python
+def get_blob_sidecars(signed_block: SignedBeaconBlock,
+ blobs: Sequence[Blob],
+ blob_kzg_commitments: List[KZGCommitment, MAX_BLOB_COMMITMENTS_PER_BLOCK],
+ blob_kzg_proofs: Sequence[KZGProof]) -> Sequence[BlobSidecar]:
+ block = signed_block.message
+ block_header = BeaconBlockHeader(
+ slot=block.slot,
+ proposer_index=block.proposer_index,
+ parent_root=block.parent_root,
+ state_root=block.state_root,
+ body_root=hash_tree_root(block.body),
+ )
+ signed_block_header = SignedBeaconBlockHeader(message=block_header, signature=signed_block.signature)
+ sidecars: List[BlobSidecar] = []
+ for index, blob in enumerate(blobs):
+ proof = compute_merkle_proof(
+ blob_kzg_commitments,
+ get_generalized_index(List[KZGCommitment, MAX_BLOB_COMMITMENTS_PER_BLOCK], index),
+ )
+ proof += compute_merkle_proof(
+ block.body,
+ get_generalized_index(
+ BeaconBlockBody,
+ "signed_execution_payload_header",
+ "message",
+ "blob_kzg_commitments_root",
+ ),
+ )
+ sidecars.append(
+ BlobSidecar(
+ index=index,
+ blob=blob,
+ kzg_commitment=blob_kzg_commitments[index],
+ kzg_proof=blob_kzg_proofs[index],
+ signed_block_header=signed_block_header,
+ kzg_commitment_inclusion_proof=proof
+ )
+ )
+ return sidecars
+```
+
+### Constructing the execution payload envelope
+
+When the proposer publishes a valid `SignedBeaconBlock` containing a signed commitment by the builder, the builder is later expected to broadcast the corresponding `SignedExecutionPayloadEnvelope` that fulfills this commitment. See below for a special case of an *honestly withheld payload*.
+
+To construct the `execution_payload_envelope` the builder must perform the following steps; we alias `header` to the committed `ExecutionPayloadHeader` in the beacon block. An illustrative sketch combining these steps appears at the end of this section.
+
+1. Set the `payload` field to be the `ExecutionPayload` constructed when creating the corresponding bid. This payload **MUST** have the same block hash as `header.block_hash`.
+2. Set the `builder_index` field to be the validator index of the builder performing these steps. This field **MUST** be `header.builder_index`.
+3. Set `beacon_block_root` to be the `hash_tree_root` of the corresponding beacon block.
+4. Set `blob_kzg_commitments` to be the `commitments` field of the blobs bundle constructed when constructing the bid. This field **MUST** have a `hash_tree_root` equal to `header.blob_kzg_commitments_root`.
+5. Set `payload_withheld` to `False`.
+
+After setting these parameters, the builder should run `process_execution_payload(state, signed_envelope, execution_engine, verify=False)`, and this function should not trigger an exception.
+
+6. Set `state_root` to `hash_tree_root(state)`.
+
+After preparing the `envelope`, the builder signs it using:
+
+```python
+def get_execution_payload_envelope_signature(
+ state: BeaconState, envelope: ExecutionPayloadEnvelope, privkey: int) -> BLSSignature:
+ domain = get_domain(state, DOMAIN_BEACON_BUILDER, compute_epoch_at_slot(state.slot))
+ signing_root = compute_signing_root(envelope, domain)
+ return bls.Sign(privkey, signing_root)
+```
+
+The builder then assembles `signed_execution_payload_envelope = SignedExecutionPayloadEnvelope(message=envelope, signature=signature)` and broadcasts it on the `execution_payload` global gossip topic.
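+
+An illustrative sketch (not part of the spec) combining the envelope construction steps above; `payload` and `commitments` are assumed to be kept from bid construction, `beacon_block_root` comes from the proposer's block, and `state` must be a disposable post-state copy, since `process_execution_payload` mutates it:
+
+```python
+def build_envelope(state: BeaconState,
+                   payload: ExecutionPayload,
+                   builder_index: ValidatorIndex,
+                   beacon_block_root: Root,
+                   commitments: Sequence[KZGCommitment]) -> ExecutionPayloadEnvelope:
+    envelope = ExecutionPayloadEnvelope(
+        payload=payload,                      # step 1
+        builder_index=builder_index,          # step 2
+        beacon_block_root=beacon_block_root,  # step 3
+        blob_kzg_commitments=commitments,     # step 4
+        payload_withheld=False,               # step 5
+        state_root=Root(),                    # placeholder, filled in step 6
+    )
+    # Run the state transition without verification to obtain the post-state
+    signed = SignedExecutionPayloadEnvelope(message=envelope, signature=BLSSignature())
+    process_execution_payload(state, signed, EXECUTION_ENGINE, verify=False)
+    envelope.state_root = hash_tree_root(state)  # step 6
+    return envelope
+```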
+
+### Honest payload withheld messages
+
+An honest builder that has seen a `SignedBeaconBlock` referencing its signed bid, but where that block was not timely and thus is not the head of the builder's chain, may choose to withhold their execution payload. To do so, the builder simply acts as if it were building an empty payload, without any transactions, withdrawals, etc. The `payload.block_hash` may differ from `header.block_hash`. The builder then sets `payload_withheld` to `True`. If the PTC sees this message and votes for it, validators will attribute a *withholding boost* to the builder, which increases the forkchoice weight of the parent block, favoring it and thus preventing the builder from being charged for the bid despite not revealing.
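+
+A hedged sketch of a withheld envelope, reusing the names from the sketch above and assuming a hypothetical `empty_payload()` helper that builds a payload without transactions or withdrawals; note that `commitments` must still be the list committed to in the bid, since its root is checked against `header.blob_kzg_commitments_root`:
+
+```python
+withheld_envelope = ExecutionPayloadEnvelope(
+    payload=empty_payload(),           # hypothetical helper: no transactions, no withdrawals
+    builder_index=builder_index,
+    beacon_block_root=beacon_block_root,
+    blob_kzg_commitments=commitments,  # must still match the committed root
+    payload_withheld=True,             # signal the withholding to the PTC
+    state_root=Root(),                 # filled afterwards, as in the sketch above
+)
+```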
diff --git a/specs/_features/eip7732/fork-choice.md b/specs/_features/eip7732/fork-choice.md
new file mode 100644
index 0000000000..0eb49ddfc1
--- /dev/null
+++ b/specs/_features/eip7732/fork-choice.md
@@ -0,0 +1,563 @@
+# EIP-7732 -- Fork Choice
+
+## Table of contents
+
+
+
+
+- [Introduction](#introduction)
+- [Constants](#constants)
+- [Containers](#containers)
+ - [New `ChildNode`](#new-childnode)
+- [Helpers](#helpers)
+ - [Modified `LatestMessage`](#modified-latestmessage)
+ - [Modified `update_latest_messages`](#modified-update_latest_messages)
+ - [Modified `Store`](#modified-store)
+ - [Modified `get_forkchoice_store`](#modified-get_forkchoice_store)
+ - [`notify_ptc_messages`](#notify_ptc_messages)
+ - [`is_payload_present`](#is_payload_present)
+ - [`is_parent_node_full`](#is_parent_node_full)
+ - [Modified `get_ancestor`](#modified-get_ancestor)
+ - [Modified `get_checkpoint_block`](#modified-get_checkpoint_block)
+ - [`is_supporting_vote`](#is_supporting_vote)
+ - [New `compute_proposer_boost`](#new-compute_proposer_boost)
+ - [New `compute_withhold_boost`](#new-compute_withhold_boost)
+ - [New `compute_reveal_boost`](#new-compute_reveal_boost)
+ - [Modified `get_weight`](#modified-get_weight)
+ - [Modified `get_head`](#modified-get_head)
+- [Updated fork-choice handlers](#updated-fork-choice-handlers)
+ - [Modified `on_block`](#modified-on_block)
+- [New fork-choice handlers](#new-fork-choice-handlers)
+ - [New `on_execution_payload`](#new-on_execution_payload)
+ - [`seconds_into_slot`](#seconds_into_slot)
+ - [Modified `on_tick_per_slot`](#modified-on_tick_per_slot)
+ - [`on_payload_attestation_message`](#on_payload_attestation_message)
+
+
+
+
+## Introduction
+
+This is the modification of the fork choice accompanying the EIP-7732 upgrade.
+
+## Constants
+
+| Name | Value |
+| -------------------- | ----------- |
+| `PAYLOAD_TIMELY_THRESHOLD` | `PTC_SIZE / 2` (=`uint64(256)`) |
+| `INTERVALS_PER_SLOT` | `4` # [modified in EIP-7732] |
+| `PROPOSER_SCORE_BOOST` | `20` # [modified in EIP-7732] |
+| `PAYLOAD_WITHHOLD_BOOST` | `40` |
+| `PAYLOAD_REVEAL_BOOST` | `40` |
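+
+For illustration (a sketch using the constants above together with the payload status constants from the beacon chain spec): with `PTC_SIZE = 512`, strictly more than 256 `PAYLOAD_PRESENT` votes are required before `is_payload_present` (defined below) considers a payload present.
+
+```python
+votes = [PAYLOAD_PRESENT] * 257 + [PAYLOAD_ABSENT] * 255  # 512 PTC votes in total
+assert votes.count(PAYLOAD_PRESENT) > PAYLOAD_TIMELY_THRESHOLD  # 257 > 256
+```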
+
+## Containers
+
+### New `ChildNode`
+
+Auxiliary class to consider `(block, slot, bool)` LMD voting.
+
+```python
+class ChildNode(Container):
+ root: Root
+ slot: Slot
+ is_payload_present: boolean
+```
+
+## Helpers
+
+### Modified `LatestMessage`
+**Note:** The class is modified to keep track of the slot instead of the epoch.
+
+```python
+@dataclass(eq=True, frozen=True)
+class LatestMessage(object):
+ slot: Slot
+ root: Root
+```
+
+### Modified `update_latest_messages`
+**Note:** The function `update_latest_messages` is updated to use the attestation slot instead of the target epoch. Notice that this function is only called on validated attestations and that validators cannot attest twice in the same epoch without equivocating. Notice also that the target epoch number and slot number are validated in `validate_on_attestation`.
+
+```python
+def update_latest_messages(store: Store, attesting_indices: Sequence[ValidatorIndex], attestation: Attestation) -> None:
+ slot = attestation.data.slot
+ beacon_block_root = attestation.data.beacon_block_root
+ non_equivocating_attesting_indices = [i for i in attesting_indices if i not in store.equivocating_indices]
+ for i in non_equivocating_attesting_indices:
+ if i not in store.latest_messages or slot > store.latest_messages[i].slot:
+ store.latest_messages[i] = LatestMessage(slot=slot, root=beacon_block_root)
+```
+
+### Modified `Store`
+**Note:** `Store` is modified to track the intermediate states of "empty" consensus blocks, that is, those consensus blocks for which the corresponding execution payload has not been revealed or has not been included on chain.
+
+```python
+@dataclass
+class Store(object):
+ time: uint64
+ genesis_time: uint64
+ justified_checkpoint: Checkpoint
+ finalized_checkpoint: Checkpoint
+ unrealized_justified_checkpoint: Checkpoint
+ unrealized_finalized_checkpoint: Checkpoint
+ proposer_boost_root: Root
+ payload_withhold_boost_root: Root # [New in EIP-7732]
+ payload_withhold_boost_full: boolean # [New in EIP-7732]
+ payload_reveal_boost_root: Root # [New in EIP-7732]
+ equivocating_indices: Set[ValidatorIndex]
+ blocks: Dict[Root, BeaconBlock] = field(default_factory=dict)
+ block_states: Dict[Root, BeaconState] = field(default_factory=dict)
+ block_timeliness: Dict[Root, boolean] = field(default_factory=dict)
+ checkpoint_states: Dict[Checkpoint, BeaconState] = field(default_factory=dict)
+ latest_messages: Dict[ValidatorIndex, LatestMessage] = field(default_factory=dict)
+ unrealized_justifications: Dict[Root, Checkpoint] = field(default_factory=dict)
+ execution_payload_states: Dict[Root, BeaconState] = field(default_factory=dict) # [New in EIP-7732]
+ ptc_vote: Dict[Root, Vector[uint8, PTC_SIZE]] = field(default_factory=dict) # [New in EIP-7732]
+```
+
+### Modified `get_forkchoice_store`
+
+```python
+def get_forkchoice_store(anchor_state: BeaconState, anchor_block: BeaconBlock) -> Store:
+ assert anchor_block.state_root == hash_tree_root(anchor_state)
+ anchor_root = hash_tree_root(anchor_block)
+ anchor_epoch = get_current_epoch(anchor_state)
+ justified_checkpoint = Checkpoint(epoch=anchor_epoch, root=anchor_root)
+ finalized_checkpoint = Checkpoint(epoch=anchor_epoch, root=anchor_root)
+ proposer_boost_root = Root()
+ return Store(
+ time=uint64(anchor_state.genesis_time + SECONDS_PER_SLOT * anchor_state.slot),
+ genesis_time=anchor_state.genesis_time,
+ justified_checkpoint=justified_checkpoint,
+ finalized_checkpoint=finalized_checkpoint,
+ unrealized_justified_checkpoint=justified_checkpoint,
+ unrealized_finalized_checkpoint=finalized_checkpoint,
+ proposer_boost_root=proposer_boost_root,
+ payload_withhold_boost_root=proposer_boost_root, # [New in EIP-7732]
+ payload_withhold_boost_full=True, # [New in EIP-7732]
+ payload_reveal_boost_root=proposer_boost_root, # [New in EIP-7732]
+ equivocating_indices=set(),
+ blocks={anchor_root: copy(anchor_block)},
+ block_states={anchor_root: copy(anchor_state)},
+ checkpoint_states={justified_checkpoint: copy(anchor_state)},
+ unrealized_justifications={anchor_root: justified_checkpoint},
+ execution_payload_states={anchor_root: copy(anchor_state)}, # [New in EIP-7732]
+ ptc_vote={anchor_root: Vector[uint8, PTC_SIZE]()},
+ )
+```
+
+### `notify_ptc_messages`
+
+```python
+def notify_ptc_messages(store: Store, state: BeaconState, payload_attestations: Sequence[PayloadAttestation]) -> None:
+ """
+ Extracts a list of ``PayloadAttestationMessage`` from ``payload_attestations`` and updates the store with them
+ These Payload attestations are assumed to be in the beacon block hence signature verification is not needed
+ """
+ if state.slot == 0:
+ return
+ for payload_attestation in payload_attestations:
+ indexed_payload_attestation = get_indexed_payload_attestation(state, Slot(state.slot - 1), payload_attestation)
+ for idx in indexed_payload_attestation.attesting_indices:
+ on_payload_attestation_message(
+ store,
+ PayloadAttestationMessage(
+ validator_index=idx,
+ data=payload_attestation.data,
+ signature=BLSSignature(),
+            ),
+            is_from_block=True,
+        )
+```
+
+### `is_payload_present`
+
+```python
+def is_payload_present(store: Store, beacon_block_root: Root) -> bool:
+ """
+ Return whether the execution payload for the beacon block with root ``beacon_block_root`` was voted as present
+ by the PTC
+ """
+ # The beacon block root must be known
+ assert beacon_block_root in store.ptc_vote
+ return store.ptc_vote[beacon_block_root].count(PAYLOAD_PRESENT) > PAYLOAD_TIMELY_THRESHOLD
+```
+
+### `is_parent_node_full`
+
+```python
+def is_parent_node_full(store: Store, block: BeaconBlock) -> bool:
+ parent = store.blocks[block.parent_root]
+ parent_block_hash = block.body.signed_execution_payload_header.message.parent_block_hash
+ message_block_hash = parent.body.signed_execution_payload_header.message.block_hash
+ return parent_block_hash == message_block_hash
+```
+
+### Modified `get_ancestor`
+**Note:** `get_ancestor` is modified to return whether the chain is based on an *empty* or *full* block.
+
+```python
+def get_ancestor(store: Store, root: Root, slot: Slot) -> ChildNode:
+ """
+ Returns the beacon block root, the slot and the payload status of the ancestor of the beacon block
+ with ``root`` at ``slot``. If the beacon block with ``root`` is already at ``slot`` or we are
+    requesting an ancestor "in the future", it returns its PTC status instead of the actual payload content.
+ """
+ block = store.blocks[root]
+ if block.slot <= slot:
+ return ChildNode(root=root, slot=slot, is_payload_present=is_payload_present(store, root))
+
+ parent = store.blocks[block.parent_root]
+ if parent.slot > slot:
+ return get_ancestor(store, block.parent_root, slot)
+ return ChildNode(root=block.parent_root, slot=parent.slot, is_payload_present=is_parent_node_full(store, block))
+```
+
+### Modified `get_checkpoint_block`
+**Note:** `get_checkpoint_block` is modified to use the new `get_ancestor`.
+
+```python
+def get_checkpoint_block(store: Store, root: Root, epoch: Epoch) -> Root:
+ """
+ Compute the checkpoint block for epoch ``epoch`` in the chain of block ``root``
+ """
+ epoch_first_slot = compute_start_slot_at_epoch(epoch)
+ return get_ancestor(store, root, epoch_first_slot).root
+```
+
+
+### `is_supporting_vote`
+
+```python
+def is_supporting_vote(store: Store, node: ChildNode, message: LatestMessage) -> bool:
+ """
+ Returns whether a vote for ``message.root`` supports the chain containing the beacon block ``node.root`` with the
+ payload contents indicated by ``node.is_payload_present`` as head during slot ``node.slot``.
+ """
+ if node.root == message.root:
+        # An attestation for a given root always counts for that root, regardless of whether the
+        # payload was full or empty, as long as the attestation happened after the requested slot.
+ return node.slot <= message.slot
+ message_block = store.blocks[message.root]
+ if node.slot >= message_block.slot:
+ return False
+ ancestor = get_ancestor(store, message.root, node.slot)
+ return (node.root == ancestor.root) and (node.is_payload_present == ancestor.is_payload_present)
+```
+
+### New `compute_proposer_boost`
+This is a helper to compute the proposer boost. It applies the proposer boost to any ancestor of the proposer boost root, taking into account the payload presence. There is one exception: if the requested node has the same root and slot as the block with the proposer boost root, then the proposer boost is applied to both the empty and the full versions of the node.
+
+```python
+def compute_proposer_boost(store: Store, state: BeaconState, node: ChildNode) -> Gwei:
+ if store.proposer_boost_root == Root():
+ return Gwei(0)
+ ancestor = get_ancestor(store, store.proposer_boost_root, node.slot)
+ if ancestor.root != node.root:
+ return Gwei(0)
+ proposer_boost_slot = store.blocks[store.proposer_boost_root].slot
+ # Proposer boost is not applied after skipped slots
+ if node.slot > proposer_boost_slot:
+ return Gwei(0)
+ if (node.slot < proposer_boost_slot) and (ancestor.is_payload_present != node.is_payload_present):
+ return Gwei(0)
+ committee_weight = get_total_active_balance(state) // SLOTS_PER_EPOCH
+ return (committee_weight * PROPOSER_SCORE_BOOST) // 100
+```
+
+### New `compute_withhold_boost`
+This is a similar helper that applies to the withhold boost. In this case, the reveal status is always taken into account.
+
+```python
+def compute_withhold_boost(store: Store, state: BeaconState, node: ChildNode) -> Gwei:
+ if store.payload_withhold_boost_root == Root():
+ return Gwei(0)
+ ancestor = get_ancestor(store, store.payload_withhold_boost_root, node.slot)
+ if ancestor.root != node.root:
+ return Gwei(0)
+ if node.slot >= store.blocks[store.payload_withhold_boost_root].slot:
+ ancestor.is_payload_present = store.payload_withhold_boost_full
+ if ancestor.is_payload_present != node.is_payload_present:
+ return Gwei(0)
+
+ committee_weight = get_total_active_balance(state) // SLOTS_PER_EPOCH
+ return (committee_weight * PAYLOAD_WITHHOLD_BOOST) // 100
+```
+
+### New `compute_reveal_boost`
+
+This is a helper similar to the previous two; the only difference is that the reveal boost is only applied to the full version of the node when querying for the same slot as the revealed payload.
+
+```python
+def compute_reveal_boost(store: Store, state: BeaconState, node: ChildNode) -> Gwei:
+ if store.payload_reveal_boost_root == Root():
+ return Gwei(0)
+ ancestor = get_ancestor(store, store.payload_reveal_boost_root, node.slot)
+ if ancestor.root != node.root:
+ return Gwei(0)
+ if node.slot >= store.blocks[store.payload_reveal_boost_root].slot:
+ ancestor.is_payload_present = True
+ if ancestor.is_payload_present != node.is_payload_present:
+ return Gwei(0)
+ committee_weight = get_total_active_balance(state) // SLOTS_PER_EPOCH
+ return (committee_weight * PAYLOAD_REVEAL_BOOST) // 100
+```
+
+### Modified `get_weight`
+
+**Note:** `get_weight` is modified to only count votes for descending chains that support the status of a triple `Root, Slot, bool`, where the `bool` indicates whether the block was full. `Slot` is needed for a correct implementation of `(Block, Slot)` voting.
+
+```python
+def get_weight(store: Store, node: ChildNode) -> Gwei:
+ state = store.checkpoint_states[store.justified_checkpoint]
+ unslashed_and_active_indices = [
+ i for i in get_active_validator_indices(state, get_current_epoch(state))
+ if not state.validators[i].slashed
+ ]
+ attestation_score = Gwei(sum(
+ state.validators[i].effective_balance for i in unslashed_and_active_indices
+ if (i in store.latest_messages
+ and i not in store.equivocating_indices
+ and is_supporting_vote(store, node, store.latest_messages[i]))
+ ))
+
+ # Compute boosts
+ proposer_score = compute_proposer_boost(store, state, node)
+ builder_reveal_score = compute_reveal_boost(store, state, node)
+ builder_withhold_score = compute_withhold_boost(store, state, node)
+
+ return attestation_score + proposer_score + builder_reveal_score + builder_withhold_score
+```
+
+### Modified `get_head`
+
+**Note:** `get_head` is modified to use the new `get_weight` function. It returns the `ChildNode` object corresponding to the head block.
+
+```python
+def get_head(store: Store) -> ChildNode:
+ # Get filtered block tree that only includes viable branches
+ blocks = get_filtered_block_tree(store)
+ # Execute the LMD-GHOST fork choice
+ justified_root = store.justified_checkpoint.root
+ justified_block = store.blocks[justified_root]
+ justified_slot = justified_block.slot
+ justified_full = is_payload_present(store, justified_root)
+ best_child = ChildNode(root=justified_root, slot=justified_slot, is_payload_present=justified_full)
+ while True:
+ children = [
+ ChildNode(root=root, slot=block.slot, is_payload_present=present) for (root, block) in blocks.items()
+ if block.parent_root == best_child.root and block.slot > best_child.slot and
+ (best_child.root == justified_root or is_parent_node_full(store, block) == best_child.is_payload_present)
+ for present in (True, False) if root in store.execution_payload_states or not present
+ ]
+ if len(children) == 0:
+ return best_child
+ # if we have children we consider the current head advanced as a possible head
+ highest_child_slot = max(child.slot for child in children)
+ children += [
+ ChildNode(root=best_child.root, slot=best_child.slot + 1, is_payload_present=best_child.is_payload_present)
+ ]
+        # Sort by latest attesting balance, with ties successively broken by:
+        # 1. the block's slot
+        # 2. the PTC vote
+        # 3. favoring full blocks
+        # 4. favoring the block with the lexicographically higher root
+ new_best_child = max(children, key=lambda child: (
+ get_weight(store, child),
+ blocks[child.root].slot,
+ is_payload_present(store, child.root),
+ child.is_payload_present,
+ child.root
+ )
+ )
+ if new_best_child.root == best_child.root and new_best_child.slot >= highest_child_slot:
+ return new_best_child
+ best_child = new_best_child
+```
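+
+As an illustration, a client could resolve the current head and the payload status to build on with a small wrapper; `resolve_head` below is a hypothetical helper, not part of this specification:
+
+```python
+def resolve_head(store: Store) -> Tuple[Root, bool]:
+    # Run the LMD-GHOST fork choice over (block, slot, payload status) nodes
+    head = get_head(store)
+    # The returned ChildNode indicates whether the full or the empty chain is canonical
+    return head.root, head.is_payload_present
+```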
+
+## Updated fork-choice handlers
+
+### Modified `on_block`
+
+*Note*: The handler `on_block` is modified to select the pre-`state` of the given beacon block based not only on the parent block root, but also on the parent block hash. In addition, the check for blob data availability is delayed until the processing of the execution payload.
+
+```python
+def on_block(store: Store, signed_block: SignedBeaconBlock) -> None:
+ """
+ Run ``on_block`` upon receiving a new block.
+ """
+ block = signed_block.message
+ # Parent block must be known
+ assert block.parent_root in store.block_states
+
+    # Check if this block builds on an empty or a full parent block
+ parent_block = store.blocks[block.parent_root]
+ header = block.body.signed_execution_payload_header.message
+ parent_header = parent_block.body.signed_execution_payload_header.message
+ # Make a copy of the state to avoid mutability issues
+ if is_parent_node_full(store, block):
+ assert block.parent_root in store.execution_payload_states
+ state = copy(store.execution_payload_states[block.parent_root])
+ else:
+ assert header.parent_block_hash == parent_header.parent_block_hash
+ state = copy(store.block_states[block.parent_root])
+
+ # Blocks cannot be in the future. If they are, their consideration must be delayed until they are in the past.
+ current_slot = get_current_slot(store)
+ assert current_slot >= block.slot
+
+ # Check that block is later than the finalized epoch slot (optimization to reduce calls to get_ancestor)
+ finalized_slot = compute_start_slot_at_epoch(store.finalized_checkpoint.epoch)
+ assert block.slot > finalized_slot
+ # Check block is a descendant of the finalized block at the checkpoint finalized slot
+ finalized_checkpoint_block = get_checkpoint_block(
+ store,
+ block.parent_root,
+ store.finalized_checkpoint.epoch,
+ )
+ assert store.finalized_checkpoint.root == finalized_checkpoint_block
+
+ # Check the block is valid and compute the post-state
+ block_root = hash_tree_root(block)
+ state_transition(state, signed_block, True)
+
+ # Add new block to the store
+ store.blocks[block_root] = block
+ # Add new state for this block to the store
+ store.block_states[block_root] = state
+    # Add a new PTC vote for this block to the store
+ store.ptc_vote[block_root] = [PAYLOAD_ABSENT] * PTC_SIZE
+
+ # Notify the store about the payload_attestations in the block
+ notify_ptc_messages(store, state, block.body.payload_attestations)
+ # Add proposer score boost if the block is timely
+ time_into_slot = (store.time - store.genesis_time) % SECONDS_PER_SLOT
+ is_before_attesting_interval = time_into_slot < SECONDS_PER_SLOT // INTERVALS_PER_SLOT
+ is_timely = get_current_slot(store) == block.slot and is_before_attesting_interval
+ store.block_timeliness[hash_tree_root(block)] = is_timely
+
+ # Add proposer score boost if the block is timely and not conflicting with an existing block
+ is_first_block = store.proposer_boost_root == Root()
+ if is_timely and is_first_block:
+ store.proposer_boost_root = hash_tree_root(block)
+
+ # Update checkpoints in store if necessary
+ update_checkpoints(store, state.current_justified_checkpoint, state.finalized_checkpoint)
+
+ # Eagerly compute unrealized justification and finality.
+ compute_pulled_up_tip(store, block_root)
+```
+
+## New fork-choice handlers
+
+### New `on_execution_payload`
+
+The handler `on_execution_payload` is called when the node receives a `SignedExecutionPayloadEnvelope` to sync.
+
+```python
+def on_execution_payload(store: Store, signed_envelope: SignedExecutionPayloadEnvelope) -> None:
+ """
+ Run ``on_execution_payload`` upon receiving a new execution payload.
+ """
+ envelope = signed_envelope.message
+ # The corresponding beacon block root needs to be known
+ assert envelope.beacon_block_root in store.block_states
+
+ # Check if blob data is available
+ # If not, this payload MAY be queued and subsequently considered when blob data becomes available
+ assert is_data_available(envelope.beacon_block_root, envelope.blob_kzg_commitments)
+
+ # Make a copy of the state to avoid mutability issues
+ state = copy(store.block_states[envelope.beacon_block_root])
+
+ # Process the execution payload
+ process_execution_payload(state, signed_envelope, EXECUTION_ENGINE)
+
+ # Add new state for this payload to the store
+ store.execution_payload_states[envelope.beacon_block_root] = state
+```
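+
+A minimal queueing sketch, assuming a client-local `pending` list (not a spec structure), showing how an envelope MAY be deferred until its beacon block and blob data are available:
+
+```python
+def try_on_execution_payload(store: Store,
+                             signed_envelope: SignedExecutionPayloadEnvelope,
+                             pending: list) -> None:
+    envelope = signed_envelope.message
+    # Defer until the corresponding beacon block is known
+    if envelope.beacon_block_root not in store.block_states:
+        pending.append(signed_envelope)
+        return
+    # Defer until the blob data has been retrieved
+    if not is_data_available(envelope.beacon_block_root, envelope.blob_kzg_commitments):
+        pending.append(signed_envelope)
+        return
+    on_execution_payload(store, signed_envelope)
+```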
+
+### `seconds_into_slot`
+
+```python
+def seconds_into_slot(store: Store) -> uint64:
+ return (store.time - store.genesis_time) % SECONDS_PER_SLOT
+```
+
+### Modified `on_tick_per_slot`
+
+*Note*: The handler `on_tick_per_slot` is modified to reset the payload boost roots.
+
+```python
+def on_tick_per_slot(store: Store, time: uint64) -> None:
+ previous_slot = get_current_slot(store)
+
+ # Update store time
+ store.time = time
+
+ current_slot = get_current_slot(store)
+
+ # If this is a new slot, reset store.proposer_boost_root
+ if current_slot > previous_slot:
+ store.proposer_boost_root = Root()
+ else:
+ # Reset the payload boost if this is the attestation time
+ if seconds_into_slot(store) >= SECONDS_PER_SLOT // INTERVALS_PER_SLOT:
+ store.payload_withhold_boost_root = Root()
+ store.payload_withhold_boost_full = False
+ store.payload_reveal_boost_root = Root()
+
+ # If a new epoch, pull-up justification and finalization from previous epoch
+ if current_slot > previous_slot and compute_slots_since_epoch_start(current_slot) == 0:
+ update_checkpoints(store, store.unrealized_justified_checkpoint, store.unrealized_finalized_checkpoint)
+```
+
+### `on_payload_attestation_message`
+
+```python
+def on_payload_attestation_message(
+ store: Store, ptc_message: PayloadAttestationMessage, is_from_block: bool=False) -> None:
+ """
+ Run ``on_payload_attestation_message`` upon receiving a new ``ptc_message`` directly on the wire.
+ """
+ # The beacon block root must be known
+ data = ptc_message.data
+ # PTC attestation must be for a known block. If block is unknown, delay consideration until the block is found
+ state = store.block_states[data.beacon_block_root]
+ ptc = get_ptc(state, data.slot)
+ # PTC votes can only change the vote for their assigned beacon block, return early otherwise
+ if data.slot != state.slot:
+ return
+ # Check that the attester is from the PTC
+ assert ptc_message.validator_index in ptc
+
+    # Verify the signature and check that it is for the current slot if it is coming from the wire
+ if not is_from_block:
+ # Check that the attestation is for the current slot
+ assert data.slot == get_current_slot(store)
+ # Verify the signature
+ assert is_valid_indexed_payload_attestation(
+ state,
+ IndexedPayloadAttestation(
+ attesting_indices=[ptc_message.validator_index],
+ data=data,
+ signature=ptc_message.signature
+ )
+ )
+ # Update the ptc vote for the block
+ ptc_index = ptc.index(ptc_message.validator_index)
+ ptc_vote = store.ptc_vote[data.beacon_block_root]
+ ptc_vote[ptc_index] = data.payload_status
+
+ # Only update payload boosts with attestations from a block if the block is for the current slot and it's early
+ if is_from_block and data.slot + 1 != get_current_slot(store):
+ return
+ time_into_slot = (store.time - store.genesis_time) % SECONDS_PER_SLOT
+ if is_from_block and time_into_slot >= SECONDS_PER_SLOT // INTERVALS_PER_SLOT:
+ return
+
+ # Update the payload boosts if threshold has been achieved
+ if ptc_vote.count(PAYLOAD_PRESENT) > PAYLOAD_TIMELY_THRESHOLD:
+ store.payload_reveal_boost_root = data.beacon_block_root
+ if ptc_vote.count(PAYLOAD_WITHHELD) > PAYLOAD_TIMELY_THRESHOLD:
+ block = store.blocks[data.beacon_block_root]
+ store.payload_withhold_boost_root = block.parent_root
+ store.payload_withhold_boost_full = is_parent_node_full(store, block)
+```
diff --git a/specs/_features/eip7732/fork.md b/specs/_features/eip7732/fork.md
new file mode 100644
index 0000000000..942b9c30cc
--- /dev/null
+++ b/specs/_features/eip7732/fork.md
@@ -0,0 +1,139 @@
+# EIP-7732 -- Fork Logic
+
+**Notice**: This document is a work-in-progress for researchers and implementers.
+
+## Table of contents
+
+
+
+
+- [Introduction](#introduction)
+- [Configuration](#configuration)
+- [Helper functions](#helper-functions)
+ - [Misc](#misc)
+ - [Modified `compute_fork_version`](#modified-compute_fork_version)
+- [Fork to EIP-7732](#fork-to-eip-7732)
+ - [Fork trigger](#fork-trigger)
+ - [Upgrading the state](#upgrading-the-state)
+
+
+
+## Introduction
+
+This document describes the process of the EIP-7732 upgrade.
+
+## Configuration
+
+Warning: this configuration is not definitive.
+
+| Name | Value |
+| - | - |
+| `EIP7732_FORK_VERSION` | `Version('0x09000000')` |
+| `EIP7732_FORK_EPOCH` | `Epoch(18446744073709551615)` **TBD** |
+
+## Helper functions
+
+### Misc
+
+#### Modified `compute_fork_version`
+
+```python
+def compute_fork_version(epoch: Epoch) -> Version:
+ """
+ Return the fork version at the given ``epoch``.
+ """
+ if epoch >= EIP7732_FORK_EPOCH:
+ return EIP7732_FORK_VERSION
+ if epoch >= ELECTRA_FORK_EPOCH:
+ return ELECTRA_FORK_VERSION
+ if epoch >= DENEB_FORK_EPOCH:
+ return DENEB_FORK_VERSION
+ if epoch >= CAPELLA_FORK_EPOCH:
+ return CAPELLA_FORK_VERSION
+ if epoch >= BELLATRIX_FORK_EPOCH:
+ return BELLATRIX_FORK_VERSION
+ if epoch >= ALTAIR_FORK_EPOCH:
+ return ALTAIR_FORK_VERSION
+ return GENESIS_FORK_VERSION
+```
+
+## Fork to EIP-7732
+
+### Fork trigger
+
+TBD. This fork is defined for testing purposes; the EIP may be combined with another
+consensus-layer upgrade.
+For now, we assume the condition will be triggered at epoch `EIP7732_FORK_EPOCH`.
+
+### Upgrading the state
+
+If `state.slot % SLOTS_PER_EPOCH == 0` and `compute_epoch_at_slot(state.slot) == EIP7732_FORK_EPOCH`,
+an irregular state change is made to upgrade to EIP-7732.
+
+```python
+def upgrade_to_eip7732(pre: electra.BeaconState) -> BeaconState:
+ epoch = electra.get_current_epoch(pre)
+
+ post = BeaconState(
+ # Versioning
+ genesis_time=pre.genesis_time,
+ genesis_validators_root=pre.genesis_validators_root,
+ slot=pre.slot,
+ fork=Fork(
+ previous_version=pre.fork.current_version,
+ current_version=EIP7732_FORK_VERSION, # [Modified in EIP-7732]
+ epoch=epoch,
+ ),
+ # History
+ latest_block_header=pre.latest_block_header,
+ block_roots=pre.block_roots,
+ state_roots=pre.state_roots,
+ historical_roots=pre.historical_roots,
+ # Eth1
+ eth1_data=pre.eth1_data,
+ eth1_data_votes=pre.eth1_data_votes,
+ eth1_deposit_index=pre.eth1_deposit_index,
+ # Registry
+ validators=pre.validators,
+ balances=pre.balances,
+ # Randomness
+ randao_mixes=pre.randao_mixes,
+ # Slashings
+ slashings=pre.slashings,
+ # Participation
+ previous_epoch_participation=pre.previous_epoch_participation,
+ current_epoch_participation=pre.current_epoch_participation,
+ # Finality
+ justification_bits=pre.justification_bits,
+ previous_justified_checkpoint=pre.previous_justified_checkpoint,
+ current_justified_checkpoint=pre.current_justified_checkpoint,
+ finalized_checkpoint=pre.finalized_checkpoint,
+ # Inactivity
+ inactivity_scores=pre.inactivity_scores,
+ # Sync
+ current_sync_committee=pre.current_sync_committee,
+ next_sync_committee=pre.next_sync_committee,
+ # Execution-layer
+ latest_execution_payload_header=ExecutionPayloadHeader(), # [Modified in EIP-7732]
+ # Withdrawals
+ next_withdrawal_index=pre.next_withdrawal_index,
+ next_withdrawal_validator_index=pre.next_withdrawal_validator_index,
+ # Deep history valid from Capella onwards
+ historical_summaries=pre.historical_summaries,
+ deposit_requests_start_index=pre.deposit_requests_start_index,
+ deposit_balance_to_consume=pre.deposit_balance_to_consume,
+ exit_balance_to_consume=pre.exit_balance_to_consume,
+ earliest_exit_epoch=pre.earliest_exit_epoch,
+ consolidation_balance_to_consume=pre.consolidation_balance_to_consume,
+ earliest_consolidation_epoch=pre.earliest_consolidation_epoch,
+ pending_balance_deposits=pre.pending_balance_deposits,
+ pending_partial_withdrawals=pre.pending_partial_withdrawals,
+ pending_consolidations=pre.pending_consolidations,
+ # ePBS
+ latest_block_hash=pre.latest_execution_payload_header.block_hash, # [New in EIP-7732]
+ latest_full_slot=pre.slot, # [New in EIP-7732]
+ latest_withdrawals_root=Root(), # [New in EIP-7732]
+ )
+
+ return post
+```
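+
+As an illustration, the irregular state change can be driven by a wrapper like the following sketch; `maybe_upgrade_state` is a hypothetical helper, not part of this specification:
+
+```python
+def maybe_upgrade_state(state: electra.BeaconState) -> BeaconState:
+    # Apply the upgrade exactly at the first slot of EIP7732_FORK_EPOCH
+    assert state.slot % SLOTS_PER_EPOCH == 0
+    assert compute_epoch_at_slot(state.slot) == EIP7732_FORK_EPOCH
+    return upgrade_to_eip7732(state)
+```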
diff --git a/specs/_features/eip7732/p2p-interface.md b/specs/_features/eip7732/p2p-interface.md
new file mode 100644
index 0000000000..86ab89568c
--- /dev/null
+++ b/specs/_features/eip7732/p2p-interface.md
@@ -0,0 +1,267 @@
+# EIP-7732 -- Networking
+
+This document contains the consensus-layer networking specification for EIP-7732.
+
+
+
+
+- [Modifications in EIP-7732](#modifications-in-eip-7732)
+ - [Preset](#preset)
+ - [Containers](#containers)
+ - [`BlobSidecar`](#blobsidecar)
+ - [Helpers](#helpers)
+ - [Modified `verify_blob_sidecar_inclusion_proof`](#modified-verify_blob_sidecar_inclusion_proof)
+ - [The gossip domain: gossipsub](#the-gossip-domain-gossipsub)
+ - [Topics and messages](#topics-and-messages)
+ - [Global topics](#global-topics)
+ - [`beacon_block`](#beacon_block)
+ - [`execution_payload`](#execution_payload)
+ - [`payload_attestation_message`](#payload_attestation_message)
+ - [`execution_payload_header`](#execution_payload_header)
+ - [The Req/Resp domain](#the-reqresp-domain)
+ - [Messages](#messages)
+ - [BeaconBlocksByRange v3](#beaconblocksbyrange-v3)
+ - [BeaconBlocksByRoot v3](#beaconblocksbyroot-v3)
+ - [BlobSidecarsByRoot v2](#blobsidecarsbyroot-v2)
+ - [ExecutionPayloadEnvelopeByRoot v1](#executionpayloadenvelopebyroot-v1)
+
+
+
+## Modifications in EIP-7732
+
+### Preset
+
+*[Modified in EIP-7732]*
+
+| Name | Value | Description |
+| - | - | - |
+| `KZG_COMMITMENT_INCLUSION_PROOF_DEPTH_EIP7732` | `13` # TODO: Compute it when the spec stabilizes | Merkle proof depth for the `blob_kzg_commitments` list item |
+
+### Containers
+
+#### `BlobSidecar`
+
+The `BlobSidecar` container is modified indirectly because the constant `KZG_COMMITMENT_INCLUSION_PROOF_DEPTH` is modified.
+
+```python
+class BlobSidecar(Container):
+ index: BlobIndex # Index of blob in block
+ blob: Blob
+ kzg_commitment: KZGCommitment
+ kzg_proof: KZGProof # Allows for quick verification of kzg_commitment
+ signed_block_header: SignedBeaconBlockHeader
+ kzg_commitment_inclusion_proof: Vector[Bytes32, KZG_COMMITMENT_INCLUSION_PROOF_DEPTH_EIP7732]
+```
+
+#### Helpers
+
+##### Modified `verify_blob_sidecar_inclusion_proof`
+
+`verify_blob_sidecar_inclusion_proof` is modified in EIP-7732 to account for the fact that the KZG commitments are included in the `ExecutionPayloadEnvelope` and no longer in the beacon block body.
+
+```python
+def verify_blob_sidecar_inclusion_proof(blob_sidecar: BlobSidecar) -> bool:
+ inner_gindex = get_generalized_index(
+ List[KZGCommitment, MAX_BLOB_COMMITMENTS_PER_BLOCK],
+ blob_sidecar.index
+ )
+ outer_gindex = get_generalized_index(
+ BeaconBlockBody,
+ "signed_execution_payload_header",
+ "message",
+ "blob_kzg_commitments_root",
+ )
+ gindex = get_subtree_index(concat_generalized_indices(outer_gindex, inner_gindex))
+
+ return is_valid_merkle_branch(
+ leaf=blob_sidecar.kzg_commitment.hash_tree_root(),
+ branch=blob_sidecar.kzg_commitment_inclusion_proof,
+ depth=KZG_COMMITMENT_INCLUSION_PROOF_DEPTH_EIP7732,
+ index=gindex,
+ root=blob_sidecar.signed_block_header.message.body_root,
+ )
+```
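+
+The preset above leaves the exact depth as a TODO. As a sketch under the current container layout, the depth is `floorlog2` of the concatenated generalized index used in `verify_blob_sidecar_inclusion_proof`:
+
+```python
+inner_gindex = get_generalized_index(
+    List[KZGCommitment, MAX_BLOB_COMMITMENTS_PER_BLOCK],
+    BlobIndex(0)
+)
+outer_gindex = get_generalized_index(
+    BeaconBlockBody,
+    "signed_execution_payload_header",
+    "message",
+    "blob_kzg_commitments_root",
+)
+# Depth of the branch from the block body root down to a single commitment leaf
+KZG_COMMITMENT_INCLUSION_PROOF_DEPTH_EIP7732 = floorlog2(
+    concat_generalized_indices(outer_gindex, inner_gindex)
+)
+```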
+
+### The gossip domain: gossipsub
+
+Some gossip meshes are upgraded in the fork of EIP-7732 to support upgraded types.
+
+#### Topics and messages
+
+Topics follow the same specification as in prior upgrades.
+
+The `beacon_block` topic is updated to support the modified type:
+
+| Name | Message Type |
+| --- | --- |
+| `beacon_block` | `SignedBeaconBlock` [modified in EIP-7732] |
+
+The new topics along with the type of the `data` field of a gossipsub message are given in this table:
+
+| Name | Message Type |
+|-------------------------------|------------------------------------------------------|
+| `execution_payload_header` | `SignedExecutionPayloadHeader` [New in EIP-7732] |
+| `execution_payload` | `SignedExecutionPayloadEnvelope` [New in EIP-7732] |
+| `payload_attestation_message` | `PayloadAttestationMessage` [New in EIP-7732] |
+
+##### Global topics
+
+EIP-7732 introduces new global topics for the execution payload header, the execution payload, and payload attestations.
+
+###### `beacon_block`
+
+[Modified in EIP-7732]
+
+The *type* of the payload of this topic changes to the (modified) `SignedBeaconBlock` found in [the Beacon Chain changes](./beacon-chain.md).
+
+There are no new validations for this topic. However, all validations with regard to the `ExecutionPayload` are removed:
+
+- _[REJECT]_ The length of KZG commitments is less than or equal to the limitation defined in Consensus Layer -- i.e. validate that `len(body.signed_beacon_block.message.blob_kzg_commitments) <= MAX_BLOBS_PER_BLOCK`.
+- _[REJECT]_ The block's execution payload timestamp is correct with respect to the slot
+ -- i.e. `execution_payload.timestamp == compute_timestamp_at_slot(state, block.slot)`.
+- If `execution_payload` verification of the block's parent by an execution node is *not* complete:
+  - _[REJECT]_ The block's parent (defined by `block.parent_root`) passes all validation (excluding execution node verification of the `block.body.execution_payload`).
+- otherwise:
+  - _[IGNORE]_ The block's parent (defined by `block.parent_root`) passes all validation (including execution node verification of the `block.body.execution_payload`).
+- _[REJECT]_ The block's parent (defined by `block.parent_root`) passes validation.
+
+Instead, the following validations are put in place, with the alias `header = signed_execution_payload_header.message`:
+
+- If `execution_payload` verification of the block's execution payload parent by an execution node **is complete**:
+  - _[REJECT]_ The block's execution payload parent (defined by `header.parent_block_hash`) passes all validation.
+- _[REJECT]_ The block's parent (defined by `block.parent_root`) passes validation.
+
+###### `execution_payload`
+
+This topic is used to propagate execution payload messages as `SignedExecutionPayloadEnvelope`.
+
+The following validations MUST pass before forwarding the `signed_execution_payload_envelope` on the network, assuming the aliases `envelope = signed_execution_payload_envelope.message` and `payload = envelope.payload`:
+
+- _[IGNORE]_ The envelope's block root `envelope.beacon_block_root` has been seen (via both gossip and non-gossip sources) (a client MAY queue payload for processing once the block is retrieved).
+- _[IGNORE]_ The node has not seen another valid `SignedExecutionPayloadEnvelope` for this block root from this builder.
+
+Let `block` be the block with root `envelope.beacon_block_root`.
+Let `header` alias `block.body.signed_execution_payload_header.message` (notice that this can be obtained from the `state.signed_execution_payload_header`).
+
+- _[REJECT]_ `block` passes validation.
+- _[REJECT]_ `envelope.builder_index == header.builder_index`
+- if `envelope.payload_withheld == False` then
+ - _[REJECT]_ `payload.block_hash == header.block_hash`
+- _[REJECT]_ The builder signature, `signed_execution_payload_envelope.signature`, is valid with respect to the builder's public key.
+
+###### `payload_attestation_message`
+
+This topic is used to propagate signed payload attestation messages.
+
+The following validations MUST pass before forwarding the `payload_attestation_message` on the network, assuming the alias `data = payload_attestation_message.data`:
+
+- _[IGNORE]_ The message's slot is for the current slot (with a `MAXIMUM_GOSSIP_CLOCK_DISPARITY` allowance), i.e. `data.slot == current_slot`.
+- _[REJECT]_ The message's payload status is a valid status, i.e. `data.payload_status < PAYLOAD_INVALID_STATUS`.
+- _[IGNORE]_ The `payload_attestation_message` is the first valid message received from the validator with index `payload_attestation_message.validator_index`.
+- _[IGNORE]_ The message's block `data.beacon_block_root` has been seen (via both gossip and non-gossip sources) (a client MAY queue attestation for processing once the block is retrieved. Note a client might want to request payload after).
+- _[REJECT]_ The message's block `data.beacon_block_root` passes validation.
+- _[REJECT]_ The message's validator index is within the payload committee in `get_ptc(state, data.slot)`. The `state` is the head state corresponding to processing the block up to the current slot as determined by the fork choice.
+- _[REJECT]_ The message's signature, `payload_attestation_message.signature`, is valid with respect to the validator index.
+
+###### `execution_payload_header`
+
+This topic is used to propagate signed bids as `SignedExecutionPayloadHeader`.
+
+The following validations MUST pass before forwarding the `signed_execution_payload_header` on the network, assuming the alias `header = signed_execution_payload_header.message`:
+
+- _[IGNORE]_ This is the first signed bid seen with a valid signature from the given builder for this slot.
+- _[IGNORE]_ This bid is the highest-value bid seen for the pair of the corresponding slot and the given parent block hash.
+- _[REJECT]_ The bid's builder index, `header.builder_index`, is a valid and non-slashed builder index in state.
+- _[IGNORE]_ The signed builder bid value, `header.value`, is less than or equal to the builder's balance in state, i.e. `MIN_BUILDER_BALANCE + header.value < state.builder_balances[header.builder_index]`.
+- _[IGNORE]_ `header.parent_block_hash` is the block hash of a known execution payload in fork choice.
+- _[IGNORE]_ `header.slot` is the current slot or the next slot.
+- _[REJECT]_ The builder signature, `signed_execution_payload_header.signature`, is valid with respect to `header.builder_index`.
+
+### The Req/Resp domain
+
+#### Messages
+
+##### BeaconBlocksByRange v3
+
+**Protocol ID:** `/eth2/beacon_chain/req/beacon_blocks_by_range/3/`
+
+[0]: # (eth2spec: skip)
+
+| `fork_version` | Chunk SSZ type |
+|--------------------------|-------------------------------|
+| `GENESIS_FORK_VERSION` | `phase0.SignedBeaconBlock` |
+| `ALTAIR_FORK_VERSION` | `altair.SignedBeaconBlock` |
+| `BELLATRIX_FORK_VERSION` | `bellatrix.SignedBeaconBlock` |
+| `CAPELLA_FORK_VERSION` | `capella.SignedBeaconBlock` |
+| `DENEB_FORK_VERSION` | `deneb.SignedBeaconBlock` |
+| `ELECTRA_FORK_VERSION`   | `electra.SignedBeaconBlock`   |
+| `EIP7732_FORK_VERSION`   | `eip7732.SignedBeaconBlock`   |
+
+##### BeaconBlocksByRoot v3
+
+**Protocol ID:** `/eth2/beacon_chain/req/beacon_blocks_by_root/3/`
+
+Per `context = compute_fork_digest(fork_version, genesis_validators_root)`:
+
+[1]: # (eth2spec: skip)
+
+| `fork_version` | Chunk SSZ type |
+|--------------------------|-------------------------------|
+| `GENESIS_FORK_VERSION` | `phase0.SignedBeaconBlock` |
+| `ALTAIR_FORK_VERSION` | `altair.SignedBeaconBlock` |
+| `BELLATRIX_FORK_VERSION` | `bellatrix.SignedBeaconBlock` |
+| `CAPELLA_FORK_VERSION` | `capella.SignedBeaconBlock` |
+| `DENEB_FORK_VERSION` | `deneb.SignedBeaconBlock` |
+| `ELECTRA_FORK_VERSION`   | `electra.SignedBeaconBlock`   |
+| `EIP7732_FORK_VERSION`   | `eip7732.SignedBeaconBlock`   |
+
+##### BlobSidecarsByRoot v2
+
+**Protocol ID:** `/eth2/beacon_chain/req/blob_sidecars_by_root/2/`
+
+[1]: # (eth2spec: skip)
+
+| `fork_version` | Chunk SSZ type |
+|--------------------------|-------------------------------|
+| `DENEB_FORK_VERSION` | `deneb.BlobSidecar` |
+| `EIP7732_FORK_VERSION` | `eip7732.BlobSidecar` |
+
+##### ExecutionPayloadEnvelopeByRoot v1
+
+**Protocol ID:** `/eth2/beacon_chain/req/execution_payload_envelope_by_root/1/`
+
+The `context` field is calculated as `context = compute_fork_digest(fork_version, genesis_validators_root)`:
+
+[1]: # (eth2spec: skip)
+
+| `fork_version` | Chunk SSZ type |
+|------------------------|------------------------------------------|
+| `EIP7732_FORK_VERSION` | `eip7732.SignedExecutionPayloadEnvelope` |
+
+Request Content:
+
+```
+(
+ List[Root, MAX_REQUEST_PAYLOAD]
+)
+```
+
+Response Content:
+
+```
+(
+ List[SignedExecutionPayloadEnvelope, MAX_REQUEST_PAYLOAD]
+)
+```
+
+Requests execution payload envelopes by `signed_execution_payload_envelope.message.beacon_block_root`. The response is a list of `SignedExecutionPayloadEnvelope` whose length is less than or equal to the number of requested execution payload envelopes. It may be less in the case that the responding peer is missing payload envelopes.
+
+No more than `MAX_REQUEST_PAYLOAD` may be requested at a time.
+
+ExecutionPayloadEnvelopeByRoot is primarily used to recover recent execution payload envelopes (e.g. when receiving a payload attestation with a revealed payload status but the payload itself was never received).
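+
+A hypothetical recovery flow on the requesting side, using the fork-choice `Store` and the PTC payload status constants; `maybe_request_envelope` is not a spec function:
+
+```python
+def maybe_request_envelope(store: Store, data: PayloadAttestationData) -> Optional[Root]:
+    # The PTC saw the payload but we never processed it: fetch the envelope by root
+    if data.payload_status == PAYLOAD_PRESENT and data.beacon_block_root not in store.execution_payload_states:
+        return data.beacon_block_root  # to be requested via ExecutionPayloadEnvelopeByRoot
+    return None
+```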
+
+The request MUST be encoded as an SSZ-field.
+
+The response MUST consist of zero or more `response_chunk`. Each successful `response_chunk` MUST contain a single `SignedExecutionPayloadEnvelope` payload.
+
+Clients MUST support requesting payload envelopes since the latest finalized epoch.
+
+Clients MUST respond with at least one payload envelope, if they have it. Clients MAY limit the number of payload envelopes in the response.
diff --git a/specs/_features/eip7732/validator.md b/specs/_features/eip7732/validator.md
new file mode 100644
index 0000000000..6a6cbeecef
--- /dev/null
+++ b/specs/_features/eip7732/validator.md
@@ -0,0 +1,134 @@
+# EIP-7732 -- Honest Validator
+
+This document represents the changes and additions to the honest validator guide for the EIP-7732 fork.
+
+
+
+**Table of Contents**
+
+- [Validator assignment](#validator-assignment)
+ - [Lookahead](#lookahead)
+- [Beacon chain responsibilities](#beacon-chain-responsibilities)
+ - [Attestation](#attestation)
+ - [Sync Committee participations](#sync-committee-participations)
+ - [Block proposal](#block-proposal)
+ - [Constructing the new `signed_execution_payload_header` field in `BeaconBlockBody`](#constructing-the-new-signed_execution_payload_header-field-in--beaconblockbody)
+ - [Constructing the new `payload_attestations` field in `BeaconBlockBody`](#constructing-the-new-payload_attestations-field-in--beaconblockbody)
+ - [Blob sidecars](#blob-sidecars)
+ - [Payload timeliness attestation](#payload-timeliness-attestation)
+ - [Constructing a payload attestation](#constructing-a-payload-attestation)
+
+
+
+## Validator assignment
+
+A validator may be a member of the new Payload Timeliness Committee (PTC) for a given slot. To check for PTC assignments the validator uses the helper `get_ptc_assignment(state, epoch, validator_index)` where `epoch <= next_epoch`.
+
+PTC committee selection is only stable within the context of the current and next epoch.
+
+```python
+def get_ptc_assignment(
+ state: BeaconState,
+ epoch: Epoch,
+ validator_index: ValidatorIndex) -> Optional[Slot]:
+ """
+ Returns the slot during the requested epoch in which the validator with index `validator_index`
+ is a member of the PTC. Returns None if no assignment is found.
+ """
+ next_epoch = Epoch(get_current_epoch(state) + 1)
+ assert epoch <= next_epoch
+
+ start_slot = compute_start_slot_at_epoch(epoch)
+ for slot in range(start_slot, start_slot + SLOTS_PER_EPOCH):
+ if validator_index in get_ptc(state, Slot(slot)):
+ return Slot(slot)
+ return None
+```
+
+### Lookahead
+
+[New in EIP-7732]
+
+`get_ptc_assignment` should be called at the start of each epoch to get the assignment for the next epoch (`current_epoch + 1`). A validator should plan for future assignments by noting their assigned PTC slot.
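+
+A minimal duty-planning sketch built on the helper above; `plan_ptc_duty` is a hypothetical client-side function, not part of this specification:
+
+```python
+def plan_ptc_duty(state: BeaconState, validator_index: ValidatorIndex) -> Optional[Slot]:
+    # At the start of each epoch, look up the PTC slot assigned for the next epoch
+    next_epoch = Epoch(get_current_epoch(state) + 1)
+    return get_ptc_assignment(state, next_epoch, validator_index)
+```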
+
+## Beacon chain responsibilities
+
+All validator responsibilities remain unchanged other than the following:
+
+- Proposers are no longer required to broadcast `BlobSidecar` objects, as this becomes a builder's duty.
+- Some validators are selected per slot to become PTC members; these validators must broadcast `PayloadAttestationMessage` objects during the assigned slot before the deadline of `3 * SECONDS_PER_SLOT // INTERVALS_PER_SLOT` seconds into the slot.
+
+### Attestation
+
+Attestation duties are not changed for validators; however, the attestation deadline is implicitly changed by the change in `INTERVALS_PER_SLOT`.
+
+### Sync Committee participations
+
+Sync committee duties are not changed for validators; however, the submission deadline is implicitly changed by the change in `INTERVALS_PER_SLOT`.
+
+
+### Block proposal
+
+Validators are still expected to propose `SignedBeaconBlock` at the beginning of any slot during which `is_proposer(state, validator_index)` returns `true`. The mechanism to prepare this beacon block and related sidecars differs from previous forks as follows:
+
+#### Constructing the new `signed_execution_payload_header` field in `BeaconBlockBody`
+
+To obtain `signed_execution_payload_header`, a block proposer building a block on top of a `state` must take the following actions:
+* Listen to the `execution_payload_header` gossip global topic and save an accepted `signed_execution_payload_header` from a builder. The proposer MAY obtain these signed messages by other, off-protocol means.
+* The `signed_execution_payload_header` must satisfy the verification conditions found in `process_execution_payload_header`, that is:
+ - The header signature must be valid
+ - The builder balance can cover the header value
+ - The header slot is for the proposal block slot
+ - The header parent block hash equals the state's `latest_block_hash`.
+ - The header parent block root equals the current block's `parent_root`.
+* Select one bid and set `body.signed_execution_payload_header = signed_execution_payload_header`
+
+#### Constructing the new `payload_attestations` field in `BeaconBlockBody`
+
+Up to `MAX_PAYLOAD_ATTESTATIONS` aggregate payload attestations can be included in the block. The validator will have to:
+* Listen to the `payload_attestation_message` gossip global topic
+* The payload attestations added must satisfy the verification conditions found in payload attestation gossip validation and payload attestation processing. This means:
+  - The `data.beacon_block_root` corresponds to `block.parent_root`.
+  - The slot of the parent block is exactly one slot before the proposing slot.
+  - The signature of the payload attestation data message verifies correctly.
+* The proposer needs to aggregate all payload attestations with the same data into a given `PayloadAttestation` object, as shown in the sketch below. For this it needs to fill the `aggregation_bits` field by using the relative position of the validator indices with respect to the PTC obtained from `get_ptc(state, block_slot - 1)`.
+* The proposer should only include payload attestations that are consistent with the current block they are proposing. That is, if the previous block had a payload, they should only include attestations with `payload_status = PAYLOAD_PRESENT`. Proposers are penalized for attestations that are inconsistent with their view.
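+
+A minimal aggregation sketch under the rules above; `aggregate_payload_attestations` is a hypothetical helper (not part of this specification) that assumes all messages share the same `data`:
+
+```python
+def aggregate_payload_attestations(state: BeaconState, block_slot: Slot,
+                                   messages: Sequence[PayloadAttestationMessage]) -> PayloadAttestation:
+    data = messages[0].data
+    assert all(message.data == data for message in messages)
+    # Position each validator index relative to the PTC of the parent block's slot
+    ptc = get_ptc(state, Slot(block_slot - 1))
+    aggregation_bits = Bitvector[PTC_SIZE]()
+    for message in messages:
+        aggregation_bits[ptc.index(message.validator_index)] = True
+    return PayloadAttestation(
+        aggregation_bits=aggregation_bits,
+        data=data,
+        signature=bls.Aggregate([message.signature for message in messages]),
+    )
+```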
+
+#### Blob sidecars
+
+The blob sidecars are no longer broadcast by the validator, and thus their construction is not necessary. This deprecates the corresponding sections from the honest validator guide in the Electra fork, moving them, albeit with some modifications, to the [honest Builder guide](./builder.md).
+
+### Payload timeliness attestation
+
+Some validators are selected to submit payload timeliness attestations. Validators should call `get_ptc_assignment` at the beginning of an epoch to be prepared to submit their PTC attestations during the next epoch.
+
+A validator should create and broadcast the `payload_attestation_message` to the global execution attestation subnet no later than `SECONDS_PER_SLOT * 3 / INTERVALS_PER_SLOT` seconds after the start of `slot`.
+
+#### Constructing a payload attestation
+
+If a validator is in the payload attestation committee for the current slot (as obtained from `get_ptc_assignment` above), then the validator should prepare a `PayloadAttestationMessage` for the current slot
+according to the logic below and broadcast it, no later than `SECONDS_PER_SLOT * 3 / INTERVALS_PER_SLOT` seconds after the start of the slot, to the global `payload_attestation_message` pubsub topic.
+
+The validator creates `payload_attestation_message` as follows:
+* If the validator has not seen any beacon block for the assigned slot, do not submit a payload attestation. It will be ignored anyway.
+* Set `data.beacon_block_root` to be the hash tree root of the beacon block seen for the assigned slot.
+* Set `data.slot` to be the assigned slot.
+* Set `data.payload_status` as follows:
+ - If a `SignedExecutionPayloadEnvelope` has been seen referencing the block `data.beacon_block_root` and the envelope has `payload_withheld = False`, set to `PAYLOAD_PRESENT`.
+ - If a `SignedExecutionPayloadEnvelope` has been seen referencing the block `data.beacon_block_root` and the envelope has `payload_withheld = True`, set to `PAYLOAD_WITHHELD`.
+ - If no `SignedExecutionPayloadEnvelope` has been seen referencing the block `data.beacon_block_root` set to `PAYLOAD_ABSENT`.
+* Set `payload_attestation_message.validator_index = validator_index` where `validator_index` is the validator chosen to submit. The private key mapping to `state.validators[validator_index].pubkey` is used to sign the payload timeliness attestation.
+* Sign the `payload_attestation_message.data` using the helper `get_payload_attestation_message_signature`.
+
+Notice that the attester only signs the `PayloadAttestationData` and not the `validator_index` field in the message. Proposers need to aggregate these attestations as described above.
+
+```python
+def get_payload_attestation_message_signature(
+ state: BeaconState, attestation: PayloadAttestationMessage, privkey: int) -> BLSSignature:
+ domain = get_domain(state, DOMAIN_PTC_ATTESTER, compute_epoch_at_slot(attestation.data.slot))
+ signing_root = compute_signing_root(attestation.data, domain)
+ return bls.Sign(privkey, signing_root)
+```
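+
+Putting the steps above together, a construction sketch; `seen_block_root` and `seen_payload_status` stand for the validator's local view of the assigned slot and are not spec names:
+
+```python
+def build_payload_attestation_message(state: BeaconState, slot: Slot,
+                                      validator_index: ValidatorIndex,
+                                      seen_block_root: Root, seen_payload_status: uint8,
+                                      privkey: int) -> PayloadAttestationMessage:
+    data = PayloadAttestationData(
+        beacon_block_root=seen_block_root,
+        slot=slot,
+        payload_status=seen_payload_status,
+    )
+    message = PayloadAttestationMessage(validator_index=validator_index, data=data)
+    # Only ``data`` is signed; the validator index is carried unsigned in the message
+    message.signature = get_payload_attestation_message_signature(state, message, privkey)
+    return message
+```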
+
+**Remark:** Validators do not need to check the full validity of the `ExecutionPayload` contained within the envelope, but the checks in the [P2P guide](./p2p-interface.md) should pass for the `SignedExecutionPayloadEnvelope`.
+
diff --git a/specs/altair/light-client/full-node.md b/specs/altair/light-client/full-node.md
index cea38613db..d887691c67 100644
--- a/specs/altair/light-client/full-node.md
+++ b/specs/altair/light-client/full-node.md
@@ -76,7 +76,7 @@ def create_light_client_bootstrap(state: BeaconState,
header=block_to_light_client_header(block),
current_sync_committee=state.current_sync_committee,
current_sync_committee_branch=CurrentSyncCommitteeBranch(
- compute_merkle_proof(state, CURRENT_SYNC_COMMITTEE_GINDEX)),
+ compute_merkle_proof(state, current_sync_committee_gindex_at_slot(state.slot))),
)
```
@@ -124,7 +124,7 @@ def create_light_client_update(state: BeaconState,
if update_attested_period == update_signature_period:
update.next_sync_committee = attested_state.next_sync_committee
update.next_sync_committee_branch = NextSyncCommitteeBranch(
- compute_merkle_proof(attested_state, NEXT_SYNC_COMMITTEE_GINDEX))
+ compute_merkle_proof(attested_state, next_sync_committee_gindex_at_slot(attested_state.slot)))
# Indicate finality whenever possible
if finalized_block is not None:
@@ -134,7 +134,7 @@ def create_light_client_update(state: BeaconState,
else:
assert attested_state.finalized_checkpoint.root == Bytes32()
update.finality_branch = FinalityBranch(
- compute_merkle_proof(attested_state, FINALIZED_ROOT_GINDEX))
+ compute_merkle_proof(attested_state, finalized_root_gindex_at_slot(attested_state.slot)))
update.sync_aggregate = block.message.body.sync_aggregate
update.signature_slot = block.message.slot
diff --git a/specs/altair/light-client/sync-protocol.md b/specs/altair/light-client/sync-protocol.md
index 2585889bf9..f7f38d1041 100644
--- a/specs/altair/light-client/sync-protocol.md
+++ b/specs/altair/light-client/sync-protocol.md
@@ -21,6 +21,9 @@
- [`LightClientOptimisticUpdate`](#lightclientoptimisticupdate)
- [`LightClientStore`](#lightclientstore)
- [Helper functions](#helper-functions)
+ - [`finalized_root_gindex_at_slot`](#finalized_root_gindex_at_slot)
+ - [`current_sync_committee_gindex_at_slot`](#current_sync_committee_gindex_at_slot)
+ - [`next_sync_committee_gindex_at_slot`](#next_sync_committee_gindex_at_slot)
- [`is_valid_light_client_header`](#is_valid_light_client_header)
- [`is_sync_committee_update`](#is_sync_committee_update)
- [`is_finality_update`](#is_finality_update)
@@ -28,6 +31,7 @@
- [`is_next_sync_committee_known`](#is_next_sync_committee_known)
- [`get_safety_threshold`](#get_safety_threshold)
- [`get_subtree_index`](#get_subtree_index)
+ - [`is_valid_normalized_merkle_branch`](#is_valid_normalized_merkle_branch)
- [`compute_sync_committee_period_at_slot`](#compute_sync_committee_period_at_slot)
- [Light client initialization](#light-client-initialization)
- [`initialize_light_client_store`](#initialize_light_client_store)
@@ -171,6 +175,30 @@ class LightClientStore(object):
## Helper functions
+### `finalized_root_gindex_at_slot`
+
+```python
+def finalized_root_gindex_at_slot(slot: Slot) -> GeneralizedIndex:
+ # pylint: disable=unused-argument
+ return FINALIZED_ROOT_GINDEX
+```
+
+### `current_sync_committee_gindex_at_slot`
+
+```python
+def current_sync_committee_gindex_at_slot(slot: Slot) -> GeneralizedIndex:
+ # pylint: disable=unused-argument
+ return CURRENT_SYNC_COMMITTEE_GINDEX
+```
+
+### `next_sync_committee_gindex_at_slot`
+
+```python
+def next_sync_committee_gindex_at_slot(slot: Slot) -> GeneralizedIndex:
+ # pylint: disable=unused-argument
+ return NEXT_SYNC_COMMITTEE_GINDEX
+```
+
### `is_valid_light_client_header`
```python
@@ -273,6 +301,22 @@ def get_subtree_index(generalized_index: GeneralizedIndex) -> uint64:
return uint64(generalized_index % 2**(floorlog2(generalized_index)))
```
+### `is_valid_normalized_merkle_branch`
+
+```python
+def is_valid_normalized_merkle_branch(leaf: Bytes32,
+ branch: Sequence[Bytes32],
+ gindex: GeneralizedIndex,
+ root: Root) -> bool:
+ depth = floorlog2(gindex)
+ index = get_subtree_index(gindex)
+ num_extra = len(branch) - depth
+ for i in range(num_extra):
+ if branch[i] != Bytes32():
+ return False
+ return is_valid_merkle_branch(leaf, branch[num_extra:], depth, index, root)
+```
+
### `compute_sync_committee_period_at_slot`
```python
@@ -292,11 +336,10 @@ def initialize_light_client_store(trusted_block_root: Root,
assert is_valid_light_client_header(bootstrap.header)
assert hash_tree_root(bootstrap.header.beacon) == trusted_block_root
- assert is_valid_merkle_branch(
+ assert is_valid_normalized_merkle_branch(
leaf=hash_tree_root(bootstrap.current_sync_committee),
branch=bootstrap.current_sync_committee_branch,
- depth=floorlog2(CURRENT_SYNC_COMMITTEE_GINDEX),
- index=get_subtree_index(CURRENT_SYNC_COMMITTEE_GINDEX),
+ gindex=current_sync_committee_gindex_at_slot(bootstrap.header.beacon.slot),
root=bootstrap.header.beacon.state_root,
)
@@ -364,11 +407,10 @@ def validate_light_client_update(store: LightClientStore,
else:
assert is_valid_light_client_header(update.finalized_header)
finalized_root = hash_tree_root(update.finalized_header.beacon)
- assert is_valid_merkle_branch(
+ assert is_valid_normalized_merkle_branch(
leaf=finalized_root,
branch=update.finality_branch,
- depth=floorlog2(FINALIZED_ROOT_GINDEX),
- index=get_subtree_index(FINALIZED_ROOT_GINDEX),
+ gindex=finalized_root_gindex_at_slot(update.attested_header.beacon.slot),
root=update.attested_header.beacon.state_root,
)
@@ -379,11 +421,10 @@ def validate_light_client_update(store: LightClientStore,
else:
if update_attested_period == store_period and is_next_sync_committee_known(store):
assert update.next_sync_committee == store.next_sync_committee
- assert is_valid_merkle_branch(
+ assert is_valid_normalized_merkle_branch(
leaf=hash_tree_root(update.next_sync_committee),
branch=update.next_sync_committee_branch,
- depth=floorlog2(NEXT_SYNC_COMMITTEE_GINDEX),
- index=get_subtree_index(NEXT_SYNC_COMMITTEE_GINDEX),
+ gindex=next_sync_committee_gindex_at_slot(update.attested_header.beacon.slot),
root=update.attested_header.beacon.state_root,
)
diff --git a/specs/bellatrix/fork-choice.md b/specs/bellatrix/fork-choice.md
index 7bf607d6e9..17fb8e024d 100644
--- a/specs/bellatrix/fork-choice.md
+++ b/specs/bellatrix/fork-choice.md
@@ -75,7 +75,7 @@ As per EIP-3675, before a post-transition block is finalized, `notify_forkchoice
##### `safe_block_hash`
The `safe_block_hash` parameter MUST be set to return value of
-[`get_safe_execution_payload_hash(store: Store)`](../../fork_choice/safe-block.md#get_safe_execution_payload_hash) function.
+[`get_safe_execution_block_hash(store: Store)`](../../fork_choice/safe-block.md#get_safe_execution_block_hash) function.
##### `should_override_forkchoice_update`
diff --git a/specs/bellatrix/validator.md b/specs/bellatrix/validator.md
index 2900bd1f02..cb9dda05d6 100644
--- a/specs/bellatrix/validator.md
+++ b/specs/bellatrix/validator.md
@@ -120,8 +120,8 @@ To obtain an execution payload, a block proposer building a block on top of a `s
1. Set `payload_id = prepare_execution_payload(state, pow_chain, safe_block_hash, finalized_block_hash, suggested_fee_recipient, execution_engine)`, where:
* `state` is the state object after applying `process_slots(state, slot)` transition to the resulting state of the parent block processing
* `pow_chain` is a `Dict[Hash32, PowBlock]` dictionary that abstractly represents all blocks in the PoW chain with block hash as the dictionary key
- * `safe_block_hash` is the return value of the `get_safe_execution_payload_hash(store: Store)` function call
- * `finalized_block_hash` is the hash of the latest finalized execution payload (`Hash32()` if none yet finalized)
+ * `safe_block_hash` is the return value of the `get_safe_execution_block_hash(store: Store)` function call
+ * `finalized_block_hash` is the block hash of the latest finalized execution payload (`Hash32()` if none yet finalized)
* `suggested_fee_recipient` is the value suggested to be used for the `fee_recipient` field of the execution payload
diff --git a/specs/capella/beacon-chain.md b/specs/capella/beacon-chain.md
index b6d0f28f81..103530bf8b 100644
--- a/specs/capella/beacon-chain.md
+++ b/specs/capella/beacon-chain.md
@@ -379,10 +379,9 @@ def get_expected_withdrawals(state: BeaconState) -> Sequence[Withdrawal]:
```python
def process_withdrawals(state: BeaconState, payload: ExecutionPayload) -> None:
expected_withdrawals = get_expected_withdrawals(state)
- assert len(payload.withdrawals) == len(expected_withdrawals)
+ assert payload.withdrawals == expected_withdrawals
- for expected_withdrawal, withdrawal in zip(expected_withdrawals, payload.withdrawals):
- assert withdrawal == expected_withdrawal
+ for withdrawal in expected_withdrawals:
decrease_balance(state, withdrawal.validator_index, withdrawal.amount)
# Update the next withdrawal index if this block contained withdrawals
diff --git a/specs/capella/light-client/fork.md b/specs/capella/light-client/fork.md
index 6dcc7578c2..6fcb6e3147 100644
--- a/specs/capella/light-client/fork.md
+++ b/specs/capella/light-client/fork.md
@@ -7,8 +7,8 @@
- [Introduction](#introduction)
- - [Upgrading light client data](#upgrading-light-client-data)
- - [Upgrading the store](#upgrading-the-store)
+- [Upgrading light client data](#upgrading-light-client-data)
+- [Upgrading the store](#upgrading-the-store)
@@ -17,7 +17,7 @@
This document describes how to upgrade existing light client objects based on the [Altair specification](../../altair/light-client/sync-protocol.md) to Capella. This is necessary when processing pre-Capella data with a post-Capella `LightClientStore`. Note that the data being exchanged over the network protocols uses the original format.
-### Upgrading light client data
+## Upgrading light client data
A Capella `LightClientStore` can still process earlier light client data. In order to do so, that pre-Capella data needs to be locally upgraded to Capella before processing.
@@ -70,7 +70,7 @@ def upgrade_lc_optimistic_update_to_capella(pre: bellatrix.LightClientOptimistic
)
```
-### Upgrading the store
+## Upgrading the store
Existing `LightClientStore` objects based on Altair MUST be upgraded to Capella before Capella based light client data can be processed. The `LightClientStore` upgrade MAY be performed before `CAPELLA_FORK_EPOCH`.
diff --git a/specs/deneb/beacon-chain.md b/specs/deneb/beacon-chain.md
index 4f036d3c93..0f6a8fc076 100644
--- a/specs/deneb/beacon-chain.md
+++ b/specs/deneb/beacon-chain.md
@@ -13,6 +13,7 @@
- [Preset](#preset)
- [Execution](#execution)
- [Configuration](#configuration)
+ - [Execution](#execution-1)
- [Validator cycle](#validator-cycle)
- [Containers](#containers)
- [Extended containers](#extended-containers)
@@ -77,13 +78,18 @@ Deneb is a consensus-layer upgrade containing a number of features. Including:
| Name | Value | Description |
| - | - | - |
| `MAX_BLOB_COMMITMENTS_PER_BLOCK` | `uint64(2**12)` (= 4096) | *[New in Deneb:EIP4844]* hardfork independent fixed theoretical limit same as `LIMIT_BLOBS_PER_TX` (see EIP 4844) |
+
+## Configuration
+
+### Execution
+
+| Name | Value | Description |
+| - | - | - |
| `MAX_BLOBS_PER_BLOCK` | `uint64(6)` | *[New in Deneb:EIP4844]* maximum number of blobs in a single block limited by `MAX_BLOB_COMMITMENTS_PER_BLOCK` |
*Note*: The blob transactions are packed into the execution payload by the EL/builder with their corresponding blobs being independently transmitted
and are limited by `MAX_BLOB_GAS_PER_BLOCK // GAS_PER_BLOB`. However the CL limit is independently defined by `MAX_BLOBS_PER_BLOCK`.
-## Configuration
-
### Validator cycle
| Name | Value |
@@ -260,7 +266,7 @@ def is_valid_block_hash(self: ExecutionEngine,
def is_valid_versioned_hashes(self: ExecutionEngine, new_payload_request: NewPayloadRequest) -> bool:
"""
Return ``True`` if and only if the version hashes computed by the blob transactions of
- ``new_payload_request.execution_payload`` matches ``new_payload_request.version_hashes``.
+ ``new_payload_request.execution_payload`` matches ``new_payload_request.versioned_hashes``.
"""
...
```
diff --git a/specs/deneb/light-client/fork.md b/specs/deneb/light-client/fork.md
index 2dce4778ed..07230a21c7 100644
--- a/specs/deneb/light-client/fork.md
+++ b/specs/deneb/light-client/fork.md
@@ -7,8 +7,8 @@
- [Introduction](#introduction)
- - [Upgrading light client data](#upgrading-light-client-data)
- - [Upgrading the store](#upgrading-the-store)
+- [Upgrading light client data](#upgrading-light-client-data)
+- [Upgrading the store](#upgrading-the-store)
@@ -17,7 +17,7 @@
This document describes how to upgrade existing light client objects based on the [Capella specification](../../capella/light-client/sync-protocol.md) to Deneb. This is necessary when processing pre-Deneb data with a post-Deneb `LightClientStore`. Note that the data being exchanged over the network protocols uses the original format.
-### Upgrading light client data
+## Upgrading light client data
A Deneb `LightClientStore` can still process earlier light client data. In order to do so, that pre-Deneb data needs to be locally upgraded to Deneb before processing.
@@ -90,7 +90,7 @@ def upgrade_lc_optimistic_update_to_deneb(pre: capella.LightClientOptimisticUpda
)
```
-### Upgrading the store
+## Upgrading the store
Existing `LightClientStore` objects based on Capella MUST be upgraded to Deneb before Deneb based light client data can be processed. The `LightClientStore` upgrade MAY be performed before `DENEB_FORK_EPOCH`.
diff --git a/specs/deneb/light-client/full-node.md b/specs/deneb/light-client/full-node.md
index db081b8e43..424723667c 100644
--- a/specs/deneb/light-client/full-node.md
+++ b/specs/deneb/light-client/full-node.md
@@ -17,7 +17,7 @@
## Introduction
-This upgrade adds information about the execution payload to light client data as part of the Deneb upgrade.
+Execution payload data is updated to account for the Deneb upgrade.
## Helper functions
diff --git a/specs/electra/beacon-chain.md b/specs/electra/beacon-chain.md
index 351f8634fd..cb5ee6a7a9 100644
--- a/specs/electra/beacon-chain.md
+++ b/specs/electra/beacon-chain.md
@@ -41,16 +41,16 @@
- [`BeaconState`](#beaconstate)
- [Helper functions](#helper-functions)
- [Predicates](#predicates)
- - [Updated `compute_proposer_index`](#updated-compute_proposer_index)
- - [Updated `is_eligible_for_activation_queue`](#updated-is_eligible_for_activation_queue)
+ - [Modified `compute_proposer_index`](#modified-compute_proposer_index)
+ - [Modified `is_eligible_for_activation_queue`](#modified-is_eligible_for_activation_queue)
- [New `is_compounding_withdrawal_credential`](#new-is_compounding_withdrawal_credential)
- [New `has_compounding_withdrawal_credential`](#new-has_compounding_withdrawal_credential)
- [New `has_execution_withdrawal_credential`](#new-has_execution_withdrawal_credential)
- - [Updated `is_fully_withdrawable_validator`](#updated-is_fully_withdrawable_validator)
- - [Updated `is_partially_withdrawable_validator`](#updated-is_partially_withdrawable_validator)
+ - [Modified `is_fully_withdrawable_validator`](#modified-is_fully_withdrawable_validator)
+ - [Modified `is_partially_withdrawable_validator`](#modified-is_partially_withdrawable_validator)
- [Misc](#misc-1)
- - [`get_committee_indices`](#get_committee_indices)
- - [`get_validator_max_effective_balance`](#get_validator_max_effective_balance)
+ - [New `get_committee_indices`](#new-get_committee_indices)
+ - [New `get_validator_max_effective_balance`](#new-get_validator_max_effective_balance)
- [Beacon state accessors](#beacon-state-accessors)
- [New `get_balance_churn_limit`](#new-get_balance_churn_limit)
- [New `get_activation_exit_churn_limit`](#new-get_activation_exit_churn_limit)
@@ -60,24 +60,24 @@
- [Modified `get_attesting_indices`](#modified-get_attesting_indices)
- [Modified `get_next_sync_committee_indices`](#modified-get_next_sync_committee_indices)
- [Beacon state mutators](#beacon-state-mutators)
- - [Updated `initiate_validator_exit`](#updated--initiate_validator_exit)
+ - [Modified `initiate_validator_exit`](#modified-initiate_validator_exit)
- [New `switch_to_compounding_validator`](#new-switch_to_compounding_validator)
- [New `queue_excess_active_balance`](#new-queue_excess_active_balance)
- [New `queue_entire_balance_and_reset_validator`](#new-queue_entire_balance_and_reset_validator)
- [New `compute_exit_epoch_and_update_churn`](#new-compute_exit_epoch_and_update_churn)
- [New `compute_consolidation_epoch_and_update_churn`](#new-compute_consolidation_epoch_and_update_churn)
- - [Updated `slash_validator`](#updated-slash_validator)
+ - [Modified `slash_validator`](#modified-slash_validator)
- [Beacon chain state transition function](#beacon-chain-state-transition-function)
- [Epoch processing](#epoch-processing)
- - [Updated `process_epoch`](#updated-process_epoch)
- - [Updated `process_registry_updates`](#updated--process_registry_updates)
+ - [Modified `process_epoch`](#modified-process_epoch)
+ - [Modified `process_registry_updates`](#modified-process_registry_updates)
- [New `process_pending_balance_deposits`](#new-process_pending_balance_deposits)
- [New `process_pending_consolidations`](#new-process_pending_consolidations)
- - [Updated `process_effective_balance_updates`](#updated-process_effective_balance_updates)
+ - [Modified `process_effective_balance_updates`](#modified-process_effective_balance_updates)
- [Block processing](#block-processing)
- [Withdrawals](#withdrawals)
- - [Updated `get_expected_withdrawals`](#updated-get_expected_withdrawals)
- - [Updated `process_withdrawals`](#updated-process_withdrawals)
+ - [Modified `get_expected_withdrawals`](#modified-get_expected_withdrawals)
+ - [Modified `process_withdrawals`](#modified-process_withdrawals)
- [Execution payload](#execution-payload)
- [Modified `process_execution_payload`](#modified-process_execution_payload)
- [Operations](#operations)
@@ -85,12 +85,12 @@
- [Attestations](#attestations)
- [Modified `process_attestation`](#modified-process_attestation)
- [Deposits](#deposits)
- - [Updated `apply_deposit`](#updated--apply_deposit)
+ - [Modified `apply_deposit`](#modified-apply_deposit)
- [New `is_valid_deposit_signature`](#new-is_valid_deposit_signature)
- [Modified `add_validator_to_registry`](#modified-add_validator_to_registry)
- - [Updated `get_validator_from_deposit`](#updated-get_validator_from_deposit)
+ - [Modified `get_validator_from_deposit`](#modified-get_validator_from_deposit)
- [Voluntary exits](#voluntary-exits)
- - [Updated `process_voluntary_exit`](#updated-process_voluntary_exit)
+ - [Modified `process_voluntary_exit`](#modified-process_voluntary_exit)
- [Execution layer withdrawal requests](#execution-layer-withdrawal-requests)
- [New `process_withdrawal_request`](#new-process_withdrawal_request)
- [Deposit requests](#deposit-requests)
@@ -110,7 +110,7 @@ Electra is a consensus-layer upgrade containing a number of features. Including:
* [EIP-7251](https://eips.ethereum.org/EIPS/eip-7251): Increase the MAX_EFFECTIVE_BALANCE
* [EIP-7549](https://eips.ethereum.org/EIPS/eip-7549): Move committee index outside Attestation
-*Note:* This specification is built upon [Deneb](../../deneb/beacon_chain.md) and is under active development.
+*Note:* This specification is built upon [Deneb](../deneb/beacon-chain.md) and is under active development.
## Constants
@@ -429,9 +429,9 @@ class BeaconState(Container):
### Predicates
-#### Updated `compute_proposer_index`
+#### Modified `compute_proposer_index`
-*Note*: The function is modified to use `MAX_EFFECTIVE_BALANCE_ELECTRA` preset.
+*Note*: The function `compute_proposer_index` is modified to use `MAX_EFFECTIVE_BALANCE_ELECTRA`.
```python
def compute_proposer_index(state: BeaconState, indices: Sequence[ValidatorIndex], seed: Bytes32) -> ValidatorIndex:
@@ -452,7 +452,9 @@ def compute_proposer_index(state: BeaconState, indices: Sequence[ValidatorIndex]
i += 1
```
-#### Updated `is_eligible_for_activation_queue`
+#### Modified `is_eligible_for_activation_queue`
+
+*Note*: The function `is_eligible_for_activation_queue` is modified to use `MIN_ACTIVATION_BALANCE` instead of `MAX_EFFECTIVE_BALANCE`.
```python
def is_eligible_for_activation_queue(validator: Validator) -> bool:
@@ -492,7 +494,9 @@ def has_execution_withdrawal_credential(validator: Validator) -> bool:
return has_compounding_withdrawal_credential(validator) or has_eth1_withdrawal_credential(validator)
```
-#### Updated `is_fully_withdrawable_validator`
+#### Modified `is_fully_withdrawable_validator`
+
+*Note*: The function `is_fully_withdrawable_validator` is modified to use `has_execution_withdrawal_credential` instead of `has_eth1_withdrawal_credential`.
```python
def is_fully_withdrawable_validator(validator: Validator, balance: Gwei, epoch: Epoch) -> bool:
@@ -506,7 +510,9 @@ def is_fully_withdrawable_validator(validator: Validator, balance: Gwei, epoch:
)
```
-#### Updated `is_partially_withdrawable_validator`
+#### Modified `is_partially_withdrawable_validator`
+
+*Note*: The function `is_partially_withdrawable_validator` is modified to use `get_validator_max_effective_balance` instead of `MAX_EFFECTIVE_BALANCE` and `has_execution_withdrawal_credential` instead of `has_eth1_withdrawal_credential`.
```python
def is_partially_withdrawable_validator(validator: Validator, balance: Gwei) -> bool:
@@ -525,14 +531,14 @@ def is_partially_withdrawable_validator(validator: Validator, balance: Gwei) ->
### Misc
-#### `get_committee_indices`
+#### New `get_committee_indices`
```python
def get_committee_indices(committee_bits: Bitvector) -> Sequence[CommitteeIndex]:
return [CommitteeIndex(index) for index, bit in enumerate(committee_bits) if bit]
```
-#### `get_validator_max_effective_balance`
+#### New `get_validator_max_effective_balance`
```python
def get_validator_max_effective_balance(validator: Validator) -> Gwei:
@@ -597,6 +603,8 @@ def get_pending_balance_to_withdraw(state: BeaconState, validator_index: Validat
#### Modified `get_attesting_indices`
+*Note*: The function `get_attesting_indices` is modified to support EIP7549.
+
```python
def get_attesting_indices(state: BeaconState, attestation: Attestation) -> Set[ValidatorIndex]:
"""
@@ -618,7 +626,7 @@ def get_attesting_indices(state: BeaconState, attestation: Attestation) -> Set[V
#### Modified `get_next_sync_committee_indices`
-*Note*: The function is modified to use `MAX_EFFECTIVE_BALANCE_ELECTRA` preset.
+*Note*: The function `get_next_sync_committee_indices` is modified to use `MAX_EFFECTIVE_BALANCE_ELECTRA`.
```python
def get_next_sync_committee_indices(state: BeaconState) -> Sequence[ValidatorIndex]:
@@ -645,10 +653,11 @@ def get_next_sync_committee_indices(state: BeaconState) -> Sequence[ValidatorInd
return sync_committee_indices
```
-
### Beacon state mutators
-#### Updated `initiate_validator_exit`
+#### Modified `initiate_validator_exit`
+
+*Note*: The function `initiate_validator_exit` is modified to use the new `compute_exit_epoch_and_update_churn` function.
```python
def initiate_validator_exit(state: BeaconState, index: ValidatorIndex) -> None:
@@ -692,6 +701,7 @@ def queue_excess_active_balance(state: BeaconState, index: ValidatorIndex) -> No
```
#### New `queue_entire_balance_and_reset_validator`
+
```python
def queue_entire_balance_and_reset_validator(state: BeaconState, index: ValidatorIndex) -> None:
balance = state.balances[index]
@@ -757,7 +767,9 @@ def compute_consolidation_epoch_and_update_churn(state: BeaconState, consolidati
return state.earliest_consolidation_epoch
```
-#### Updated `slash_validator`
+#### Modified `slash_validator`
+
+*Note*: The function `slash_validator` is modified to change how the slashing penalty and proposer/whistleblower rewards are calculated in accordance with EIP7251.
```python
def slash_validator(state: BeaconState,
@@ -791,7 +803,10 @@ def slash_validator(state: BeaconState,
### Epoch processing
-#### Updated `process_epoch`
+#### Modified `process_epoch`
+
+*Note*: The function `process_epoch` is modified to call updated functions and to process pending balance deposits and pending consolidations, which are new in Electra.
+
```python
def process_epoch(state: BeaconState) -> None:
process_justification_and_finalization(state)
@@ -810,9 +825,9 @@ def process_epoch(state: BeaconState) -> None:
process_sync_committee_updates(state)
```
-#### Updated `process_registry_updates`
+#### Modified `process_registry_updates`
-`process_registry_updates` uses the updated definition of `initiate_validator_exit`
+*Note*: The function `process_registry_updates` is modified to use the updated definition of `initiate_validator_exit`
and changes how the activation epochs are computed for eligible validators.
```python
@@ -839,6 +854,7 @@ def process_registry_updates(state: BeaconState) -> None:
```python
def process_pending_balance_deposits(state: BeaconState) -> None:
+ next_epoch = Epoch(get_current_epoch(state) + 1)
available_for_processing = state.deposit_balance_to_consume + get_activation_exit_churn_limit(state)
processed_amount = 0
next_deposit_index = 0
@@ -848,7 +864,7 @@ def process_pending_balance_deposits(state: BeaconState) -> None:
validator = state.validators[deposit.index]
# Validator is exiting, postpone the deposit until after withdrawable epoch
if validator.exit_epoch < FAR_FUTURE_EPOCH:
- if get_current_epoch(state) <= validator.withdrawable_epoch:
+ if next_epoch <= validator.withdrawable_epoch:
deposits_to_postpone.append(deposit)
# Deposited balance will never become active. Increase balance but do not consume churn
else:
@@ -879,13 +895,14 @@ def process_pending_balance_deposits(state: BeaconState) -> None:
```python
def process_pending_consolidations(state: BeaconState) -> None:
+ next_epoch = Epoch(get_current_epoch(state) + 1)
next_pending_consolidation = 0
for pending_consolidation in state.pending_consolidations:
source_validator = state.validators[pending_consolidation.source_index]
if source_validator.slashed:
next_pending_consolidation += 1
continue
- if source_validator.withdrawable_epoch > get_current_epoch(state):
+ if source_validator.withdrawable_epoch > next_epoch:
break
# Churn any target excess active balance of target and raise its max
@@ -899,9 +916,9 @@ def process_pending_consolidations(state: BeaconState) -> None:
state.pending_consolidations = state.pending_consolidations[next_pending_consolidation:]
```
-#### Updated `process_effective_balance_updates`
+#### Modified `process_effective_balance_updates`
-`process_effective_balance_updates` is updated with a new limit for the maximum effective balance.
+*Note*: The function `process_effective_balance_updates` is modified to use the new limit for the maximum effective balance.
```python
def process_effective_balance_updates(state: BeaconState) -> None:
@@ -911,6 +928,7 @@ def process_effective_balance_updates(state: BeaconState) -> None:
HYSTERESIS_INCREMENT = uint64(EFFECTIVE_BALANCE_INCREMENT // HYSTERESIS_QUOTIENT)
DOWNWARD_THRESHOLD = HYSTERESIS_INCREMENT * HYSTERESIS_DOWNWARD_MULTIPLIER
UPWARD_THRESHOLD = HYSTERESIS_INCREMENT * HYSTERESIS_UPWARD_MULTIPLIER
+ # [Modified in Electra:EIP7251]
EFFECTIVE_BALANCE_LIMIT = (
MAX_EFFECTIVE_BALANCE_ELECTRA if has_compounding_withdrawal_credential(validator)
else MIN_ACTIVATION_BALANCE
@@ -938,7 +956,9 @@ def process_block(state: BeaconState, block: BeaconBlock) -> None:
#### Withdrawals
-##### Updated `get_expected_withdrawals`
+##### Modified `get_expected_withdrawals`
+
+*Note*: The function `get_expected_withdrawals` is modified to support EIP7251.
```python
def get_expected_withdrawals(state: BeaconState) -> Tuple[Sequence[Withdrawal], uint64]:
@@ -994,7 +1014,9 @@ def get_expected_withdrawals(state: BeaconState) -> Tuple[Sequence[Withdrawal],
return withdrawals, partial_withdrawals_count
```
-##### Updated `process_withdrawals`
+##### Modified `process_withdrawals`
+
+*Note*: The function `process_withdrawals` is modified to support EIP7251.
```python
def process_withdrawals(state: BeaconState, payload: ExecutionPayload) -> None:
@@ -1160,9 +1182,9 @@ def process_attestation(state: BeaconState, attestation: Attestation) -> None:
##### Deposits
-###### Updated `apply_deposit`
+###### Modified `apply_deposit`
-*NOTE*: `process_deposit` is updated with a new definition of `apply_deposit`.
+*Note*: The function `apply_deposit` is modified to support EIP7251.
```python
def apply_deposit(state: BeaconState,
@@ -1180,7 +1202,7 @@ def apply_deposit(state: BeaconState,
index = ValidatorIndex(validator_pubkeys.index(pubkey))
state.pending_balance_deposits.append(
PendingBalanceDeposit(index=index, amount=amount)
- ) # [Modified in Electra:EIP-7251]
+ ) # [Modified in Electra:EIP7251]
# Check if valid deposit switch to compounding credentials
if (
is_compounding_withdrawal_credential(withdrawal_credentials)
@@ -1210,6 +1232,8 @@ def is_valid_deposit_signature(pubkey: BLSPubkey,
###### Modified `add_validator_to_registry`
+*Note*: The function `add_validator_to_registry` is modified to initialize the validator with a balance of zero and add a pending balance deposit to the queue.
+
```python
def add_validator_to_registry(state: BeaconState,
pubkey: BLSPubkey,
@@ -1225,7 +1249,9 @@ def add_validator_to_registry(state: BeaconState,
state.pending_balance_deposits.append(PendingBalanceDeposit(index=index, amount=amount)) # [New in Electra:EIP7251]
```
-###### Updated `get_validator_from_deposit`
+###### Modified `get_validator_from_deposit`
+
+*Note*: The function `get_validator_from_deposit` is modified to initialize the validator with an effective balance of zero.
```python
def get_validator_from_deposit(pubkey: BLSPubkey, withdrawal_credentials: Bytes32) -> Validator:
@@ -1241,7 +1267,10 @@ def get_validator_from_deposit(pubkey: BLSPubkey, withdrawal_credentials: Bytes3
```
##### Voluntary exits
-###### Updated `process_voluntary_exit`
+
+###### Modified `process_voluntary_exit`
+
+*Note*: The function `process_voluntary_exit` is modified to ensure the validator has no pending withdrawals in the queue.
```python
def process_voluntary_exit(state: BeaconState, signed_voluntary_exit: SignedVoluntaryExit) -> None:
@@ -1269,8 +1298,6 @@ def process_voluntary_exit(state: BeaconState, signed_voluntary_exit: SignedVolu
###### New `process_withdrawal_request`
-*Note*: This function is new in Electra following EIP-7002 and EIP-7251.
-
```python
def process_withdrawal_request(
state: BeaconState,
@@ -1338,8 +1365,6 @@ def process_withdrawal_request(
###### New `process_deposit_request`
-*Note*: This function is new in Electra:EIP6110.
-
```python
def process_deposit_request(state: BeaconState, deposit_request: DepositRequest) -> None:
# Set deposit request start index
diff --git a/specs/electra/light-client/fork.md b/specs/electra/light-client/fork.md
new file mode 100644
index 0000000000..d613df56a9
--- /dev/null
+++ b/specs/electra/light-client/fork.md
@@ -0,0 +1,133 @@
+# Electra Light Client -- Fork Logic
+
+## Table of contents
+
+
+
+
+
+- [Introduction](#introduction)
+- [Helper functions](#helper-functions)
+ - [`normalize_merkle_branch`](#normalize_merkle_branch)
+- [Upgrading light client data](#upgrading-light-client-data)
+- [Upgrading the store](#upgrading-the-store)
+
+
+
+
+## Introduction
+
+This document describes how to upgrade existing light client objects based on the [Deneb specification](../../deneb/light-client/sync-protocol.md) to Electra. This is necessary when processing pre-Electra data with a post-Electra `LightClientStore`. Note that the data being exchanged over the network protocols uses the original format.
+
+## Helper functions
+
+### `normalize_merkle_branch`
+
+```python
+def normalize_merkle_branch(branch: Sequence[Bytes32],
+ gindex: GeneralizedIndex) -> Sequence[Bytes32]:
+ depth = floorlog2(gindex)
+ num_extra = depth - len(branch)
+ return [Bytes32()] * num_extra + [*branch]
+```
+
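+As a non-normative illustration, the sketch below left-pads a Deneb `current_sync_committee_branch` (depth `floorlog2(CURRENT_SYNC_COMMITTEE_GINDEX) = 5`) with a single zero root to reach the Electra depth `floorlog2(CURRENT_SYNC_COMMITTEE_GINDEX_ELECTRA) = 6`; `Bytes32`, `floorlog2`, and the gindex constants are assumed from the pyspec.
+
+[0]: # (eth2spec: skip)
+
+```python
+# Illustrative sketch only -- not part of the spec.
+deneb_branch = [Bytes32(b'\x11' * 32)] * floorlog2(CURRENT_SYNC_COMMITTEE_GINDEX)  # depth 5
+electra_branch = normalize_merkle_branch(deneb_branch, CURRENT_SYNC_COMMITTEE_GINDEX_ELECTRA)
+
+assert len(electra_branch) == floorlog2(CURRENT_SYNC_COMMITTEE_GINDEX_ELECTRA)  # depth 6
+assert electra_branch[0] == Bytes32()  # zero root prepended at the front
+assert electra_branch[1:] == deneb_branch  # original branch preserved
+```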
+## Upgrading light client data
+
+An Electra `LightClientStore` can still process earlier light client data. In order to do so, that pre-Electra data needs to be locally upgraded to Electra before processing.
+
+```python
+def upgrade_lc_header_to_electra(pre: deneb.LightClientHeader) -> LightClientHeader:
+ return LightClientHeader(
+ beacon=pre.beacon,
+ execution=ExecutionPayloadHeader(
+ parent_hash=pre.execution.parent_hash,
+ fee_recipient=pre.execution.fee_recipient,
+ state_root=pre.execution.state_root,
+ receipts_root=pre.execution.receipts_root,
+ logs_bloom=pre.execution.logs_bloom,
+ prev_randao=pre.execution.prev_randao,
+ block_number=pre.execution.block_number,
+ gas_limit=pre.execution.gas_limit,
+ gas_used=pre.execution.gas_used,
+ timestamp=pre.execution.timestamp,
+ extra_data=pre.execution.extra_data,
+ base_fee_per_gas=pre.execution.base_fee_per_gas,
+ block_hash=pre.execution.block_hash,
+ transactions_root=pre.execution.transactions_root,
+ withdrawals_root=pre.execution.withdrawals_root,
+ blob_gas_used=pre.execution.blob_gas_used,
+            excess_blob_gas=pre.execution.excess_blob_gas,
+ deposit_requests_root=Root(), # [New in Electra:EIP6110]
+ withdrawal_requests_root=Root(), # [New in Electra:EIP7002:EIP7251]
+ consolidation_requests_root=Root(), # [New in Electra:EIP7251]
+ ),
+ execution_branch=pre.execution_branch,
+ )
+```
+
+```python
+def upgrade_lc_bootstrap_to_electra(pre: deneb.LightClientBootstrap) -> LightClientBootstrap:
+ return LightClientBootstrap(
+ header=upgrade_lc_header_to_electra(pre.header),
+ current_sync_committee=pre.current_sync_committee,
+ current_sync_committee_branch=normalize_merkle_branch(
+ pre.current_sync_committee_branch, CURRENT_SYNC_COMMITTEE_GINDEX_ELECTRA),
+ )
+```
+
+```python
+def upgrade_lc_update_to_electra(pre: deneb.LightClientUpdate) -> LightClientUpdate:
+ return LightClientUpdate(
+ attested_header=upgrade_lc_header_to_electra(pre.attested_header),
+ next_sync_committee=pre.next_sync_committee,
+ next_sync_committee_branch=normalize_merkle_branch(
+ pre.next_sync_committee_branch, NEXT_SYNC_COMMITTEE_GINDEX_ELECTRA),
+ finalized_header=upgrade_lc_header_to_electra(pre.finalized_header),
+ finality_branch=normalize_merkle_branch(
+ pre.finality_branch, FINALIZED_ROOT_GINDEX_ELECTRA),
+ sync_aggregate=pre.sync_aggregate,
+ signature_slot=pre.signature_slot,
+ )
+```
+
+```python
+def upgrade_lc_finality_update_to_electra(pre: deneb.LightClientFinalityUpdate) -> LightClientFinalityUpdate:
+ return LightClientFinalityUpdate(
+ attested_header=upgrade_lc_header_to_electra(pre.attested_header),
+ finalized_header=upgrade_lc_header_to_electra(pre.finalized_header),
+ finality_branch=normalize_merkle_branch(
+ pre.finality_branch, FINALIZED_ROOT_GINDEX_ELECTRA),
+ sync_aggregate=pre.sync_aggregate,
+ signature_slot=pre.signature_slot,
+ )
+```
+
+```python
+def upgrade_lc_optimistic_update_to_electra(pre: deneb.LightClientOptimisticUpdate) -> LightClientOptimisticUpdate:
+ return LightClientOptimisticUpdate(
+ attested_header=upgrade_lc_header_to_electra(pre.attested_header),
+ sync_aggregate=pre.sync_aggregate,
+ signature_slot=pre.signature_slot,
+ )
+```
+
+## Upgrading the store
+
+Existing `LightClientStore` objects based on Deneb MUST be upgraded to Electra before Electra-based light client data can be processed. The `LightClientStore` upgrade MAY be performed before `ELECTRA_FORK_EPOCH`.
+
+```python
+def upgrade_lc_store_to_electra(pre: deneb.LightClientStore) -> LightClientStore:
+ if pre.best_valid_update is None:
+ best_valid_update = None
+ else:
+ best_valid_update = upgrade_lc_update_to_electra(pre.best_valid_update)
+ return LightClientStore(
+ finalized_header=upgrade_lc_header_to_electra(pre.finalized_header),
+ current_sync_committee=pre.current_sync_committee,
+ next_sync_committee=pre.next_sync_committee,
+ best_valid_update=best_valid_update,
+ optimistic_header=upgrade_lc_header_to_electra(pre.optimistic_header),
+ previous_max_active_participants=pre.previous_max_active_participants,
+ current_max_active_participants=pre.current_max_active_participants,
+ )
+```
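+
+A minimal usage sketch (not normative), assuming a pre-fork `deneb.LightClientStore` named `store`, a `current_slot`, and the pyspec's `compute_epoch_at_slot`:
+
+[0]: # (eth2spec: skip)
+
+```python
+# Perform the upgrade once; per the note above, it MAY equally be
+# performed before `ELECTRA_FORK_EPOCH`.
+if compute_epoch_at_slot(current_slot) >= ELECTRA_FORK_EPOCH:
+    store = upgrade_lc_store_to_electra(store)
+```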
diff --git a/specs/electra/light-client/full-node.md b/specs/electra/light-client/full-node.md
new file mode 100644
index 0000000000..f08a2cc5ed
--- /dev/null
+++ b/specs/electra/light-client/full-node.md
@@ -0,0 +1,80 @@
+# Electra Light Client -- Full Node
+
+**Notice**: This document is a work-in-progress for researchers and implementers.
+
+## Table of contents
+
+
+
+
+
+- [Introduction](#introduction)
+- [Helper functions](#helper-functions)
+ - [Modified `block_to_light_client_header`](#modified-block_to_light_client_header)
+
+
+
+
+## Introduction
+
+Execution payload data is updated to account for the Electra upgrade.
+
+## Helper functions
+
+### Modified `block_to_light_client_header`
+
+```python
+def block_to_light_client_header(block: SignedBeaconBlock) -> LightClientHeader:
+ epoch = compute_epoch_at_slot(block.message.slot)
+
+ if epoch >= CAPELLA_FORK_EPOCH:
+ payload = block.message.body.execution_payload
+ execution_header = ExecutionPayloadHeader(
+ parent_hash=payload.parent_hash,
+ fee_recipient=payload.fee_recipient,
+ state_root=payload.state_root,
+ receipts_root=payload.receipts_root,
+ logs_bloom=payload.logs_bloom,
+ prev_randao=payload.prev_randao,
+ block_number=payload.block_number,
+ gas_limit=payload.gas_limit,
+ gas_used=payload.gas_used,
+ timestamp=payload.timestamp,
+ extra_data=payload.extra_data,
+ base_fee_per_gas=payload.base_fee_per_gas,
+ block_hash=payload.block_hash,
+ transactions_root=hash_tree_root(payload.transactions),
+ withdrawals_root=hash_tree_root(payload.withdrawals),
+ )
+ if epoch >= DENEB_FORK_EPOCH:
+ execution_header.blob_gas_used = payload.blob_gas_used
+ execution_header.excess_blob_gas = payload.excess_blob_gas
+
+ # [New in Electra:EIP6110:EIP7002:EIP7251]
+ if epoch >= ELECTRA_FORK_EPOCH:
+ execution_header.deposit_requests_root = hash_tree_root(payload.deposit_requests)
+ execution_header.withdrawal_requests_root = hash_tree_root(payload.withdrawal_requests)
+ execution_header.consolidation_requests_root = hash_tree_root(payload.consolidation_requests)
+
+ execution_branch = ExecutionBranch(
+ compute_merkle_proof(block.message.body, EXECUTION_PAYLOAD_GINDEX))
+ else:
+ # Note that during fork transitions, `finalized_header` may still point to earlier forks.
+ # While Bellatrix blocks also contain an `ExecutionPayload` (minus `withdrawals_root`),
+ # it was not included in the corresponding light client data. To ensure compatibility
+ # with legacy data going through `upgrade_lc_header_to_capella`, leave out execution data.
+ execution_header = ExecutionPayloadHeader()
+ execution_branch = ExecutionBranch()
+
+ return LightClientHeader(
+ beacon=BeaconBlockHeader(
+ slot=block.message.slot,
+ proposer_index=block.message.proposer_index,
+ parent_root=block.message.parent_root,
+ state_root=block.message.state_root,
+ body_root=hash_tree_root(block.message.body),
+ ),
+ execution=execution_header,
+ execution_branch=execution_branch,
+ )
+```
diff --git a/specs/electra/light-client/p2p-interface.md b/specs/electra/light-client/p2p-interface.md
new file mode 100644
index 0000000000..3cbd5dd28f
--- /dev/null
+++ b/specs/electra/light-client/p2p-interface.md
@@ -0,0 +1,111 @@
+# Electra Light Client -- Networking
+
+**Notice**: This document is a work-in-progress for researchers and implementers.
+
+## Table of contents
+
+
+
+
+
+- [Networking](#networking)
+ - [The gossip domain: gossipsub](#the-gossip-domain-gossipsub)
+ - [Topics and messages](#topics-and-messages)
+ - [Global topics](#global-topics)
+ - [`light_client_finality_update`](#light_client_finality_update)
+ - [`light_client_optimistic_update`](#light_client_optimistic_update)
+ - [The Req/Resp domain](#the-reqresp-domain)
+ - [Messages](#messages)
+ - [GetLightClientBootstrap](#getlightclientbootstrap)
+ - [LightClientUpdatesByRange](#lightclientupdatesbyrange)
+ - [GetLightClientFinalityUpdate](#getlightclientfinalityupdate)
+ - [GetLightClientOptimisticUpdate](#getlightclientoptimisticupdate)
+
+
+
+
+## Networking
+
+The [Deneb light client networking specification](../../deneb/light-client/p2p-interface.md) is extended to exchange [Electra light client data](./sync-protocol.md).
+
+### The gossip domain: gossipsub
+
+#### Topics and messages
+
+##### Global topics
+
+###### `light_client_finality_update`
+
+[0]: # (eth2spec: skip)
+
+| `fork_version` | Message SSZ type |
+|--------------------------------------------------------|-------------------------------------|
+| `GENESIS_FORK_VERSION` | n/a |
+| `ALTAIR_FORK_VERSION` through `BELLATRIX_FORK_VERSION` | `altair.LightClientFinalityUpdate` |
+| `CAPELLA_FORK_VERSION` | `capella.LightClientFinalityUpdate` |
+| `DENEB_FORK_VERSION` | `deneb.LightClientFinalityUpdate` |
+| `ELECTRA_FORK_VERSION` and later | `electra.LightClientFinalityUpdate` |
+
+###### `light_client_optimistic_update`
+
+[0]: # (eth2spec: skip)
+
+| `fork_version` | Message SSZ type |
+|--------------------------------------------------------|---------------------------------------|
+| `GENESIS_FORK_VERSION` | n/a |
+| `ALTAIR_FORK_VERSION` through `BELLATRIX_FORK_VERSION` | `altair.LightClientOptimisticUpdate` |
+| `CAPELLA_FORK_VERSION` | `capella.LightClientOptimisticUpdate` |
+| `DENEB_FORK_VERSION` | `deneb.LightClientOptimisticUpdate` |
+| `ELECTRA_FORK_VERSION` and later | `electra.LightClientOptimisticUpdate` |
+
+### The Req/Resp domain
+
+#### Messages
+
+##### GetLightClientBootstrap
+
+[0]: # (eth2spec: skip)
+
+| `fork_version` | Response SSZ type |
+|--------------------------------------------------------|------------------------------------|
+| `GENESIS_FORK_VERSION` | n/a |
+| `ALTAIR_FORK_VERSION` through `BELLATRIX_FORK_VERSION` | `altair.LightClientBootstrap` |
+| `CAPELLA_FORK_VERSION` | `capella.LightClientBootstrap` |
+| `DENEB_FORK_VERSION` | `deneb.LightClientBootstrap` |
+| `ELECTRA_FORK_VERSION` and later | `electra.LightClientBootstrap` |
+
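+As a non-normative sketch of the dispatch this table describes, a server might select the response SSZ type from the fork version at the requested block's slot; the helper name is hypothetical, and the equality checks stand in for "and later".
+
+[0]: # (eth2spec: skip)
+
+```python
+# Illustrative sketch only -- mirrors the `GetLightClientBootstrap` table above.
+def bootstrap_response_type(fork_version: Version) -> SSZType:
+    assert fork_version != config.GENESIS_FORK_VERSION  # n/a before Altair
+    if fork_version in (config.ALTAIR_FORK_VERSION, config.BELLATRIX_FORK_VERSION):
+        return altair.LightClientBootstrap
+    if fork_version == config.CAPELLA_FORK_VERSION:
+        return capella.LightClientBootstrap
+    if fork_version == config.DENEB_FORK_VERSION:
+        return deneb.LightClientBootstrap
+    return electra.LightClientBootstrap  # `ELECTRA_FORK_VERSION` and later
+```
+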
+##### LightClientUpdatesByRange
+
+[0]: # (eth2spec: skip)
+
+| `fork_version` | Response chunk SSZ type |
+|--------------------------------------------------------|----------------------------------|
+| `GENESIS_FORK_VERSION` | n/a |
+| `ALTAIR_FORK_VERSION` through `BELLATRIX_FORK_VERSION` | `altair.LightClientUpdate` |
+| `CAPELLA_FORK_VERSION` | `capella.LightClientUpdate` |
+| `DENEB_FORK_VERSION` | `deneb.LightClientUpdate` |
+| `ELECTRA_FORK_VERSION` and later | `electra.LightClientUpdate` |
+
+##### GetLightClientFinalityUpdate
+
+[0]: # (eth2spec: skip)
+
+| `fork_version` | Response SSZ type |
+|--------------------------------------------------------|-------------------------------------|
+| `GENESIS_FORK_VERSION` | n/a |
+| `ALTAIR_FORK_VERSION` through `BELLATRIX_FORK_VERSION` | `altair.LightClientFinalityUpdate` |
+| `CAPELLA_FORK_VERSION` | `capella.LightClientFinalityUpdate` |
+| `DENEB_FORK_VERSION` | `deneb.LightClientFinalityUpdate` |
+| `ELECTRA_FORK_VERSION` and later | `electra.LightClientFinalityUpdate` |
+
+##### GetLightClientOptimisticUpdate
+
+[0]: # (eth2spec: skip)
+
+| `fork_version` | Response SSZ type |
+|--------------------------------------------------------|---------------------------------------|
+| `GENESIS_FORK_VERSION` | n/a |
+| `ALTAIR_FORK_VERSION` through `BELLATRIX_FORK_VERSION` | `altair.LightClientOptimisticUpdate` |
+| `CAPELLA_FORK_VERSION` | `capella.LightClientOptimisticUpdate` |
+| `DENEB_FORK_VERSION` | `deneb.LightClientOptimisticUpdate` |
+| `ELECTRA_FORK_VERSION` and later | `electra.LightClientOptimisticUpdate` |
diff --git a/specs/electra/light-client/sync-protocol.md b/specs/electra/light-client/sync-protocol.md
new file mode 100644
index 0000000000..ef9dcd5987
--- /dev/null
+++ b/specs/electra/light-client/sync-protocol.md
@@ -0,0 +1,188 @@
+# Electra Light Client -- Sync Protocol
+
+**Notice**: This document is a work-in-progress for researchers and implementers.
+
+## Table of contents
+
+
+
+
+
+- [Introduction](#introduction)
+- [Custom types](#custom-types)
+- [Constants](#constants)
+ - [Frozen constants](#frozen-constants)
+ - [New constants](#new-constants)
+- [Helper functions](#helper-functions)
+ - [Modified `finalized_root_gindex_at_slot`](#modified-finalized_root_gindex_at_slot)
+ - [Modified `current_sync_committee_gindex_at_slot`](#modified-current_sync_committee_gindex_at_slot)
+ - [Modified `next_sync_committee_gindex_at_slot`](#modified-next_sync_committee_gindex_at_slot)
+ - [Modified `get_lc_execution_root`](#modified-get_lc_execution_root)
+ - [Modified `is_valid_light_client_header`](#modified-is_valid_light_client_header)
+
+
+
+
+## Introduction
+
+This upgrade updates light client data to include the Electra changes to the [`ExecutionPayload`](../beacon-chain.md) structure and to the generalized indices of surrounding containers. It extends the [Deneb Light Client specifications](../../deneb/light-client/sync-protocol.md). The [fork document](./fork.md) explains how to upgrade existing Deneb based deployments to Electra.
+
+Additional documents describe the impact of the upgrade on certain roles:
+- [Full node](./full-node.md)
+- [Networking](./p2p-interface.md)
+
+## Custom types
+
+| Name | SSZ equivalent | Description |
+| - | - | - |
+| `FinalityBranch` | `Vector[Bytes32, floorlog2(FINALIZED_ROOT_GINDEX_ELECTRA)]` | Merkle branch of `finalized_checkpoint.root` within `BeaconState` |
+| `CurrentSyncCommitteeBranch` | `Vector[Bytes32, floorlog2(CURRENT_SYNC_COMMITTEE_GINDEX_ELECTRA)]` | Merkle branch of `current_sync_committee` within `BeaconState` |
+| `NextSyncCommitteeBranch` | `Vector[Bytes32, floorlog2(NEXT_SYNC_COMMITTEE_GINDEX_ELECTRA)]` | Merkle branch of `next_sync_committee` within `BeaconState` |
+
+## Constants
+
+### Frozen constants
+
+Existing `GeneralizedIndex` constants are frozen at their [Altair](../../altair/light-client/sync-protocol.md#constants) values.
+
+| Name | Value |
+| - | - |
+| `FINALIZED_ROOT_GINDEX` | `get_generalized_index(altair.BeaconState, 'finalized_checkpoint', 'root')` (= 105) |
+| `CURRENT_SYNC_COMMITTEE_GINDEX` | `get_generalized_index(altair.BeaconState, 'current_sync_committee')` (= 54) |
+| `NEXT_SYNC_COMMITTEE_GINDEX` | `get_generalized_index(altair.BeaconState, 'next_sync_committee')` (= 55) |
+
+### New constants
+
+| Name | Value |
+| - | - |
+| `FINALIZED_ROOT_GINDEX_ELECTRA` | `get_generalized_index(BeaconState, 'finalized_checkpoint', 'root')` (= 169) |
+| `CURRENT_SYNC_COMMITTEE_GINDEX_ELECTRA` | `get_generalized_index(BeaconState, 'current_sync_committee')` (= 86) |
+| `NEXT_SYNC_COMMITTEE_GINDEX_ELECTRA` | `get_generalized_index(BeaconState, 'next_sync_committee')` (= 87) |
+
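+As a quick non-normative sanity check, each branch length is `floorlog2` of its gindex, so the custom types above each gain one element over their Altair counterparts; plain integers stand in for the `GeneralizedIndex` values.
+
+[0]: # (eth2spec: skip)
+
+```python
+from math import floor, log2
+
+def floorlog2(x: int) -> int:  # matches the pyspec helper for positive inputs
+    return floor(log2(x))
+
+# Altair-era depths vs. the Electra gindices defined above
+assert floorlog2(105) == 6 and floorlog2(169) == 7  # FinalityBranch: 6 -> 7
+assert floorlog2(54) == 5 and floorlog2(86) == 6  # CurrentSyncCommitteeBranch: 5 -> 6
+assert floorlog2(55) == 5 and floorlog2(87) == 6  # NextSyncCommitteeBranch: 5 -> 6
+```
+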
+## Helper functions
+
+### Modified `finalized_root_gindex_at_slot`
+
+```python
+def finalized_root_gindex_at_slot(slot: Slot) -> GeneralizedIndex:
+ epoch = compute_epoch_at_slot(slot)
+
+ # [Modified in Electra]
+ if epoch >= ELECTRA_FORK_EPOCH:
+ return FINALIZED_ROOT_GINDEX_ELECTRA
+ return FINALIZED_ROOT_GINDEX
+```
+
+### Modified `current_sync_committee_gindex_at_slot`
+
+```python
+def current_sync_committee_gindex_at_slot(slot: Slot) -> GeneralizedIndex:
+ epoch = compute_epoch_at_slot(slot)
+
+ # [Modified in Electra]
+ if epoch >= ELECTRA_FORK_EPOCH:
+ return CURRENT_SYNC_COMMITTEE_GINDEX_ELECTRA
+ return CURRENT_SYNC_COMMITTEE_GINDEX
+```
+
+### Modified `next_sync_committee_gindex_at_slot`
+
+```python
+def next_sync_committee_gindex_at_slot(slot: Slot) -> GeneralizedIndex:
+ epoch = compute_epoch_at_slot(slot)
+
+ # [Modified in Electra]
+ if epoch >= ELECTRA_FORK_EPOCH:
+ return NEXT_SYNC_COMMITTEE_GINDEX_ELECTRA
+ return NEXT_SYNC_COMMITTEE_GINDEX
+```
+
+### Modified `get_lc_execution_root`
+
+```python
+def get_lc_execution_root(header: LightClientHeader) -> Root:
+ epoch = compute_epoch_at_slot(header.beacon.slot)
+
+ # [New in Electra]
+ if epoch >= ELECTRA_FORK_EPOCH:
+ return hash_tree_root(header.execution)
+
+ # [Modified in Electra]
+ if epoch >= DENEB_FORK_EPOCH:
+ execution_header = deneb.ExecutionPayloadHeader(
+ parent_hash=header.execution.parent_hash,
+ fee_recipient=header.execution.fee_recipient,
+ state_root=header.execution.state_root,
+ receipts_root=header.execution.receipts_root,
+ logs_bloom=header.execution.logs_bloom,
+ prev_randao=header.execution.prev_randao,
+ block_number=header.execution.block_number,
+ gas_limit=header.execution.gas_limit,
+ gas_used=header.execution.gas_used,
+ timestamp=header.execution.timestamp,
+ extra_data=header.execution.extra_data,
+ base_fee_per_gas=header.execution.base_fee_per_gas,
+ block_hash=header.execution.block_hash,
+ transactions_root=header.execution.transactions_root,
+ withdrawals_root=header.execution.withdrawals_root,
+ blob_gas_used=header.execution.blob_gas_used,
+ excess_blob_gas=header.execution.excess_blob_gas,
+ )
+ return hash_tree_root(execution_header)
+
+ if epoch >= CAPELLA_FORK_EPOCH:
+ execution_header = capella.ExecutionPayloadHeader(
+ parent_hash=header.execution.parent_hash,
+ fee_recipient=header.execution.fee_recipient,
+ state_root=header.execution.state_root,
+ receipts_root=header.execution.receipts_root,
+ logs_bloom=header.execution.logs_bloom,
+ prev_randao=header.execution.prev_randao,
+ block_number=header.execution.block_number,
+ gas_limit=header.execution.gas_limit,
+ gas_used=header.execution.gas_used,
+ timestamp=header.execution.timestamp,
+ extra_data=header.execution.extra_data,
+ base_fee_per_gas=header.execution.base_fee_per_gas,
+ block_hash=header.execution.block_hash,
+ transactions_root=header.execution.transactions_root,
+ withdrawals_root=header.execution.withdrawals_root,
+ )
+ return hash_tree_root(execution_header)
+
+ return Root()
+```
+
+### Modified `is_valid_light_client_header`
+
+```python
+def is_valid_light_client_header(header: LightClientHeader) -> bool:
+ epoch = compute_epoch_at_slot(header.beacon.slot)
+
+ # [New in Electra:EIP6110:EIP7002:EIP7251]
+ if epoch < ELECTRA_FORK_EPOCH:
+ if (
+ header.execution.deposit_requests_root != Root()
+ or header.execution.withdrawal_requests_root != Root()
+ or header.execution.consolidation_requests_root != Root()
+ ):
+ return False
+
+ if epoch < DENEB_FORK_EPOCH:
+ if header.execution.blob_gas_used != uint64(0) or header.execution.excess_blob_gas != uint64(0):
+ return False
+
+ if epoch < CAPELLA_FORK_EPOCH:
+ return (
+ header.execution == ExecutionPayloadHeader()
+ and header.execution_branch == ExecutionBranch()
+ )
+
+ return is_valid_merkle_branch(
+ leaf=get_lc_execution_root(header),
+ branch=header.execution_branch,
+ depth=floorlog2(EXECUTION_PAYLOAD_GINDEX),
+ index=get_subtree_index(EXECUTION_PAYLOAD_GINDEX),
+ root=header.beacon.body_root,
+ )
+```
diff --git a/specs/electra/p2p-interface.md b/specs/electra/p2p-interface.md
index ebdcaaa831..88d14813b3 100644
--- a/specs/electra/p2p-interface.md
+++ b/specs/electra/p2p-interface.md
@@ -15,6 +15,7 @@ The specification of these changes continues in the same format as the network s
- [Topics and messages](#topics-and-messages)
- [Global topics](#global-topics)
- [`beacon_aggregate_and_proof`](#beacon_aggregate_and_proof)
+ - [Attestation subnets](#attestation-subnets)
- [`beacon_attestation_{subnet_id}`](#beacon_attestation_subnet_id)
@@ -49,6 +50,8 @@ The following validations are added:
* [REJECT] `len(committee_indices) == 1`, where `committee_indices = get_committee_indices(aggregate)`.
* [REJECT] `aggregate.data.index == 0`
+#### Attestation subnets
+
##### `beacon_attestation_{subnet_id}`
The following convenience variables are re-defined
diff --git a/ssz/merkle-proofs.md b/ssz/merkle-proofs.md
index 919ff07f59..9b9127d63a 100644
--- a/ssz/merkle-proofs.md
+++ b/ssz/merkle-proofs.md
@@ -176,8 +176,8 @@ def get_generalized_index(typ: SSZType, *path: PyUnion[int, SSZVariableName]) ->
for p in path:
assert not issubclass(typ, BasicValue) # If we descend to a basic type, the path cannot continue further
if p == '__len__':
- typ = uint64
assert issubclass(typ, (List, ByteList))
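+            # Validate the collection type before it is overwritten with the length type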
+ typ = uint64
root = GeneralizedIndex(root * 2 + 1)
else:
pos, _, _ = get_item_position(typ, p)
diff --git a/tests/README.md b/tests/README.md
index dbd2b31de2..8c281155c5 100644
--- a/tests/README.md
+++ b/tests/README.md
@@ -121,7 +121,7 @@ of return values. Here we add two values, the string `'pre'` and the initial sta
```
The state contains the last block, which is necessary for building up the next block (every block needs to
-have the hash of the previous one in a blockchain).
+have the root of the previous one in a blockchain).
```python
signed_block = state_transition_and_sign_block(spec, state, block)
@@ -291,8 +291,8 @@ not execute EVM programs or store user data. It exists to provide a secure sourc
information about the latest verified block hash of the execution layer.
For every slot a validator is randomly selected as the proposer. The proposer proposes a block
-for the current head of the consensus layer chain (built on the previous block). That block
-includes the hash of the proposed new head of the execution layer.
+for the current head of the consensus layer chain (built on the previous block). That block
+includes the block hash of the proposed new head of the execution layer.
For every slot there is also a randomly selected committee of validators that needs to vote whether
the new consensus layer block is valid, which requires the proposed head of the execution chain to
diff --git a/tests/core/pyspec/eth2spec/VERSION.txt b/tests/core/pyspec/eth2spec/VERSION.txt
index 86f9d092d6..2ac49a507e 100644
--- a/tests/core/pyspec/eth2spec/VERSION.txt
+++ b/tests/core/pyspec/eth2spec/VERSION.txt
@@ -1 +1 @@
-1.5.0-alpha.3
+1.5.0-alpha.4
diff --git a/tests/core/pyspec/eth2spec/eip7732/__init__.py b/tests/core/pyspec/eth2spec/eip7732/__init__.py
new file mode 100644
index 0000000000..91b4a68537
--- /dev/null
+++ b/tests/core/pyspec/eth2spec/eip7732/__init__.py
@@ -0,0 +1 @@
+from . import mainnet as spec # noqa:F401
diff --git a/tests/core/pyspec/eth2spec/test/altair/light_client/test_single_merkle_proof.py b/tests/core/pyspec/eth2spec/test/altair/light_client/test_single_merkle_proof.py
index 6418caafd9..dd2f4a7164 100644
--- a/tests/core/pyspec/eth2spec/test/altair/light_client/test_single_merkle_proof.py
+++ b/tests/core/pyspec/eth2spec/test/altair/light_client/test_single_merkle_proof.py
@@ -3,6 +3,11 @@
with_light_client,
with_test_suite_name,
)
+from eth2spec.test.helpers.light_client import (
+ latest_current_sync_committee_gindex,
+ latest_finalized_root_gindex,
+ latest_next_sync_committee_gindex,
+)
@with_test_suite_name("BeaconState")
@@ -10,17 +15,18 @@
@spec_state_test
def test_current_sync_committee_merkle_proof(spec, state):
yield "object", state
- current_sync_committee_branch = spec.compute_merkle_proof(state, spec.CURRENT_SYNC_COMMITTEE_GINDEX)
+ gindex = latest_current_sync_committee_gindex(spec)
+ branch = spec.compute_merkle_proof(state, gindex)
yield "proof", {
"leaf": "0x" + state.current_sync_committee.hash_tree_root().hex(),
- "leaf_index": spec.CURRENT_SYNC_COMMITTEE_GINDEX,
- "branch": ['0x' + root.hex() for root in current_sync_committee_branch]
+ "leaf_index": gindex,
+ "branch": ['0x' + root.hex() for root in branch]
}
assert spec.is_valid_merkle_branch(
leaf=state.current_sync_committee.hash_tree_root(),
- branch=current_sync_committee_branch,
- depth=spec.floorlog2(spec.CURRENT_SYNC_COMMITTEE_GINDEX),
- index=spec.get_subtree_index(spec.CURRENT_SYNC_COMMITTEE_GINDEX),
+ branch=branch,
+ depth=spec.floorlog2(gindex),
+ index=spec.get_subtree_index(gindex),
root=state.hash_tree_root(),
)
@@ -30,17 +36,18 @@ def test_current_sync_committee_merkle_proof(spec, state):
@spec_state_test
def test_next_sync_committee_merkle_proof(spec, state):
yield "object", state
- next_sync_committee_branch = spec.compute_merkle_proof(state, spec.NEXT_SYNC_COMMITTEE_GINDEX)
+ gindex = latest_next_sync_committee_gindex(spec)
+ branch = spec.compute_merkle_proof(state, gindex)
yield "proof", {
"leaf": "0x" + state.next_sync_committee.hash_tree_root().hex(),
- "leaf_index": spec.NEXT_SYNC_COMMITTEE_GINDEX,
- "branch": ['0x' + root.hex() for root in next_sync_committee_branch]
+ "leaf_index": gindex,
+ "branch": ['0x' + root.hex() for root in branch]
}
assert spec.is_valid_merkle_branch(
leaf=state.next_sync_committee.hash_tree_root(),
- branch=next_sync_committee_branch,
- depth=spec.floorlog2(spec.NEXT_SYNC_COMMITTEE_GINDEX),
- index=spec.get_subtree_index(spec.NEXT_SYNC_COMMITTEE_GINDEX),
+ branch=branch,
+ depth=spec.floorlog2(gindex),
+ index=spec.get_subtree_index(gindex),
root=state.hash_tree_root(),
)
@@ -50,17 +57,18 @@ def test_next_sync_committee_merkle_proof(spec, state):
@spec_state_test
def test_finality_root_merkle_proof(spec, state):
yield "object", state
- finality_branch = spec.compute_merkle_proof(state, spec.FINALIZED_ROOT_GINDEX)
+ gindex = latest_finalized_root_gindex(spec)
+ branch = spec.compute_merkle_proof(state, gindex)
yield "proof", {
"leaf": "0x" + state.finalized_checkpoint.root.hex(),
- "leaf_index": spec.FINALIZED_ROOT_GINDEX,
- "branch": ['0x' + root.hex() for root in finality_branch]
+ "leaf_index": gindex,
+ "branch": ['0x' + root.hex() for root in branch]
}
assert spec.is_valid_merkle_branch(
leaf=state.finalized_checkpoint.root,
- branch=finality_branch,
- depth=spec.floorlog2(spec.FINALIZED_ROOT_GINDEX),
- index=spec.get_subtree_index(spec.FINALIZED_ROOT_GINDEX),
+ branch=branch,
+ depth=spec.floorlog2(gindex),
+ index=spec.get_subtree_index(gindex),
root=state.hash_tree_root(),
)
diff --git a/tests/core/pyspec/eth2spec/test/altair/light_client/test_sync.py b/tests/core/pyspec/eth2spec/test/altair/light_client/test_sync.py
index b1bb13ee93..45c7d77887 100644
--- a/tests/core/pyspec/eth2spec/test/altair/light_client/test_sync.py
+++ b/tests/core/pyspec/eth2spec/test/altair/light_client/test_sync.py
@@ -16,15 +16,16 @@
state_transition_with_full_block,
)
from eth2spec.test.helpers.constants import (
- ALTAIR, BELLATRIX, CAPELLA, DENEB,
+ ALTAIR, BELLATRIX, CAPELLA, DENEB, ELECTRA,
MINIMAL,
)
from eth2spec.test.helpers.fork_transition import (
do_fork,
+ transition_across_forks,
)
from eth2spec.test.helpers.forks import (
get_spec_for_fork_version,
- is_post_capella, is_post_deneb,
+ is_post_capella, is_post_deneb, is_post_electra,
)
from eth2spec.test.helpers.light_client import (
compute_start_slot_at_next_sync_committee_period,
@@ -47,6 +48,8 @@ class LightClientSyncTest(object):
def get_store_fork_version(s_spec):
+ if is_post_electra(s_spec):
+ return s_spec.config.ELECTRA_FORK_VERSION
if is_post_deneb(s_spec):
return s_spec.config.DENEB_FORK_VERSION
if is_post_capella(s_spec):
@@ -60,6 +63,11 @@ def setup_test(spec, state, s_spec=None, phases=None):
if s_spec is None:
s_spec = spec
+ if phases is None:
+ phases = {
+ spec.fork: spec,
+ s_spec.fork: s_spec,
+ }
test.s_spec = s_spec
yield "genesis_validators_root", "meta", "0x" + state.genesis_validators_root.hex()
@@ -77,7 +85,7 @@ def setup_test(spec, state, s_spec=None, phases=None):
yield "bootstrap_fork_digest", "meta", encode_hex(data_fork_digest)
yield "bootstrap", data
- upgraded = upgrade_lc_bootstrap_to_new_spec(d_spec, test.s_spec, data)
+ upgraded = upgrade_lc_bootstrap_to_new_spec(d_spec, test.s_spec, data, phases)
test.store = test.s_spec.initialize_light_client_store(trusted_block_root, upgraded)
store_fork_version = get_store_fork_version(test.s_spec)
store_fork_digest = test.s_spec.compute_fork_digest(store_fork_version, test.genesis_validators_root)
@@ -149,11 +157,10 @@ def emit_update(test, spec, state, block, attested_state, attested_block, finali
data = d_spec.create_light_client_update(state, block, attested_state, attested_block, finalized_block)
if not with_next:
data.next_sync_committee = spec.SyncCommittee()
- data.next_sync_committee_branch = \
- [spec.Bytes32() for _ in range(spec.floorlog2(spec.NEXT_SYNC_COMMITTEE_GINDEX))]
+ data.next_sync_committee_branch = spec.NextSyncCommitteeBranch()
current_slot = state.slot
- upgraded = upgrade_lc_update_to_new_spec(d_spec, test.s_spec, data)
+ upgraded = upgrade_lc_update_to_new_spec(d_spec, test.s_spec, data, phases)
test.s_spec.process_light_client_update(test.store, upgraded, current_slot, test.genesis_validators_root)
yield get_update_file_name(d_spec, data), data
@@ -169,7 +176,7 @@ def emit_update(test, spec, state, block, attested_state, attested_block, finali
def emit_upgrade_store(test, new_s_spec, phases=None):
- test.store = upgrade_lc_store_to_new_spec(test.s_spec, new_s_spec, test.store)
+ test.store = upgrade_lc_store_to_new_spec(test.s_spec, new_s_spec, test.store, phases)
test.s_spec = new_s_spec
store_fork_version = get_store_fork_version(test.s_spec)
store_fork_digest = test.s_spec.compute_fork_digest(store_fork_version, test.genesis_validators_root)
@@ -561,7 +568,7 @@ def run_test_single_fork(spec, phases, state, fork):
# Upgrade to post-fork spec, attested block is still before the fork
attested_block = block.copy()
attested_state = state.copy()
- sync_aggregate, _ = get_sync_aggregate(phases[fork], state, phases=phases)
+ sync_aggregate, _ = get_sync_aggregate(spec, state, phases=phases)
state, block = do_fork(state, spec, phases[fork], fork_epoch, sync_aggregate=sync_aggregate)
spec = phases[fork]
yield from emit_update(test, spec, state, block, attested_state, attested_block, finalized_block, phases=phases)
@@ -635,6 +642,18 @@ def test_deneb_fork(spec, phases, state):
yield from run_test_single_fork(spec, phases, state, DENEB)
+@with_phases(phases=[DENEB], other_phases=[ELECTRA])
+@spec_test
+@with_config_overrides({
+ 'ELECTRA_FORK_EPOCH': 3, # `setup_test` advances to epoch 2
+}, emit=False)
+@with_state
+@with_matching_spec_config(emitted_fork=ELECTRA)
+@with_presets([MINIMAL], reason="too slow")
+def test_electra_fork(spec, phases, state):
+ yield from run_test_single_fork(spec, phases, state, ELECTRA)
+
+
def run_test_multi_fork(spec, phases, state, fork_1, fork_2):
# Start test
test = yield from setup_test(spec, state, phases[fork_2], phases)
@@ -646,17 +665,28 @@ def run_test_multi_fork(spec, phases, state, fork_1, fork_2):
# ..., attested is from `fork_1`, ...
fork_1_epoch = getattr(phases[fork_1].config, fork_1.upper() + '_FORK_EPOCH')
- transition_to(spec, state, spec.compute_start_slot_at_epoch(fork_1_epoch) - 1)
- state, attested_block = do_fork(state, spec, phases[fork_1], fork_1_epoch)
- spec = phases[fork_1]
+ spec, state, attested_block = transition_across_forks(
+ spec,
+ state,
+ spec.compute_start_slot_at_epoch(fork_1_epoch),
+ phases,
+ with_block=True,
+ )
attested_state = state.copy()
# ..., and signature is from `fork_2`
fork_2_epoch = getattr(phases[fork_2].config, fork_2.upper() + '_FORK_EPOCH')
- transition_to(spec, state, spec.compute_start_slot_at_epoch(fork_2_epoch) - 1)
- sync_aggregate, _ = get_sync_aggregate(phases[fork_2], state)
- state, block = do_fork(state, spec, phases[fork_2], fork_2_epoch, sync_aggregate=sync_aggregate)
- spec = phases[fork_2]
+ spec, state, _ = transition_across_forks(
+ spec, state, spec.compute_start_slot_at_epoch(fork_2_epoch) - 1, phases)
+ sync_aggregate, _ = get_sync_aggregate(spec, state, phases=phases)
+ spec, state, block = transition_across_forks(
+ spec,
+ state,
+ spec.compute_start_slot_at_epoch(fork_2_epoch),
+ phases,
+ with_block=True,
+ sync_aggregate=sync_aggregate,
+ )
# Check that update applies
yield from emit_update(test, spec, state, block, attested_state, attested_block, finalized_block, phases=phases)
@@ -682,6 +712,33 @@ def test_capella_deneb_fork(spec, phases, state):
yield from run_test_multi_fork(spec, phases, state, CAPELLA, DENEB)
+@with_phases(phases=[BELLATRIX], other_phases=[CAPELLA, DENEB, ELECTRA])
+@spec_test
+@with_config_overrides({
+ 'CAPELLA_FORK_EPOCH': 3, # `setup_test` advances to epoch 2
+ 'DENEB_FORK_EPOCH': 4,
+ 'ELECTRA_FORK_EPOCH': 5,
+}, emit=False)
+@with_state
+@with_matching_spec_config(emitted_fork=ELECTRA)
+@with_presets([MINIMAL], reason="too slow")
+def test_capella_electra_fork(spec, phases, state):
+ yield from run_test_multi_fork(spec, phases, state, CAPELLA, ELECTRA)
+
+
+@with_phases(phases=[CAPELLA], other_phases=[DENEB, ELECTRA])
+@spec_test
+@with_config_overrides({
+ 'DENEB_FORK_EPOCH': 3, # `setup_test` advances to epoch 2
+ 'ELECTRA_FORK_EPOCH': 4,
+}, emit=False)
+@with_state
+@with_matching_spec_config(emitted_fork=ELECTRA)
+@with_presets([MINIMAL], reason="too slow")
+def test_deneb_electra_fork(spec, phases, state):
+ yield from run_test_multi_fork(spec, phases, state, DENEB, ELECTRA)
+
+
def run_test_upgraded_store_with_legacy_data(spec, phases, state, fork):
# Start test (Legacy bootstrap with an upgraded store)
test = yield from setup_test(spec, state, phases[fork], phases)
@@ -713,10 +770,19 @@ def test_capella_store_with_legacy_data(spec, phases, state):
yield from run_test_upgraded_store_with_legacy_data(spec, phases, state, CAPELLA)
-@with_phases(phases=[ALTAIR, BELLATRIX, CAPELLA], other_phases=[DENEB])
+@with_phases(phases=[ALTAIR, BELLATRIX, CAPELLA], other_phases=[CAPELLA, DENEB])
@spec_test
@with_state
@with_matching_spec_config(emitted_fork=DENEB)
@with_presets([MINIMAL], reason="too slow")
def test_deneb_store_with_legacy_data(spec, phases, state):
yield from run_test_upgraded_store_with_legacy_data(spec, phases, state, DENEB)
+
+
+@with_phases(phases=[ALTAIR, BELLATRIX, CAPELLA, DENEB], other_phases=[CAPELLA, DENEB, ELECTRA])
+@spec_test
+@with_state
+@with_matching_spec_config(emitted_fork=ELECTRA)
+@with_presets([MINIMAL], reason="too slow")
+def test_electra_store_with_legacy_data(spec, phases, state):
+ yield from run_test_upgraded_store_with_legacy_data(spec, phases, state, ELECTRA)
diff --git a/tests/core/pyspec/eth2spec/test/bellatrix/block_processing/test_process_execution_payload.py b/tests/core/pyspec/eth2spec/test/bellatrix/block_processing/test_process_execution_payload.py
index a55a6e12a4..325431e7a9 100644
--- a/tests/core/pyspec/eth2spec/test/bellatrix/block_processing/test_process_execution_payload.py
+++ b/tests/core/pyspec/eth2spec/test/bellatrix/block_processing/test_process_execution_payload.py
@@ -133,7 +133,7 @@ def test_bad_parent_hash_first_payload(spec, state):
execution_payload = build_empty_execution_payload(spec, state)
execution_payload.parent_hash = b'\x55' * 32
- execution_payload.block_hash = compute_el_block_hash(spec, execution_payload)
+ execution_payload.block_hash = compute_el_block_hash(spec, execution_payload, state)
yield from run_execution_payload_processing(spec, state, execution_payload)
@@ -146,7 +146,7 @@ def test_invalid_bad_parent_hash_regular_payload(spec, state):
execution_payload = build_empty_execution_payload(spec, state)
execution_payload.parent_hash = spec.Hash32()
- execution_payload.block_hash = compute_el_block_hash(spec, execution_payload)
+ execution_payload.block_hash = compute_el_block_hash(spec, execution_payload, state)
yield from run_execution_payload_processing(spec, state, execution_payload, valid=False)
@@ -156,7 +156,7 @@ def run_bad_prev_randao_test(spec, state):
execution_payload = build_empty_execution_payload(spec, state)
execution_payload.prev_randao = b'\x42' * 32
- execution_payload.block_hash = compute_el_block_hash(spec, execution_payload)
+ execution_payload.block_hash = compute_el_block_hash(spec, execution_payload, state)
yield from run_execution_payload_processing(spec, state, execution_payload, valid=False)
@@ -182,7 +182,7 @@ def run_bad_everything_test(spec, state):
execution_payload.parent_hash = spec.Hash32()
execution_payload.prev_randao = spec.Bytes32()
execution_payload.timestamp = 0
- execution_payload.block_hash = compute_el_block_hash(spec, execution_payload)
+ execution_payload.block_hash = compute_el_block_hash(spec, execution_payload, state)
yield from run_execution_payload_processing(spec, state, execution_payload, valid=False)
@@ -211,7 +211,7 @@ def run_bad_timestamp_test(spec, state, is_future):
else:
timestamp = execution_payload.timestamp - 1
execution_payload.timestamp = timestamp
- execution_payload.block_hash = compute_el_block_hash(spec, execution_payload)
+ execution_payload.block_hash = compute_el_block_hash(spec, execution_payload, state)
yield from run_execution_payload_processing(spec, state, execution_payload, valid=False)
@@ -249,7 +249,7 @@ def run_non_empty_extra_data_test(spec, state):
execution_payload = build_empty_execution_payload(spec, state)
execution_payload.extra_data = b'\x45' * 12
- execution_payload.block_hash = compute_el_block_hash(spec, execution_payload)
+ execution_payload.block_hash = compute_el_block_hash(spec, execution_payload, state)
yield from run_execution_payload_processing(spec, state, execution_payload)
assert state.latest_execution_payload_header.extra_data == execution_payload.extra_data
@@ -278,7 +278,7 @@ def run_non_empty_transactions_test(spec, state):
spec.Transaction(b'\x99' * 128)
for _ in range(num_transactions)
]
- execution_payload.block_hash = compute_el_block_hash(spec, execution_payload)
+ execution_payload.block_hash = compute_el_block_hash(spec, execution_payload, state)
yield from run_execution_payload_processing(spec, state, execution_payload)
assert state.latest_execution_payload_header.transactions_root == execution_payload.transactions.hash_tree_root()
@@ -304,7 +304,7 @@ def run_zero_length_transaction_test(spec, state):
execution_payload = build_empty_execution_payload(spec, state)
execution_payload.transactions = [spec.Transaction(b'')]
assert len(execution_payload.transactions[0]) == 0
- execution_payload.block_hash = compute_el_block_hash(spec, execution_payload)
+ execution_payload.block_hash = compute_el_block_hash(spec, execution_payload, state)
yield from run_execution_payload_processing(spec, state, execution_payload)
assert state.latest_execution_payload_header.transactions_root == execution_payload.transactions.hash_tree_root()
diff --git a/tests/core/pyspec/eth2spec/test/bellatrix/fork_choice/test_on_merge_block.py b/tests/core/pyspec/eth2spec/test/bellatrix/fork_choice/test_on_merge_block.py
index 86071c225b..1520d85c5b 100644
--- a/tests/core/pyspec/eth2spec/test/bellatrix/fork_choice/test_on_merge_block.py
+++ b/tests/core/pyspec/eth2spec/test/bellatrix/fork_choice/test_on_merge_block.py
@@ -75,7 +75,7 @@ def test_all_valid(spec, state):
def run_func():
block = build_empty_block_for_next_slot(spec, state)
block.body.execution_payload.parent_hash = pow_block.block_hash
- block.body.execution_payload.block_hash = compute_el_block_hash(spec, block.body.execution_payload)
+ block.body.execution_payload.block_hash = compute_el_block_hash(spec, block.body.execution_payload, state)
signed_block = state_transition_and_sign_block(spec, state, block)
yield from tick_and_add_block(spec, store, signed_block, test_steps, merge_block=True)
# valid
@@ -107,7 +107,7 @@ def test_block_lookup_failed(spec, state):
def run_func():
block = build_empty_block_for_next_slot(spec, state)
block.body.execution_payload.parent_hash = pow_block.block_hash
- block.body.execution_payload.block_hash = compute_el_block_hash(spec, block.body.execution_payload)
+ block.body.execution_payload.block_hash = compute_el_block_hash(spec, block.body.execution_payload, state)
signed_block = state_transition_and_sign_block(spec, state, block)
yield from tick_and_add_block(spec, store, signed_block, test_steps, valid=False, merge_block=True,
block_not_found=True)
@@ -141,7 +141,7 @@ def test_too_early_for_merge(spec, state):
def run_func():
block = build_empty_block_for_next_slot(spec, state)
block.body.execution_payload.parent_hash = pow_block.block_hash
- block.body.execution_payload.block_hash = compute_el_block_hash(spec, block.body.execution_payload)
+ block.body.execution_payload.block_hash = compute_el_block_hash(spec, block.body.execution_payload, state)
signed_block = state_transition_and_sign_block(spec, state, block)
yield from tick_and_add_block(spec, store, signed_block, test_steps, valid=False, merge_block=True)
@@ -174,7 +174,7 @@ def test_too_late_for_merge(spec, state):
def run_func():
block = build_empty_block_for_next_slot(spec, state)
block.body.execution_payload.parent_hash = pow_block.block_hash
- block.body.execution_payload.block_hash = compute_el_block_hash(spec, block.body.execution_payload)
+ block.body.execution_payload.block_hash = compute_el_block_hash(spec, block.body.execution_payload, state)
signed_block = state_transition_and_sign_block(spec, state, block)
yield from tick_and_add_block(spec, store, signed_block, test_steps, valid=False, merge_block=True)
diff --git a/tests/core/pyspec/eth2spec/test/bellatrix/sync/test_optimistic.py b/tests/core/pyspec/eth2spec/test/bellatrix/sync/test_optimistic.py
index eb56e368a8..2d1aaced91 100644
--- a/tests/core/pyspec/eth2spec/test/bellatrix/sync/test_optimistic.py
+++ b/tests/core/pyspec/eth2spec/test/bellatrix/sync/test_optimistic.py
@@ -65,7 +65,7 @@ def test_from_syncing_to_invalid(spec, state):
block_hashes[f'chain_a_{i - 1}'] if i != 0 else block_hashes['block_0']
)
block.body.execution_payload.extra_data = spec.hash(bytes(f'chain_a_{i}', 'UTF-8'))
- block.body.execution_payload.block_hash = compute_el_block_hash(spec, block.body.execution_payload)
+ block.body.execution_payload.block_hash = compute_el_block_hash(spec, block.body.execution_payload, state)
block_hashes[f'chain_a_{i}'] = block.body.execution_payload.block_hash
signed_block = state_transition_and_sign_block(spec, state, block)
@@ -82,7 +82,7 @@ def test_from_syncing_to_invalid(spec, state):
block_hashes[f'chain_b_{i - 1}'] if i != 0 else block_hashes['block_0']
)
block.body.execution_payload.extra_data = spec.hash(bytes(f'chain_b_{i}', 'UTF-8'))
- block.body.execution_payload.block_hash = compute_el_block_hash(spec, block.body.execution_payload)
+ block.body.execution_payload.block_hash = compute_el_block_hash(spec, block.body.execution_payload, state)
block_hashes[f'chain_b_{i}'] = block.body.execution_payload.block_hash
signed_block = state_transition_with_full_block(spec, state, True, True, block=block)
@@ -95,7 +95,7 @@ def test_from_syncing_to_invalid(spec, state):
block = build_empty_block_for_next_slot(spec, state)
block.body.execution_payload.parent_hash = signed_blocks_b[-1].message.body.execution_payload.block_hash
block.body.execution_payload.extra_data = spec.hash(bytes(f'chain_b_{i}', 'UTF-8'))
- block.body.execution_payload.block_hash = compute_el_block_hash(spec, block.body.execution_payload)
+ block.body.execution_payload.block_hash = compute_el_block_hash(spec, block.body.execution_payload, state)
block_hashes['chain_b_3'] = block.body.execution_payload.block_hash
# Ensure that no duplicate block hashes
diff --git a/tests/core/pyspec/eth2spec/test/bellatrix/unittests/test_validate_merge_block.py b/tests/core/pyspec/eth2spec/test/bellatrix/unittests/test_validate_merge_block.py
index df9792a43d..d519dfac0f 100644
--- a/tests/core/pyspec/eth2spec/test/bellatrix/unittests/test_validate_merge_block.py
+++ b/tests/core/pyspec/eth2spec/test/bellatrix/unittests/test_validate_merge_block.py
@@ -60,7 +60,7 @@ def test_validate_merge_block_success(spec, state):
pow_chain.head().total_difficulty = spec.config.TERMINAL_TOTAL_DIFFICULTY
block = build_empty_block_for_next_slot(spec, state)
block.body.execution_payload.parent_hash = pow_chain.head().block_hash
- block.body.execution_payload.block_hash = compute_el_block_hash(spec, block.body.execution_payload)
+ block.body.execution_payload.block_hash = compute_el_block_hash(spec, block.body.execution_payload, state)
run_validate_merge_block(spec, pow_chain, block)
@@ -81,7 +81,7 @@ def test_validate_merge_block_fail_parent_block_lookup(spec, state):
pow_chain.head().total_difficulty = spec.config.TERMINAL_TOTAL_DIFFICULTY
block = build_empty_block_for_next_slot(spec, state)
block.body.execution_payload.parent_hash = pow_chain.head().block_hash
- block.body.execution_payload.block_hash = compute_el_block_hash(spec, block.body.execution_payload)
+ block.body.execution_payload.block_hash = compute_el_block_hash(spec, block.body.execution_payload, state)
run_validate_merge_block(spec, pow_chain, block, valid=False)
@@ -93,7 +93,7 @@ def test_validate_merge_block_fail_after_terminal(spec, state):
pow_chain.head().total_difficulty = spec.config.TERMINAL_TOTAL_DIFFICULTY + uint256(1)
block = build_empty_block_for_next_slot(spec, state)
block.body.execution_payload.parent_hash = pow_chain.head().block_hash
- block.body.execution_payload.block_hash = compute_el_block_hash(spec, block.body.execution_payload)
+ block.body.execution_payload.block_hash = compute_el_block_hash(spec, block.body.execution_payload, state)
run_validate_merge_block(spec, pow_chain, block, valid=False)
@@ -110,7 +110,7 @@ def test_validate_merge_block_tbh_override_success(spec, state):
pow_chain.head().block_hash = TERMINAL_BLOCK_HASH
block = build_empty_block_for_next_slot(spec, state)
block.body.execution_payload.parent_hash = pow_chain.head().block_hash
- block.body.execution_payload.block_hash = compute_el_block_hash(spec, block.body.execution_payload)
+ block.body.execution_payload.block_hash = compute_el_block_hash(spec, block.body.execution_payload, state)
run_validate_merge_block(spec, pow_chain, block)
@@ -126,7 +126,7 @@ def test_validate_merge_block_fail_parent_hash_is_not_tbh(spec, state):
pow_chain.head().total_difficulty = spec.config.TERMINAL_TOTAL_DIFFICULTY
block = build_empty_block_for_next_slot(spec, state)
block.body.execution_payload.parent_hash = pow_chain.head().block_hash
- block.body.execution_payload.block_hash = compute_el_block_hash(spec, block.body.execution_payload)
+ block.body.execution_payload.block_hash = compute_el_block_hash(spec, block.body.execution_payload, state)
run_validate_merge_block(spec, pow_chain, block, valid=False)
@@ -143,7 +143,7 @@ def test_validate_merge_block_terminal_block_hash_fail_activation_not_reached(sp
pow_chain.head().block_hash = TERMINAL_BLOCK_HASH
block = build_empty_block_for_next_slot(spec, state)
block.body.execution_payload.parent_hash = pow_chain.head().block_hash
- block.body.execution_payload.block_hash = compute_el_block_hash(spec, block.body.execution_payload)
+ block.body.execution_payload.block_hash = compute_el_block_hash(spec, block.body.execution_payload, state)
run_validate_merge_block(spec, pow_chain, block, valid=False)
@@ -159,5 +159,5 @@ def test_validate_merge_block_fail_activation_not_reached_parent_hash_is_not_tbh
pow_chain.head().total_difficulty = spec.config.TERMINAL_TOTAL_DIFFICULTY
block = build_empty_block_for_next_slot(spec, state)
block.body.execution_payload.parent_hash = pow_chain.head().block_hash
- block.body.execution_payload.block_hash = compute_el_block_hash(spec, block.body.execution_payload)
+ block.body.execution_payload.block_hash = compute_el_block_hash(spec, block.body.execution_payload, state)
run_validate_merge_block(spec, pow_chain, block, valid=False)
diff --git a/tests/core/pyspec/eth2spec/test/capella/block_processing/test_process_execution_payload.py b/tests/core/pyspec/eth2spec/test/capella/block_processing/test_process_execution_payload.py
index 1a2c1771a2..62e171b1f2 100644
--- a/tests/core/pyspec/eth2spec/test/capella/block_processing/test_process_execution_payload.py
+++ b/tests/core/pyspec/eth2spec/test/capella/block_processing/test_process_execution_payload.py
@@ -19,6 +19,6 @@ def test_invalid_bad_parent_hash_first_payload(spec, state):
execution_payload = build_empty_execution_payload(spec, state)
execution_payload.parent_hash = b'\x55' * 32
- execution_payload.block_hash = compute_el_block_hash(spec, execution_payload)
+ execution_payload.block_hash = compute_el_block_hash(spec, execution_payload, state)
yield from run_execution_payload_processing(spec, state, execution_payload, valid=False)
diff --git a/tests/core/pyspec/eth2spec/test/capella/block_processing/test_process_withdrawals.py b/tests/core/pyspec/eth2spec/test/capella/block_processing/test_process_withdrawals.py
index 1fe05c18df..891ad35ed6 100644
--- a/tests/core/pyspec/eth2spec/test/capella/block_processing/test_process_withdrawals.py
+++ b/tests/core/pyspec/eth2spec/test/capella/block_processing/test_process_withdrawals.py
@@ -259,7 +259,7 @@ def test_invalid_non_withdrawable_non_empty_withdrawals(spec, state):
amount=420,
)
execution_payload.withdrawals.append(withdrawal)
- execution_payload.block_hash = compute_el_block_hash(spec, execution_payload)
+ execution_payload.block_hash = compute_el_block_hash(spec, execution_payload, state)
yield from run_withdrawals_processing(spec, state, execution_payload, valid=False)
@@ -272,7 +272,7 @@ def test_invalid_one_expected_full_withdrawal_and_none_in_withdrawals(spec, stat
next_slot(spec, state)
execution_payload = build_empty_execution_payload(spec, state)
execution_payload.withdrawals = []
- execution_payload.block_hash = compute_el_block_hash(spec, execution_payload)
+ execution_payload.block_hash = compute_el_block_hash(spec, execution_payload, state)
yield from run_withdrawals_processing(spec, state, execution_payload, valid=False)
@@ -285,7 +285,7 @@ def test_invalid_one_expected_partial_withdrawal_and_none_in_withdrawals(spec, s
next_slot(spec, state)
execution_payload = build_empty_execution_payload(spec, state)
execution_payload.withdrawals = []
- execution_payload.block_hash = compute_el_block_hash(spec, execution_payload)
+ execution_payload.block_hash = compute_el_block_hash(spec, execution_payload, state)
yield from run_withdrawals_processing(spec, state, execution_payload, valid=False)
@@ -298,7 +298,7 @@ def test_invalid_one_expected_full_withdrawal_and_duplicate_in_withdrawals(spec,
next_slot(spec, state)
execution_payload = build_empty_execution_payload(spec, state)
execution_payload.withdrawals.append(execution_payload.withdrawals[0].copy())
- execution_payload.block_hash = compute_el_block_hash(spec, execution_payload)
+ execution_payload.block_hash = compute_el_block_hash(spec, execution_payload, state)
yield from run_withdrawals_processing(spec, state, execution_payload, valid=False)
@@ -311,7 +311,7 @@ def test_invalid_two_expected_partial_withdrawal_and_duplicate_in_withdrawals(sp
next_slot(spec, state)
execution_payload = build_empty_execution_payload(spec, state)
execution_payload.withdrawals.append(execution_payload.withdrawals[0].copy())
- execution_payload.block_hash = compute_el_block_hash(spec, execution_payload)
+ execution_payload.block_hash = compute_el_block_hash(spec, execution_payload, state)
yield from run_withdrawals_processing(spec, state, execution_payload, valid=False)
@@ -325,7 +325,7 @@ def test_invalid_max_per_slot_full_withdrawals_and_one_less_in_withdrawals(spec,
next_slot(spec, state)
execution_payload = build_empty_execution_payload(spec, state)
execution_payload.withdrawals = execution_payload.withdrawals[:-1]
- execution_payload.block_hash = compute_el_block_hash(spec, execution_payload)
+ execution_payload.block_hash = compute_el_block_hash(spec, execution_payload, state)
yield from run_withdrawals_processing(spec, state, execution_payload, valid=False)
@@ -339,7 +339,7 @@ def test_invalid_max_per_slot_partial_withdrawals_and_one_less_in_withdrawals(sp
next_slot(spec, state)
execution_payload = build_empty_execution_payload(spec, state)
execution_payload.withdrawals = execution_payload.withdrawals[:-1]
- execution_payload.block_hash = compute_el_block_hash(spec, execution_payload)
+ execution_payload.block_hash = compute_el_block_hash(spec, execution_payload, state)
yield from run_withdrawals_processing(spec, state, execution_payload, valid=False)
@@ -353,7 +353,7 @@ def test_invalid_a_lot_fully_withdrawable_too_few_in_withdrawals(spec, state):
next_slot(spec, state)
execution_payload = build_empty_execution_payload(spec, state)
execution_payload.withdrawals = execution_payload.withdrawals[:-1]
- execution_payload.block_hash = compute_el_block_hash(spec, execution_payload)
+ execution_payload.block_hash = compute_el_block_hash(spec, execution_payload, state)
yield from run_withdrawals_processing(spec, state, execution_payload, valid=False)
@@ -367,7 +367,7 @@ def test_invalid_a_lot_partially_withdrawable_too_few_in_withdrawals(spec, state
next_slot(spec, state)
execution_payload = build_empty_execution_payload(spec, state)
execution_payload.withdrawals = execution_payload.withdrawals[:-1]
- execution_payload.block_hash = compute_el_block_hash(spec, execution_payload)
+ execution_payload.block_hash = compute_el_block_hash(spec, execution_payload, state)
yield from run_withdrawals_processing(spec, state, execution_payload, valid=False)
@@ -382,7 +382,7 @@ def test_invalid_a_lot_mixed_withdrawable_in_queue_too_few_in_withdrawals(spec,
next_slot(spec, state)
execution_payload = build_empty_execution_payload(spec, state)
execution_payload.withdrawals = execution_payload.withdrawals[:-1]
- execution_payload.block_hash = compute_el_block_hash(spec, execution_payload)
+ execution_payload.block_hash = compute_el_block_hash(spec, execution_payload, state)
yield from run_withdrawals_processing(spec, state, execution_payload, valid=False)
@@ -400,7 +400,7 @@ def test_invalid_incorrect_withdrawal_index(spec, state):
next_slot(spec, state)
execution_payload = build_empty_execution_payload(spec, state)
execution_payload.withdrawals[0].index += 1
- execution_payload.block_hash = compute_el_block_hash(spec, execution_payload)
+ execution_payload.block_hash = compute_el_block_hash(spec, execution_payload, state)
yield from run_withdrawals_processing(spec, state, execution_payload, valid=False)
@@ -414,7 +414,7 @@ def test_invalid_incorrect_address_full(spec, state):
next_slot(spec, state)
execution_payload = build_empty_execution_payload(spec, state)
execution_payload.withdrawals[0].address = b'\xff' * 20
- execution_payload.block_hash = compute_el_block_hash(spec, execution_payload)
+ execution_payload.block_hash = compute_el_block_hash(spec, execution_payload, state)
yield from run_withdrawals_processing(spec, state, execution_payload, valid=False)
@@ -428,7 +428,7 @@ def test_invalid_incorrect_address_partial(spec, state):
next_slot(spec, state)
execution_payload = build_empty_execution_payload(spec, state)
execution_payload.withdrawals[0].address = b'\xff' * 20
- execution_payload.block_hash = compute_el_block_hash(spec, execution_payload)
+ execution_payload.block_hash = compute_el_block_hash(spec, execution_payload, state)
yield from run_withdrawals_processing(spec, state, execution_payload, valid=False)
@@ -441,7 +441,7 @@ def test_invalid_incorrect_amount_full(spec, state):
next_slot(spec, state)
execution_payload = build_empty_execution_payload(spec, state)
execution_payload.withdrawals[0].amount += 1
- execution_payload.block_hash = compute_el_block_hash(spec, execution_payload)
+ execution_payload.block_hash = compute_el_block_hash(spec, execution_payload, state)
yield from run_withdrawals_processing(spec, state, execution_payload, valid=False)
@@ -454,7 +454,7 @@ def test_invalid_incorrect_amount_partial(spec, state):
next_slot(spec, state)
execution_payload = build_empty_execution_payload(spec, state)
execution_payload.withdrawals[0].amount += 1
- execution_payload.block_hash = compute_el_block_hash(spec, execution_payload)
+ execution_payload.block_hash = compute_el_block_hash(spec, execution_payload, state)
yield from run_withdrawals_processing(spec, state, execution_payload, valid=False)
@@ -474,7 +474,7 @@ def test_invalid_one_of_many_incorrectly_full(spec, state):
withdrawal.index += 1
withdrawal.address = b'\x99' * 20
withdrawal.amount += 4000000
- execution_payload.block_hash = compute_el_block_hash(spec, execution_payload)
+ execution_payload.block_hash = compute_el_block_hash(spec, execution_payload, state)
yield from run_withdrawals_processing(spec, state, execution_payload, valid=False)
@@ -494,7 +494,7 @@ def test_invalid_one_of_many_incorrectly_partial(spec, state):
withdrawal.index += 1
withdrawal.address = b'\x99' * 20
withdrawal.amount += 4000000
- execution_payload.block_hash = compute_el_block_hash(spec, execution_payload)
+ execution_payload.block_hash = compute_el_block_hash(spec, execution_payload, state)
yield from run_withdrawals_processing(spec, state, execution_payload, valid=False)
@@ -514,7 +514,7 @@ def test_invalid_many_incorrectly_full(spec, state):
withdrawal.address = i.to_bytes(20, 'big')
else:
withdrawal.amount += 1
- execution_payload.block_hash = compute_el_block_hash(spec, execution_payload)
+ execution_payload.block_hash = compute_el_block_hash(spec, execution_payload, state)
yield from run_withdrawals_processing(spec, state, execution_payload, valid=False)
@@ -534,7 +534,7 @@ def test_invalid_many_incorrectly_partial(spec, state):
withdrawal.address = i.to_bytes(20, 'big')
else:
withdrawal.amount += 1
- execution_payload.block_hash = compute_el_block_hash(spec, execution_payload)
+ execution_payload.block_hash = compute_el_block_hash(spec, execution_payload, state)
yield from run_withdrawals_processing(spec, state, execution_payload, valid=False)
diff --git a/tests/core/pyspec/eth2spec/test/capella/light_client/test_single_merkle_proof.py b/tests/core/pyspec/eth2spec/test/capella/light_client/test_single_merkle_proof.py
index 41bb3f307e..7f414ab285 100644
--- a/tests/core/pyspec/eth2spec/test/capella/light_client/test_single_merkle_proof.py
+++ b/tests/core/pyspec/eth2spec/test/capella/light_client/test_single_merkle_proof.py
@@ -15,16 +15,17 @@ def test_execution_merkle_proof(spec, state):
block = state_transition_with_full_block(spec, state, True, False)
yield "object", block.message.body
- execution_branch = spec.compute_merkle_proof(block.message.body, spec.EXECUTION_PAYLOAD_GINDEX)
+ gindex = spec.EXECUTION_PAYLOAD_GINDEX
+ branch = spec.compute_merkle_proof(block.message.body, gindex)
yield "proof", {
"leaf": "0x" + block.message.body.execution_payload.hash_tree_root().hex(),
- "leaf_index": spec.EXECUTION_PAYLOAD_GINDEX,
- "branch": ['0x' + root.hex() for root in execution_branch]
+ "leaf_index": gindex,
+ "branch": ['0x' + root.hex() for root in branch]
}
assert spec.is_valid_merkle_branch(
leaf=block.message.body.execution_payload.hash_tree_root(),
- branch=execution_branch,
- depth=spec.floorlog2(spec.EXECUTION_PAYLOAD_GINDEX),
- index=spec.get_subtree_index(spec.EXECUTION_PAYLOAD_GINDEX),
+ branch=branch,
+ depth=spec.floorlog2(gindex),
+ index=spec.get_subtree_index(gindex),
root=block.message.body.hash_tree_root(),
)
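For readers following the `gindex` refactor above, here is a self-contained sketch of the generalized-index arithmetic the test relies on: for a generalized index `g`, the proof depth is `floorlog2(g)` and the leaf's position under that subtree is `g mod 2**depth`. The sha256 hashing is a stand-in for the spec's hash function; the branch-verification loop follows the standard SSZ Merkle-proof definition.

```python
import hashlib


def hash_pair(a: bytes, b: bytes) -> bytes:
    return hashlib.sha256(a + b).digest()


def floorlog2(x: int) -> int:
    return x.bit_length() - 1


def get_subtree_index(gindex: int) -> int:
    return gindex % (1 << floorlog2(gindex))


def is_valid_merkle_branch(leaf, branch, depth, index, root) -> bool:
    value = leaf
    for i in range(depth):
        if (index >> i) & 1:
            value = hash_pair(branch[i], value)
        else:
            value = hash_pair(value, branch[i])
    return value == root


# Tiny 2-leaf tree: gindex 2 is the left leaf, its sibling is the one-element branch.
left, right = b"\x01" * 32, b"\x02" * 32
root = hash_pair(left, right)
gindex = 2
assert is_valid_merkle_branch(left, [right], floorlog2(gindex), get_subtree_index(gindex), root)
```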
diff --git a/tests/core/pyspec/eth2spec/test/deneb/block_processing/test_process_execution_payload.py b/tests/core/pyspec/eth2spec/test/deneb/block_processing/test_process_execution_payload.py
index b0937aac96..85175a3b9c 100644
--- a/tests/core/pyspec/eth2spec/test/deneb/block_processing/test_process_execution_payload.py
+++ b/tests/core/pyspec/eth2spec/test/deneb/block_processing/test_process_execution_payload.py
@@ -78,7 +78,7 @@ def test_incorrect_blob_tx_type(spec, state):
opaque_tx = b'\x04' + opaque_tx[1:] # incorrect tx type
execution_payload.transactions = [opaque_tx]
- execution_payload.block_hash = compute_el_block_hash(spec, execution_payload)
+ execution_payload.block_hash = compute_el_block_hash(spec, execution_payload, state)
yield from run_execution_payload_processing(spec, state, execution_payload, blob_kzg_commitments)
@@ -95,7 +95,7 @@ def test_incorrect_transaction_length_1_extra_byte(spec, state):
opaque_tx = opaque_tx + b'\x12' # incorrect tx length, longer
execution_payload.transactions = [opaque_tx]
- execution_payload.block_hash = compute_el_block_hash(spec, execution_payload)
+ execution_payload.block_hash = compute_el_block_hash(spec, execution_payload, state)
yield from run_execution_payload_processing(spec, state, execution_payload, blob_kzg_commitments)
@@ -112,7 +112,7 @@ def test_incorrect_transaction_length_1_byte_short(spec, state):
opaque_tx = opaque_tx[:-1] # incorrect tx length, shorter
execution_payload.transactions = [opaque_tx]
- execution_payload.block_hash = compute_el_block_hash(spec, execution_payload)
+ execution_payload.block_hash = compute_el_block_hash(spec, execution_payload, state)
yield from run_execution_payload_processing(spec, state, execution_payload, blob_kzg_commitments)
@@ -129,7 +129,7 @@ def test_incorrect_transaction_length_empty(spec, state):
opaque_tx = opaque_tx[0:0] # incorrect tx length, empty
execution_payload.transactions = [opaque_tx]
- execution_payload.block_hash = compute_el_block_hash(spec, execution_payload)
+ execution_payload.block_hash = compute_el_block_hash(spec, execution_payload, state)
yield from run_execution_payload_processing(spec, state, execution_payload, blob_kzg_commitments)
@@ -146,7 +146,7 @@ def test_incorrect_transaction_length_32_extra_bytes(spec, state):
opaque_tx = opaque_tx + b'\x12' * 32 # incorrect tx length
execution_payload.transactions = [opaque_tx]
- execution_payload.block_hash = compute_el_block_hash(spec, execution_payload)
+ execution_payload.block_hash = compute_el_block_hash(spec, execution_payload, state)
yield from run_execution_payload_processing(spec, state, execution_payload, blob_kzg_commitments)
@@ -162,7 +162,7 @@ def test_no_transactions_with_commitments(spec, state):
_, _, blob_kzg_commitments, _ = get_sample_opaque_tx(spec)
execution_payload.transactions = []
- execution_payload.block_hash = compute_el_block_hash(spec, execution_payload)
+ execution_payload.block_hash = compute_el_block_hash(spec, execution_payload, state)
yield from run_execution_payload_processing(spec, state, execution_payload, blob_kzg_commitments)
@@ -179,7 +179,7 @@ def test_incorrect_commitment(spec, state):
blob_kzg_commitments[0] = b'\x12' * 48 # incorrect commitment
execution_payload.transactions = [opaque_tx]
- execution_payload.block_hash = compute_el_block_hash(spec, execution_payload)
+ execution_payload.block_hash = compute_el_block_hash(spec, execution_payload, state)
yield from run_execution_payload_processing(spec, state, execution_payload, blob_kzg_commitments)
@@ -196,7 +196,7 @@ def test_incorrect_commitments_order(spec, state):
blob_kzg_commitments = [blob_kzg_commitments[1], blob_kzg_commitments[0]] # incorrect order
execution_payload.transactions = [opaque_tx]
- execution_payload.block_hash = compute_el_block_hash(spec, execution_payload)
+ execution_payload.block_hash = compute_el_block_hash(spec, execution_payload, state)
yield from run_execution_payload_processing(spec, state, execution_payload, blob_kzg_commitments)
@@ -227,7 +227,7 @@ def test_zeroed_commitment(spec, state):
assert all(commitment == b'\x00' * 48 for commitment in blob_kzg_commitments)
execution_payload.transactions = [opaque_tx]
- execution_payload.block_hash = compute_el_block_hash(spec, execution_payload)
+ execution_payload.block_hash = compute_el_block_hash(spec, execution_payload, state)
yield from run_execution_payload_processing(spec, state, execution_payload, blob_kzg_commitments)
@@ -243,7 +243,7 @@ def test_invalid_correct_input__execution_invalid(spec, state):
opaque_tx, _, blob_kzg_commitments, _ = get_sample_opaque_tx(spec)
execution_payload.transactions = [opaque_tx]
- execution_payload.block_hash = compute_el_block_hash(spec, execution_payload)
+ execution_payload.block_hash = compute_el_block_hash(spec, execution_payload, state)
yield from run_execution_payload_processing(spec, state, execution_payload, blob_kzg_commitments,
valid=False, execution_valid=False)
@@ -254,9 +254,9 @@ def test_invalid_correct_input__execution_invalid(spec, state):
def test_invalid_exceed_max_blobs_per_block(spec, state):
execution_payload = build_empty_execution_payload(spec, state)
- opaque_tx, _, blob_kzg_commitments, _ = get_sample_opaque_tx(spec, blob_count=spec.MAX_BLOBS_PER_BLOCK + 1)
+ opaque_tx, _, blob_kzg_commitments, _ = get_sample_opaque_tx(spec, blob_count=spec.config.MAX_BLOBS_PER_BLOCK + 1)
execution_payload.transactions = [opaque_tx]
- execution_payload.block_hash = compute_el_block_hash(spec, execution_payload)
+ execution_payload.block_hash = compute_el_block_hash(spec, execution_payload, state)
yield from run_execution_payload_processing(spec, state, execution_payload, blob_kzg_commitments, valid=False)
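Several hunks in this file (and below) replace `spec.MAX_BLOBS_PER_BLOCK` with `spec.config.MAX_BLOBS_PER_BLOCK`, i.e. the constant moves from a preset attribute to a runtime config value. A toy sketch of the lookup difference, with illustrative namespaces rather than the real spec object:

```python
from types import SimpleNamespace

spec_old = SimpleNamespace(MAX_BLOBS_PER_BLOCK=6)                          # preset-style attribute
spec_new = SimpleNamespace(config=SimpleNamespace(MAX_BLOBS_PER_BLOCK=6))  # config-style value


def max_blobs(spec):
    # Prefer the runtime config when present, mirroring the call-site updates above.
    cfg = getattr(spec, "config", None)
    if cfg is not None and hasattr(cfg, "MAX_BLOBS_PER_BLOCK"):
        return cfg.MAX_BLOBS_PER_BLOCK
    return spec.MAX_BLOBS_PER_BLOCK


assert max_blobs(spec_old) == max_blobs(spec_new) == 6
```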
diff --git a/tests/core/pyspec/eth2spec/test/deneb/fork_choice/test_on_block.py b/tests/core/pyspec/eth2spec/test/deneb/fork_choice/test_on_block.py
index 4ab7d819a9..f8ffa62346 100644
--- a/tests/core/pyspec/eth2spec/test/deneb/fork_choice/test_on_block.py
+++ b/tests/core/pyspec/eth2spec/test/deneb/fork_choice/test_on_block.py
@@ -29,7 +29,7 @@ def get_block_with_blob(spec, state, rng=None):
block = build_empty_block_for_next_slot(spec, state)
opaque_tx, blobs, blob_kzg_commitments, blob_kzg_proofs = get_sample_opaque_tx(spec, blob_count=1, rng=rng)
block.body.execution_payload.transactions = [opaque_tx]
- block.body.execution_payload.block_hash = compute_el_block_hash(spec, block.body.execution_payload)
+ block.body.execution_payload.block_hash = compute_el_block_hash(spec, block.body.execution_payload, state)
block.body.blob_kzg_commitments = blob_kzg_commitments
return block, blobs, blob_kzg_proofs
diff --git a/tests/core/pyspec/eth2spec/test/deneb/merkle_proof/test_single_merkle_proof.py b/tests/core/pyspec/eth2spec/test/deneb/merkle_proof/test_single_merkle_proof.py
index 8ec6a1d927..1dda310123 100644
--- a/tests/core/pyspec/eth2spec/test/deneb/merkle_proof/test_single_merkle_proof.py
+++ b/tests/core/pyspec/eth2spec/test/deneb/merkle_proof/test_single_merkle_proof.py
@@ -36,7 +36,7 @@ def _run_blob_kzg_commitment_merkle_proof_test(spec, state, rng=None):
)
block.body.blob_kzg_commitments = blob_kzg_commitments
block.body.execution_payload.transactions = [opaque_tx]
- block.body.execution_payload.block_hash = compute_el_block_hash(spec, block.body.execution_payload)
+ block.body.execution_payload.block_hash = compute_el_block_hash(spec, block.body.execution_payload, state)
signed_block = sign_block(spec, state, block, proposer_index=0)
blob_sidecars = spec.get_blob_sidecars(signed_block, blobs, proofs)
blob_index = 0
diff --git a/tests/core/pyspec/eth2spec/test/deneb/sanity/test_blocks.py b/tests/core/pyspec/eth2spec/test/deneb/sanity/test_blocks.py
index 8598530d9a..94db2c34fc 100644
--- a/tests/core/pyspec/eth2spec/test/deneb/sanity/test_blocks.py
+++ b/tests/core/pyspec/eth2spec/test/deneb/sanity/test_blocks.py
@@ -40,7 +40,7 @@ def run_block_with_blobs(spec, state, blob_count, tx_count=1, blob_gas_used=1, e
block.body.execution_payload.transactions = txs
block.body.execution_payload.blob_gas_used = blob_gas_used
block.body.execution_payload.excess_blob_gas = excess_blob_gas
- block.body.execution_payload.block_hash = compute_el_block_hash(spec, block.body.execution_payload)
+ block.body.execution_payload.block_hash = compute_el_block_hash(spec, block.body.execution_payload, state)
if valid:
signed_block = state_transition_and_sign_block(spec, state, block)
diff --git a/tests/core/pyspec/eth2spec/test/deneb/unittests/test_config_invariants.py b/tests/core/pyspec/eth2spec/test/deneb/unittests/test_config_invariants.py
index f3fa956d0a..1f44257856 100644
--- a/tests/core/pyspec/eth2spec/test/deneb/unittests/test_config_invariants.py
+++ b/tests/core/pyspec/eth2spec/test/deneb/unittests/test_config_invariants.py
@@ -9,17 +9,20 @@
@spec_test
@single_phase
def test_length(spec):
- assert spec.MAX_BLOBS_PER_BLOCK < spec.MAX_BLOB_COMMITMENTS_PER_BLOCK
+ assert spec.config.MAX_BLOBS_PER_BLOCK < spec.MAX_BLOB_COMMITMENTS_PER_BLOCK
@with_deneb_and_later
@spec_test
@single_phase
def test_networking(spec):
- assert spec.MAX_BLOBS_PER_BLOCK < spec.MAX_BLOB_COMMITMENTS_PER_BLOCK
- assert spec.config.MAX_REQUEST_BLOB_SIDECARS == spec.config.MAX_REQUEST_BLOCKS_DENEB * spec.MAX_BLOBS_PER_BLOCK
+ assert spec.config.MAX_BLOBS_PER_BLOCK < spec.MAX_BLOB_COMMITMENTS_PER_BLOCK
+ assert (
+ spec.config.MAX_REQUEST_BLOB_SIDECARS ==
+ spec.config.MAX_REQUEST_BLOCKS_DENEB * spec.config.MAX_BLOBS_PER_BLOCK
+ )
# Start with the same size, but `BLOB_SIDECAR_SUBNET_COUNT` could potentially increase later.
- assert spec.config.BLOB_SIDECAR_SUBNET_COUNT == spec.MAX_BLOBS_PER_BLOCK
+ assert spec.config.BLOB_SIDECAR_SUBNET_COUNT == spec.config.MAX_BLOBS_PER_BLOCK
for i in range(spec.MAX_BLOB_COMMITMENTS_PER_BLOCK):
gindex = spec.get_generalized_index(spec.BeaconBlockBody, 'blob_kzg_commitments', i)
assert spec.floorlog2(gindex) == spec.KZG_COMMITMENT_INCLUSION_PROOF_DEPTH
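As a worked instance of the invariant asserted above, here are the mainnet Deneb numbers (quoted from memory as an illustration of the relation, not as a config source of truth):

```python
# Mainnet Deneb values, stated here for illustration only.
MAX_REQUEST_BLOCKS_DENEB = 128
MAX_BLOBS_PER_BLOCK = 6
MAX_REQUEST_BLOB_SIDECARS = 768
BLOB_SIDECAR_SUBNET_COUNT = 6

assert MAX_REQUEST_BLOB_SIDECARS == MAX_REQUEST_BLOCKS_DENEB * MAX_BLOBS_PER_BLOCK
assert BLOB_SIDECAR_SUBNET_COUNT == MAX_BLOBS_PER_BLOCK
```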
diff --git a/tests/core/pyspec/eth2spec/test/deneb/unittests/validator/test_validator.py b/tests/core/pyspec/eth2spec/test/deneb/unittests/validator/test_validator.py
index 7ed88cba45..6724e8304a 100644
--- a/tests/core/pyspec/eth2spec/test/deneb/unittests/validator/test_validator.py
+++ b/tests/core/pyspec/eth2spec/test/deneb/unittests/validator/test_validator.py
@@ -26,7 +26,7 @@ def _get_sample_sidecars(spec, state, rng):
block.body.blob_kzg_commitments = blob_kzg_commitments_1 + blob_kzg_commitments_2
block.body.execution_payload.transactions = [opaque_tx_1, opaque_tx_2]
- block.body.execution_payload.block_hash = compute_el_block_hash(spec, block.body.execution_payload)
+ block.body.execution_payload.block_hash = compute_el_block_hash(spec, block.body.execution_payload, state)
blobs = blobs_1 + blobs_2
proofs = proofs_1 + proofs_2
diff --git a/tests/core/pyspec/eth2spec/test/eip7594/merkle_proof/test_single_merkle_proof.py b/tests/core/pyspec/eth2spec/test/eip7594/merkle_proof/test_single_merkle_proof.py
index 8e67b3d0bf..98c751508d 100644
--- a/tests/core/pyspec/eth2spec/test/eip7594/merkle_proof/test_single_merkle_proof.py
+++ b/tests/core/pyspec/eth2spec/test/eip7594/merkle_proof/test_single_merkle_proof.py
@@ -36,9 +36,10 @@ def _run_blob_kzg_commitments_merkle_proof_test(spec, state, rng=None):
)
block.body.blob_kzg_commitments = blob_kzg_commitments
block.body.execution_payload.transactions = [opaque_tx]
- block.body.execution_payload.block_hash = compute_el_block_hash(spec, block.body.execution_payload)
+ block.body.execution_payload.block_hash = compute_el_block_hash(spec, block.body.execution_payload, state)
signed_block = sign_block(spec, state, block, proposer_index=0)
- column_sidcars = spec.get_data_column_sidecars(signed_block, blobs)
+ cells_and_kzg_proofs = [spec.compute_cells_and_kzg_proofs(blob) for blob in blobs]
+    column_sidecars = spec.get_data_column_sidecars(signed_block, cells_and_kzg_proofs)
    column_sidecar = column_sidecars[0]
yield "object", block.body
diff --git a/tests/core/pyspec/eth2spec/test/eip7594/unittests/das/test_das.py b/tests/core/pyspec/eth2spec/test/eip7594/unittests/das/test_das.py
index b2e0a44b82..7110f2373e 100644
--- a/tests/core/pyspec/eth2spec/test/eip7594/unittests/das/test_das.py
+++ b/tests/core/pyspec/eth2spec/test/eip7594/unittests/das/test_das.py
@@ -1,7 +1,9 @@
import random
from eth2spec.test.context import (
+ expect_assertion_error,
spec_test,
single_phase,
+ with_config_overrides,
with_eip7594_and_later,
)
from eth2spec.test.helpers.sharding import (
@@ -64,3 +66,80 @@ def test_recover_matrix(spec):
# Ensure that the recovered matrix matches the original matrix
assert recovered_matrix == extended_matrix
+
+
+@with_eip7594_and_later
+@spec_test
+@single_phase
+def test_get_extended_sample_count__1(spec):
+ rng = random.Random(1111)
+ allowed_failures = rng.randint(0, spec.config.NUMBER_OF_COLUMNS // 2)
+ spec.get_extended_sample_count(allowed_failures)
+
+
+@with_eip7594_and_later
+@spec_test
+@single_phase
+def test_get_extended_sample_count__2(spec):
+ rng = random.Random(2222)
+ allowed_failures = rng.randint(0, spec.config.NUMBER_OF_COLUMNS // 2)
+ spec.get_extended_sample_count(allowed_failures)
+
+
+@with_eip7594_and_later
+@spec_test
+@single_phase
+def test_get_extended_sample_count__3(spec):
+ rng = random.Random(3333)
+ allowed_failures = rng.randint(0, spec.config.NUMBER_OF_COLUMNS // 2)
+ spec.get_extended_sample_count(allowed_failures)
+
+
+@with_eip7594_and_later
+@spec_test
+@single_phase
+def test_get_extended_sample_count__lower_bound(spec):
+ allowed_failures = 0
+ spec.get_extended_sample_count(allowed_failures)
+
+
+@with_eip7594_and_later
+@spec_test
+@single_phase
+def test_get_extended_sample_count__upper_bound(spec):
+ allowed_failures = spec.config.NUMBER_OF_COLUMNS // 2
+ spec.get_extended_sample_count(allowed_failures)
+
+
+@with_eip7594_and_later
+@spec_test
+@single_phase
+def test_get_extended_sample_count__upper_bound_exceed(spec):
+ allowed_failures = spec.config.NUMBER_OF_COLUMNS // 2 + 1
+ expect_assertion_error(lambda: spec.get_extended_sample_count(allowed_failures))
+
+
+@with_eip7594_and_later
+@spec_test
+@with_config_overrides({
+ 'NUMBER_OF_COLUMNS': 128,
+ 'SAMPLES_PER_SLOT': 16,
+})
+@single_phase
+def test_get_extended_sample_count__table_in_spec(spec):
+    # (allowed_failures, expected_extended_sample_count)
+    table = {
+        0: 16,
+        1: 20,
+        2: 24,
+        3: 27,
+        4: 29,
+        5: 32,
+        6: 35,
+        7: 37,
+        8: 40,
+    }
+ for allowed_failures, expected_extended_sample_count in table.items():
+ assert spec.get_extended_sample_count(allowed_failures=allowed_failures) == expected_extended_sample_count
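The table above matches the one published in the EIP-7594 sampling spec. Below is a sketch of how such counts can be derived, mirroring — to the best of my reading — the spec's hypergeometric-CDF construction, under the overridden config above (NUMBER_OF_COLUMNS=128, SAMPLES_PER_SLOT=16). The helper names are mine.

```python
from math import comb


def c(n, k):
    # comb that returns 0 out of range, mirroring the spec's math_comb behaviour
    return comb(n, k) if 0 <= k <= n else 0


def hypergeom_cdf(k, M, n, N):
    # P(X <= k) for X ~ Hypergeometric(population M, successes n, draws N)
    return sum(c(n, i) * c(M - n, N - i) for i in range(k + 1)) / c(M, N)


def extended_sample_count(allowed_failures, number_of_columns=128, samples_per_slot=16):
    assert 0 <= allowed_failures <= number_of_columns // 2
    # Worst case: just over half the columns are missing, so the data is unrecoverable.
    worst_case_missing = number_of_columns // 2 + 1
    false_positive_threshold = hypergeom_cdf(
        0, number_of_columns, worst_case_missing, samples_per_slot)
    for sample_count in range(samples_per_slot, number_of_columns + 1):
        if hypergeom_cdf(
                allowed_failures, number_of_columns,
                worst_case_missing, sample_count) <= false_positive_threshold:
            break
    return sample_count


# Spot-check against two entries of the table above.
assert extended_sample_count(0) == 16
assert extended_sample_count(8) == 40
```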
diff --git a/tests/core/pyspec/eth2spec/test/eip7594/unittests/polynomial_commitments/test_polynomial_commitments.py b/tests/core/pyspec/eth2spec/test/eip7594/unittests/polynomial_commitments/test_polynomial_commitments.py
index 5bc3a4330a..c68c606765 100644
--- a/tests/core/pyspec/eth2spec/test/eip7594/unittests/polynomial_commitments/test_polynomial_commitments.py
+++ b/tests/core/pyspec/eth2spec/test/eip7594/unittests/polynomial_commitments/test_polynomial_commitments.py
@@ -111,35 +111,122 @@ def test_construct_vanishing_polynomial(spec):
@with_eip7594_and_later
@spec_test
@single_phase
-def test_verify_cell_kzg_proof(spec):
+def test_verify_cell_kzg_proof_batch_zero_cells(spec):
+    # Verifying with zero cells should return True
+ assert spec.verify_cell_kzg_proof_batch(
+ commitments_bytes=[],
+ cell_indices=[],
+ cells=[],
+ proofs_bytes=[],
+ )
+
+
+@with_eip7594_and_later
+@spec_test
+@single_phase
+def test_verify_cell_kzg_proof_batch(spec):
+
+ # test with a single blob / commitment
blob = get_sample_blob(spec)
commitment = spec.blob_to_kzg_commitment(blob)
cells, proofs = spec.compute_cells_and_kzg_proofs(blob)
- cell_index = 0
- assert spec.verify_cell_kzg_proof(commitment, cell_index, cells[cell_index], proofs[cell_index])
- cell_index = 1
- assert spec.verify_cell_kzg_proof(commitment, cell_index, cells[cell_index], proofs[cell_index])
+ assert len(cells) == len(proofs)
+
+ assert spec.verify_cell_kzg_proof_batch(
+ commitments_bytes=[commitment, commitment],
+ cell_indices=[0, 4],
+ cells=[cells[0], cells[4]],
+ proofs_bytes=[proofs[0], proofs[4]],
+ )
+
+ # now test with three blobs / commitments
+ all_blobs = []
+ all_commitments = []
+ all_cells = []
+ all_proofs = []
+ for _ in range(3):
+ blob = get_sample_blob(spec)
+ commitment = spec.blob_to_kzg_commitment(blob)
+ cells, proofs = spec.compute_cells_and_kzg_proofs(blob)
+
+ assert len(cells) == len(proofs)
+
+ all_blobs.append(blob)
+ all_commitments.append(commitment)
+ all_cells.append(cells)
+ all_proofs.append(proofs)
+
+ # the cells of interest
+ commitment_indices = [0, 0, 1, 2, 1]
+ cell_indices = [0, 4, 0, 1, 2]
+ cells = [all_cells[i][j] for (i, j) in zip(commitment_indices, cell_indices)]
+ proofs = [all_proofs[i][j] for (i, j) in zip(commitment_indices, cell_indices)]
+ commitments = [all_commitments[i] for i in commitment_indices]
+
+ # do the check
+ assert spec.verify_cell_kzg_proof_batch(
+ commitments_bytes=commitments,
+ cell_indices=cell_indices,
+ cells=cells,
+ proofs_bytes=proofs,
+ )
@with_eip7594_and_later
@spec_test
@single_phase
-def test_verify_cell_kzg_proof_batch(spec):
+def test_verify_cell_kzg_proof_batch_invalid(spec):
+
+ # test with a single blob / commitment
blob = get_sample_blob(spec)
commitment = spec.blob_to_kzg_commitment(blob)
cells, proofs = spec.compute_cells_and_kzg_proofs(blob)
assert len(cells) == len(proofs)
- assert spec.verify_cell_kzg_proof_batch(
- row_commitments_bytes=[commitment],
- row_indices=[0, 0],
- column_indices=[0, 4],
- cells=[cells[0], cells[4]],
+ assert not spec.verify_cell_kzg_proof_batch(
+ commitments_bytes=[commitment, commitment],
+ cell_indices=[0, 4],
+ cells=[cells[0], cells[5]], # Note: this is where it should go wrong
proofs_bytes=[proofs[0], proofs[4]],
)
+ # now test with three blobs / commitments
+ all_blobs = []
+ all_commitments = []
+ all_cells = []
+ all_proofs = []
+ for _ in range(3):
+ blob = get_sample_blob(spec)
+ commitment = spec.blob_to_kzg_commitment(blob)
+ cells, proofs = spec.compute_cells_and_kzg_proofs(blob)
+
+ assert len(cells) == len(proofs)
+
+ all_blobs.append(blob)
+ all_commitments.append(commitment)
+ all_cells.append(cells)
+ all_proofs.append(proofs)
+
+ # the cells of interest
+ commitment_indices = [0, 0, 1, 2, 1]
+ cell_indices = [0, 4, 0, 1, 2]
+ cells = [all_cells[i][j] for (i, j) in zip(commitment_indices, cell_indices)]
+ proofs = [all_proofs[i][j] for (i, j) in zip(commitment_indices, cell_indices)]
+ commitments = [all_commitments[i] for i in commitment_indices]
+
+    # Corrupt one of the cells; the batch should then fail to verify
+ cells[1] = all_cells[1][3]
+
+ # do the check
+ assert not spec.verify_cell_kzg_proof_batch(
+ commitments_bytes=commitments,
+ cell_indices=cell_indices,
+ cells=cells,
+ proofs_bytes=proofs,
+ )
+
@with_eip7594_and_later
@spec_test
@@ -164,12 +251,11 @@ def test_recover_cells_and_kzg_proofs(spec):
while j in cell_indices:
j = rng.randint(0, spec.CELLS_PER_EXT_BLOB - 1)
cell_indices.append(j)
- # Now the cells/proofs themselves
+ # Now the cells themselves
known_cells = [cells[cell_index] for cell_index in cell_indices]
- known_proofs = [proofs[cell_index] for cell_index in cell_indices]
# Recover the missing cells and proofs
- recovered_cells, recovered_proofs = spec.recover_cells_and_kzg_proofs(cell_indices, known_cells, known_proofs)
+ recovered_cells, recovered_proofs = spec.recover_cells_and_kzg_proofs(cell_indices, known_cells)
recovered_data = [x for xs in recovered_cells for x in xs]
# Check that the original data match the non-extended portion of the recovered data
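The recovery test above hinges on the erasure-coding property that any half of the extended cells determines the rest. Here is a toy illustration over a small prime field (257, not the spec's BLS scalar field): the "blob" is a polynomial of degree below k, the extension evaluates it at 2k points, and any k evaluations pin it down uniquely via Lagrange interpolation.

```python
P = 257  # toy prime field, for illustration only


def lagrange_eval(points, x):
    # Evaluate the unique degree < len(points) polynomial through `points` at x (mod P).
    total = 0
    for i, (xi, yi) in enumerate(points):
        num, den = 1, 1
        for j, (xj, _) in enumerate(points):
            if i != j:
                num = num * (x - xj) % P
                den = den * (xi - xj) % P
        total = (total + yi * num * pow(den, P - 2, P)) % P
    return total


k = 4
coeffs = [3, 1, 4, 1]  # "blob": degree < k polynomial


def f(x):
    return sum(c * pow(x, e, P) for e, c in enumerate(coeffs)) % P


extended = [(x, f(x)) for x in range(2 * k)]   # 2k "cells"
known = extended[::2]                          # keep any k of them
recovered = [lagrange_eval(known, x) for x in range(2 * k)]
assert recovered == [y for _, y in extended]   # full extension recovered from half
```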
diff --git a/tests/core/pyspec/eth2spec/test/eip7594/unittests/test_config_invariants.py b/tests/core/pyspec/eth2spec/test/eip7594/unittests/test_config_invariants.py
index 998bf35128..776ea883aa 100644
--- a/tests/core/pyspec/eth2spec/test/eip7594/unittests/test_config_invariants.py
+++ b/tests/core/pyspec/eth2spec/test/eip7594/unittests/test_config_invariants.py
@@ -18,7 +18,7 @@ def test_invariants(spec):
assert spec.config.MAX_REQUEST_DATA_COLUMN_SIDECARS == (
spec.config.MAX_REQUEST_BLOCKS_DENEB * spec.config.NUMBER_OF_COLUMNS
)
- assert spec.config.MAX_CELLS_IN_EXTENDED_MATRIX == spec.MAX_BLOBS_PER_BLOCK * spec.config.NUMBER_OF_COLUMNS
+ assert spec.config.MAX_CELLS_IN_EXTENDED_MATRIX == spec.config.MAX_BLOBS_PER_BLOCK * spec.config.NUMBER_OF_COLUMNS
@with_eip7594_and_later
diff --git a/tests/core/pyspec/eth2spec/test/eip7594/unittests/test_security.py b/tests/core/pyspec/eth2spec/test/eip7594/unittests/test_security.py
index dd85a673e5..282433b9c9 100644
--- a/tests/core/pyspec/eth2spec/test/eip7594/unittests/test_security.py
+++ b/tests/core/pyspec/eth2spec/test/eip7594/unittests/test_security.py
@@ -19,7 +19,7 @@ def test_sampling_config(spec):
security_requirement = 0.01
assert probability_of_unavailable <= security_requirement
- column_size_in_bytes = spec.FIELD_ELEMENTS_PER_CELL * spec.BYTES_PER_FIELD_ELEMENT * spec.MAX_BLOBS_PER_BLOCK
+ column_size_in_bytes = spec.FIELD_ELEMENTS_PER_CELL * spec.BYTES_PER_FIELD_ELEMENT * spec.config.MAX_BLOBS_PER_BLOCK
bytes_per_slot = column_size_in_bytes * spec.SAMPLES_PER_SLOT
# TODO: What is the bandwidth requirement?
bandwidth_requirement = 10000 # bytes/s
diff --git a/tests/core/pyspec/eth2spec/test/electra/block_processing/test_process_consolidation_request.py b/tests/core/pyspec/eth2spec/test/electra/block_processing/test_process_consolidation_request.py
index b12438d0d1..b05c20f565 100644
--- a/tests/core/pyspec/eth2spec/test/electra/block_processing/test_process_consolidation_request.py
+++ b/tests/core/pyspec/eth2spec/test/electra/block_processing/test_process_consolidation_request.py
@@ -421,6 +421,9 @@ def test_incorrect_source_equals_target(spec, state):
target_pubkey=state.validators[source_index].pubkey,
)
+    # Check the return condition
+ assert consolidation.source_pubkey == consolidation.target_pubkey
+
yield from run_consolidation_processing(
spec, state, consolidation, success=False
)
@@ -454,6 +457,9 @@ def test_incorrect_exceed_pending_consolidations_limit(spec, state):
)
set_eth1_withdrawal_credential_with_balance(spec, state, target_index)
+    # Check the return condition
+ assert len(state.pending_consolidations) == spec.PENDING_CONSOLIDATIONS_LIMIT
+
yield from run_consolidation_processing(
spec, state, consolidation, success=False
)
@@ -463,7 +469,6 @@ def test_incorrect_exceed_pending_consolidations_limit(spec, state):
@spec_state_test
@single_phase
def test_incorrect_not_enough_consolidation_churn_available(spec, state):
- state.validators = state.validators[0:2]
state.pending_consolidations = [
spec.PendingConsolidation(source_index=0, target_index=1)
]
@@ -481,7 +486,12 @@ def test_incorrect_not_enough_consolidation_churn_available(spec, state):
source_pubkey=state.validators[source_index].pubkey,
target_pubkey=state.validators[target_index].pubkey,
)
+
set_eth1_withdrawal_credential_with_balance(spec, state, target_index)
+
+    # Check the return condition
+ assert spec.get_consolidation_churn_limit(state) <= spec.MIN_ACTIVATION_BALANCE
+
yield from run_consolidation_processing(
spec, state, consolidation, success=False
)
@@ -514,6 +524,9 @@ def test_incorrect_exited_source(spec, state):
# exit source
spec.initiate_validator_exit(state, source_index)
+    # Check the return condition
+ assert state.validators[source_index].exit_epoch != spec.FAR_FUTURE_EPOCH
+
yield from run_consolidation_processing(
spec, state, consolidation, success=False
)
@@ -544,6 +557,10 @@ def test_incorrect_exited_target(spec, state):
set_eth1_withdrawal_credential_with_balance(spec, state, target_index)
# exit target
spec.initiate_validator_exit(state, 1)
+
+    # Check the return condition
+ assert state.validators[target_index].exit_epoch != spec.FAR_FUTURE_EPOCH
+
yield from run_consolidation_processing(
spec, state, consolidation, success=False
)
@@ -576,6 +593,9 @@ def test_incorrect_inactive_source(spec, state):
# set source validator as not yet activated
state.validators[source_index].activation_epoch = spec.FAR_FUTURE_EPOCH
+    # Check the return condition
+ assert not spec.is_active_validator(state.validators[source_index], current_epoch)
+
yield from run_consolidation_processing(
spec, state, consolidation, success=False
)
@@ -607,6 +627,10 @@ def test_incorrect_inactive_target(spec, state):
# set target validator as not yet activated
state.validators[1].activation_epoch = spec.FAR_FUTURE_EPOCH
+
+    # Check the return condition
+ assert not spec.is_active_validator(state.validators[target_index], current_epoch)
+
yield from run_consolidation_processing(
spec, state, consolidation, success=False
)
@@ -633,6 +657,10 @@ def test_incorrect_no_source_execution_withdrawal_credential(spec, state):
target_pubkey=state.validators[target_index].pubkey,
)
set_eth1_withdrawal_credential_with_balance(spec, state, target_index)
+
+    # Check the return condition
+ assert not spec.has_execution_withdrawal_credential(state.validators[source_index])
+
yield from run_consolidation_processing(
spec, state, consolidation, success=False
)
@@ -661,6 +689,10 @@ def test_incorrect_no_target_execution_withdrawal_credential(spec, state):
source_pubkey=state.validators[source_index].pubkey,
target_pubkey=state.validators[target_index].pubkey,
)
+
+    # Check the return condition
+ assert not spec.has_execution_withdrawal_credential(state.validators[target_index])
+
yield from run_consolidation_processing(
spec, state, consolidation, success=False
)
@@ -691,6 +723,9 @@ def test_incorrect_incorrect_source_address(spec, state):
)
set_eth1_withdrawal_credential_with_balance(spec, state, target_index)
+    # Check the return condition
+    assert state.validators[source_index].withdrawal_credentials[12:] != consolidation.source_address
+
yield from run_consolidation_processing(
spec, state, consolidation, success=False
)
@@ -721,6 +756,9 @@ def test_incorrect_unknown_source_pubkey(spec, state):
)
set_eth1_withdrawal_credential_with_balance(spec, state, target_index)
+    # Check the return condition
+    assert state.validators[source_index].pubkey != consolidation.source_pubkey
+
yield from run_consolidation_processing(
spec, state, consolidation, success=False
)
@@ -751,6 +789,9 @@ def test_incorrect_unknown_target_pubkey(spec, state):
)
set_eth1_withdrawal_credential_with_balance(spec, state, target_index)
+    # Check the return condition
+    assert state.validators[target_index].pubkey != consolidation.target_pubkey
+
yield from run_consolidation_processing(
spec, state, consolidation, success=False
)
@@ -762,7 +803,7 @@ def run_consolidation_processing(spec, state, consolidation, success=True):
- pre-state ('pre')
- consolidation_request ('consolidation_request')
- post-state ('post').
- If ``valid == False``, run expecting ``AssertionError``
+    If ``success == False``, ``process_consolidation_request`` returns without making any state change.
"""
if success:
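The corrected docstring describes a no-op failure mode rather than an assertion failure. A toy sketch of that checking pattern — the `process_request` stand-in below is invented for illustration, not the repo's helper:

```python
def process_request(state: dict, request: dict) -> None:
    # Toy stand-in: bail out without touching the state when the request is invalid.
    if not request.get("valid", False):
        return
    state["counter"] = state.get("counter", 0) + 1


state = {"counter": 0}
pre = dict(state)
process_request(state, {"valid": False})
assert state == pre  # success == False path: no state change, no AssertionError
```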
diff --git a/tests/core/pyspec/eth2spec/test/electra/epoch_processing/test_process_pending_consolidations.py b/tests/core/pyspec/eth2spec/test/electra/epoch_processing/test_process_pending_consolidations.py
index 6c21a722ff..d750149839 100644
--- a/tests/core/pyspec/eth2spec/test/electra/epoch_processing/test_process_pending_consolidations.py
+++ b/tests/core/pyspec/eth2spec/test/electra/epoch_processing/test_process_pending_consolidations.py
@@ -1,8 +1,14 @@
-from eth2spec.test.helpers.epoch_processing import run_epoch_processing_with
+from eth2spec.test.helpers.epoch_processing import (
+ run_epoch_processing_with,
+ compute_state_by_epoch_processing_to,
+)
from eth2spec.test.context import (
spec_state_test,
with_electra_and_later,
)
+from eth2spec.test.helpers.state import (
+ next_epoch_with_full_participation,
+)
# ***********************
# * CONSOLIDATION TESTS *
@@ -185,3 +191,158 @@ def test_all_consolidation_cases_together(spec, state):
assert state.balances[target_index[i]] == pre_balances[target_index[i]]
# First consolidation is processed, second is skipped, last two are left in the queue
state.pending_consolidations = pre_pending_consolidations[2:]
+
+
+@with_electra_and_later
+@spec_state_test
+def test_pending_consolidation_future_epoch(spec, state):
+ current_epoch = spec.get_current_epoch(state)
+ source_index = spec.get_active_validator_indices(state, current_epoch)[0]
+ target_index = spec.get_active_validator_indices(state, current_epoch)[1]
+ # initiate source exit
+ spec.initiate_validator_exit(state, source_index)
+ # set withdrawable_epoch to exit_epoch + 1
+ state.validators[source_index].withdrawable_epoch = state.validators[source_index].exit_epoch + spec.Epoch(1)
+ # append pending consolidation
+ state.pending_consolidations.append(
+ spec.PendingConsolidation(source_index=source_index, target_index=target_index)
+ )
+ # Set the target withdrawal credential to eth1
+ eth1_withdrawal_credential = (
+ spec.ETH1_ADDRESS_WITHDRAWAL_PREFIX + b"\x00" * 11 + b"\x11" * 20
+ )
+ state.validators[target_index].withdrawal_credentials = eth1_withdrawal_credential
+
+ # Advance to withdrawable_epoch - 1 with full participation
+ target_epoch = state.validators[source_index].withdrawable_epoch - spec.Epoch(1)
+ while spec.get_current_epoch(state) < target_epoch:
+ next_epoch_with_full_participation(spec, state)
+
+ # Obtain state before the call to process_pending_consolidations
+ state_before_consolidation = compute_state_by_epoch_processing_to(spec, state, "process_pending_consolidations")
+
+ yield from run_epoch_processing_with(spec, state, "process_pending_consolidations")
+
+ # Pending consolidation was successfully processed
+ expected_source_balance = state_before_consolidation.balances[source_index] - spec.MIN_ACTIVATION_BALANCE
+ assert (
+ state.validators[target_index].withdrawal_credentials[:1]
+ == spec.COMPOUNDING_WITHDRAWAL_PREFIX
+ )
+ assert state.balances[target_index] == 2 * spec.MIN_ACTIVATION_BALANCE
+ assert state.balances[source_index] == expected_source_balance
+ assert state.pending_consolidations == []
+
+ # Pending balance deposit to the target is created as part of `switch_to_compounding_validator`.
+    # The queued excess balance consists of the rewards accumulated over the previous epoch transitions.
+ expected_pending_balance = state_before_consolidation.balances[target_index] - spec.MIN_ACTIVATION_BALANCE
+ assert len(state.pending_balance_deposits) > 0
+    pending_balance_deposit = state.pending_balance_deposits[-1]
+ assert pending_balance_deposit.index == target_index
+ assert pending_balance_deposit.amount == expected_pending_balance
+
+
+@with_electra_and_later
+@spec_state_test
+def test_pending_consolidation_compounding_creds(spec, state):
+ current_epoch = spec.get_current_epoch(state)
+ source_index = spec.get_active_validator_indices(state, current_epoch)[0]
+ target_index = spec.get_active_validator_indices(state, current_epoch)[1]
+ # initiate source exit
+ spec.initiate_validator_exit(state, source_index)
+ # set withdrawable_epoch to exit_epoch + 1
+ state.validators[source_index].withdrawable_epoch = state.validators[source_index].exit_epoch + spec.Epoch(1)
+ # append pending consolidation
+ state.pending_consolidations.append(
+ spec.PendingConsolidation(source_index=source_index, target_index=target_index)
+ )
+ # Set the source and the target withdrawal credential to compounding
+ state.validators[source_index].withdrawal_credentials = (
+ spec.COMPOUNDING_WITHDRAWAL_PREFIX + b"\x00" * 11 + b"\x11" * 20
+ )
+ state.validators[target_index].withdrawal_credentials = (
+ spec.COMPOUNDING_WITHDRAWAL_PREFIX + b"\x00" * 11 + b"\x12" * 20
+ )
+
+ # Advance to withdrawable_epoch - 1 with full participation
+ target_epoch = state.validators[source_index].withdrawable_epoch - spec.Epoch(1)
+ while spec.get_current_epoch(state) < target_epoch:
+ next_epoch_with_full_participation(spec, state)
+
+ # Obtain state before the call to process_pending_consolidations
+ state_before_consolidation = compute_state_by_epoch_processing_to(spec, state, "process_pending_consolidations")
+
+ yield from run_epoch_processing_with(spec, state, "process_pending_consolidations")
+
+ # Pending consolidation was successfully processed
+ expected_target_balance = (
+ state_before_consolidation.balances[source_index] + state_before_consolidation.balances[target_index]
+ )
+ assert (
+ state.validators[target_index].withdrawal_credentials[:1]
+ == spec.COMPOUNDING_WITHDRAWAL_PREFIX
+ )
+ assert state.balances[target_index] == expected_target_balance
+    # All of the source's balance is active and moves to the target,
+    # because the source validator has compounding credentials.
+ assert state.balances[source_index] == 0
+ assert state.pending_consolidations == []
+
+    # No pending balance deposit to the target is created,
+    # because the target already has compounding credentials.
+ assert len(state.pending_balance_deposits) == 0
+
+
+@with_electra_and_later
+@spec_state_test
+def test_pending_consolidation_with_pending_deposit(spec, state):
+ current_epoch = spec.get_current_epoch(state)
+ source_index = spec.get_active_validator_indices(state, current_epoch)[0]
+ target_index = spec.get_active_validator_indices(state, current_epoch)[1]
+ # initiate source exit
+ spec.initiate_validator_exit(state, source_index)
+ # set withdrawable_epoch to exit_epoch + 1
+ state.validators[source_index].withdrawable_epoch = state.validators[source_index].exit_epoch + spec.Epoch(1)
+ # append pending consolidation
+ state.pending_consolidations.append(
+ spec.PendingConsolidation(source_index=source_index, target_index=target_index)
+ )
+ # append pending deposit
+ state.pending_balance_deposits.append(
+ spec.PendingBalanceDeposit(index=source_index, amount=spec.MIN_ACTIVATION_BALANCE)
+ )
+ # Set the source and the target withdrawal credential to compounding
+ state.validators[source_index].withdrawal_credentials = (
+ spec.COMPOUNDING_WITHDRAWAL_PREFIX + b"\x00" * 11 + b"\x11" * 20
+ )
+ state.validators[target_index].withdrawal_credentials = (
+ spec.COMPOUNDING_WITHDRAWAL_PREFIX + b"\x00" * 11 + b"\x12" * 20
+ )
+
+ # Advance to withdrawable_epoch - 1 with full participation
+ target_epoch = state.validators[source_index].withdrawable_epoch - spec.Epoch(1)
+ while spec.get_current_epoch(state) < target_epoch:
+ next_epoch_with_full_participation(spec, state)
+
+ # Obtain state before the call to process_pending_balance_deposits
+ state_before_consolidation = compute_state_by_epoch_processing_to(spec, state, "process_pending_balance_deposits")
+
+ yield from run_epoch_processing_with(spec, state, "process_pending_consolidations")
+
+ # Pending consolidation was successfully processed
+ expected_target_balance = (
+ state_before_consolidation.balances[source_index] + state_before_consolidation.balances[target_index]
+ )
+ assert (
+ state.validators[target_index].withdrawal_credentials[:1]
+ == spec.COMPOUNDING_WITHDRAWAL_PREFIX
+ )
+ assert state.balances[target_index] == expected_target_balance
+ assert state.balances[source_index] == 0
+ assert state.pending_consolidations == []
+
+    # The pending balance deposit to the source was not processed;
+    # it should only be processed in the next epoch transition.
+ assert len(state.pending_balance_deposits) == 1
+ assert state.pending_balance_deposits[0] == spec.PendingBalanceDeposit(
+ index=source_index, amount=spec.MIN_ACTIVATION_BALANCE)
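For orientation, a worked sketch of the balance accounting the first of these consolidation tests asserts, using MIN_ACTIVATION_BALANCE = 32 ETH in Gwei; the rewards figure is made up:

```python
# Illustrative numbers only; the rewards figure is invented.
MIN_ACTIVATION_BALANCE = 32 * 10**9  # Gwei
rewards = 50_000                     # hypothetical rewards accrued before processing

source_before = MIN_ACTIVATION_BALANCE + rewards
target_before = MIN_ACTIVATION_BALANCE + rewards

# Eth1-credential target: only the source's *active* balance moves at consolidation
# processing; the target's own excess is queued as a pending balance deposit when it
# is switched to compounding credentials.
moved = min(source_before, MIN_ACTIVATION_BALANCE)
source_after = source_before - moved
target_after = MIN_ACTIVATION_BALANCE + moved
queued_deposit = target_before - MIN_ACTIVATION_BALANCE

assert moved == MIN_ACTIVATION_BALANCE
assert source_after == rewards                     # source keeps its rewards
assert target_after == 2 * MIN_ACTIVATION_BALANCE  # as asserted in the test above
assert queued_deposit == rewards                   # excess queued for the target
```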
diff --git a/tests/core/pyspec/eth2spec/test/electra/fork/test_electra_fork_basic.py b/tests/core/pyspec/eth2spec/test/electra/fork/test_electra_fork_basic.py
index 3bd6350b34..852521a32b 100644
--- a/tests/core/pyspec/eth2spec/test/electra/fork/test_electra_fork_basic.py
+++ b/tests/core/pyspec/eth2spec/test/electra/fork/test_electra_fork_basic.py
@@ -80,3 +80,29 @@ def test_fork_random_misc_balances(spec, phases, state):
@with_meta_tags(ELECTRA_FORK_TEST_META_TAGS)
def test_fork_random_large_validator_set(spec, phases, state):
yield from run_fork_test(phases[ELECTRA], state)
+
+
+@with_phases(phases=[DENEB], other_phases=[ELECTRA])
+@spec_test
+@with_state
+@with_meta_tags(ELECTRA_FORK_TEST_META_TAGS)
+def test_fork_pre_activation(spec, phases, state):
+ post_spec = phases[ELECTRA]
+ state.validators[0].activation_epoch = spec.FAR_FUTURE_EPOCH
+ post_state = yield from run_fork_test(post_spec, state)
+
+ assert len(post_state.pending_balance_deposits) > 0
+
+
+@with_phases(phases=[DENEB], other_phases=[ELECTRA])
+@spec_test
+@with_state
+@with_meta_tags(ELECTRA_FORK_TEST_META_TAGS)
+def test_fork_has_compounding_withdrawal_credential(spec, phases, state):
+ post_spec = phases[ELECTRA]
+ validator = state.validators[0]
+ state.balances[0] = post_spec.MIN_ACTIVATION_BALANCE + 1
+ validator.withdrawal_credentials = post_spec.COMPOUNDING_WITHDRAWAL_PREFIX + validator.withdrawal_credentials[1:]
+ post_state = yield from run_fork_test(post_spec, state)
+
+ assert len(post_state.pending_balance_deposits) > 0
diff --git a/tests/core/pyspec/eth2spec/test/electra/sanity/blocks/test_blocks.py b/tests/core/pyspec/eth2spec/test/electra/sanity/blocks/test_blocks.py
index 4fc76e9c93..9fa4d609fc 100644
--- a/tests/core/pyspec/eth2spec/test/electra/sanity/blocks/test_blocks.py
+++ b/tests/core/pyspec/eth2spec/test/electra/sanity/blocks/test_blocks.py
@@ -42,7 +42,7 @@ def test_basic_el_withdrawal_request(spec, state):
)
block = build_empty_block_for_next_slot(spec, state)
block.body.execution_payload.withdrawal_requests = [withdrawal_request]
- block.body.execution_payload.block_hash = compute_el_block_hash(spec, block.body.execution_payload)
+ block.body.execution_payload.block_hash = compute_el_block_hash(spec, block.body.execution_payload, state)
signed_block = state_transition_and_sign_block(spec, state, block)
yield 'blocks', [signed_block]
@@ -79,7 +79,7 @@ def test_basic_btec_and_el_withdrawal_request_in_same_block(spec, state):
)
block.body.execution_payload.withdrawal_requests = [withdrawal_request]
- block.body.execution_payload.block_hash = compute_el_block_hash(spec, block.body.execution_payload)
+ block.body.execution_payload.block_hash = compute_el_block_hash(spec, block.body.execution_payload, state)
signed_block = state_transition_and_sign_block(spec, state, block)
yield 'blocks', [signed_block]
@@ -131,7 +131,7 @@ def test_basic_btec_before_el_withdrawal_request(spec, state):
)
block_2 = build_empty_block_for_next_slot(spec, state)
block_2.body.execution_payload.withdrawal_requests = [withdrawal_request]
- block_2.body.execution_payload.block_hash = compute_el_block_hash(spec, block_2.body.execution_payload)
+ block_2.body.execution_payload.block_hash = compute_el_block_hash(spec, block_2.body.execution_payload, state)
signed_block_2 = state_transition_and_sign_block(spec, state, block_2)
yield 'blocks', [signed_block_1, signed_block_2]
@@ -164,7 +164,7 @@ def test_cl_exit_and_el_withdrawal_request_in_same_block(spec, state):
block = build_empty_block_for_next_slot(spec, state)
block.body.voluntary_exits = signed_voluntary_exits
block.body.execution_payload.withdrawal_requests = [withdrawal_request]
- block.body.execution_payload.block_hash = compute_el_block_hash(spec, block.body.execution_payload)
+ block.body.execution_payload.block_hash = compute_el_block_hash(spec, block.body.execution_payload, state)
signed_block = state_transition_and_sign_block(spec, state, block)
yield 'blocks', [signed_block]
diff --git a/tests/core/pyspec/eth2spec/test/electra/sanity/blocks/test_deposit_transition.py b/tests/core/pyspec/eth2spec/test/electra/sanity/blocks/test_deposit_transition.py
index f253b6c60d..4b8c1cd4f8 100644
--- a/tests/core/pyspec/eth2spec/test/electra/sanity/blocks/test_deposit_transition.py
+++ b/tests/core/pyspec/eth2spec/test/electra/sanity/blocks/test_deposit_transition.py
@@ -103,7 +103,7 @@ def prepare_state_and_block(spec,
# Assign deposits and deposit requests
block.body.deposits = deposits
block.body.execution_payload.deposit_requests = deposit_requests
- block.body.execution_payload.block_hash = compute_el_block_hash(spec, block.body.execution_payload)
+ block.body.execution_payload.block_hash = compute_el_block_hash(spec, block.body.execution_payload, state)
return state, block
@@ -220,7 +220,7 @@ def test_deposit_transition__deposit_and_top_up_same_block(spec, state):
# Artificially assign deposit's pubkey to a deposit request of the same block
top_up_keys = [block.body.deposits[0].data.pubkey]
block.body.execution_payload.deposit_requests[0].pubkey = top_up_keys[0]
- block.body.execution_payload.block_hash = compute_el_block_hash(spec, block.body.execution_payload)
+ block.body.execution_payload.block_hash = compute_el_block_hash(spec, block.body.execution_payload, state)
pre_pending_deposits = len(state.pending_balance_deposits)
diff --git a/tests/core/pyspec/eth2spec/test/helpers/block.py b/tests/core/pyspec/eth2spec/test/helpers/block.py
index 84770623e3..96a0155732 100644
--- a/tests/core/pyspec/eth2spec/test/helpers/block.py
+++ b/tests/core/pyspec/eth2spec/test/helpers/block.py
@@ -1,5 +1,6 @@
from eth2spec.test.helpers.execution_payload import build_empty_execution_payload
-from eth2spec.test.helpers.forks import is_post_whisk, is_post_altair, is_post_bellatrix
+from eth2spec.test.helpers.execution_payload import build_empty_signed_execution_payload_header
+from eth2spec.test.helpers.forks import is_post_whisk, is_post_altair, is_post_bellatrix, is_post_eip7732
from eth2spec.test.helpers.keys import privkeys, whisk_ks_initial, whisk_ks_final
from eth2spec.utils import bls
from eth2spec.utils.bls import only_with_bls
@@ -117,6 +118,11 @@ def build_empty_block(spec, state, slot=None, proposer_index=None):
if is_post_altair(spec):
empty_block.body.sync_aggregate.sync_committee_signature = spec.G2_POINT_AT_INFINITY
+ if is_post_eip7732(spec):
+ signed_header = build_empty_signed_execution_payload_header(spec, state)
+ empty_block.body.signed_execution_payload_header = signed_header
+ return empty_block
+
if is_post_bellatrix(spec):
empty_block.body.execution_payload = build_empty_execution_payload(spec, state)
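With the early return added above, a post-EIP7732 block body commits to a signed execution payload header instead of embedding a full payload. A hedged sketch of what a caller can rely on after this change (assuming a post-Bellatrix `spec` and the helpers defined in this diff):

```python
empty_block = build_empty_block(spec, state)
if is_post_eip7732(spec):
    # ePBS-style body: only the builder's signed header is committed.
    header = empty_block.body.signed_execution_payload_header.message
    assert header.parent_block_hash == state.latest_block_hash
else:
    # Earlier forks embed the full (empty) execution payload as before.
    payload = empty_block.body.execution_payload
    assert payload.parent_hash == state.latest_execution_payload_header.block_hash
```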
diff --git a/tests/core/pyspec/eth2spec/test/helpers/constants.py b/tests/core/pyspec/eth2spec/test/helpers/constants.py
index ed398516cd..7e5d0a0f4c 100644
--- a/tests/core/pyspec/eth2spec/test/helpers/constants.py
+++ b/tests/core/pyspec/eth2spec/test/helpers/constants.py
@@ -20,6 +20,7 @@
ELECTRA = SpecForkName('electra')
WHISK = SpecForkName('whisk')
EIP7594 = SpecForkName('eip7594')
+EIP7732 = SpecForkName('eip7732')
#
# SpecFork settings
@@ -39,11 +40,11 @@
EIP7594,
)
# The forks that have light client specs
-LIGHT_CLIENT_TESTING_FORKS = (*[item for item in MAINNET_FORKS if item != PHASE0],)
+LIGHT_CLIENT_TESTING_FORKS = (*[item for item in MAINNET_FORKS if item != PHASE0], ELECTRA)
# The forks that output to the test vectors.
TESTGEN_FORKS = (*MAINNET_FORKS, ELECTRA, EIP7594, WHISK)
# Forks allowed in the test runner `--fork` flag, to fail fast in case of typos
-ALLOWED_TEST_RUNNER_FORKS = (*ALL_PHASES, WHISK)
+ALLOWED_TEST_RUNNER_FORKS = (*ALL_PHASES, WHISK, EIP7732)
# NOTE: the same definition as in `pysetup/md_doc_paths.py`
PREVIOUS_FORK_OF = {
@@ -57,6 +58,7 @@
# Experimental patches
WHISK: CAPELLA,
EIP7594: DENEB,
+ EIP7732: ELECTRA,
}
# For fork transition tests
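`PREVIOUS_FORK_OF` is the single source of truth for fork lineage, so registering `EIP7732: ELECTRA` is all the `is_post_*` guards need. A standalone illustration of the lookup (a model of how `is_post_fork` walks the table, not the pyspec implementation itself):

```python
def is_descendant(fork, ancestor, previous_fork_of):
    """Walk the lineage table until `ancestor` is found or the root is hit."""
    while fork is not None:
        if fork == ancestor:
            return True
        fork = previous_fork_of.get(fork)
    return False

# With the new entry: EIP7732 -> ELECTRA -> DENEB -> ... -> PHASE0
assert is_descendant(EIP7732, ELECTRA, PREVIOUS_FORK_OF)
assert not is_descendant(ELECTRA, EIP7732, PREVIOUS_FORK_OF)
```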
diff --git a/tests/core/pyspec/eth2spec/test/helpers/electra/fork.py b/tests/core/pyspec/eth2spec/test/helpers/electra/fork.py
index 39a43a5233..0067a8cc07 100644
--- a/tests/core/pyspec/eth2spec/test/helpers/electra/fork.py
+++ b/tests/core/pyspec/eth2spec/test/helpers/electra/fork.py
@@ -63,3 +63,5 @@ def run_fork_test(post_spec, pre_state):
assert post_state.fork.epoch == post_spec.get_current_epoch(post_state)
yield 'post', post_state
+
+ return post_state
diff --git a/tests/core/pyspec/eth2spec/test/helpers/epoch_processing.py b/tests/core/pyspec/eth2spec/test/helpers/epoch_processing.py
index 44b42aff91..80302e111d 100644
--- a/tests/core/pyspec/eth2spec/test/helpers/epoch_processing.py
+++ b/tests/core/pyspec/eth2spec/test/helpers/epoch_processing.py
@@ -22,6 +22,8 @@ def get_process_calls(spec):
'charge_confirmed_header_fees', # sharding
'reset_pending_headers', # sharding
'process_eth1_data_reset',
+ 'process_pending_balance_deposits', # electra
+ 'process_pending_consolidations', # electra
'process_effective_balance_updates',
'process_slashings_reset',
'process_randao_mixes_reset',
@@ -72,3 +74,9 @@ def run_epoch_processing_with(spec, state, process_name: str):
yield 'pre', state
getattr(spec, process_name)(state)
yield 'post', state
+
+
+def compute_state_by_epoch_processing_to(spec, state, process_name: str):
+ state_copy = state.copy()
+ run_epoch_processing_to(spec, state_copy, process_name)
+ return state_copy
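The new `compute_state_by_epoch_processing_to` is a non-mutating wrapper around `run_epoch_processing_to`: it returns a copy of the state as it stands immediately before the named sub-transition runs. A hedged usage sketch:

```python
# Inspect the deposit queue just before it is processed, without
# touching the caller's `state`.
state_before = compute_state_by_epoch_processing_to(
    spec, state, 'process_pending_balance_deposits'
)
queued = len(state_before.pending_balance_deposits)
# `state` itself is unchanged; `state_before` already has every earlier
# epoch sub-transition (justification, rewards, registry updates, ...)
# applied.
```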
diff --git a/tests/core/pyspec/eth2spec/test/helpers/execution_payload.py b/tests/core/pyspec/eth2spec/test/helpers/execution_payload.py
index dbfd29e671..1fbb12d7ba 100644
--- a/tests/core/pyspec/eth2spec/test/helpers/execution_payload.py
+++ b/tests/core/pyspec/eth2spec/test/helpers/execution_payload.py
@@ -3,16 +3,31 @@
from rlp import encode
from rlp.sedes import big_endian_int, Binary, List
+from eth2spec.test.helpers.keys import privkeys
+from eth2spec.utils.ssz.ssz_impl import hash_tree_root
from eth2spec.debug.random_value import get_random_bytes_list
from eth2spec.test.helpers.withdrawals import get_expected_withdrawals
from eth2spec.test.helpers.forks import (
is_post_capella,
is_post_deneb,
is_post_electra,
+ is_post_eip7732,
)
def get_execution_payload_header(spec, execution_payload):
+ if is_post_eip7732(spec):
+ return spec.ExecutionPayloadHeader(
+ parent_block_hash=execution_payload.parent_hash,
+ parent_block_root=spec.Root(), # TODO: Fix this
+ block_hash=execution_payload.block_hash,
+ gas_limit=execution_payload.gas_limit,
+ builder_index=spec.ValidatorIndex(0), # TODO: Fix this
+ slot=spec.Slot(0), # TODO: Fix this
+ value=spec.Gwei(0), # TODO: Fix this
+ blob_kzg_commitments_root=spec.Root() # TODO: Fix this
+ )
+
payload_header = spec.ExecutionPayloadHeader(
parent_hash=execution_payload.parent_hash,
fee_recipient=execution_payload.fee_recipient,
@@ -59,10 +74,14 @@ def compute_el_header_block_hash(spec,
payload_header,
transactions_trie_root,
withdrawals_trie_root=None,
+ parent_beacon_block_root=None,
requests_trie_root=None):
"""
Computes the RLP execution block hash described by an `ExecutionPayloadHeader`.
"""
+ if is_post_eip7732(spec):
+ return spec.Hash32()
+
execution_payload_header_rlp = [
# parent_hash
(Binary(32, 32), payload_header.parent_hash),
@@ -106,8 +125,7 @@ def compute_el_header_block_hash(spec,
# excess_blob_gas
execution_payload_header_rlp.append((big_endian_int, payload_header.excess_blob_gas))
# parent_beacon_root
- empty_root = bytes.fromhex("0000000000000000000000000000000000000000000000000000000000000000")
- execution_payload_header_rlp.append((Binary(32, 32), empty_root))
+ execution_payload_header_rlp.append((Binary(32, 32), parent_beacon_block_root))
if is_post_electra(spec):
# requests_root
execution_payload_header_rlp.append((Binary(32, 32), requests_trie_root))
@@ -186,15 +204,18 @@ def get_consolidation_request_rlp_bytes(consolidation_request):
return b"\x02" + encode(values, sedes)
-def compute_el_block_hash(spec, payload):
+def compute_el_block_hash(spec, payload, pre_state):
transactions_trie_root = compute_trie_root_from_indexed_data(payload.transactions)
withdrawals_trie_root = None
+ parent_beacon_block_root = None
requests_trie_root = None
if is_post_capella(spec):
withdrawals_encoded = [get_withdrawal_rlp(withdrawal) for withdrawal in payload.withdrawals]
withdrawals_trie_root = compute_trie_root_from_indexed_data(withdrawals_encoded)
+ if is_post_deneb(spec):
+ parent_beacon_block_root = pre_state.latest_block_header.hash_tree_root()
if is_post_electra(spec):
requests_encoded = []
requests_encoded += [get_deposit_request_rlp_bytes(request) for request in payload.deposit_requests]
@@ -210,10 +231,39 @@ def compute_el_block_hash(spec, payload):
payload_header,
transactions_trie_root,
withdrawals_trie_root,
+ parent_beacon_block_root,
requests_trie_root,
)
+def build_empty_post_eip7732_execution_payload_header(spec, state):
+ if not is_post_eip7732(spec):
+        return None  # pre-EIP7732 forks have no payload header envelope to build
+ parent_block_root = hash_tree_root(state.latest_block_header)
+ return spec.ExecutionPayloadHeader(
+ parent_block_hash=state.latest_block_hash,
+ parent_block_root=parent_block_root,
+ block_hash=spec.Hash32(),
+ gas_limit=spec.uint64(0),
+ builder_index=spec.ValidatorIndex(0),
+ slot=state.slot,
+ value=spec.Gwei(0),
+ blob_kzg_commitments_root=spec.Root()
+ )
+
+
+def build_empty_signed_execution_payload_header(spec, state):
+ if not is_post_eip7732(spec):
+        return None  # pre-EIP7732 forks have no signed header to build
+ message = build_empty_post_eip7732_execution_payload_header(spec, state)
+ privkey = privkeys[0]
+ signature = spec.get_execution_payload_header_signature(state, message, privkey)
+ return spec.SignedExecutionPayloadHeader(
+ message=message,
+ signature=signature,
+ )
+
+
def build_empty_execution_payload(spec, state, randao_mix=None):
"""
Assuming a pre-state of the same slot, build a valid ExecutionPayload without any transactions.
@@ -228,18 +278,19 @@ def build_empty_execution_payload(spec, state, randao_mix=None):
payload = spec.ExecutionPayload(
parent_hash=latest.block_hash,
fee_recipient=spec.ExecutionAddress(),
- state_root=latest.state_root, # no changes to the state
receipts_root=spec.Bytes32(bytes.fromhex("1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347")),
logs_bloom=spec.ByteVector[spec.BYTES_PER_LOGS_BLOOM](), # TODO: zeroed logs bloom for empty logs ok?
- block_number=latest.block_number + 1,
prev_randao=randao_mix,
- gas_limit=latest.gas_limit, # retain same limit
gas_used=0, # empty block, 0 gas
timestamp=timestamp,
extra_data=spec.ByteList[spec.MAX_EXTRA_DATA_BYTES](),
- base_fee_per_gas=latest.base_fee_per_gas, # retain same base_fee
transactions=empty_txs,
)
+ if not is_post_eip7732(spec):
+ payload.state_root = latest.state_root # no changes to the state
+ payload.block_number = latest.block_number + 1
+ payload.gas_limit = latest.gas_limit # retain same limit
+ payload.base_fee_per_gas = latest.base_fee_per_gas # retain same base_fee
if is_post_capella(spec):
payload.withdrawals = get_expected_withdrawals(spec, state)
if is_post_deneb(spec):
@@ -250,7 +301,7 @@ def build_empty_execution_payload(spec, state, randao_mix=None):
payload.withdrawal_requests = []
payload.consolidation_requests = []
- payload.block_hash = compute_el_block_hash(spec, payload)
+ payload.block_hash = compute_el_block_hash(spec, payload, state)
return payload
@@ -263,9 +314,9 @@ def build_randomized_execution_payload(spec, state, rng):
execution_payload.logs_bloom = spec.ByteVector[spec.BYTES_PER_LOGS_BLOOM](
get_random_bytes_list(rng, spec.BYTES_PER_LOGS_BLOOM)
)
- execution_payload.block_number = rng.randint(0, 10e10)
- execution_payload.gas_limit = rng.randint(0, 10e10)
- execution_payload.gas_used = rng.randint(0, 10e10)
+ execution_payload.block_number = rng.randint(0, int(10e10))
+ execution_payload.gas_limit = rng.randint(0, int(10e10))
+ execution_payload.gas_used = rng.randint(0, int(10e10))
extra_data_length = rng.randint(0, spec.MAX_EXTRA_DATA_BYTES)
execution_payload.extra_data = spec.ByteList[spec.MAX_EXTRA_DATA_BYTES](
get_random_bytes_list(rng, extra_data_length)
@@ -278,7 +329,7 @@ def build_randomized_execution_payload(spec, state, rng):
for _ in range(num_transactions)
]
- execution_payload.block_hash = compute_el_block_hash(spec, execution_payload)
+ execution_payload.block_hash = compute_el_block_hash(spec, execution_payload, state)
return execution_payload
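Putting the new builders together, a hedged sketch of constructing the empty signed header for a post-EIP7732 state (helper names as defined above):

```python
signed_header = build_empty_signed_execution_payload_header(spec, state)
message = signed_header.message
assert message.slot == state.slot
assert message.parent_block_hash == state.latest_block_hash
assert message.parent_block_root == hash_tree_root(state.latest_block_header)
# The signature is made with privkeys[0], matching the default
# builder_index=0 set by build_empty_post_eip7732_execution_payload_header.
```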
diff --git a/tests/core/pyspec/eth2spec/test/helpers/fork_choice.py b/tests/core/pyspec/eth2spec/test/helpers/fork_choice.py
index 094e2e8a5c..8598870fb6 100644
--- a/tests/core/pyspec/eth2spec/test/helpers/fork_choice.py
+++ b/tests/core/pyspec/eth2spec/test/helpers/fork_choice.py
@@ -178,7 +178,7 @@ def add_block(spec,
# Check blob_data
if blob_data is not None:
- blobs = spec.List[spec.Blob, spec.MAX_BLOBS_PER_BLOCK](blob_data.blobs)
+ blobs = spec.List[spec.Blob, spec.config.MAX_BLOBS_PER_BLOCK](blob_data.blobs)
blobs_root = blobs.hash_tree_root()
yield get_blobs_file_name(blobs_root=blobs_root), blobs
diff --git a/tests/core/pyspec/eth2spec/test/helpers/forks.py b/tests/core/pyspec/eth2spec/test/helpers/forks.py
index 5ea03c31b3..288ad0d9e9 100644
--- a/tests/core/pyspec/eth2spec/test/helpers/forks.py
+++ b/tests/core/pyspec/eth2spec/test/helpers/forks.py
@@ -1,6 +1,6 @@
from .constants import (
PHASE0, ALTAIR, BELLATRIX, CAPELLA, DENEB,
- ELECTRA, WHISK,
+ ELECTRA, WHISK, EIP7732,
PREVIOUS_FORK_OF,
)
@@ -45,6 +45,10 @@ def is_post_whisk(spec):
return is_post_fork(spec.fork, WHISK)
+def is_post_eip7732(spec):
+ return is_post_fork(spec.fork, EIP7732)
+
+
def get_spec_for_fork_version(spec, fork_version, phases):
if phases is None:
return spec
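Because `is_post_fork` resolves through `PREVIOUS_FORK_OF`, the new predicate composes with the existing ones rather than standing alone. A small sketch of the invariant tests can rely on:

```python
# On an EIP-7732 spec every earlier post-* predicate also holds, since
# EIP7732 descends from ELECTRA in PREVIOUS_FORK_OF.
if is_post_eip7732(spec):
    assert is_post_electra(spec)
    assert is_post_deneb(spec)
```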
diff --git a/tests/core/pyspec/eth2spec/test/helpers/genesis.py b/tests/core/pyspec/eth2spec/test/helpers/genesis.py
index 4013b67bf2..95c1e97e55 100644
--- a/tests/core/pyspec/eth2spec/test/helpers/genesis.py
+++ b/tests/core/pyspec/eth2spec/test/helpers/genesis.py
@@ -6,7 +6,7 @@
compute_el_header_block_hash,
)
from eth2spec.test.helpers.forks import (
- is_post_altair, is_post_bellatrix, is_post_capella, is_post_electra, is_post_whisk,
+ is_post_altair, is_post_bellatrix, is_post_capella, is_post_deneb, is_post_electra, is_post_whisk,
)
from eth2spec.test.helpers.keys import pubkeys
from eth2spec.test.helpers.whisk import compute_whisk_initial_tracker_cached, compute_whisk_initial_k_commitment_cached
@@ -65,10 +65,13 @@ def get_sample_genesis_execution_payload_header(spec,
transactions_trie_root = bytes.fromhex("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421")
withdrawals_trie_root = None
+ parent_beacon_block_root = None
requests_trie_root = None
if is_post_capella(spec):
withdrawals_trie_root = bytes.fromhex("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421")
+ if is_post_deneb(spec):
+ parent_beacon_block_root = bytes.fromhex("0000000000000000000000000000000000000000000000000000000000000000")
if is_post_electra(spec):
requests_trie_root = bytes.fromhex("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421")
@@ -77,6 +80,7 @@ def get_sample_genesis_execution_payload_header(spec,
payload_header,
transactions_trie_root,
withdrawals_trie_root,
+ parent_beacon_block_root,
requests_trie_root,
)
return payload_header
diff --git a/tests/core/pyspec/eth2spec/test/helpers/light_client.py b/tests/core/pyspec/eth2spec/test/helpers/light_client.py
index 3bdaa46497..4638c988b5 100644
--- a/tests/core/pyspec/eth2spec/test/helpers/light_client.py
+++ b/tests/core/pyspec/eth2spec/test/helpers/light_client.py
@@ -1,8 +1,11 @@
+from eth2spec.test.helpers.constants import (
+ CAPELLA, DENEB, ELECTRA,
+)
from eth2spec.test.helpers.fork_transition import (
transition_across_forks,
)
from eth2spec.test.helpers.forks import (
- is_post_capella, is_post_deneb,
+ is_post_capella, is_post_deneb, is_post_electra
)
from eth2spec.test.helpers.sync_committee import (
compute_aggregate_sync_committee_signature,
@@ -11,6 +14,24 @@
from math import floor
+def latest_finalized_root_gindex(spec):
+ if hasattr(spec, 'FINALIZED_ROOT_GINDEX_ELECTRA'):
+ return spec.FINALIZED_ROOT_GINDEX_ELECTRA
+ return spec.FINALIZED_ROOT_GINDEX
+
+
+def latest_current_sync_committee_gindex(spec):
+ if hasattr(spec, 'CURRENT_SYNC_COMMITTEE_GINDEX_ELECTRA'):
+ return spec.CURRENT_SYNC_COMMITTEE_GINDEX_ELECTRA
+ return spec.CURRENT_SYNC_COMMITTEE_GINDEX
+
+
+def latest_next_sync_committee_gindex(spec):
+ if hasattr(spec, 'NEXT_SYNC_COMMITTEE_GINDEX_ELECTRA'):
+ return spec.NEXT_SYNC_COMMITTEE_GINDEX_ELECTRA
+ return spec.NEXT_SYNC_COMMITTEE_GINDEX
+
+
def compute_start_slot_at_sync_committee_period(spec, sync_committee_period):
return spec.compute_start_slot_at_epoch(sync_committee_period * spec.EPOCHS_PER_SYNC_COMMITTEE_PERIOD)
@@ -68,11 +89,13 @@ def create_update(spec,
if with_next:
update.next_sync_committee = attested_state.next_sync_committee
- update.next_sync_committee_branch = spec.compute_merkle_proof(attested_state, spec.NEXT_SYNC_COMMITTEE_GINDEX)
+ update.next_sync_committee_branch = spec.compute_merkle_proof(
+ attested_state, latest_next_sync_committee_gindex(spec))
if with_finality:
update.finalized_header = spec.block_to_light_client_header(finalized_block)
- update.finality_branch = spec.compute_merkle_proof(attested_state, spec.FINALIZED_ROOT_GINDEX)
+ update.finality_branch = spec.compute_merkle_proof(
+ attested_state, latest_finalized_root_gindex(spec))
update.sync_aggregate, update.signature_slot = get_sync_aggregate(
spec, attested_state, num_participants)
@@ -88,6 +111,20 @@ def needs_upgrade_to_deneb(spec, new_spec):
return is_post_deneb(new_spec) and not is_post_deneb(spec)
+def needs_upgrade_to_electra(spec, new_spec):
+ return is_post_electra(new_spec) and not is_post_electra(spec)
+
+
+def check_merkle_branch_equal(spec, new_spec, data, upgraded, gindex):
+ if is_post_electra(new_spec):
+ assert (
+ new_spec.normalize_merkle_branch(upgraded, gindex)
+ == new_spec.normalize_merkle_branch(data, gindex)
+ )
+ else:
+ assert upgraded == data
+
+
def check_lc_header_equal(spec, new_spec, data, upgraded):
assert upgraded.beacon.slot == data.beacon.slot
assert upgraded.beacon.hash_tree_root() == data.beacon.hash_tree_root()
@@ -98,15 +135,19 @@ def check_lc_header_equal(spec, new_spec, data, upgraded):
assert new_spec.get_lc_execution_root(upgraded) == new_spec.Root()
-def upgrade_lc_header_to_new_spec(spec, new_spec, data):
+def upgrade_lc_header_to_new_spec(spec, new_spec, data, phases):
upgraded = data
if needs_upgrade_to_capella(spec, new_spec):
- upgraded = new_spec.upgrade_lc_header_to_capella(upgraded)
+ upgraded = phases[CAPELLA].upgrade_lc_header_to_capella(upgraded)
check_lc_header_equal(spec, new_spec, data, upgraded)
if needs_upgrade_to_deneb(spec, new_spec):
- upgraded = new_spec.upgrade_lc_header_to_deneb(upgraded)
+ upgraded = phases[DENEB].upgrade_lc_header_to_deneb(upgraded)
+ check_lc_header_equal(spec, new_spec, data, upgraded)
+
+ if needs_upgrade_to_electra(spec, new_spec):
+ upgraded = phases[ELECTRA].upgrade_lc_header_to_electra(upgraded)
check_lc_header_equal(spec, new_spec, data, upgraded)
return upgraded
@@ -115,18 +156,28 @@ def upgrade_lc_header_to_new_spec(spec, new_spec, data):
def check_lc_bootstrap_equal(spec, new_spec, data, upgraded):
check_lc_header_equal(spec, new_spec, data.header, upgraded.header)
assert upgraded.current_sync_committee == data.current_sync_committee
- assert upgraded.current_sync_committee_branch == data.current_sync_committee_branch
+ check_merkle_branch_equal(
+ spec,
+ new_spec,
+ data.current_sync_committee_branch,
+ upgraded.current_sync_committee_branch,
+ latest_current_sync_committee_gindex(new_spec),
+ )
-def upgrade_lc_bootstrap_to_new_spec(spec, new_spec, data):
+def upgrade_lc_bootstrap_to_new_spec(spec, new_spec, data, phases):
upgraded = data
if needs_upgrade_to_capella(spec, new_spec):
- upgraded = new_spec.upgrade_lc_bootstrap_to_capella(upgraded)
+ upgraded = phases[CAPELLA].upgrade_lc_bootstrap_to_capella(upgraded)
check_lc_bootstrap_equal(spec, new_spec, data, upgraded)
if needs_upgrade_to_deneb(spec, new_spec):
- upgraded = new_spec.upgrade_lc_bootstrap_to_deneb(upgraded)
+ upgraded = phases[DENEB].upgrade_lc_bootstrap_to_deneb(upgraded)
+ check_lc_bootstrap_equal(spec, new_spec, data, upgraded)
+
+ if needs_upgrade_to_electra(spec, new_spec):
+ upgraded = phases[ELECTRA].upgrade_lc_bootstrap_to_electra(upgraded)
check_lc_bootstrap_equal(spec, new_spec, data, upgraded)
return upgraded
@@ -135,21 +186,38 @@ def upgrade_lc_bootstrap_to_new_spec(spec, new_spec, data):
def check_lc_update_equal(spec, new_spec, data, upgraded):
check_lc_header_equal(spec, new_spec, data.attested_header, upgraded.attested_header)
assert upgraded.next_sync_committee == data.next_sync_committee
- assert upgraded.next_sync_committee_branch == data.next_sync_committee_branch
+ check_merkle_branch_equal(
+ spec,
+ new_spec,
+ data.next_sync_committee_branch,
+ upgraded.next_sync_committee_branch,
+ latest_next_sync_committee_gindex(new_spec),
+ )
check_lc_header_equal(spec, new_spec, data.finalized_header, upgraded.finalized_header)
+ check_merkle_branch_equal(
+ spec,
+ new_spec,
+ data.finality_branch,
+ upgraded.finality_branch,
+ latest_finalized_root_gindex(new_spec),
+ )
assert upgraded.sync_aggregate == data.sync_aggregate
assert upgraded.signature_slot == data.signature_slot
-def upgrade_lc_update_to_new_spec(spec, new_spec, data):
+def upgrade_lc_update_to_new_spec(spec, new_spec, data, phases):
upgraded = data
if needs_upgrade_to_capella(spec, new_spec):
- upgraded = new_spec.upgrade_lc_update_to_capella(upgraded)
+ upgraded = phases[CAPELLA].upgrade_lc_update_to_capella(upgraded)
check_lc_update_equal(spec, new_spec, data, upgraded)
if needs_upgrade_to_deneb(spec, new_spec):
- upgraded = new_spec.upgrade_lc_update_to_deneb(upgraded)
+ upgraded = phases[DENEB].upgrade_lc_update_to_deneb(upgraded)
+ check_lc_update_equal(spec, new_spec, data, upgraded)
+
+ if needs_upgrade_to_electra(spec, new_spec):
+ upgraded = phases[ELECTRA].upgrade_lc_update_to_electra(upgraded)
check_lc_update_equal(spec, new_spec, data, upgraded)
return upgraded
@@ -158,19 +226,30 @@ def upgrade_lc_update_to_new_spec(spec, new_spec, data):
def check_lc_finality_update_equal(spec, new_spec, data, upgraded):
check_lc_header_equal(spec, new_spec, data.attested_header, upgraded.attested_header)
check_lc_header_equal(spec, new_spec, data.finalized_header, upgraded.finalized_header)
+ check_merkle_branch_equal(
+ spec,
+ new_spec,
+ data.finality_branch,
+ upgraded.finality_branch,
+ latest_finalized_root_gindex(new_spec),
+ )
assert upgraded.sync_aggregate == data.sync_aggregate
assert upgraded.signature_slot == data.signature_slot
-def upgrade_lc_finality_update_to_new_spec(spec, new_spec, data):
+def upgrade_lc_finality_update_to_new_spec(spec, new_spec, data, phases):
upgraded = data
if needs_upgrade_to_capella(spec, new_spec):
- upgraded = new_spec.upgrade_lc_finality_update_to_capella(upgraded)
+ upgraded = phases[CAPELLA].upgrade_lc_finality_update_to_capella(upgraded)
check_lc_finality_update_equal(spec, new_spec, data, upgraded)
if needs_upgrade_to_deneb(spec, new_spec):
- upgraded = new_spec.upgrade_lc_finality_update_to_deneb(upgraded)
+ upgraded = phases[DENEB].upgrade_lc_finality_update_to_deneb(upgraded)
+ check_lc_finality_update_equal(spec, new_spec, data, upgraded)
+
+ if needs_upgrade_to_electra(spec, new_spec):
+ upgraded = phases[ELECTRA].upgrade_lc_finality_update_to_electra(upgraded)
check_lc_finality_update_equal(spec, new_spec, data, upgraded)
return upgraded
@@ -189,15 +268,19 @@ def check_lc_store_equal(spec, new_spec, data, upgraded):
assert upgraded.current_max_active_participants == data.current_max_active_participants
-def upgrade_lc_store_to_new_spec(spec, new_spec, data):
+def upgrade_lc_store_to_new_spec(spec, new_spec, data, phases):
upgraded = data
if needs_upgrade_to_capella(spec, new_spec):
- upgraded = new_spec.upgrade_lc_store_to_capella(upgraded)
+ upgraded = phases[CAPELLA].upgrade_lc_store_to_capella(upgraded)
check_lc_store_equal(spec, new_spec, data, upgraded)
if needs_upgrade_to_deneb(spec, new_spec):
- upgraded = new_spec.upgrade_lc_store_to_deneb(upgraded)
+ upgraded = phases[DENEB].upgrade_lc_store_to_deneb(upgraded)
+ check_lc_store_equal(spec, new_spec, data, upgraded)
+
+ if needs_upgrade_to_electra(spec, new_spec):
+ upgraded = phases[ELECTRA].upgrade_lc_store_to_electra(upgraded)
check_lc_store_equal(spec, new_spec, data, upgraded)
return upgraded
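All five upgrade paths above share two mechanics: each hop dispatches through `phases[...]` so the target fork's own upgrade helper runs, and Merkle branches are compared via `normalize_merkle_branch` because Electra deepens the light-client gindices. A hedged model of the normalization idea (the real helper is the spec's `normalize_merkle_branch`; this sketch only illustrates its assumed behavior):

```python
from math import floor, log2

def normalize_branch_sketch(branch, gindex):
    """Left-pad a Merkle branch with zero hashes up to the depth implied by
    `gindex`, so branches proved at different depths compare like-for-like."""
    depth = floor(log2(gindex))
    padding = depth - len(branch)
    return [b'\x00' * 32] * padding + list(branch)
```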
diff --git a/tests/core/pyspec/eth2spec/test/helpers/state.py b/tests/core/pyspec/eth2spec/test/helpers/state.py
index 1e64bd4db2..07e7bfb478 100644
--- a/tests/core/pyspec/eth2spec/test/helpers/state.py
+++ b/tests/core/pyspec/eth2spec/test/helpers/state.py
@@ -60,6 +60,14 @@ def next_epoch(spec, state):
spec.process_slots(state, slot)
+def next_epoch_with_full_participation(spec, state):
+ """
+ Transition to the start slot of the next epoch with full participation
+ """
+ set_full_participation(spec, state)
+ next_epoch(spec, state)
+
+
def next_epoch_via_block(spec, state, insert_state_root=False):
"""
Transition to the start slot of the next epoch via a full block transition
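The new state helper simply composes two existing ones; a usage sketch:

```python
# Advance to the start of the next epoch while crediting every validator
# with full participation, e.g. to drive justification/finality in a test.
next_epoch_with_full_participation(spec, state)
assert state.slot % spec.SLOTS_PER_EPOCH == 0
```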
diff --git a/tests/core/pyspec/eth2spec/test/utils/randomized_block_tests.py b/tests/core/pyspec/eth2spec/test/utils/randomized_block_tests.py
index a527731b14..78802a6ddd 100644
--- a/tests/core/pyspec/eth2spec/test/utils/randomized_block_tests.py
+++ b/tests/core/pyspec/eth2spec/test/utils/randomized_block_tests.py
@@ -251,9 +251,9 @@ def random_block_deneb(spec, state, signed_blocks, scenario_state, rng=Random(34
block = random_block_capella(spec, state, signed_blocks, scenario_state, rng=rng)
# TODO: more commitments. blob_kzg_commitments: List[KZGCommitment, MAX_BLOBS_PER_BLOCK]
opaque_tx, _, blob_kzg_commitments, _ = get_sample_opaque_tx(
- spec, blob_count=rng.randint(0, spec.MAX_BLOBS_PER_BLOCK), rng=rng)
+ spec, blob_count=rng.randint(0, spec.config.MAX_BLOBS_PER_BLOCK), rng=rng)
block.body.execution_payload.transactions.append(opaque_tx)
- block.body.execution_payload.block_hash = compute_el_block_hash(spec, block.body.execution_payload)
+ block.body.execution_payload.block_hash = compute_el_block_hash(spec, block.body.execution_payload, state)
block.body.blob_kzg_commitments = blob_kzg_commitments
return block
diff --git a/tests/formats/kzg_7594/README.md b/tests/formats/kzg_7594/README.md
index dbd95dd3dc..f03bc3707c 100644
--- a/tests/formats/kzg_7594/README.md
+++ b/tests/formats/kzg_7594/README.md
@@ -8,6 +8,5 @@ The KZG test suite runner has the following handlers:
- [`compute_cells`](./compute_cells.md)
- [`compute_cells_and_kzg_proofs`](./compute_cells_and_kzg_proofs.md)
-- [`verify_cell_kzg_proof`](./verify_cell_kzg_proof.md)
- [`verify_cell_kzg_proof_batch`](./verify_cell_kzg_proof_batch.md)
- [`recover_all_cells`](./recover_all_cells.md)
diff --git a/tests/formats/kzg_7594/compute_cells.md b/tests/formats/kzg_7594/compute_cells.md
deleted file mode 100644
index aec118c20b..0000000000
--- a/tests/formats/kzg_7594/compute_cells.md
+++ /dev/null
@@ -1,22 +0,0 @@
-# Test format: Compute cells
-
-Compute the cells for a given `blob`.
-
-## Test case format
-
-The test data is declared in a `data.yaml` file:
-
-```yaml
-input:
- blob: Blob -- the data blob
-output: List[Cell] -- the cells
-```
-
-- `Blob` is a 131072-byte hexadecimal string, prefixed with `0x`.
-- `Cell` is a 2048-byte hexadecimal string, prefixed with `0x`.
-
-All byte(s) fields are encoded as strings, hexadecimal encoding, prefixed with `0x`.
-
-## Condition
-
-The `compute_cells` handler should compute the cells (chunks of an extended blob) for `blob`, and the result should match the expected `output`. If the blob is invalid (e.g. incorrect length or one of the 32-byte blocks does not represent a BLS field element) it should error, i.e. the output should be `null`.
diff --git a/tests/formats/kzg_7594/recover_cells_and_kzg_proofs.md b/tests/formats/kzg_7594/recover_cells_and_kzg_proofs.md
index 4e839c8ff4..b12f49c265 100644
--- a/tests/formats/kzg_7594/recover_cells_and_kzg_proofs.md
+++ b/tests/formats/kzg_7594/recover_cells_and_kzg_proofs.md
@@ -1,6 +1,6 @@
# Test format: Recover cells and KZG proofs
-Recover all cells/proofs given at least 50% of the original `cells` and `proofs`.
+Recover all cells/proofs given at least 50% of the original `cells`.
## Test case format
@@ -21,4 +21,4 @@ All byte(s) fields are encoded as strings, hexadecimal encoding, prefixed with `
## Condition
-The `recover_cells_and_kzg_proofs` handler should recover missing cells and proofs, and the result should match the expected `output`. If any cell is invalid (e.g. incorrect length or one of the 32-byte blocks does not represent a BLS field element), any proof is invalid (e.g. not on the curve or not in the G1 subgroup of the BLS curve), or any `cell_index` is invalid (e.g. greater than the number of cells for an extended blob), it should error, i.e. the output should be `null`.
+The `recover_cells_and_kzg_proofs` handler should recover missing cells and proofs, and the result should match the expected `output`. If any cell is invalid (e.g. incorrect length or one of the 32-byte blocks does not represent a BLS field element), or any `cell_index` is invalid (e.g. greater than the number of cells for an extended blob), it should error, i.e. the output should be `null`.
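With proofs dropped from the input, a consumer feeds only `cell_indices` and `cells` to the handler. A hedged consumer sketch (assumptions: PyYAML is available, and `spec` is a module exposing `recover_cells_and_kzg_proofs`):

```python
import yaml

with open('data.yaml') as f:
    case = yaml.safe_load(f)

cell_indices = case['input']['cell_indices']
cells = [bytes.fromhex(c[2:]) for c in case['input']['cells']]  # strip "0x"

try:
    cells_out, proofs_out = spec.recover_cells_and_kzg_proofs(cell_indices, cells)
except Exception:
    # Invalid inputs must error; the vectors encode this as a null output.
    assert case['output'] is None
else:
    expected_cells, expected_proofs = case['output']
    assert ['0x' + bytes(c).hex() for c in cells_out] == expected_cells
    assert ['0x' + bytes(p).hex() for p in proofs_out] == expected_proofs
```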
diff --git a/tests/formats/kzg_7594/verify_cell_kzg_proof.md b/tests/formats/kzg_7594/verify_cell_kzg_proof.md
deleted file mode 100644
index 3d5f33b524..0000000000
--- a/tests/formats/kzg_7594/verify_cell_kzg_proof.md
+++ /dev/null
@@ -1,26 +0,0 @@
-# Test format: Verify cell KZG proof
-
-Use the cell KZG `proof` to verify that the KZG `commitment` for a given `cell` is correct.
-
-## Test case format
-
-The test data is declared in a `data.yaml` file:
-
-```yaml
-input:
- commitment: Bytes48 -- the KZG commitment
- cell_index: CellIndex -- the cell index
- cell: Cell -- the cell
- proof: Bytes48 -- the KZG proof for the cell
-output: bool -- true (correct proof) or false (incorrect proof)
-```
-
-- `Bytes48` is a 48-byte hexadecimal string, prefixed with `0x`.
-- `CellIndex` is an unsigned 64-bit integer.
-- `Cell` is a 2048-byte hexadecimal string, prefixed with `0x`.
-
-All byte(s) fields are encoded as strings, hexadecimal encoding, prefixed with `0x`.
-
-## Condition
-
-The `verify_cell_kzg_proof` handler should verify that `commitment` is a correct KZG commitment to `cell` by using the cell KZG proof `proof`, and the result should match the expected `output`. If the commitment or proof is invalid (e.g. not on the curve or not in the G1 subgroup of the BLS curve), `cell` is invalid (e.g. incorrect length or one of the 32-byte blocks does not represent a BLS field element), or `cell_index` is invalid (e.g. greater than the number of cells for an extended blob), it should error, i.e. the output should be `null`.
diff --git a/tests/formats/kzg_7594/verify_cell_kzg_proof_batch.md b/tests/formats/kzg_7594/verify_cell_kzg_proof_batch.md
index 439d1e25ae..84bfaa1c2e 100644
--- a/tests/formats/kzg_7594/verify_cell_kzg_proof_batch.md
+++ b/tests/formats/kzg_7594/verify_cell_kzg_proof_batch.md
@@ -1,6 +1,6 @@
# Test format: Verify cell KZG proof batch
-Use the cell KZG `proofs` to verify that the KZG `row_commitments` for the given `cells` are correct.
+Use the cell KZG `proofs` to verify that the KZG `commitments` for the given `cells` are correct.
## Test case format
@@ -8,21 +8,19 @@ The test data is declared in a `data.yaml` file:
```yaml
input:
- row_commitments: List[Bytes48] -- the KZG commitments
- row_indices: List[RowIndex] -- the commitment index for each cell
- column_indices: List[ColumnIndex] -- the column index for each cell
+    commitments: List[Bytes48] -- the KZG commitment for each cell
+ cell_indices: List[CellIndex] -- the cell index for each cell
cells: List[Cell] -- the cells
proofs: List[Bytes48] -- the KZG proof for each cell
output: bool -- true (all proofs are correct) or false (some proofs incorrect)
```
- `Bytes48` is a 48-byte hexadecimal string, prefixed with `0x`.
-- `RowIndex` is an unsigned 64-bit integer.
-- `ColumnIndex` is an unsigned 64-bit integer.
+- `CellIndex` is an unsigned 64-bit integer.
- `Cell` is a 2048-byte hexadecimal string, prefixed with `0x`.
All byte(s) fields are encoded as strings, hexadecimal encoding, prefixed with `0x`.
## Condition
-The `verify_cell_kzg_proof_batch` handler should verify that `row_commitments` are correct KZG commitments to `cells` by using the cell KZG proofs `proofs`, and the result should match the expected `output`. If any of the commitments or proofs are invalid (e.g. not on the curve or not in the G1 subgroup of the BLS curve), any cell is invalid (e.g. incorrect length or one of the 32-byte blocks does not represent a BLS field element), or any `cell_index` is invalid (e.g. greater than the number of cells for an extended blob), it should error, i.e. the output should be `null`.
+The `verify_cell_kzg_proof_batch` handler should verify that `commitments` are correct KZG commitments to `cells` by using the cell KZG proofs `proofs`, and the result should match the expected `output`. If any of the commitments or proofs are invalid (e.g. not on the curve or not in the G1 subgroup of the BLS curve), any cell is invalid (e.g. incorrect length or one of the 32-byte blocks does not represent a BLS field element), or any `cell_index` is invalid (e.g. greater than the number of cells for an extended blob), it should error, i.e. the output should be `null`.
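The batch interface is now strictly per-cell: the i-th entries of `commitments`, `cell_indices`, `cells`, and `proofs` together describe one cell. A hedged consumer sketch, under the same assumptions as the recovery sketch above:

```python
inp = case['input']  # `case` loaded from data.yaml as before
try:
    ok = spec.verify_cell_kzg_proof_batch(
        [bytes.fromhex(c[2:]) for c in inp['commitments']],
        inp['cell_indices'],
        [bytes.fromhex(c[2:]) for c in inp['cells']],
        [bytes.fromhex(p[2:]) for p in inp['proofs']],
    )
except Exception:
    assert case['output'] is None  # malformed inputs must error
else:
    assert ok == case['output']
```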
diff --git a/tests/generators/kzg_7594/main.py b/tests/generators/kzg_7594/main.py
index e39b9d64cc..814a840537 100644
--- a/tests/generators/kzg_7594/main.py
+++ b/tests/generators/kzg_7594/main.py
@@ -13,14 +13,12 @@
from eth2spec.test.utils.kzg_tests import (
CELL_RANDOM_VALID1,
CELL_RANDOM_VALID2,
- G1,
INVALID_BLOBS,
INVALID_G1_POINTS,
INVALID_INDIVIDUAL_CELL_BYTES,
VALID_BLOBS,
VALID_CELLS_AND_PROOFS,
VALID_COMMITMENTS,
- VALID_INDIVIDUAL_RANDOM_CELL_BYTES,
bls_add_one,
encode_hex_list,
expect_exception,
@@ -59,160 +57,6 @@ def case_compute_cells_and_kzg_proofs():
}
-###############################################################################
-# Test cases for verify_cell_kzg_proof
-###############################################################################
-
-def case_verify_cell_kzg_proof():
- # Valid cases
- for i in range(len(VALID_BLOBS)):
- cells, proofs = VALID_CELLS_AND_PROOFS[i]
- commitment = VALID_COMMITMENTS[i]
- cell_index = (2 ** i - 1) % spec.CELLS_PER_EXT_BLOB
- cell = cells[cell_index]
- proof = proofs[cell_index]
- assert spec.verify_cell_kzg_proof(commitment, cell_index, cell, proof)
- identifier = make_id(commitment, cell_index, cell, proof)
- yield f'verify_cell_kzg_proof_case_valid_{identifier}', {
- 'input': {
- 'commitment': encode_hex(commitment),
- 'cell_index': cell_index,
- 'cell': encode_hex(cell),
- 'proof': encode_hex(proof),
- },
- 'output': True
- }
-
- # Incorrect commitment
- for i in range(len(VALID_BLOBS)):
- cells, proofs = VALID_CELLS_AND_PROOFS[i]
- commitment = bls_add_one(VALID_COMMITMENTS[i])
- cell_index = 99 % spec.CELLS_PER_EXT_BLOB
- cell = cells[cell_index]
- proof = proofs[cell_index]
- assert not spec.verify_cell_kzg_proof(commitment, cell_index, cell, proof)
- identifier = make_id(commitment, cell_index, cell, proof)
- yield f'verify_cell_kzg_proof_case_incorrect_commitment_{identifier}', {
- 'input': {
- 'commitment': encode_hex(commitment),
- 'cell_index': cell_index,
- 'cell': encode_hex(cell),
- 'proof': encode_hex(proof),
- },
- 'output': False
- }
-
- # Incorrect cell
- for i in range(len(VALID_INDIVIDUAL_RANDOM_CELL_BYTES)):
- cell_index = 16 % spec.CELLS_PER_EXT_BLOB
- commitment = VALID_COMMITMENTS[i]
- cells, proofs = VALID_CELLS_AND_PROOFS[i]
- cell = VALID_INDIVIDUAL_RANDOM_CELL_BYTES[i]
- proof = proofs[cell_index]
- assert not spec.verify_cell_kzg_proof(commitment, cell_index, cell, proof)
- identifier = make_id(commitment, cell_index, cell, proof)
- yield f'verify_cell_kzg_proof_case_incorrect_cell_{identifier}', {
- 'input': {
- 'commitment': encode_hex(commitment),
- 'cell_index': cell_index,
- 'cell': encode_hex(cell),
- 'proof': encode_hex(proof),
- },
- 'output': False
- }
-
- # Incorrect proof
- for i in range(len(VALID_BLOBS)):
- cell_index = 91 % spec.CELLS_PER_EXT_BLOB
- commitment = VALID_COMMITMENTS[i]
- cells, proofs = VALID_CELLS_AND_PROOFS[i]
- cell = cells[cell_index]
- proof = bls_add_one(proofs[cell_index])
- assert not spec.verify_cell_kzg_proof(commitment, cell_index, cell, proof)
- identifier = make_id(commitment, cell_index, cell, proof)
- yield f'verify_cell_kzg_proof_case_incorrect_proof_{identifier}', {
- 'input': {
- 'commitment': encode_hex(commitment),
- 'cell_index': cell_index,
- 'cell': encode_hex(cell),
- 'proof': encode_hex(proof),
- },
- 'output': False
- }
-
- # Edge case: Invalid commitment
- for commitment in INVALID_G1_POINTS:
- cells, proofs = VALID_CELLS_AND_PROOFS[0]
- cell_index = 81 % spec.CELLS_PER_EXT_BLOB
- cell = cells[cell_index]
- proof = proofs[cell_index]
- expect_exception(spec.verify_cell_kzg_proof, commitment, cell_index, cell, proof)
- identifier = make_id(commitment, cell_index, cell, proof)
- yield f'verify_cell_kzg_proof_case_invalid_commitment_{identifier}', {
- 'input': {
- 'commitment': encode_hex(commitment),
- 'cell_index': cell_index,
- 'cell': encode_hex(cell),
- 'proof': encode_hex(proof),
- },
- 'output': None
- }
-
- # Edge case: Invalid cell_index
- for cell_index in [spec.CELLS_PER_EXT_BLOB, spec.CELLS_PER_EXT_BLOB + 1]:
- cells, proofs = VALID_CELLS_AND_PROOFS[1]
- commitment = VALID_COMMITMENTS[1]
- cell = cells[0]
- proof = proofs[0]
- expect_exception(spec.verify_cell_kzg_proof, commitment, cell_index, cell, proof)
- identifier = make_id(commitment, cell_index, cell, proof)
- yield f'verify_cell_kzg_proof_case_invalid_cell_index_{identifier}', {
- 'input': {
- 'commitment': encode_hex(commitment),
- 'cell_index': cell_index,
- 'cell': encode_hex(cell),
- 'proof': encode_hex(proof),
- },
- 'output': None
- }
-
- # Edge case: Invalid cell
- for cell in INVALID_INDIVIDUAL_CELL_BYTES:
- cell_index = 32 % spec.CELLS_PER_EXT_BLOB
- commitment = VALID_COMMITMENTS[2]
- cells, proofs = VALID_CELLS_AND_PROOFS[2]
- proof = proofs[cell_index]
- expect_exception(spec.verify_cell_kzg_proof, commitment, cell_index, cell, proof)
- identifier = make_id(commitment, cell_index, cell, proof)
- yield f'verify_cell_kzg_proof_case_invalid_cell_{identifier}', {
- 'input': {
- 'commitment': encode_hex(commitment),
- 'cell_index': cell_index,
- 'cell': encode_hex(cell),
- 'proof': encode_hex(proof),
- },
- 'output': None
- }
-
- # Edge case: Invalid proof
- for proof in INVALID_G1_POINTS:
- cells, _ = VALID_CELLS_AND_PROOFS[3]
- commitment = VALID_COMMITMENTS[3]
- cell_index = 36 % spec.CELLS_PER_EXT_BLOB
- cell = cells[cell_index]
- expect_exception(spec.verify_cell_kzg_proof, commitment, cell_index, cell, proof)
- identifier = make_id(commitment, cell_index, cell, proof)
- yield f'verify_cell_kzg_proof_case_invalid_proof_{identifier}', {
- 'input': {
- 'commitment': encode_hex(commitment),
- 'cell_index': cell_index,
- 'cell': encode_hex(cell),
- 'proof': encode_hex(proof),
- },
- 'output': None
- }
-
-
###############################################################################
# Test cases for verify_cell_kzg_proof_batch
###############################################################################
@@ -221,16 +65,14 @@ def case_verify_cell_kzg_proof_batch():
# Valid cases
for i in range(len(VALID_BLOBS)):
cells, proofs = VALID_CELLS_AND_PROOFS[i]
- row_commitments = [VALID_COMMITMENTS[i]]
- row_indices = [0] * spec.CELLS_PER_EXT_BLOB
- column_indices = list(range(spec.CELLS_PER_EXT_BLOB))
- assert spec.verify_cell_kzg_proof_batch(row_commitments, row_indices, column_indices, cells, proofs)
- identifier = make_id(row_commitments, row_indices, column_indices, cells, proofs)
+ commitments = [VALID_COMMITMENTS[i] for _ in cells]
+ cell_indices = list(range(spec.CELLS_PER_EXT_BLOB))
+ assert spec.verify_cell_kzg_proof_batch(commitments, cell_indices, cells, proofs)
+ identifier = make_id(commitments, cell_indices, cells, proofs)
yield f'verify_cell_kzg_proof_batch_case_valid_{identifier}', {
'input': {
- 'row_commitments': encode_hex_list(row_commitments),
- 'row_indices': row_indices,
- 'column_indices': column_indices,
+ 'commitments': encode_hex_list(commitments),
+ 'cell_indices': cell_indices,
'cells': encode_hex_list(cells),
'proofs': encode_hex_list(proofs),
},
@@ -238,14 +80,13 @@ def case_verify_cell_kzg_proof_batch():
}
# Valid: zero cells
- cells, row_commitments, row_indices, column_indices, proofs = [], [], [], [], []
- assert spec.verify_cell_kzg_proof_batch(row_commitments, row_indices, column_indices, cells, proofs)
- identifier = make_id(row_commitments, row_indices, column_indices, cells, proofs)
+ cells, commitments, cell_indices, proofs = [], [], [], []
+ assert spec.verify_cell_kzg_proof_batch(commitments, cell_indices, cells, proofs)
+ identifier = make_id(commitments, cell_indices, cells, proofs)
yield f'verify_cell_kzg_proof_batch_case_valid_zero_cells_{identifier}', {
'input': {
- 'row_commitments': encode_hex_list(row_commitments),
- 'row_indices': row_indices,
- 'column_indices': column_indices,
+ 'commitments': encode_hex_list(commitments),
+ 'cell_indices': cell_indices,
'cells': encode_hex_list(cells),
'proofs': encode_hex_list(proofs),
},
@@ -255,38 +96,16 @@ def case_verify_cell_kzg_proof_batch():
# Valid: Verify cells from multiple blobs
cells0, proofs0 = VALID_CELLS_AND_PROOFS[0]
cells1, proofs1 = VALID_CELLS_AND_PROOFS[1]
- row_commitments = VALID_COMMITMENTS[:2]
- row_indices = [0, 1]
- column_indices = [0, 0]
+ commitments = [VALID_COMMITMENTS[0], VALID_COMMITMENTS[1]]
+ cell_indices = [0, 0]
cells = [cells0[0], cells1[0]]
proofs = [proofs0[0], proofs1[0]]
- assert spec.verify_cell_kzg_proof_batch(row_commitments, row_indices, column_indices, cells, proofs)
- identifier = make_id(row_commitments, row_indices, column_indices, cells, proofs)
+ assert spec.verify_cell_kzg_proof_batch(commitments, cell_indices, cells, proofs)
+ identifier = make_id(commitments, cell_indices, cells, proofs)
yield f'verify_cell_kzg_proof_batch_case_valid_multiple_blobs_{identifier}', {
'input': {
- 'row_commitments': encode_hex_list(row_commitments),
- 'row_indices': row_indices,
- 'column_indices': column_indices,
- 'cells': encode_hex_list(cells),
- 'proofs': encode_hex_list(proofs),
- },
- 'output': True
- }
-
- # Valid: Unused row commitments
- cells, proofs = VALID_CELLS_AND_PROOFS[2]
- cells, proofs = cells[:3], proofs[:3]
- # Provide list of all commitments
- row_commitments = VALID_COMMITMENTS
- row_indices = [2] * len(cells)
- column_indices = list(range(len(cells)))
- assert spec.verify_cell_kzg_proof_batch(row_commitments, row_indices, column_indices, cells, proofs)
- identifier = make_id(row_commitments, row_indices, column_indices, cells, proofs)
- yield f'verify_cell_kzg_proof_batch_case_valid_unused_row_commitments_{identifier}', {
- 'input': {
- 'row_commitments': encode_hex_list(row_commitments),
- 'row_indices': row_indices,
- 'column_indices': column_indices,
+ 'commitments': encode_hex_list(commitments),
+ 'cell_indices': cell_indices,
'cells': encode_hex_list(cells),
'proofs': encode_hex_list(proofs),
},
@@ -294,39 +113,35 @@ def case_verify_cell_kzg_proof_batch():
}
# Valid: Same cell multiple times
- row_commitments = [VALID_COMMITMENTS[3]]
num_duplicates = 3
- row_indices = [0] * num_duplicates
- column_indices = [0] * num_duplicates
+ commitments = [VALID_COMMITMENTS[3]] * num_duplicates
+ cell_indices = [0] * num_duplicates
cells = [VALID_CELLS_AND_PROOFS[3][0][0]] * num_duplicates
proofs = [VALID_CELLS_AND_PROOFS[3][1][0]] * num_duplicates
- assert spec.verify_cell_kzg_proof_batch(row_commitments, row_indices, column_indices, cells, proofs)
- identifier = make_id(row_commitments, row_indices, column_indices, cells, proofs)
+ assert spec.verify_cell_kzg_proof_batch(commitments, cell_indices, cells, proofs)
+ identifier = make_id(commitments, cell_indices, cells, proofs)
yield f'verify_cell_kzg_proof_batch_case_valid_same_cell_multiple_times_{identifier}', {
'input': {
- 'row_commitments': encode_hex_list(row_commitments),
- 'row_indices': row_indices,
- 'column_indices': column_indices,
+ 'commitments': encode_hex_list(commitments),
+ 'cell_indices': cell_indices,
'cells': encode_hex_list(cells),
'proofs': encode_hex_list(proofs),
},
'output': True
}
- # Incorrect row commitment
+ # Incorrect commitment
cells, proofs = VALID_CELLS_AND_PROOFS[5]
cells, proofs = cells[:1], proofs[:1]
- # Change commitment so it's wrong
- row_commitments = [bls_add_one(VALID_COMMITMENTS[5])]
- row_indices = [0] * len(cells)
- column_indices = list(range(len(cells)))
- assert not spec.verify_cell_kzg_proof_batch(row_commitments, row_indices, column_indices, cells, proofs)
- identifier = make_id(row_commitments, row_indices, column_indices, cells, proofs)
- yield f'verify_cell_kzg_proof_batch_case_incorrect_row_commitment_{identifier}', {
+ # Use the wrong commitment
+ commitments = [bls_add_one(VALID_COMMITMENTS[5])]
+ cell_indices = list(range(len(cells)))
+ assert not spec.verify_cell_kzg_proof_batch(commitments, cell_indices, cells, proofs)
+ identifier = make_id(commitments, cell_indices, cells, proofs)
+ yield f'verify_cell_kzg_proof_batch_case_incorrect_commitment_{identifier}', {
'input': {
- 'row_commitments': encode_hex_list(row_commitments),
- 'row_indices': row_indices,
- 'column_indices': column_indices,
+ 'commitments': encode_hex_list(commitments),
+ 'cell_indices': cell_indices,
'cells': encode_hex_list(cells),
'proofs': encode_hex_list(proofs),
},
@@ -336,18 +151,16 @@ def case_verify_cell_kzg_proof_batch():
# Incorrect cell
cells, proofs = VALID_CELLS_AND_PROOFS[6]
cells, proofs = cells[:1], proofs[:1]
- row_commitments = [VALID_COMMITMENTS[6]]
- row_indices = [0] * len(cells)
- column_indices = list(range(len(cells)))
+ commitments = [VALID_COMMITMENTS[6]]
+ cell_indices = list(range(len(cells)))
# Change last cell so it's wrong
cells[-1] = CELL_RANDOM_VALID2
- assert not spec.verify_cell_kzg_proof_batch(row_commitments, row_indices, column_indices, cells, proofs)
- identifier = make_id(row_commitments, row_indices, column_indices, cells, proofs)
+ assert not spec.verify_cell_kzg_proof_batch(commitments, cell_indices, cells, proofs)
+ identifier = make_id(commitments, cell_indices, cells, proofs)
yield f'verify_cell_kzg_proof_batch_case_incorrect_cell_{identifier}', {
'input': {
- 'row_commitments': encode_hex_list(row_commitments),
- 'row_indices': row_indices,
- 'column_indices': column_indices,
+ 'commitments': encode_hex_list(commitments),
+ 'cell_indices': cell_indices,
'cells': encode_hex_list(cells),
'proofs': encode_hex_list(proofs),
},
@@ -357,81 +170,54 @@ def case_verify_cell_kzg_proof_batch():
# Incorrect proof
cells, proofs = VALID_CELLS_AND_PROOFS[0]
cells, proofs = cells[:1], proofs[:1]
- row_commitments = [VALID_COMMITMENTS[0]]
- row_indices = [0] * len(cells)
- column_indices = list(range(len(cells)))
+ commitments = [VALID_COMMITMENTS[0]]
+ cell_indices = list(range(len(cells)))
# Change last proof so it's wrong
proofs[-1] = bls_add_one(proofs[-1])
- assert not spec.verify_cell_kzg_proof_batch(row_commitments, row_indices, column_indices, cells, proofs)
- identifier = make_id(row_commitments, row_indices, column_indices, cells, proofs)
+ assert not spec.verify_cell_kzg_proof_batch(commitments, cell_indices, cells, proofs)
+ identifier = make_id(commitments, cell_indices, cells, proofs)
yield f'verify_cell_kzg_proof_batch_case_incorrect_proof_{identifier}', {
'input': {
- 'row_commitments': encode_hex_list(row_commitments),
- 'row_indices': row_indices,
- 'column_indices': column_indices,
+ 'commitments': encode_hex_list(commitments),
+ 'cell_indices': cell_indices,
'cells': encode_hex_list(cells),
'proofs': encode_hex_list(proofs),
},
'output': False
}
- # Edge case: Invalid row commitment
+ # Edge case: Invalid commitment
for i, commitment in enumerate(INVALID_G1_POINTS):
cells, proofs = VALID_CELLS_AND_PROOFS[i % len(INVALID_G1_POINTS)]
cells, proofs = cells[:1], proofs[:1]
- # Set row_commitments to the invalid commitment
- row_commitments = [commitment]
- row_indices = [0] * len(cells)
- column_indices = list(range(len(cells)))
- expect_exception(spec.verify_cell_kzg_proof_batch, row_commitments, row_indices, column_indices, cells, proofs)
- identifier = make_id(row_commitments, row_indices, column_indices, cells, proofs)
- yield f'verify_cell_kzg_proof_batch_case_invalid_row_commitment_{identifier}', {
+ # Set commitments to the invalid commitment
+ commitments = [commitment]
+ cell_indices = list(range(len(cells)))
+ expect_exception(spec.verify_cell_kzg_proof_batch, commitments, cell_indices, cells, proofs)
+ identifier = make_id(commitments, cell_indices, cells, proofs)
+ yield f'verify_cell_kzg_proof_batch_case_invalid_commitment_{identifier}', {
'input': {
- 'row_commitments': encode_hex_list(row_commitments),
- 'row_indices': row_indices,
- 'column_indices': column_indices,
+ 'commitments': encode_hex_list(commitments),
+ 'cell_indices': cell_indices,
'cells': encode_hex_list(cells),
'proofs': encode_hex_list(proofs),
},
'output': None
}
- # Edge case: Invalid row_index
- cells, proofs = VALID_CELLS_AND_PROOFS[0]
- cells, proofs = cells[:1], proofs[:1]
- row_commitments = [VALID_COMMITMENTS[0]]
- row_indices = [0] * len(cells)
- # Set first row index to an invalid value
- row_indices[0] = 1
- column_indices = list(range(len(cells)))
- expect_exception(spec.verify_cell_kzg_proof_batch, row_commitments, row_indices, column_indices, cells, proofs)
- identifier = make_id(row_commitments, row_indices, column_indices, cells, proofs)
- yield f'verify_cell_kzg_proof_batch_case_invalid_row_index_{identifier}', {
- 'input': {
- 'row_commitments': encode_hex_list(row_commitments),
- 'row_indices': row_indices,
- 'column_indices': column_indices,
- 'cells': encode_hex_list(cells),
- 'proofs': encode_hex_list(proofs),
- },
- 'output': None
- }
-
- # Edge case: Invalid column_index
+ # Edge case: Invalid cell_index
cells, proofs = VALID_CELLS_AND_PROOFS[1]
cells, proofs = cells[:1], proofs[:1]
- row_commitments = [VALID_COMMITMENTS[1]]
- row_indices = [0] * len(cells)
- column_indices = list(range(len(cells)))
- # Set first column index to an invalid value
- column_indices[0] = spec.CELLS_PER_EXT_BLOB
- expect_exception(spec.verify_cell_kzg_proof_batch, row_commitments, row_indices, column_indices, cells, proofs)
- identifier = make_id(row_commitments, row_indices, column_indices, cells, proofs)
- yield f'verify_cell_kzg_proof_batch_case_invalid_column_index_{identifier}', {
+ commitments = [VALID_COMMITMENTS[1]]
+ cell_indices = list(range(len(cells)))
+ # Set first cell index to an invalid value
+ cell_indices[0] = spec.CELLS_PER_EXT_BLOB
+ expect_exception(spec.verify_cell_kzg_proof_batch, commitments, cell_indices, cells, proofs)
+ identifier = make_id(commitments, cell_indices, cells, proofs)
+ yield f'verify_cell_kzg_proof_batch_case_invalid_cell_index_{identifier}', {
'input': {
- 'row_commitments': encode_hex_list(row_commitments),
- 'row_indices': row_indices,
- 'column_indices': column_indices,
+ 'commitments': encode_hex_list(commitments),
+ 'cell_indices': cell_indices,
'cells': encode_hex_list(cells),
'proofs': encode_hex_list(proofs),
},
@@ -442,18 +228,16 @@ def case_verify_cell_kzg_proof_batch():
for i, cell in enumerate(INVALID_INDIVIDUAL_CELL_BYTES):
cells, proofs = VALID_CELLS_AND_PROOFS[i % len(INVALID_INDIVIDUAL_CELL_BYTES)]
cells, proofs = cells[:1], proofs[:1]
- row_commitments = [VALID_COMMITMENTS[i % len(INVALID_INDIVIDUAL_CELL_BYTES)]]
- row_indices = [0] * len(cells)
- column_indices = list(range(len(cells)))
+ commitments = [VALID_COMMITMENTS[i % len(INVALID_INDIVIDUAL_CELL_BYTES)]]
+ cell_indices = list(range(len(cells)))
# Set first cell to the invalid cell
cells[0] = cell
- expect_exception(spec.verify_cell_kzg_proof_batch, row_commitments, row_indices, column_indices, cells, proofs)
- identifier = make_id(row_commitments, row_indices, column_indices, cells, proofs)
+ expect_exception(spec.verify_cell_kzg_proof_batch, commitments, cell_indices, cells, proofs)
+ identifier = make_id(commitments, cell_indices, cells, proofs)
yield f'verify_cell_kzg_proof_batch_case_invalid_cell_{identifier}', {
'input': {
- 'row_commitments': encode_hex_list(row_commitments),
- 'row_indices': row_indices,
- 'column_indices': column_indices,
+ 'commitments': encode_hex_list(commitments),
+ 'cell_indices': cell_indices,
'cells': encode_hex_list(cells),
'proofs': encode_hex_list(proofs),
},
@@ -464,125 +248,95 @@ def case_verify_cell_kzg_proof_batch():
for i, proof in enumerate(INVALID_G1_POINTS):
cells, proofs = VALID_CELLS_AND_PROOFS[i % len(INVALID_G1_POINTS)]
cells, proofs = cells[:1], proofs[:1]
- row_commitments = [VALID_COMMITMENTS[i % len(INVALID_G1_POINTS)]]
- row_indices = [0] * len(cells)
- column_indices = list(range(len(cells)))
+ commitments = [VALID_COMMITMENTS[i % len(INVALID_G1_POINTS)]]
+ cell_indices = list(range(len(cells)))
# Set first proof to the invalid proof
proofs[0] = proof
- expect_exception(spec.verify_cell_kzg_proof_batch, row_commitments, row_indices, column_indices, cells, proofs)
- identifier = make_id(row_commitments, row_indices, column_indices, cells, proofs)
+ expect_exception(spec.verify_cell_kzg_proof_batch, commitments, cell_indices, cells, proofs)
+ identifier = make_id(commitments, cell_indices, cells, proofs)
yield f'verify_cell_kzg_proof_batch_case_invalid_proof_{identifier}', {
'input': {
- 'row_commitments': encode_hex_list(row_commitments),
- 'row_indices': row_indices,
- 'column_indices': column_indices,
- 'cells': encode_hex_list(cells),
- 'proofs': encode_hex_list(proofs),
- },
- 'output': None
- }
-
- # Edge case: Missing a row commitment
- cells, proofs = VALID_CELLS_AND_PROOFS[0]
- cells, proofs = cells[:1], proofs[:1]
- # Do not include the row commitment
- row_commitments = []
- row_indices = [0] * len(cells)
- column_indices = list(range(len(cells)))
- expect_exception(spec.verify_cell_kzg_proof_batch, row_commitments, row_indices, column_indices, cells, proofs)
- identifier = make_id(row_commitments, row_indices, column_indices, cells, proofs)
- yield f'verify_cell_kzg_proof_batch_case_invalid_missing_row_commitment_{identifier}', {
- 'input': {
- 'row_commitments': encode_hex_list(row_commitments),
- 'row_indices': row_indices,
- 'column_indices': column_indices,
+ 'commitments': encode_hex_list(commitments),
+ 'cell_indices': cell_indices,
'cells': encode_hex_list(cells),
'proofs': encode_hex_list(proofs),
},
'output': None
}
- # Edge case: Missing a row index
- cells, proofs = VALID_CELLS_AND_PROOFS[1]
- cells, proofs = cells[:2], proofs[:2]
- row_commitments = [VALID_COMMITMENTS[1]]
- # Leave off one of the row indices
- row_indices = [0] * (len(cells) - 1)
- column_indices = list(range(len(cells)))
- expect_exception(spec.verify_cell_kzg_proof_batch, row_commitments, row_indices, column_indices, cells, proofs)
- identifier = make_id(row_commitments, row_indices, column_indices, cells, proofs)
- yield f'verify_cell_kzg_proof_batch_case_invalid_missing_row_index_{identifier}', {
- 'input': {
- 'row_commitments': encode_hex_list(row_commitments),
- 'row_indices': row_indices,
- 'column_indices': column_indices,
- 'cells': encode_hex_list(cells),
- 'proofs': encode_hex_list(proofs),
- },
- 'output': None
- }
+ # Edge case: Missing a commitment
+ cells, proofs = VALID_CELLS_AND_PROOFS[0]
+ cells, proofs = cells[:2], proofs[:2]
+ # Do not include the second commitment
+ commitments = [VALID_COMMITMENTS[0]]
+ cell_indices = list(range(len(cells)))
+ expect_exception(spec.verify_cell_kzg_proof_batch, commitments, cell_indices, cells, proofs)
+ identifier = make_id(commitments, cell_indices, cells, proofs)
+ yield f'verify_cell_kzg_proof_batch_case_invalid_missing_commitment_{identifier}', {
+ 'input': {
+ 'commitments': encode_hex_list(commitments),
+ 'cell_indices': cell_indices,
+ 'cells': encode_hex_list(cells),
+ 'proofs': encode_hex_list(proofs),
+ },
+ 'output': None
+ }
- # Edge case: Missing a column index
- cells, proofs = VALID_CELLS_AND_PROOFS[2]
- cells, proofs = cells[:2], proofs[:2]
- row_commitments = [VALID_COMMITMENTS[2]]
- row_indices = [0] * len(cells)
- # Leave off one of the column indices
- column_indices = list(range(len(cells) - 1))
- expect_exception(spec.verify_cell_kzg_proof_batch, row_commitments, row_indices, column_indices, cells, proofs)
- identifier = make_id(row_commitments, row_indices, column_indices, cells, proofs)
- yield f'verify_cell_kzg_proof_batch_case_invalid_missing_column_index_{identifier}', {
- 'input': {
- 'row_commitments': encode_hex_list(row_commitments),
- 'row_indices': row_indices,
- 'column_indices': column_indices,
- 'cells': encode_hex_list(cells),
- 'proofs': encode_hex_list(proofs),
- },
- 'output': None
- }
+ # Edge case: Missing a cell index
+ cells, proofs = VALID_CELLS_AND_PROOFS[2]
+ cells, proofs = cells[:2], proofs[:2]
+ commitments = [VALID_COMMITMENTS[2], VALID_COMMITMENTS[2]]
+ # Leave off one of the cell indices
+ cell_indices = list(range(len(cells) - 1))
+ expect_exception(spec.verify_cell_kzg_proof_batch, commitments, cell_indices, cells, proofs)
+ identifier = make_id(commitments, cell_indices, cells, proofs)
+ yield f'verify_cell_kzg_proof_batch_case_invalid_missing_cell_index_{identifier}', {
+ 'input': {
+ 'commitments': encode_hex_list(commitments),
+ 'cell_indices': cell_indices,
+ 'cells': encode_hex_list(cells),
+ 'proofs': encode_hex_list(proofs),
+ },
+ 'output': None
+ }
- # Edge case: Missing a cell
- cells, proofs = VALID_CELLS_AND_PROOFS[3]
- cells, proofs = cells[:2], proofs[:2]
- row_commitments = [VALID_COMMITMENTS[3]]
- row_indices = [0] * len(cells)
- column_indices = list(range(len(cells)))
- # Remove the last proof
- cells = cells[:-1]
- expect_exception(spec.verify_cell_kzg_proof_batch, row_commitments, row_indices, column_indices, cells, proofs)
- identifier = make_id(row_commitments, row_indices, column_indices, cells, proofs)
- yield f'verify_cell_kzg_proof_batch_case_invalid_missing_cell_{identifier}', {
- 'input': {
- 'row_commitments': encode_hex_list(row_commitments),
- 'row_indices': row_indices,
- 'column_indices': column_indices,
- 'cells': encode_hex_list(cells),
- 'proofs': encode_hex_list(proofs),
- },
- 'output': None
- }
+ # Edge case: Missing a cell
+ cells, proofs = VALID_CELLS_AND_PROOFS[3]
+ cells, proofs = cells[:2], proofs[:2]
+ commitments = [VALID_COMMITMENTS[3], VALID_COMMITMENTS[3]]
+ cell_indices = list(range(len(cells)))
+    # Remove the last cell
+ cells = cells[:-1]
+ expect_exception(spec.verify_cell_kzg_proof_batch, commitments, cell_indices, cells, proofs)
+ identifier = make_id(commitments, cell_indices, cells, proofs)
+ yield f'verify_cell_kzg_proof_batch_case_invalid_missing_cell_{identifier}', {
+ 'input': {
+ 'commitments': encode_hex_list(commitments),
+ 'cell_indices': cell_indices,
+ 'cells': encode_hex_list(cells),
+ 'proofs': encode_hex_list(proofs),
+ },
+ 'output': None
+ }
- # Edge case: Missing a proof
- cells, proofs = VALID_CELLS_AND_PROOFS[4]
- cells, proofs = cells[:2], proofs[:2]
- row_commitments = [VALID_COMMITMENTS[4]]
- row_indices = [0] * len(cells)
- column_indices = list(range(len(cells)))
- # Remove the last proof
- proofs = proofs[:-1]
- expect_exception(spec.verify_cell_kzg_proof_batch, row_commitments, row_indices, column_indices, cells, proofs)
- identifier = make_id(row_commitments, row_indices, column_indices, cells, proofs)
- yield f'verify_cell_kzg_proof_batch_case_invalid_missing_proof_{identifier}', {
- 'input': {
- 'row_commitments': encode_hex_list(row_commitments),
- 'row_indices': row_indices,
- 'column_indices': column_indices,
- 'cells': encode_hex_list(cells),
- 'proofs': encode_hex_list(proofs),
- },
- 'output': None
- }
+ # Edge case: Missing a proof
+ cells, proofs = VALID_CELLS_AND_PROOFS[4]
+ cells, proofs = cells[:2], proofs[:2]
+ commitments = [VALID_COMMITMENTS[4], VALID_COMMITMENTS[4]]
+ cell_indices = list(range(len(cells)))
+ # Remove the last proof
+ proofs = proofs[:-1]
+ expect_exception(spec.verify_cell_kzg_proof_batch, commitments, cell_indices, cells, proofs)
+ identifier = make_id(commitments, cell_indices, cells, proofs)
+ yield f'verify_cell_kzg_proof_batch_case_invalid_missing_proof_{identifier}', {
+ 'input': {
+ 'commitments': encode_hex_list(commitments),
+ 'cell_indices': cell_indices,
+ 'cells': encode_hex_list(cells),
+ 'proofs': encode_hex_list(proofs),
+ },
+ 'output': None
+ }
###############################################################################
@@ -593,15 +347,14 @@ def case_recover_cells_and_kzg_proofs():
# Valid: No missing cells
cells, proofs = VALID_CELLS_AND_PROOFS[0]
cell_indices = list(range(spec.CELLS_PER_EXT_BLOB))
- recovered_cells, recovered_proofs = spec.recover_cells_and_kzg_proofs(cell_indices, cells, proofs)
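+ # Proofs are no longer an input to recovery; they are recomputed and returned alongside the recovered cells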
+ recovered_cells, recovered_proofs = spec.recover_cells_and_kzg_proofs(cell_indices, cells)
assert recovered_cells == cells
assert recovered_proofs == proofs
- identifier = make_id(cell_indices, cells, proofs)
+ identifier = make_id(cell_indices, cells)
yield f'recover_cells_and_kzg_proofs_case_valid_no_missing_{identifier}', {
'input': {
'cell_indices': cell_indices,
'cells': encode_hex_list(cells),
- 'proofs': encode_hex_list(proofs),
},
'output': (encode_hex_list(recovered_cells), encode_hex_list(recovered_proofs))
}
@@ -610,16 +363,14 @@ def case_recover_cells_and_kzg_proofs():
cells, proofs = VALID_CELLS_AND_PROOFS[1]
cell_indices = list(range(0, spec.CELLS_PER_EXT_BLOB, 2))
partial_cells = [cells[cell_index] for cell_index in cell_indices]
- partial_proofs = [proofs[cell_index] for cell_index in cell_indices]
- recovered_cells, recovered_proofs = spec.recover_cells_and_kzg_proofs(cell_indices, partial_cells, partial_proofs)
+ recovered_cells, recovered_proofs = spec.recover_cells_and_kzg_proofs(cell_indices, partial_cells)
assert recovered_cells == cells
assert recovered_proofs == proofs
- identifier = make_id(cell_indices, partial_cells, partial_proofs)
+ identifier = make_id(cell_indices, partial_cells)
yield f'recover_cells_and_kzg_proofs_case_valid_half_missing_every_other_cell_{identifier}', {
'input': {
'cell_indices': cell_indices,
'cells': encode_hex_list(partial_cells),
- 'proofs': encode_hex_list(partial_proofs),
},
'output': (encode_hex_list(recovered_cells), encode_hex_list(recovered_proofs))
}
@@ -628,8 +379,7 @@ def case_recover_cells_and_kzg_proofs():
cells, proofs = VALID_CELLS_AND_PROOFS[2]
cell_indices = list(range(0, spec.CELLS_PER_EXT_BLOB // 2))
partial_cells = [cells[cell_index] for cell_index in cell_indices]
- partial_proofs = [proofs[cell_index] for cell_index in cell_indices]
- recovered_cells, recovered_proofs = spec.recover_cells_and_kzg_proofs(cell_indices, partial_cells, partial_proofs)
+ recovered_cells, recovered_proofs = spec.recover_cells_and_kzg_proofs(cell_indices, partial_cells)
assert recovered_cells == cells
assert recovered_proofs == proofs
identifier = make_id(cell_indices, partial_cells)
@@ -637,7 +387,6 @@ def case_recover_cells_and_kzg_proofs():
'input': {
'cell_indices': cell_indices,
'cells': encode_hex_list(partial_cells),
- 'proofs': encode_hex_list(partial_proofs),
},
'output': (encode_hex_list(recovered_cells), encode_hex_list(recovered_proofs))
}
@@ -646,8 +395,7 @@ def case_recover_cells_and_kzg_proofs():
cells, proofs = VALID_CELLS_AND_PROOFS[3]
cell_indices = list(range(spec.CELLS_PER_EXT_BLOB // 2, spec.CELLS_PER_EXT_BLOB))
partial_cells = [cells[cell_index] for cell_index in cell_indices]
- partial_proofs = [proofs[cell_index] for cell_index in cell_indices]
- recovered_cells, recovered_proofs = spec.recover_cells_and_kzg_proofs(cell_indices, partial_cells, partial_proofs)
+ recovered_cells, recovered_proofs = spec.recover_cells_and_kzg_proofs(cell_indices, partial_cells)
assert recovered_cells == cells
assert recovered_proofs == proofs
identifier = make_id(cell_indices, partial_cells)
@@ -655,7 +403,6 @@ def case_recover_cells_and_kzg_proofs():
'input': {
'cell_indices': cell_indices,
'cells': encode_hex_list(partial_cells),
- 'proofs': encode_hex_list(partial_proofs),
},
'output': (encode_hex_list(recovered_cells), encode_hex_list(recovered_proofs))
}
@@ -668,95 +415,67 @@ def case_recover_cells_and_kzg_proofs():
'input': {
'cell_indices': cell_indices,
'cells': encode_hex_list(partial_cells),
- 'proofs': encode_hex_list(partial_proofs),
},
'output': None
}
# Edge case: More than half missing
- cells, proofs = VALID_CELLS_AND_PROOFS[4]
+ cells, _ = VALID_CELLS_AND_PROOFS[4]
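+ # Recovery requires at least CELLS_PER_EXT_BLOB // 2 cells, so one fewer than half must fail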
cell_indices = list(range(spec.CELLS_PER_EXT_BLOB // 2 - 1))
partial_cells = [cells[cell_index] for cell_index in cell_indices]
- partial_proofs = [proofs[cell_index] for cell_index in cell_indices]
- expect_exception(spec.recover_cells_and_kzg_proofs, cell_indices, partial_cells, partial_proofs)
- identifier = make_id(cell_indices, partial_cells, partial_proofs)
+ expect_exception(spec.recover_cells_and_kzg_proofs, cell_indices, partial_cells)
+ identifier = make_id(cell_indices, partial_cells)
yield f'recover_cells_and_kzg_proofs_case_invalid_more_than_half_missing_{identifier}', {
'input': {
'cell_indices': cell_indices,
'cells': encode_hex_list(partial_cells),
- 'proofs': encode_hex_list(partial_proofs),
},
'output': None
}
# Edge case: More cells provided than CELLS_PER_EXT_BLOB
- cells, proofs = VALID_CELLS_AND_PROOFS[5]
+ cells, _ = VALID_CELLS_AND_PROOFS[5]
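+ # Appending a duplicate index yields CELLS_PER_EXT_BLOB + 1 entries, exceeding the allowed maximum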
cell_indices = list(range(spec.CELLS_PER_EXT_BLOB)) + [0]
partial_cells = [cells[cell_index] for cell_index in cell_indices]
- partial_proofs = [proofs[cell_index] for cell_index in cell_indices]
- expect_exception(spec.recover_cells_and_kzg_proofs, cell_indices, partial_cells, partial_proofs)
- identifier = make_id(cell_indices, partial_cells, partial_proofs)
+ expect_exception(spec.recover_cells_and_kzg_proofs, cell_indices, partial_cells)
+ identifier = make_id(cell_indices, partial_cells)
yield f'recover_cells_and_kzg_proofs_case_invalid_more_cells_than_cells_per_ext_blob_{identifier}', {
'input': {
'cell_indices': cell_indices,
'cells': encode_hex_list(partial_cells),
- 'proofs': encode_hex_list(partial_proofs),
},
'output': None
}
# Edge case: Invalid cell_index
- cells, proofs = VALID_CELLS_AND_PROOFS[6]
+ cells, _ = VALID_CELLS_AND_PROOFS[6]
cell_indices = list(range(spec.CELLS_PER_EXT_BLOB // 2))
partial_cells = [cells[cell_index] for cell_index in cell_indices]
- partial_proofs = [proofs[cell_index] for cell_index in cell_indices]
# Replace first cell_index with an invalid value
cell_indices[0] = spec.CELLS_PER_EXT_BLOB
- expect_exception(spec.recover_cells_and_kzg_proofs, cell_indices, partial_cells, partial_proofs)
- identifier = make_id(cell_indices, partial_cells, partial_proofs)
+ expect_exception(spec.recover_cells_and_kzg_proofs, cell_indices, partial_cells)
+ identifier = make_id(cell_indices, partial_cells)
yield f'recover_cells_and_kzg_proofs_case_invalid_cell_index_{identifier}', {
'input': {
'cell_indices': cell_indices,
'cells': encode_hex_list(partial_cells),
- 'proofs': encode_hex_list(partial_proofs),
},
'output': None
}
# Edge case: Invalid cell
for cell in INVALID_INDIVIDUAL_CELL_BYTES:
- cells, proofs = VALID_CELLS_AND_PROOFS[6]
+ cells, _ = VALID_CELLS_AND_PROOFS[6]
cell_indices = list(range(spec.CELLS_PER_EXT_BLOB // 2))
partial_cells = [cells[cell_index] for cell_index in cell_indices]
- partial_proofs = [proofs[cell_index] for cell_index in cell_indices]
# Replace first cell with an invalid value
partial_cells[0] = cell
- expect_exception(spec.recover_cells_and_kzg_proofs, cell_indices, partial_cells, partial_proofs)
- identifier = make_id(cell_indices, partial_cells, partial_proofs)
+ expect_exception(spec.recover_cells_and_kzg_proofs, cell_indices, partial_cells)
+ identifier = make_id(cell_indices, partial_cells)
yield f'recover_cells_and_kzg_proofs_case_invalid_cell_{identifier}', {
'input': {
'cell_indices': cell_indices,
'cells': encode_hex_list(partial_cells),
- 'proofs': encode_hex_list(partial_proofs),
- },
- 'output': None
- }
-
- # Edge case: Invalid proof
- for proof in INVALID_G1_POINTS:
- cells, proofs = VALID_CELLS_AND_PROOFS[0]
- cell_indices = list(range(spec.CELLS_PER_EXT_BLOB // 2))
- partial_cells = [cells[cell_index] for cell_index in cell_indices]
- partial_proofs = [proofs[cell_index] for cell_index in cell_indices]
- # Replace first proof with an invalid value
- partial_proofs[0] = proof
- expect_exception(spec.recover_cells_and_kzg_proofs, cell_indices, partial_cells, partial_proofs)
- identifier = make_id(cell_indices, partial_cells, partial_proofs)
- yield f'recover_cells_and_kzg_proofs_case_invalid_proof_{identifier}', {
- 'input': {
- 'cell_indices': cell_indices,
- 'cells': encode_hex_list(partial_cells),
- 'proofs': encode_hex_list(partial_proofs),
},
'output': None
}
@@ -765,16 +484,14 @@ def case_recover_cells_and_kzg_proofs():
cells, proofs = VALID_CELLS_AND_PROOFS[0]
cell_indices = list(range(0, spec.CELLS_PER_EXT_BLOB, 2))
partial_cells = [cells[cell_index] for cell_index in cell_indices]
- partial_proofs = [proofs[cell_index] for cell_index in cell_indices]
# Add another cell_index
cell_indices.append(spec.CELLS_PER_EXT_BLOB - 1)
- expect_exception(spec.recover_cells_and_kzg_proofs, cell_indices, partial_cells, partial_proofs)
- identifier = make_id(cell_indices, partial_cells, partial_proofs)
+ expect_exception(spec.recover_cells_and_kzg_proofs, cell_indices, partial_cells)
+ identifier = make_id(cell_indices, partial_cells)
yield f'recover_cells_and_kzg_proofs_case_invalid_more_cell_indices_than_cells_{identifier}', {
'input': {
'cell_indices': cell_indices,
'cells': encode_hex_list(partial_cells),
- 'proofs': encode_hex_list(partial_proofs),
},
'output': None
}
@@ -783,34 +500,14 @@ def case_recover_cells_and_kzg_proofs():
cells, proofs = VALID_CELLS_AND_PROOFS[1]
cell_indices = list(range(0, spec.CELLS_PER_EXT_BLOB, 2))
partial_cells = [cells[cell_index] for cell_index in cell_indices]
- partial_proofs = [proofs[cell_index] for cell_index in cell_indices]
# Add another cell
partial_cells.append(CELL_RANDOM_VALID1)
- expect_exception(spec.recover_cells_and_kzg_proofs, cell_indices, partial_cells, partial_proofs)
- identifier = make_id(cell_indices, partial_cells, partial_proofs)
+ expect_exception(spec.recover_cells_and_kzg_proofs, cell_indices, partial_cells)
+ identifier = make_id(cell_indices, partial_cells)
yield f'recover_cells_and_kzg_proofs_case_invalid_more_cells_than_cell_indices_{identifier}', {
'input': {
'cell_indices': cell_indices,
'cells': encode_hex_list(partial_cells),
- 'proofs': encode_hex_list(partial_proofs),
- },
- 'output': None
- }
-
- # Edge case: More proofs than cell_indices
- cells, proofs = VALID_CELLS_AND_PROOFS[1]
- cell_indices = list(range(0, spec.CELLS_PER_EXT_BLOB, 2))
- partial_cells = [cells[cell_index] for cell_index in cell_indices]
- partial_proofs = [proofs[cell_index] for cell_index in cell_indices]
- # Add another proof
- partial_proofs.append(G1)
- expect_exception(spec.recover_cells_and_kzg_proofs, cell_indices, partial_cells, partial_proofs)
- identifier = make_id(cell_indices, partial_cells, partial_proofs)
- yield f'recover_cells_and_kzg_proofs_case_invalid_more_proofs_than_cell_indices_{identifier}', {
- 'input': {
- 'cell_indices': cell_indices,
- 'cells': encode_hex_list(partial_cells),
- 'proofs': encode_hex_list(partial_proofs),
},
'output': None
}
@@ -824,16 +521,14 @@ def case_recover_cells_and_kzg_proofs():
# to insufficient cell count, not because of a duplicate cell.
cell_indices = list(range(spec.CELLS_PER_EXT_BLOB // 2 + 1))
partial_cells = [cells[cell_index] for cell_index in cell_indices]
- partial_proofs = [proofs[cell_index] for cell_index in cell_indices]
# Replace first cell_index with the second cell_index
cell_indices[0] = cell_indices[1]
- expect_exception(spec.recover_cells_and_kzg_proofs, cell_indices, partial_cells, partial_proofs)
- identifier = make_id(cell_indices, partial_cells, partial_proofs)
+ expect_exception(spec.recover_cells_and_kzg_proofs, cell_indices, partial_cells)
+ identifier = make_id(cell_indices, partial_cells)
yield f'recover_cells_and_kzg_proofs_case_invalid_duplicate_cell_index_{identifier}', {
'input': {
'cell_indices': cell_indices,
'cells': encode_hex_list(partial_cells),
- 'proofs': encode_hex_list(partial_proofs),
},
'output': None
}
@@ -872,7 +567,6 @@ def cases_fn() -> Iterable[gen_typing.TestCase]:
gen_runner.run_generator("kzg_7594", [
# EIP-7594
create_provider(EIP7594, 'compute_cells_and_kzg_proofs', case_compute_cells_and_kzg_proofs),
- create_provider(EIP7594, 'verify_cell_kzg_proof', case_verify_cell_kzg_proof),
create_provider(EIP7594, 'verify_cell_kzg_proof_batch', case_verify_cell_kzg_proof_batch),
create_provider(EIP7594, 'recover_cells_and_kzg_proofs', case_recover_cells_and_kzg_proofs),
])
diff --git a/tests/generators/light_client/main.py b/tests/generators/light_client/main.py
index cfe34aee4b..a3cdfd62fd 100644
--- a/tests/generators/light_client/main.py
+++ b/tests/generators/light_client/main.py
@@ -1,4 +1,4 @@
-from eth2spec.test.helpers.constants import ALTAIR, BELLATRIX, CAPELLA, DENEB
+from eth2spec.test.helpers.constants import ALTAIR, BELLATRIX, CAPELLA, DENEB, ELECTRA
from eth2spec.gen_helpers.gen_from_tests.gen import combine_mods, run_state_test_generators
@@ -15,12 +15,14 @@
]}
capella_mods = combine_mods(_new_capella_mods, bellatrix_mods)
deneb_mods = capella_mods
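+ # Electra introduces no new light client test modules; reuse the Deneb set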
+ electra_mods = deneb_mods
all_mods = {
ALTAIR: altair_mods,
BELLATRIX: bellatrix_mods,
CAPELLA: capella_mods,
DENEB: deneb_mods,
+ ELECTRA: electra_mods,
}
run_state_test_generators(runner_name="light_client", all_mods=all_mods)
diff --git a/tests/generators/merkle_proof/main.py b/tests/generators/merkle_proof/main.py
index b7d30fe9e4..69500137ab 100644
--- a/tests/generators/merkle_proof/main.py
+++ b/tests/generators/merkle_proof/main.py
@@ -1,4 +1,4 @@
-from eth2spec.test.helpers.constants import DENEB, EIP7594
+from eth2spec.test.helpers.constants import DENEB, ELECTRA, EIP7594
from eth2spec.gen_helpers.gen_from_tests.gen import run_state_test_generators, combine_mods
@@ -9,10 +9,12 @@
_new_eip7594_mods = {key: 'eth2spec.test.eip7594.merkle_proof.test_' + key for key in [
'single_merkle_proof',
]}
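+ # Electra reuses the Deneb merkle proof test modules unchanged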
+ electra_mods = deneb_mods
eip_7594_mods = combine_mods(_new_eip7594_mods, deneb_mods)
all_mods = {
DENEB: deneb_mods,
+ ELECTRA: electra_mods,
EIP7594: eip_7594_mods,
}