
Commit

Merge pull request #2839 from ethereum/dev
Release v1.1.10 (kilnv2)
djrtwo authored Mar 1, 2022
2 parents 7c9373b + 72d4597 commit 9f643d8
Showing 15 changed files with 94 additions and 97 deletions.
24 changes: 17 additions & 7 deletions specs/altair/sync-protocol.md
@@ -16,6 +16,7 @@
- [`LightClientUpdate`](#lightclientupdate)
- [`LightClientStore`](#lightclientstore)
- [Helper functions](#helper-functions)
- [`is_finality_update`](#is_finality_update)
- [`get_subtree_index`](#get_subtree_index)
- [`get_active_header`](#get_active_header)
- [`get_safety_threshold`](#get_safety_threshold)
@@ -95,6 +96,13 @@ class LightClientStore(object):

## Helper functions

### `is_finality_update`

```python
def is_finality_update(update: LightClientUpdate) -> bool:
return update.finalized_header != BeaconBlockHeader()
```
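For readers skimming the diff, here is a minimal, self-contained sketch of what the new helper checks, using plain Python dataclasses as stand-ins for the SSZ containers (the fields and defaults below are assumptions, not the spec's types): an all-default `finalized_header` means the update carries no finality proof. The same predicate replaces the inline `!= BeaconBlockHeader()` comparisons in `get_active_header`, `validate_light_client_update`, and `process_light_client_update` further down in this file.

```python
from dataclasses import dataclass, field


@dataclass
class BeaconBlockHeader:
    slot: int = 0
    proposer_index: int = 0
    parent_root: bytes = b"\x00" * 32
    state_root: bytes = b"\x00" * 32
    body_root: bytes = b"\x00" * 32


@dataclass
class LightClientUpdate:
    attested_header: BeaconBlockHeader = field(default_factory=BeaconBlockHeader)
    finalized_header: BeaconBlockHeader = field(default_factory=BeaconBlockHeader)


def is_finality_update(update: LightClientUpdate) -> bool:
    # Mirrors the spec helper above: a default-valued finalized_header means
    # the update does not include a finality proof.
    return update.finalized_header != BeaconBlockHeader()


assert not is_finality_update(LightClientUpdate())
assert is_finality_update(LightClientUpdate(finalized_header=BeaconBlockHeader(slot=7)))
```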

### `get_subtree_index`

```python
@@ -109,7 +117,7 @@ def get_active_header(update: LightClientUpdate) -> BeaconBlockHeader:
# The "active header" is the header that the update is trying to convince us
# to accept. If a finalized header is present, it's the finalized header,
# otherwise it's the attested header
if update.finalized_header != BeaconBlockHeader():
if is_finality_update(update):
return update.finalized_header
else:
return update.attested_header
@@ -157,13 +165,13 @@ def validate_light_client_update(store: LightClientStore,
assert current_slot >= active_header.slot > store.finalized_header.slot

# Verify update does not skip a sync committee period
finalized_period = compute_epoch_at_slot(store.finalized_header.slot) // EPOCHS_PER_SYNC_COMMITTEE_PERIOD
update_period = compute_epoch_at_slot(active_header.slot) // EPOCHS_PER_SYNC_COMMITTEE_PERIOD
finalized_period = compute_sync_committee_period(compute_epoch_at_slot(store.finalized_header.slot))
update_period = compute_sync_committee_period(compute_epoch_at_slot(active_header.slot))
assert update_period in (finalized_period, finalized_period + 1)

# Verify that the `finalized_header`, if present, actually is the finalized header saved in the
# state of the `attested header`
if update.finalized_header == BeaconBlockHeader():
if not is_finality_update(update):
assert update.finality_branch == [Bytes32() for _ in range(floorlog2(FINALIZED_ROOT_INDEX))]
else:
assert is_valid_merkle_branch(
@@ -208,12 +216,14 @@ def validate_light_client_update(store: LightClientStore,
```python
def apply_light_client_update(store: LightClientStore, update: LightClientUpdate) -> None:
active_header = get_active_header(update)
finalized_period = compute_epoch_at_slot(store.finalized_header.slot) // EPOCHS_PER_SYNC_COMMITTEE_PERIOD
update_period = compute_epoch_at_slot(active_header.slot) // EPOCHS_PER_SYNC_COMMITTEE_PERIOD
finalized_period = compute_sync_committee_period(compute_epoch_at_slot(store.finalized_header.slot))
update_period = compute_sync_committee_period(compute_epoch_at_slot(active_header.slot))
if update_period == finalized_period + 1:
store.current_sync_committee = store.next_sync_committee
store.next_sync_committee = update.next_sync_committee
store.finalized_header = active_header
if store.finalized_header.slot > store.optimistic_header.slot:
store.optimistic_header = store.finalized_header
```
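The period computations in `validate_light_client_update` and `apply_light_client_update` are refactored onto `compute_sync_committee_period`. Judging from the inline expression it replaces, the helper is simply the epoch-to-period division; a hedged sketch follows (the constant shown is the mainnet preset, stated as an assumption):

```python
# Assumed to match the inline expression removed in this diff:
#   period = epoch // EPOCHS_PER_SYNC_COMMITTEE_PERIOD
EPOCHS_PER_SYNC_COMMITTEE_PERIOD = 256  # mainnet preset; the minimal preset differs


def compute_sync_committee_period(epoch: int) -> int:
    return epoch // EPOCHS_PER_SYNC_COMMITTEE_PERIOD


# The refactor is behaviour-preserving:
for epoch in (0, 255, 256, 513, 100_000):
    assert compute_sync_committee_period(epoch) == epoch // EPOCHS_PER_SYNC_COMMITTEE_PERIOD
```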

#### `process_light_client_update`
@@ -250,7 +260,7 @@ def process_light_client_update(store: LightClientStore,
# Update finalized header
if (
sum(sync_committee_bits) * 3 >= len(sync_committee_bits) * 2
and update.finalized_header != BeaconBlockHeader()
and is_finality_update(update)
):
# Normal update through 2/3 threshold
apply_light_client_update(store, update)
10 changes: 5 additions & 5 deletions specs/bellatrix/beacon-chain.md
@@ -171,7 +171,7 @@ class ExecutionPayload(Container):
state_root: Bytes32
receipts_root: Bytes32
logs_bloom: ByteVector[BYTES_PER_LOGS_BLOOM]
random: Bytes32 # 'difficulty' in the yellow paper
prev_randao: Bytes32 # 'difficulty' in the yellow paper
block_number: uint64 # 'number' in the yellow paper
gas_limit: uint64
gas_used: uint64
@@ -193,7 +193,7 @@ class ExecutionPayloadHeader(Container):
state_root: Bytes32
receipts_root: Bytes32
logs_bloom: ByteVector[BYTES_PER_LOGS_BLOOM]
random: Bytes32
prev_randao: Bytes32
block_number: uint64
gas_limit: uint64
gas_used: uint64
@@ -348,8 +348,8 @@ def process_execution_payload(state: BeaconState, payload: ExecutionPayload, exe
# Verify consistency of the parent hash with respect to the previous execution payload header
if is_merge_transition_complete(state):
assert payload.parent_hash == state.latest_execution_payload_header.block_hash
# Verify random
assert payload.random == get_randao_mix(state, get_current_epoch(state))
# Verify prev_randao
assert payload.prev_randao == get_randao_mix(state, get_current_epoch(state))
# Verify timestamp
assert payload.timestamp == compute_timestamp_at_slot(state, state.slot)
# Verify the execution payload is valid
@@ -361,7 +361,7 @@ def process_execution_payload(state: BeaconState, payload: ExecutionPayload, exe
state_root=payload.state_root,
receipts_root=payload.receipts_root,
logs_bloom=payload.logs_bloom,
random=payload.random,
prev_randao=payload.prev_randao,
block_number=payload.block_number,
gas_limit=payload.gas_limit,
gas_used=payload.gas_used,
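The `random` to `prev_randao` change in `ExecutionPayload` and `ExecutionPayloadHeader` is purely a field rename; the consensus rule stays "the payload's `prev_randao` must equal the current epoch's RANDAO mix". A stripped-down sketch of that check, using plain Python stand-ins and the mainnet constant as an assumption:

```python
EPOCHS_PER_HISTORICAL_VECTOR = 65_536  # mainnet preset; treated as an assumption here


def get_randao_mix(randao_mixes: list, epoch: int) -> bytes:
    # Simplified stand-in for the phase0 helper of the same name.
    return randao_mixes[epoch % EPOCHS_PER_HISTORICAL_VECTOR]


def verify_prev_randao(prev_randao: bytes, randao_mixes: list, current_epoch: int) -> None:
    # Same assertion as process_execution_payload above, under the new field name.
    assert prev_randao == get_randao_mix(randao_mixes, current_epoch)


mixes = [bytes([i % 256]) * 32 for i in range(EPOCHS_PER_HISTORICAL_VECTOR)]
verify_prev_randao(bytes([3]) * 32, mixes, current_epoch=3)
```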
2 changes: 1 addition & 1 deletion specs/bellatrix/fork-choice.md
@@ -80,7 +80,7 @@ Used to signal to initiate the payload build process via `notify_forkchoice_upda
@dataclass
class PayloadAttributes(object):
timestamp: uint64
random: Bytes32
prev_randao: Bytes32
suggested_fee_recipient: ExecutionAddress
```

2 changes: 1 addition & 1 deletion specs/bellatrix/validator.md
@@ -143,7 +143,7 @@ def prepare_execution_payload(state: BeaconState,
# Set the forkchoice head and initiate the payload build process
payload_attributes = PayloadAttributes(
timestamp=compute_timestamp_at_slot(state, state.slot),
random=get_randao_mix(state, get_current_epoch(state)),
prev_randao=get_randao_mix(state, get_current_epoch(state)),
suggested_fee_recipient=suggested_fee_recipient,
)
return execution_engine.notify_forkchoice_updated(parent_hash, finalized_block_hash, payload_attributes)
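Downstream, the rename also surfaces in the payload attributes a consensus client hands to the execution client when initiating a build. The JSON casing below (`prevRandao`) reflects the Engine API naming and should be read as an assumption for illustration, not as part of this spec:

```python
# Hypothetical JSON body for the payload-attributes object sent alongside a
# forkchoice update; the field casing is an assumption about the Engine API.
payload_attributes = {
    "timestamp": "0x621f1c80",
    "prevRandao": "0x" + "12" * 32,
    "suggestedFeeRecipient": "0x" + "ab" * 20,
}
```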
5 changes: 0 additions & 5 deletions specs/das/p2p-interface.md
@@ -196,11 +196,6 @@ This builds on top of the protocol identification and encoding spec which was in

Note that DAS networking uses a different protocol prefix: `/eth2/das/req`

The result codes are extended with:
- 3: **ResourceUnavailable** -- when the request was valid but cannot be served at this point in time.

TODO: unify with phase0? Lighthoue already defined this in their response codes enum.

### Messages

#### DASQuery
8 changes: 5 additions & 3 deletions specs/phase0/p2p-interface.md
@@ -337,6 +337,8 @@ The following validations MUST pass before forwarding the `signed_aggregate_and_
(a client MAY queue future aggregates for processing at the appropriate slot).
- _[REJECT]_ The aggregate attestation's epoch matches its target -- i.e. `aggregate.data.target.epoch ==
compute_epoch_at_slot(aggregate.data.slot)`
- _[IGNORE]_ The valid aggregate attestation defined by `hash_tree_root(aggregate)` has _not_ already been seen
(via aggregate gossip, within a verified block, or through the creation of an equivalent aggregate locally).
- _[IGNORE]_ The `aggregate` is the first valid aggregate received for the aggregator
with index `aggregate_and_proof.aggregator_index` for the epoch `aggregate.data.target.epoch`.
- _[REJECT]_ The attestation has participants --
@@ -424,7 +426,7 @@ The following validations MUST pass before forwarding the `attestation` on the s
- _[REJECT]_ The block being voted for (`attestation.data.beacon_block_root`) passes validation.
- _[REJECT]_ The attestation's target block is an ancestor of the block named in the LMD vote -- i.e.
`get_ancestor(store, attestation.data.beacon_block_root, compute_start_slot_at_epoch(attestation.data.target.epoch)) == attestation.data.target.root`
- _[REJECT]_ The current `finalized_checkpoint` is an ancestor of the `block` defined by `attestation.data.beacon_block_root` -- i.e.
- _[IGNORE]_ The current `finalized_checkpoint` is an ancestor of the `block` defined by `attestation.data.beacon_block_root` -- i.e.
`get_ancestor(store, attestation.data.beacon_block_root, compute_start_slot_at_epoch(store.finalized_checkpoint.epoch))
== store.finalized_checkpoint.root`

@@ -569,11 +571,11 @@ The response code can have one of the following values, encoded as a single unsi
The response payload adheres to the `ErrorMessage` schema (described below).
- 3: **ResourceUnavailable** -- the responder does not have requested resource.
The response payload adheres to the `ErrorMessage` schema (described below).
*Note*: This response code is only valid as a response to `BlocksByRange`.
*Note*: This response code is only valid as a response where specified.

Clients MAY use response codes above `128` to indicate alternative, erroneous request-specific responses.

The range `[3, 127]` is RESERVED for future usages, and should be treated as error if not recognized expressly.
The range `[4, 127]` is RESERVED for future usages, and should be treated as error if not recognized expressly.

The `ErrorMessage` schema is:

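The Req/Resp change above loosens `ResourceUnavailable` (code 3) from a `BlocksByRange`-only code to one usable wherever a method specifies it, and shrinks the reserved range accordingly. A sketch of result-code classification under the updated rules; the enum names for codes 1 and 2 follow the phase0 spec but should be double-checked against the full document:

```python
from enum import IntEnum


class RespCode(IntEnum):
    SUCCESS = 0
    INVALID_REQUEST = 1
    SERVER_ERROR = 2
    RESOURCE_UNAVAILABLE = 3  # now valid for any method that specifies it


def classify_response_code(code: int) -> str:
    if code == RespCode.SUCCESS:
        return "success"
    if code in (RespCode.INVALID_REQUEST, RespCode.SERVER_ERROR, RespCode.RESOURCE_UNAVAILABLE):
        return "error (payload is an ErrorMessage)"
    if 4 <= code <= 127:
        return "reserved -- treat as an error"
    return "request-specific error (client-defined)"


assert classify_response_code(3) == "error (payload is an ErrorMessage)"
assert classify_response_code(64) == "reserved -- treat as an error"
```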
4 changes: 2 additions & 2 deletions sync/optimistic.md
@@ -82,9 +82,9 @@ def is_execution_block(block: BeaconBlock) -> bool:
```python
def is_optimistic_candidate_block(opt_store: OptimisticStore, current_slot: Slot, block: BeaconBlock) -> bool:
justified_root = opt_store.block_states[opt_store.head_block_root].current_justified_checkpoint.root
justifed_is_execution_block = is_execution_block(opt_store.blocks[justified_root])
justified_is_execution_block = is_execution_block(opt_store.blocks[justified_root])
block_is_deep = block.slot + SAFE_SLOTS_TO_IMPORT_OPTIMISTICALLY <= current_slot
return justifed_is_execution_block or block_is_deep
return justified_is_execution_block or block_is_deep
```

Let only a node which returns `is_optimistic(opt_store, head) is True` be an *optimistic
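The change above is only a spelling fix (`justifed` to `justified`), but the rule it names is worth restating: a block may be imported optimistically if the justified block is already an execution block, or if the block sits sufficiently far behind the wall clock. A self-contained sketch, with the constant taken as the preset value and stated as an assumption:

```python
SAFE_SLOTS_TO_IMPORT_OPTIMISTICALLY = 128  # preset value assumed here


def is_optimistic_candidate(justified_is_execution_block: bool,
                            block_slot: int,
                            current_slot: int) -> bool:
    block_is_deep = block_slot + SAFE_SLOTS_TO_IMPORT_OPTIMISTICALLY <= current_slot
    return justified_is_execution_block or block_is_deep


# Old enough to be "deep", so it may be imported optimistically:
assert is_optimistic_candidate(False, block_slot=0, current_slot=200)
# Recent block with a pre-merge justified checkpoint: not a candidate.
assert not is_optimistic_candidate(False, block_slot=190, current_slot=200)
```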
2 changes: 1 addition & 1 deletion tests/core/pyspec/eth2spec/VERSION.txt
@@ -1 +1 @@
1.1.9
1.1.10
Original file line number Diff line number Diff line change
@@ -24,7 +24,7 @@ def run_sync_committees_progress_test(spec, state):
first_sync_committee = state.current_sync_committee.copy()
second_sync_committee = state.next_sync_committee.copy()

current_period = spec.get_current_epoch(state) // spec.EPOCHS_PER_SYNC_COMMITTEE_PERIOD
current_period = spec.compute_sync_committee_period(spec.get_current_epoch(state))
next_period = current_period + 1
next_period_start_epoch = next_period * spec.EPOCHS_PER_SYNC_COMMITTEE_PERIOD
next_period_start_slot = next_period_start_epoch * spec.SLOTS_PER_EPOCH
Original file line number Diff line number Diff line change
@@ -5,38 +5,29 @@
with_presets,
with_altair_and_later,
)
from eth2spec.test.helpers.attestations import next_epoch_with_attestations
from eth2spec.test.helpers.attestations import (
next_epoch_with_attestations,
)
from eth2spec.test.helpers.block import (
build_empty_block,
build_empty_block_for_next_slot,
)
from eth2spec.test.helpers.constants import MINIMAL
from eth2spec.test.helpers.light_client import (
get_sync_aggregate,
initialize_light_client_store,
)
from eth2spec.test.helpers.state import (
next_slots,
state_transition_and_sign_block,
)
from eth2spec.test.helpers.sync_committee import (
compute_aggregate_sync_committee_signature,
)
from eth2spec.test.helpers.merkle import build_proof


def _initialize_light_client_store(spec, state):
return spec.LightClientStore(
finalized_header=spec.BeaconBlockHeader(),
current_sync_committee=state.current_sync_committee,
next_sync_committee=state.next_sync_committee,
best_valid_update=None,
optimistic_header=spec.BeaconBlockHeader(),
previous_max_active_participants=0,
current_max_active_participants=0,
)


@with_altair_and_later
@spec_state_test
def test_process_light_client_update_not_timeout(spec, state):
store = _initialize_light_client_store(spec, state)
store = initialize_light_client_store(spec, state)

# Block at slot 1 doesn't increase sync committee period, so it won't force update store.finalized_header
block = build_empty_block_for_next_slot(spec, state)
@@ -49,19 +40,7 @@ def test_process_light_client_update_not_timeout(spec, state):
body_root=signed_block.message.body.hash_tree_root(),
)
# Sync committee signing the header
all_pubkeys = [v.pubkey for v in state.validators]
committee = [all_pubkeys.index(pubkey) for pubkey in state.current_sync_committee.pubkeys]
sync_committee_bits = [True] * len(committee)
sync_committee_signature = compute_aggregate_sync_committee_signature(
spec,
state,
block_header.slot,
committee,
)
sync_aggregate = spec.SyncAggregate(
sync_committee_bits=sync_committee_bits,
sync_committee_signature=sync_committee_signature,
)
sync_aggregate = get_sync_aggregate(spec, state, block_header, block_root=None)
next_sync_committee_branch = [spec.Bytes32() for _ in range(spec.floorlog2(spec.NEXT_SYNC_COMMITTEE_INDEX))]

# Ensure that finality checkpoint is genesis
@@ -94,12 +73,12 @@
@spec_state_test
@with_presets([MINIMAL], reason="too slow")
def test_process_light_client_update_timeout(spec, state):
store = _initialize_light_client_store(spec, state)
store = initialize_light_client_store(spec, state)

# Forward to next sync committee period
next_slots(spec, state, spec.UPDATE_TIMEOUT)
snapshot_period = spec.compute_epoch_at_slot(store.optimistic_header.slot) // spec.EPOCHS_PER_SYNC_COMMITTEE_PERIOD
update_period = spec.compute_epoch_at_slot(state.slot) // spec.EPOCHS_PER_SYNC_COMMITTEE_PERIOD
snapshot_period = spec.compute_sync_committee_period(spec.compute_epoch_at_slot(store.optimistic_header.slot))
update_period = spec.compute_sync_committee_period(spec.compute_epoch_at_slot(state.slot))
assert snapshot_period + 1 == update_period

block = build_empty_block_for_next_slot(spec, state)
@@ -113,20 +92,8 @@ def test_process_light_client_update_timeout(spec, state):
)

# Sync committee signing the finalized_block_header
all_pubkeys = [v.pubkey for v in state.validators]
committee = [all_pubkeys.index(pubkey) for pubkey in state.current_sync_committee.pubkeys]
sync_committee_bits = [True] * len(committee)
sync_committee_signature = compute_aggregate_sync_committee_signature(
spec,
state,
block_header.slot,
committee,
block_root=spec.Root(block_header.hash_tree_root()),
)
sync_aggregate = spec.SyncAggregate(
sync_committee_bits=sync_committee_bits,
sync_committee_signature=sync_committee_signature,
)
sync_aggregate = get_sync_aggregate(
spec, state, block_header, block_root=spec.Root(block_header.hash_tree_root()))

# Sync committee is updated
next_sync_committee_branch = build_proof(state.get_backing(), spec.NEXT_SYNC_COMMITTEE_INDEX)
@@ -158,7 +125,7 @@
@spec_state_test
@with_presets([MINIMAL], reason="too slow")
def test_process_light_client_update_finality_updated(spec, state):
store = _initialize_light_client_store(spec, state)
store = initialize_light_client_store(spec, state)

# Change finality
blocks = []
@@ -169,8 +136,8 @@ def test_process_light_client_update_finality_updated(spec, state):
# Ensure that finality checkpoint has changed
assert state.finalized_checkpoint.epoch == 3
# Ensure that it's same period
snapshot_period = spec.compute_epoch_at_slot(store.optimistic_header.slot) // spec.EPOCHS_PER_SYNC_COMMITTEE_PERIOD
update_period = spec.compute_epoch_at_slot(state.slot) // spec.EPOCHS_PER_SYNC_COMMITTEE_PERIOD
snapshot_period = spec.compute_sync_committee_period(spec.compute_epoch_at_slot(store.optimistic_header.slot))
update_period = spec.compute_sync_committee_period(spec.compute_epoch_at_slot(state.slot))
assert snapshot_period == update_period

# Updated sync_committee and finality
@@ -191,20 +158,8 @@ def test_process_light_client_update_finality_updated(spec, state):
)

# Sync committee signing the finalized_block_header
all_pubkeys = [v.pubkey for v in state.validators]
committee = [all_pubkeys.index(pubkey) for pubkey in state.current_sync_committee.pubkeys]
sync_committee_bits = [True] * len(committee)
sync_committee_signature = compute_aggregate_sync_committee_signature(
spec,
state,
block_header.slot,
committee,
block_root=spec.Root(block_header.hash_tree_root()),
)
sync_aggregate = spec.SyncAggregate(
sync_committee_bits=sync_committee_bits,
sync_committee_signature=sync_committee_signature,
)
sync_aggregate = get_sync_aggregate(
spec, state, block_header, block_root=spec.Root(block_header.hash_tree_root()))

update = spec.LightClientUpdate(
attested_header=block_header,
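These tests now import `initialize_light_client_store` and `get_sync_aggregate` from `eth2spec.test.helpers.light_client` instead of building the store and sync aggregate inline. Based purely on the inline code this diff removes (the real helper may differ in detail), `get_sync_aggregate` presumably looks roughly like the reconstruction below, with `compute_aggregate_sync_committee_signature` still coming from `eth2spec.test.helpers.sync_committee`. Likewise, the removed `_initialize_light_client_store` body earlier in this file shows what the new `initialize_light_client_store` helper is expected to return.

```python
from eth2spec.test.helpers.sync_committee import (
    compute_aggregate_sync_committee_signature,
)


def get_sync_aggregate(spec, state, block_header, block_root=None):
    # Reconstruction of the removed inline logic: the full current sync committee
    # signs the header's slot (optionally for an explicit block root) and every
    # participation bit is set.
    all_pubkeys = [v.pubkey for v in state.validators]
    committee = [all_pubkeys.index(pubkey) for pubkey in state.current_sync_committee.pubkeys]
    sync_committee_bits = [True] * len(committee)
    sync_committee_signature = compute_aggregate_sync_committee_signature(
        spec,
        state,
        block_header.slot,
        committee,
        block_root=block_root,
    )
    return spec.SyncAggregate(
        sync_committee_bits=sync_committee_bits,
        sync_committee_signature=sync_committee_signature,
    )
```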
Original file line number Diff line number Diff line change
@@ -153,7 +153,7 @@ def test_bad_random_first_payload(spec, state):

# execution payload
execution_payload = build_empty_execution_payload(spec, state)
execution_payload.random = b'\x42' * 32
execution_payload.prev_randao = b'\x42' * 32

yield from run_execution_payload_processing(spec, state, execution_payload, valid=False)

Expand All @@ -167,7 +167,7 @@ def test_bad_random_regular_payload(spec, state):

# execution payload
execution_payload = build_empty_execution_payload(spec, state)
execution_payload.random = b'\x04' * 32
execution_payload.prev_randao = b'\x04' * 32

yield from run_execution_payload_processing(spec, state, execution_payload, valid=False)

@@ -182,7 +182,7 @@ def test_bad_everything_regular_payload(spec, state):
# execution payload
execution_payload = build_empty_execution_payload(spec, state)
execution_payload.parent_hash = spec.Hash32()
execution_payload.random = spec.Bytes32()
execution_payload.prev_randao = spec.Bytes32()
execution_payload.timestamp = 0

yield from run_execution_payload_processing(spec, state, execution_payload, valid=False)
Original file line number Diff line number Diff line change
@@ -103,7 +103,7 @@ def test_prepare_execution_payload(spec, state):

# 1. Handle `is_merge_complete`
if is_merge_complete:
state.latest_execution_payload_header = spec.ExecutionPayloadHeader(random=b'\x12' * 32)
state.latest_execution_payload_header = spec.ExecutionPayloadHeader(prev_randao=b'\x12' * 32)
else:
state.latest_execution_payload_header = spec.ExecutionPayloadHeader()
