implement v1.2.0 optimistic sync tests #4174

Merged (4 commits) Sep 27, 2022
Changes from 1 commit
7 changes: 6 additions & 1 deletion AllTests-mainnet.md
@@ -277,6 +277,11 @@ OK: 4/4 Fail: 0/4 Skip: 0/4
+ [SCRYPT] Network Keystore encryption OK
```
OK: 12/12 Fail: 0/12 Skip: 0/12
## Latest valid hash [Preset: mainnet]
```diff
+ LVH searching OK
```
OK: 1/1 Fail: 0/1 Skip: 0/1
## Light client [Preset: mainnet]
```diff
+ Init from checkpoint OK
@@ -585,4 +590,4 @@ OK: 1/1 Fail: 0/1 Skip: 0/1
OK: 9/9 Fail: 0/9 Skip: 0/9

---TOTAL---
OK: 326/331 Fail: 0/331 Skip: 5/331
OK: 327/332 Fail: 0/332 Skip: 5/332
5 changes: 3 additions & 2 deletions ConsensusSpecPreset-mainnet.md
@@ -224,6 +224,7 @@ ConsensusSpecPreset-mainnet
+ Slots - over_epoch_boundary OK
+ Slots - slots_1 OK
+ Slots - slots_2 OK
+ Sync - mainnet/bellatrix/sync/optimistic/pyspec_tests/from_syncing_to_invalid OK
+ [Invalid] EF - Altair - Sanity - Blocks - double_same_proposer_slashings_same_block [Prese OK
+ [Invalid] EF - Altair - Sanity - Blocks - double_similar_proposer_slashings_same_block [Pr OK
+ [Invalid] EF - Altair - Sanity - Blocks - double_validator_exit_same_block [Preset: mainne OK
@@ -439,7 +440,7 @@ ConsensusSpecPreset-mainnet
+ fork_random_low_balances OK
+ fork_random_misc_balances OK
```
OK: 429/436 Fail: 0/436 Skip: 7/436
OK: 430/437 Fail: 0/437 Skip: 7/437
## Attestation
```diff
+ [Invalid] EF - Altair - Operations - Attestation - after_epoch_slots OK
@@ -1292,4 +1293,4 @@ OK: 44/44 Fail: 0/44 Skip: 0/44
OK: 33/33 Fail: 0/33 Skip: 0/33

---TOTAL---
OK: 1115/1122 Fail: 0/1122 Skip: 7/1122
OK: 1116/1123 Fail: 0/1123 Skip: 7/1123
5 changes: 3 additions & 2 deletions ConsensusSpecPreset-minimal.md
@@ -264,6 +264,7 @@ ConsensusSpecPreset-minimal
+ Slots - over_epoch_boundary OK
+ Slots - slots_1 OK
+ Slots - slots_2 OK
+ Sync - minimal/bellatrix/sync/optimistic/pyspec_tests/from_syncing_to_invalid OK
+ [Invalid] EF - Altair - Sanity - Blocks - double_same_proposer_slashings_same_block [Prese OK
+ [Invalid] EF - Altair - Sanity - Blocks - double_similar_proposer_slashings_same_block [Pr OK
+ [Invalid] EF - Altair - Sanity - Blocks - double_validator_exit_same_block [Preset: minima OK
@@ -497,7 +498,7 @@ ConsensusSpecPreset-minimal
+ fork_random_low_balances OK
+ fork_random_misc_balances OK
```
OK: 487/494 Fail: 0/494 Skip: 7/494
OK: 488/495 Fail: 0/495 Skip: 7/495
## Attestation
```diff
+ [Invalid] EF - Altair - Operations - Attestation - after_epoch_slots OK
@@ -1391,4 +1392,4 @@ OK: 48/48 Fail: 0/48 Skip: 0/48
OK: 36/36 Fail: 0/36 Skip: 0/36

---TOTAL---
OK: 1206/1213 Fail: 0/1213 Skip: 7/1213
OK: 1207/1214 Fail: 0/1214 Skip: 7/1214
39 changes: 39 additions & 0 deletions beacon_chain/consensus_object_pools/blockchain_dag.nim
@@ -1898,6 +1898,45 @@ proc updateHead*(
dag.finalizedHead.blck.root, stateRoot, dag.finalizedHead.slot.epoch)
dag.onFinHappened(dag, data)

proc getEarliestInvalidRoot*(
Contributor:

getEarliestInvalidHashBlockRoot or getDescendantOfLatestValidHash would be clearer. Or, at the very least, getEarliestInvalidBlockRoot to remove ambiguity with other roots e.g. state root

Contributor Author:

I'd want to use Root in some capacity here, because while it's dealing with the LVH (a hash, from the EL), what it returns is firmly in CL-land, an SSZ root. HashBlockRoot is a bit redundant in that sense, and it blurs the boundary between the EL "hash" and the CL "root".

But yes, state vs block root is a useful distinction to make, so getEarliestInvalidBlockRoot is a good change.

840d097

dag: ChainDAGRef, initialSearchRoot: Eth2Digest, lvh: Eth2Digest,
Contributor:

lvh --> latestValidHash maybe? Could be confusing if revisited in a couple months.

defaultEarliestInvalidRoot: Eth2Digest): Eth2Digest =
# "Earliest" is within the chain/fork in question, per the LVH definition.
# This is intended to be called with `initialSearchRoot` set to the parent of
# the block for which a newPayload or forkchoiceUpdated execution status was
# received, because the EF consensus spec sync tests effectively require this
# before the BlockRef is made. To accommodate those tests, and because the
# LVH might be the immediate parent or a more distant ancestor, the earliest
# invalid root may not come from this function's search at all, but instead
# be provided as `defaultEarliestInvalidRoot` by the caller, which has access
# to the block.
var curBlck = dag.getBlockRef(initialSearchRoot).valueOr:
# Being asked to traverse a chain which the DAG doesn't know about -- but
# that'd imply the block's otherwise invalid for CL as well as EL.
return static(default(Eth2Digest))
Contributor:

Hmm, would it be correct to return defaultEarliestInvalidRoot here as well?

If yes, the return type could be changed to Opt[Eth2Digest], and the caller could then do dag.getEarliestInvalidRoot(initialSearchRoot, lvh).get(defaultEarliestInvalidRoot). This would avoid polluting this function with defaultEarliestInvalidRoot.
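A minimal sketch of the shape this suggestion implies -- hypothetical, not part of the PR -- assuming the `Opt` type already used throughout nimbus-eth2 and reusing the walk from the function above:

```nim
proc getEarliestInvalidBlockRoot*(
    dag: ChainDAGRef, initialSearchRoot: Eth2Digest,
    latestValidHash: Eth2Digest): Opt[Eth2Digest] =
  ## Same walk as getEarliestInvalidRoot above, but the unknown-chain and
  ## LVH-is-direct-parent cases surface as Opt.none, so the fallback (and
  ## defaultEarliestInvalidRoot) stays entirely on the caller's side.
  var curBlck = dag.getBlockRef(initialSearchRoot).valueOr:
    return Opt.none(Eth2Digest)

  if curBlck.executionBlockRoot.isSome and
      curBlck.executionBlockRoot.get == latestValidHash:
    return Opt.none(Eth2Digest)

  while true:
    if curBlck.parent.isNil or
        curBlck.parent.executionBlockRoot.isNone or
        curBlck.parent.executionBlockRoot.get == latestValidHash:
      break
    curBlck = curBlck.parent

  Opt.some(curBlck.root)

# Caller side, as proposed in the comment:
#   dag.getEarliestInvalidBlockRoot(initialSearchRoot, lvh)
#     .get(defaultEarliestInvalidRoot)
```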


# Only allow this special case outside the loop; it covers the LVH being the
# direct parent of the reported invalid block
if curBlck.executionBlockRoot.isSome and
curBlck.executionBlockRoot.get == lvh:
return defaultEarliestInvalidRoot

while true:
# This should have been caught either by the pre-loop check or by the
# parent check.
if curBlck.executionBlockRoot.isSome and
curBlck.executionBlockRoot.get == lvh:
doAssert false, "getEarliestInvalidRoot: unexpected LVH in loop body"

if (curBlck.parent.isNil) or
curBlck.parent.executionBlockRoot.isNone or
(curBlck.parent.executionBlockRoot.isSome and
curBlck.parent.executionBlockRoot.get == lvh):
tersec marked this conversation as resolved.
break
curBlck = curBlck.parent

curBlck.root
Contributor:

This is not necessarily correct.

When someone switches from a different CL to Nimbus but retains their EL, the EL's lvh may be far in the future. This means that none of our DAG blocks contains lvh. This implementation would then return the first descendant of dag.finalizedHead (or dag.finalizedHead itself if no descendants were yet added).

If lvh is not found, maybe better to return the defaultEarliestInvalidRoot or ZERO_HASH instead.

Contributor:

Also, in extended periods of non-finality, the linear scan may become expensive if performed repeatedly.
One way to accelerate it could be to stop the scan at the latest known-VALID block instead of going all the way back to finalized, or to impose a constant maximum search depth.
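A hedged sketch of what that cap might look like inside getEarliestInvalidRoot -- the constant name and value are illustrative, not from the PR:

```nim
# Hypothetical bound on how far back the LVH search walks before giving up.
const MAX_LVH_SEARCH_DEPTH = 64

# The walk would then be depth-limited; if the cap is hit without finding the
# LVH, the caller would fall back to the existing per-block recovery path
# rather than trusting a possibly-too-shallow answer.
var searchDepth = 0
while searchDepth < MAX_LVH_SEARCH_DEPTH:
  if curBlck.parent.isNil or
      curBlck.parent.executionBlockRoot.isNone or
      curBlck.parent.executionBlockRoot.get == lvh:
    break
  curBlck = curBlck.parent
  inc searchDepth
```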

Contributor Author:

#4205

In theory, the latest-known VALID block is exactly where this will stop. If it doesn't, that's an EL bug. EL bugs shouldn't crash Nimbus, or result in durably incorrect behavior, but EL bugs can trigger suboptimally slow behavior. It's meant to be a mutually trusted system, and I'm wary of adding complication to Nimbus to handle buggy ELs, when that complication can carry its own risks.

In particular, https://github.com/ethereum/execution-apis/blob/main/src/engine/specification.md#payloadstatusv1 defines:

latestValidHash: DATA|null, 32 Bytes - the hash of the most recent valid block in the branch defined by payload and its ancestors

This also addresses the concern about the EL's lvh being far in the future in the new-CL-database/existing-EL-database case: the lvh is relative to the payload provided by the CL. Here, the failure mode for a buggy EL is that Nimbus needs to be restarted, which seems reasonable.

As far as cost in general, this should be a relatively one-time thing per invalid branch -- once invalidated, it shouldn't be doing that backwards search again, so it's potentially better to do it once than multiple times, along the lines of how pruning huge swaths of finalized blocks after a long-unfinalized network finally finalizes is expensive, but one-time. Doing halfway versions introduces less-well-defined state and doesn't necessarily save time overall, in a throughput sense.

There is an issue here where the quarantine unviables are sometimes checked only as direct parents rather than ancestors, which is an argument for not using the LVH-based search for adding to quarantine.

The other aspect is that it should rediscover all of this by just asking the EL, if it misses something. So everything here should be considered a cache.

For the moment, this is all only used in tests, so another approach is to move this out from blockchain_dag and into tests/, to ensure that it doesn't accidentally get used in blockchain_dag.

While the initial intent, and still one that seems broadly ideal, is to maximally match test behavior with non-test behavior (otherwise, what is the test testing?), the status quo here is that this LVH infrastructure is test-only, so it can and should reflect that.


proc isInitialized*(T: type ChainDAGRef, db: BeaconChainDB): Result[void, cstring] =
# Lightweight check to see if we have the minimal information needed to
# load up a database - we don't check head here - if something is wrong with
28 changes: 21 additions & 7 deletions beacon_chain/consensus_object_pools/consensus_manager.nim
@@ -167,7 +167,7 @@ func setOptimisticHead*(
proc runForkchoiceUpdated*(
eth1Monitor: Eth1Monitor,
headBlockRoot, safeBlockRoot, finalizedBlockRoot: Eth2Digest):
Future[PayloadExecutionStatus] {.async.} =
Future[(PayloadExecutionStatus, Option[BlockHash])] {.async.} =
# Allow finalizedBlockRoot to be 0 to avoid sync deadlocks.
#
# https://github.com/ethereum/EIPs/blob/master/EIPS/eip-3675.md#pos-events
@@ -199,11 +199,11 @@ proc runForkchoiceUpdated*(
latestValidHash = $fcuR.payloadStatus.latestValidHash,
validationError = $fcuR.payloadStatus.validationError

return fcuR.payloadStatus.status
return (fcuR.payloadStatus.status, fcuR.payloadStatus.latestValidHash)
except CatchableError as err:
error "runForkchoiceUpdated: forkchoiceUpdated failed",
err = err.msg
return PayloadExecutionStatus.syncing
return (PayloadExecutionStatus.syncing, none BlockHash)

proc runForkchoiceUpdatedDiscardResult*(
eth1Monitor: Eth1Monitor,
@@ -228,17 +228,31 @@ proc updateExecutionClientHead(
return Opt[void].ok()

# Can't use dag.head here because it hasn't been updated yet
let payloadExecutionStatus = await self.eth1Monitor.runForkchoiceUpdated(
headExecutionPayloadHash,
newHead.safeExecutionPayloadHash,
newHead.finalizedExecutionPayloadHash)
let (payloadExecutionStatus, latestValidHash) =
await self.eth1Monitor.runForkchoiceUpdated(
headExecutionPayloadHash,
newHead.safeExecutionPayloadHash,
newHead.finalizedExecutionPayloadHash)

case payloadExecutionStatus
of PayloadExecutionStatus.valid:
self.dag.markBlockVerified(self.quarantine[], newHead.blck.root)
of PayloadExecutionStatus.invalid, PayloadExecutionStatus.invalid_block_hash:
# This is a CL root, not EL hash
let earliestKnownInvalidRoot =
if latestValidHash.isSome:
self.dag.getEarliestInvalidRoot(
newHead.blck.root, latestValidHash.get.asEth2Digest,
newHead.blck.root)
else:
newHead.blck.root

self.attestationPool[].forkChoice.mark_root_invalid(
earliestKnownInvalidRoot)
Contributor:

Hmm. Not sure whether this is correct in situations where the EL is restarted or changed to a different EL while Nimbus is running, or in multi-EL scenarios which may not have the same sync progress.

The previous mechanism of only marking the specific block invalid, then re-selecting a new head, doing fcU to it, and then marking it invalid/valid one at a time seems more robust in general, while not losing correctness (just a bit of latency), especially considering how persistent an addUnviable can be (requiring a restart if done incorrectly due to an intermittent EL bug, for example).

We could shortcut the recovery mechanism though, by using the getEarliestInvalidRoot for the next fcU instead. If that is indeed reported as invalid, then we don't need all the extra fcU calls for the intermediate blocks. However, if it is reported as valid, it just means that the EL was not synced as far as the DAG at the time it was asked for a verdict.

Contributor Author:

I'm fine with keeping the previous logic in place (plus mark_root_invalid), yes, but then it starts diverging from the optimistic sync tests. As much as feasible, I want to keep them aligned, so that what's tested is what's running. The non-earliestInvalid versions in this PR are mostly kept as a fallback.

The previous mechanism still exists, too, and it's necessary during initial optimistic sync, where there can't be an LVH because there hasn't yet been a VALID block (here, LVH only reports actual-EL-VALID, not just not-INVALIDATED). So that recovery mechanism is still needed.

Intermittent EL INVALIDATED bugs should be fixed. It seems unwise to contort Nimbus excessively to handle that "well", beyond what it already does: not persisting them, so at least restarts clear them.

I'm not sure exactly what the multi-EL scenario will look like, but if they disagree with each other on this, well, that's not going to work out well.

But, I'm fine with keeping this unchanged in this PR and exploring this in a future PR, while the LVH handling remains in place to pass the EF optimistic sync tests.

Contributor Author:

313a35d reverts the LVH parts, while still adding mark_root_invalid.

self.dag.markBlockInvalid(newHead.blck.root)
self.dag.markBlockInvalid(earliestKnownInvalidRoot)
self.quarantine[].addUnviable(newHead.blck.root)
self.quarantine[].addUnviable(earliestKnownInvalidRoot)
return Opt.none(void)
of PayloadExecutionStatus.accepted, PayloadExecutionStatus.syncing:
self.dag.optimisticRoots.incl newHead.blck.root
1 change: 1 addition & 0 deletions beacon_chain/fork_choice/fork_choice.nim
@@ -442,6 +442,7 @@ func mark_root_invalid*(self: var ForkChoice, root: Eth2Digest) =
self.backend.proto_array.nodes.offset
if nodePhysicalIdx < self.backend.proto_array.nodes.buf.len:
self.backend.proto_array.nodes.buf[nodePhysicalIdx].invalid = true
self.backend.proto_array.propagateInvalidity(nodePhysicalIdx)
Contributor:

Shouldn't this already be covered for the purposes of scoring / selecting heads?
nodeLeadsToViableHead should already be false if the current block is invalid, treating the entire branch as invalid?

Contributor Author:

The updating is done in maybeUpdateBestChildAndDescendant

func maybeUpdateBestChildAndDescendant(self: var ProtoArray,
parentIdx: Index,
childIdx: Index): FcResult[void] =
## Observe the parent at `parentIdx` with respect to the child at `childIdx` and
## potentially modify the `parent.bestChild` and `parent.bestDescendant` values
##
## There are four scenarios:
##
## 1. The child is already the best child
## but it's now invalid due to a FFG change and should be removed.
## 2. The child is already the best child
## and the parent is updated with the new best descendant
## 3. The child is not the best child but becomes the best child
## 4. The child is not the best child and does not become the best child
let child = self.nodes[childIdx]
if child.isNone():
return err ForkChoiceError(
kind: fcInvalidNodeIndex,
index: childIdx)
let parent = self.nodes[parentIdx]
if parent.isNone():
return err ForkChoiceError(
kind: fcInvalidNodeIndex,
index: parentIdx)
let childLeadsToViableHead = ? self.nodeLeadsToViableHead(child.get())

Which is called by applyScoreChanges, which iterates from descendants to ancestors:

for nodePhysicalIdx in countdown(self.nodes.len - 1, 0):
if node.root.isZero:
continue
if node.parent.isSome():
let parentLogicalIdx = node.parent.unsafeGet()
let parentPhysicalIdx = parentLogicalIdx - self.nodes.offset
if parentPhysicalIdx < 0:
# Orphan
continue
let nodeLogicalIdx = nodePhysicalIdx + self.nodes.offset
? self.maybeUpdateBestChildAndDescendant(parentLogicalIdx, nodeLogicalIdx)

Which is correct for the scores as such -- it allows quick updates. However, unlike the scores, invalidity propagates in the other direction, from ancestors to descendants, and there's no existing mechanism in fork choice to propagate information that way.

# Best-effort; attempts to mark unknown roots invalid harmlessly ignored
except KeyError:
discard
22 changes: 22 additions & 0 deletions beacon_chain/fork_choice/proto_array.nim
@@ -558,6 +558,28 @@ func nodeIsViableForHead(self: ProtoArray, node: ProtoNode): bool =
(self.checkpoints.finalized.epoch == GENESIS_EPOCH)
)

func propagateInvalidity*(
self: var ProtoArray, startPhysicalIdx: Index) =
# Called when the node at startPhysicalIdx has been updated in its parent
# role, so the (parent, child) index pairs of interest both satisfy
# >= startPhysicalIdx; viewed from the child's side, the loop therefore
# starts one index higher.
for nodePhysicalIdx in startPhysicalIdx + 1 ..< self.nodes.len:
let nodeParent = self.nodes.buf[nodePhysicalIdx].parent
if nodeParent.isNone:
continue

let
parentLogicalIdx = nodeParent.unsafeGet()
parentPhysicalIdx = parentLogicalIdx - self.nodes.offset

# Former case is orphaned, latter is invalid, but caught in score updates
if parentPhysicalIdx < 0 or parentPhysicalIdx >= self.nodes.len:
continue

# Invalidity transmits to all descendants
if self.nodes.buf[parentPhysicalIdx].invalid:
self.nodes.buf[nodePhysicalIdx].invalid = true
Comment on lines +580 to +581
Contributor:

What about new descendants of an invalid block that are being added after propagateInvalidity was originally called?

Contributor Author:

In theory, they shouldn't be added in the first place, as they'd be rejected as unviable by the DAG/quarantine.

Contributor Author:

The other fallback here is that they'll ask the EL again if someone does try to build on them, and get that block instead. Since by then things should have mostly caught up to head, it's more likely to be the only remaining set of blocks on which building is still happening, and therefore should iteratively converge.


# Diagnostics
# ----------------------------------------------------------------------
# Helpers to dump internal state
2 changes: 1 addition & 1 deletion beacon_chain/gossip_processing/block_processor.nim
@@ -178,7 +178,7 @@ proc expectValidForkchoiceUpdated(
eth1Monitor: Eth1Monitor,
headBlockRoot, safeBlockRoot, finalizedBlockRoot: Eth2Digest
): Future[void] {.async.} =
let payloadExecutionStatus =
let (payloadExecutionStatus, _) =
await eth1Monitor.runForkchoiceUpdated(
headBlockRoot, safeBlockRoot, finalizedBlockRoot)
if payloadExecutionStatus != PayloadExecutionStatus.valid:
80 changes: 62 additions & 18 deletions tests/consensus_spec/test_fixture_fork_choice.nim
@@ -39,6 +39,7 @@
opOnBlock
opOnMergeBlock
opOnAttesterSlashing
opInvalidateRoot
opChecks

Operation = object
@@ -55,6 +56,9 @@ type
powBlock: PowBlock
of opOnAttesterSlashing:
attesterSlashing: AttesterSlashing
of opInvalidateRoot:
invalidatedRoot: Eth2Digest
latestValidHash: Eth2Digest
of opChecks:
checks: JsonNode

@@ -156,6 +160,13 @@ proc loadOps(path: string, fork: BeaconStateFork): seq[Operation] =
)
result.add Operation(kind: opOnAttesterSlashing,
attesterSlashing: attesterSlashing)
elif step.hasKey"payload_status":
if step["payload_status"]["status"].getStr() == "INVALID":
result.add Operation(kind: opInvalidateRoot,
valid: true,
invalidatedRoot: Eth2Digest.fromHex(step["block_hash"].getStr()),
latestValidHash: Eth2Digest.fromHex(
step["payload_status"]["latest_valid_hash"].getStr()))
elif step.hasKey"checks":
result.add Operation(kind: opChecks,
checks: step["checks"])
@@ -165,7 +176,7 @@ proc loadOps(path: string, fork: BeaconStateFork): seq[Operation] =
if step.hasKey"valid":
doAssert step.len == 2
result[^1].valid = step["valid"].getBool()
elif not step.hasKey"checks":
elif not step.hasKey"checks" and not step.hasKey"payload_status":
doAssert step.len == 1
result[^1].valid = true

@@ -176,7 +187,9 @@ proc stepOnBlock(
state: var ForkedHashedBeaconState,
stateCache: var StateCache,
signedBlock: ForkySignedBeaconBlock,
time: BeaconTime): Result[BlockRef, BlockError] =
time: BeaconTime,
invalidatedRoots: Table[Eth2Digest, Eth2Digest]):
Result[BlockRef, BlockError] =
# 1. Move state to proper slot.
doAssert dag.updateState(
state,
Expand All @@ -193,6 +206,30 @@ proc stepOnBlock(
else:
type TrustedBlock = bellatrix.TrustedSignedBeaconBlock

# In the normal Nimbus flow, this (effectively) newPayload-based INVALID is
# checked even before the block enters the DAG, by the block processor. The
# optimistic sync test(s) currently don't include a later-fcU-INVALID case;
# while such a case wouldn't be part of this check, its fork choice test
# vector step would presumably still carry `true` validity, because the
# block wouldn't yet be known to be invalid. So adding this mock of the
# block processor is realistic and sufficient.
when not (
signedBlock is phase0.SignedBeaconBlock or
signedBlock is altair.SignedBeaconBlock):
let executionPayloadHash =
signedBlock.message.body.execution_payload.block_hash
if executionPayloadHash in invalidatedRoots:
# Mocks fork choice INVALID list application. These tests sequence this
# in a way the block processor does not, specifying each payload_status
# before the block itself, while Nimbus fork choice treats invalidating
# a non-existent block root as a no-op and does not remember it for the
# future.
let lvh = invalidatedRoots.getOrDefault(
executionPayloadHash, static(default(Eth2Digest)))
fkChoice[].mark_root_invalid(dag.getEarliestInvalidRoot(
signedBlock.message.parent_root, lvh, executionPayloadHash))

return err BlockError.Invalid

let blockAdded = dag.addHeadBlock(verifier, signedBlock) do (
blckRef: BlockRef, signedBlock: TrustedBlock,
epochRef: EpochRef, unrealized: FinalityCheckpoints):
@@ -278,6 +315,7 @@ proc doRunTest(path: string, fork: BeaconStateFork) =

let steps = loadOps(path, fork)
var time = stores.fkChoice.checkpoints.time
var invalidatedRoots: Table[Eth2Digest, Eth2Digest]

let state = newClone(stores.dag.headState)
var stateCache = StateCache()
@@ -298,7 +336,7 @@ proc doRunTest(path: string, fork: BeaconStateFork) =
let status = stepOnBlock(
stores.dag, stores.fkChoice,
verifier, state[], stateCache,
blck, time)
blck, time, invalidatedRoots)
doAssert status.isOk == step.valid
of opOnAttesterSlashing:
let indices =
@@ -307,12 +345,14 @@ proc doRunTest(path: string, fork: BeaconStateFork) =
for idx in indices.get:
stores.fkChoice[].process_equivocation(idx)
doAssert indices.isOk == step.valid
of opInvalidateRoot:
invalidatedRoots[step.invalidatedRoot] = step.latestValidHash
of opChecks:
stepChecks(step.checks, stores.dag, stores.fkChoice, time)
else:
doAssert false, "Unsupported"

proc runTest(path: string, fork: BeaconStateFork) =
proc runTest(testType: static[string], path: string, fork: BeaconStateFork) =
const SKIP = [
# protoArray can handle blocks in the future gracefully
# spec: https://github.com/ethereum/consensus-specs/blame/v1.1.3/specs/phase0/fork-choice.md#L349
@@ -327,7 +367,7 @@ proc runTest(path: string, fork: BeaconStateFork) =
"all_valid",
]

test "ForkChoice - " & path.relativePath(SszTestsDir):
test testType & " - " & path.relativePath(SszTestsDir):
when defined(windows):
# Some test files have very long paths
skip()
Expand All @@ -337,17 +377,21 @@ proc runTest(path: string, fork: BeaconStateFork) =
else:
doRunTest(path, fork)

suite "EF - ForkChoice" & preset():
const presetPath = SszTestsDir/const_preset
for kind, path in walkDir(presetPath, relative = true, checkDir = true):
let testsPath = presetPath/path/"fork_choice"
if kind != pcDir or not dirExists(testsPath):
continue
let fork = forkForPathComponent(path).valueOr:
raiseAssert "Unknown test fork: " & testsPath
for kind, path in walkDir(testsPath, relative = true, checkDir = true):
let basePath = testsPath/path/"pyspec_tests"
if kind != pcDir:
template fcSuite(suiteName: static[string], testPathElem: static[string]) =
suite "EF - " & suiteName & preset():
const presetPath = SszTestsDir/const_preset
for kind, path in walkDir(presetPath, relative = true, checkDir = true):
let testsPath = presetPath/path/testPathElem
if kind != pcDir or not dirExists(testsPath):
continue
for kind, path in walkDir(basePath, relative = true, checkDir = true):
runTest(basePath/path, fork)
let fork = forkForPathComponent(path).valueOr:
raiseAssert "Unknown test fork: " & testsPath
for kind, path in walkDir(testsPath, relative = true, checkDir = true):
let basePath = testsPath/path/"pyspec_tests"
if kind != pcDir:
continue
for kind, path in walkDir(basePath, relative = true, checkDir = true):
runTest(suiteName, basePath/path, fork)

fcSuite("ForkChoice", "fork_choice")
fcSuite("Sync", "sync")