From b60456fdf347dae478fda6f7aa94eaf83c4fd80c Mon Sep 17 00:00:00 2001
From: tersec
Date: Fri, 26 Aug 2022 22:47:40 +0000
Subject: [PATCH] `withState`: `state` -> `forkyState` (#4038)

---
 .../block_clearance.nim                       |  2 +-
 .../consensus_object_pools/blockchain_dag.nim | 38 ++++++++++---------
 .../blockchain_dag_light_client.nim           | 13 ++++---
 beacon_chain/spec/state_transition_block.nim  |  2 +-
 research/simutils.nim                         |  4 +-
 ...est_fixture_light_client_sync_protocol.nim |  2 +-
 6 files changed, 33 insertions(+), 28 deletions(-)

diff --git a/beacon_chain/consensus_object_pools/block_clearance.nim b/beacon_chain/consensus_object_pools/block_clearance.nim
index c05f0e996c..4e73b9ee40 100644
--- a/beacon_chain/consensus_object_pools/block_clearance.nim
+++ b/beacon_chain/consensus_object_pools/block_clearance.nim
@@ -143,7 +143,7 @@ proc advanceClearanceState*(dag: ChainDAGRef) =
   # Notably, we use the clearance state here because that's where the block will
   # first be seen - later, this state will be copied to the head state!
   let advanced = withState(dag.clearanceState):
-    state.data.slot > state.data.latest_block_header.slot
+    forkyState.data.slot > forkyState.data.latest_block_header.slot

   if not advanced:
     let next = getStateField(dag.clearanceState, slot) + 1
diff --git a/beacon_chain/consensus_object_pools/blockchain_dag.nim b/beacon_chain/consensus_object_pools/blockchain_dag.nim
index 0f7bd8216e..6079e630fa 100644
--- a/beacon_chain/consensus_object_pools/blockchain_dag.nim
+++ b/beacon_chain/consensus_object_pools/blockchain_dag.nim
@@ -143,7 +143,7 @@ template is_merge_transition_complete(
     stateParam: ForkedHashedBeaconState): bool =
   withState(stateParam):
     when stateFork >= BeaconStateFork.Bellatrix:
-      is_merge_transition_complete(state.data)
+      is_merge_transition_complete(forkyState.data)
     else:
       false

@@ -366,7 +366,7 @@ func init*(
     dependent_epoch =
       if epoch < 1: Epoch(0) else: epoch - 1
     attester_dependent_root =
-      withState(state): state.dependent_root(dependent_epoch)
+      withState(state): forkyState.dependent_root(dependent_epoch)

   ShufflingRef(
     epoch: epoch,
@@ -380,13 +380,15 @@ func init*(
     cache: var StateCache): T =
   let
     epoch = state.get_current_epoch()
-    proposer_dependent_root = withState(state): state.proposer_dependent_root
+    proposer_dependent_root = withState(state):
+      forkyState.proposer_dependent_root
     shufflingRef = dag.findShufflingRef(state.latest_block_id, epoch).valueOr:
       let tmp = ShufflingRef.init(state, cache, epoch)
       dag.putShufflingRef(tmp)
       tmp
-    attester_dependent_root = withState(state): state.attester_dependent_root
+    attester_dependent_root = withState(state):
+      forkyState.attester_dependent_root

     epochRef = EpochRef(
       key: dag.epochAncestor(state.latest_block_id, epoch),
@@ -703,7 +705,7 @@ proc putState(dag: ChainDAGRef, state: ForkedHashedBeaconState, bid: BlockId) =
   # transaction to prevent database inconsistencies, but the state loading code
   # is resilient against one or the other going missing
   withState(state):
-    dag.db.putState(state)
+    dag.db.putState(forkyState)

   debug "Stored state", putStateDur = Moment.now() - startTick

@@ -733,9 +735,10 @@ proc advanceSlots*(
       # in the monitor. This may be inaccurate during a deep reorg (>1 epoch)
       # which is an acceptable tradeoff for monitoring.
       withState(state):
-        let postEpoch = state.data.slot.epoch
+        let postEpoch = forkyState.data.slot.epoch
         if preEpoch != postEpoch:
-          dag.validatorMonitor[].registerEpochInfo(postEpoch, info, state.data)
+          dag.validatorMonitor[].registerEpochInfo(
+            postEpoch, info, forkyState.data)

 proc applyBlock(
     dag: ChainDAGRef, state: var ForkedHashedBeaconState, bid: BlockId,
@@ -1233,7 +1236,7 @@ proc updateState*(
   let
     startTick = Moment.now()
     current {.used.} = withState(state):
-      BlockSlotId.init(state.latest_block_id, state.data.slot)
+      BlockSlotId.init(forkyState.latest_block_id, forkyState.data.slot)

   var
     ancestors: seq[BlockId]
@@ -1361,7 +1364,7 @@ proc updateState*(
   let
     assignTick = Moment.now()
     ancestor {.used.} = withState(state):
-      BlockSlotId.init(state.latest_block_id, state.data.slot)
+      BlockSlotId.init(forkyState.latest_block_id, forkyState.data.slot)
     ancestorRoot {.used.} = getStateRoot(state)

   var info: ForkedEpochInfo
@@ -1576,7 +1579,7 @@ func syncCommitteeParticipants*(dag: ChainDAGRef,
     when stateFork >= BeaconStateFork.Altair:
       let
         period = sync_committee_period(slot)
-        curPeriod = sync_committee_period(state.data.slot)
+        curPeriod = sync_committee_period(forkyState.data.slot)

       if period == curPeriod:
         @(dag.headSyncCommittees.current_sync_committee)
@@ -1606,7 +1609,7 @@ func getSubcommitteePositions*(
     when stateFork >= BeaconStateFork.Altair:
       let
         period = sync_committee_period(slot)
-        curPeriod = sync_committee_period(state.data.slot)
+        curPeriod = sync_committee_period(forkyState.data.slot)

       template search(syncCommittee: openArray[ValidatorIndex]): seq[uint64] =
         dag.getSubcommitteePositionsAux(
@@ -1769,7 +1772,7 @@ proc updateHead*(

   withState(dag.headState):
     when stateFork >= BeaconStateFork.Altair:
-      dag.headSyncCommittees = state.data.get_sync_committee_cache(cache)
+      dag.headSyncCommittees = forkyState.data.get_sync_committee_cache(cache)

   let
     finalized_checkpoint =
@@ -1822,8 +1825,9 @@ proc updateHead*(
   if not(isNil(dag.onHeadChanged)):
     let
       currentEpoch = epoch(newHead.slot)
-      depRoot = withState(dag.headState): state.proposer_dependent_root
-      prevDepRoot = withState(dag.headState): state.attester_dependent_root
+      depRoot = withState(dag.headState): forkyState.proposer_dependent_root
+      prevDepRoot = withState(dag.headState):
+        forkyState.attester_dependent_root
       epochTransition = (finalizedHead != dag.finalizedHead)
       # TODO (cheatfate): Proper implementation required
       data = HeadChangeInfoObject.init(dag.head.slot, dag.head.root,
@@ -1837,7 +1841,7 @@ proc updateHead*(
     # state-related metrics change - notify the validator monitor.
     # Doing this update during head update ensures there's a reasonable number
     # of such updates happening - at most once per valid block.
-    dag.validatorMonitor[].registerState(state.data)
+    dag.validatorMonitor[].registerState(forkyState.data)

   if finalizedHead != dag.finalizedHead:
     debug "Reached new finalization checkpoint",
@@ -2180,10 +2184,10 @@ proc rebuildIndex*(dag: ChainDAGRef) =
         dag.updateFlags).expect("process_slots shouldn't fail when state slot is correct")

       withState(state[]):
-        dag.db.putState(state)
+        dag.db.putState(forkyState)
         dag.db.checkpoint()

-        state_root = state.root
+        state_root = forkyState.root

   # Now that we have states all the way to genesis, we can adjust the tail
   # and readjust the in-memory indices to what they would look like if we had
diff --git a/beacon_chain/consensus_object_pools/blockchain_dag_light_client.nim b/beacon_chain/consensus_object_pools/blockchain_dag_light_client.nim
index 964532d811..84e4f13c8f 100644
--- a/beacon_chain/consensus_object_pools/blockchain_dag_light_client.nim
+++ b/beacon_chain/consensus_object_pools/blockchain_dag_light_client.nim
@@ -119,7 +119,7 @@ proc syncCommitteeRootForPeriod(
   dag.withUpdatedExistingState(tmpState, bsi) do:
     withState(state):
       when stateFork >= BeaconStateFork.Altair:
-        ok state.syncCommitteeRoot
+        ok forkyState.syncCommitteeRoot
       else: raiseAssert "Unreachable"
   do: err()

@@ -208,7 +208,7 @@ proc initLightClientBootstrapForPeriod(
       continue
     let branch = withState(tmpState[]):
       when stateFork >= BeaconStateFork.Altair:
-        state.data.build_proof(altair.CURRENT_SYNC_COMMITTEE_INDEX).get
+        forkyState.data.build_proof(altair.CURRENT_SYNC_COMMITTEE_INDEX).get
       else: raiseAssert "Unreachable"
     dag.lcDataStore.db.putCurrentSyncCommitteeBranch(bid.slot, branch)
   res
@@ -322,7 +322,7 @@ proc initLightClientUpdateForPeriod(
     dag.withUpdatedExistingState(tmpState[], attestedBid.atSlot) do:
       withState(state):
         when stateFork >= BeaconStateFork.Altair:
-          state.data.finalized_checkpoint.epoch
+          forkyState.data.finalized_checkpoint.epoch
         else: raiseAssert "Unreachable"
     do:
       dag.handleUnexpectedLightClientError(attestedBid.slot)
@@ -615,7 +615,7 @@ proc initLightClientDataCache*(dag: ChainDAGRef) =
     return
   withState(dag.headState):
     when stateFork >= BeaconStateFork.Altair:
-      dag.cacheLightClientData(state, dag.head.bid)
+      dag.cacheLightClientData(forkyState, dag.head.bid)
     else: raiseAssert "Unreachable" # `tailSlot` cannot be before Altair
   if dag.lcDataStore.importMode == LightClientDataImportMode.OnlyNew:
     return
@@ -748,7 +748,7 @@ proc processHeadChangeForLightClient*(dag: ChainDAGRef) =
         period, dag.lcDataStore.cache.pendingBest.getOrDefault(key))
   withState(dag.headState): # Common case separate to avoid `tmpState` copy
     when stateFork >= BeaconStateFork.Altair:
-      let key = (headPeriod, state.syncCommitteeRoot)
+      let key = (headPeriod, forkyState.syncCommitteeRoot)
       dag.lcDataStore.db.putBestUpdate(
         headPeriod, dag.lcDataStore.cache.pendingBest.getOrDefault(key))
     else: raiseAssert "Unreachable" # `tailSlot` cannot be before Altair
@@ -845,7 +845,8 @@ proc getLightClientBootstrap*(
     dag.withUpdatedExistingState(tmpState[], bsi) do:
       branch = withState(state):
         when stateFork >= BeaconStateFork.Altair:
-          state.data.build_proof(altair.CURRENT_SYNC_COMMITTEE_INDEX).get
+          forkyState.data.build_proof(
+            altair.CURRENT_SYNC_COMMITTEE_INDEX).get
         else: raiseAssert "Unreachable"
     do: return err()
     dag.lcDataStore.db.putCurrentSyncCommitteeBranch(slot, branch)
diff --git a/beacon_chain/spec/state_transition_block.nim b/beacon_chain/spec/state_transition_block.nim
index b950db271e..e4e1515ba9 100644
--- a/beacon_chain/spec/state_transition_block.nim
+++ b/beacon_chain/spec/state_transition_block.nim
@@ -236,7 +236,7 @@ proc check_attester_slashing*(
     state: var ForkedHashedBeaconState; attester_slashing: SomeAttesterSlashing;
     flags: UpdateFlags): Result[seq[ValidatorIndex], cstring] =
   withState(state):
-    check_attester_slashing(state.data, attester_slashing, flags)
+    check_attester_slashing(forkyState.data, attester_slashing, flags)

 # https://github.com/ethereum/consensus-specs/blob/v1.2.0-rc.3/specs/phase0/beacon-chain.md#attester-slashings
 proc process_attester_slashing*(
diff --git a/research/simutils.nim b/research/simutils.nim
index 60ef7ac553..71e64fc484 100644
--- a/research/simutils.nim
+++ b/research/simutils.nim
@@ -82,11 +82,11 @@ proc loadGenesis*(validators: Natural, validate: bool):
       cfg, readAllBytes(genesisFn).tryGet()))

     withState(res[]):
-      if state.data.slot != GENESIS_SLOT:
+      if forkyState.data.slot != GENESIS_SLOT:
         echo "Can only start from genesis state"
         quit 1

-      if state.data.validators.len != validators:
+      if forkyState.data.validators.len != validators:
         echo &"Supplied genesis file has {state.data.validators.len} validators, while {validators} where requested, running anyway"

     echo &"Loaded {genesisFn}..."
diff --git a/tests/consensus_spec/altair/test_fixture_light_client_sync_protocol.nim b/tests/consensus_spec/altair/test_fixture_light_client_sync_protocol.nim
index 62ea99b470..fea6fa61b2 100644
--- a/tests/consensus_spec/altair/test_fixture_light_client_sync_protocol.nim
+++ b/tests/consensus_spec/altair/test_fixture_light_client_sync_protocol.nim
@@ -124,7 +124,7 @@ proc block_for_next_slot(
   let attestations =
     if withAttestations:
-      let block_root = withState(forked): state.latest_block_root
+      let block_root = withState(forked): forkyState.latest_block_root
       makeFullAttestations(forked, block_root, state.slot, cache)
     else:
       @[]
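
Note (not part of the patch): the rename above touches only the identifier that `withState` injects into its body. Below is a minimal, self-contained sketch of that mechanism under simplified assumptions; the real `withState`, `ForkedHashedBeaconState`, `BeaconStateFork` and per-fork data fields live in the nimbus-eth2 sources (e.g. `beacon_chain/spec/forks.nim`), while the names here (`withStateSketch`, `ForkedState`, `Phase0Data`, `AltairData`, `syncCommitteeBits`) are hypothetical stand-ins. The point of the rename is that the injected fork-specific view no longer shadows, or gets confused with, the surrounding forked-state variable, which at most call sites is itself named `state`.

# Minimal sketch of the `withState`/`forkyState` pattern (stand-in types, for
# illustration only; not the real spec types or the real nimbus-eth2 API).
type
  StateFork = enum
    forkPhase0, forkAltair

  Phase0Data = object
    slot: uint64

  AltairData = object
    slot: uint64
    syncCommitteeBits: int

  ForkedState = object
    case kind: StateFork
    of forkPhase0: phase0Data: Phase0Data
    of forkAltair: altairData: AltairData

template withStateSketch(x: ForkedState, body: untyped): untyped =
  ## Dispatches on the fork at runtime and injects a fork-specific view into
  ## `body`. After this patch, nimbus-eth2 injects that view as `forkyState`
  ## rather than `state`, so it cannot be mistaken for the forked wrapper.
  case x.kind
  of forkPhase0:
    const stateFork {.inject, used.} = forkPhase0
    template forkyState: untyped {.inject, used.} = x.phase0Data
    body
  of forkAltair:
    const stateFork {.inject, used.} = forkAltair
    template forkyState: untyped {.inject, used.} = x.altairData
    body

when isMainModule:
  let state = ForkedState(
    kind: forkAltair, altairData: AltairData(slot: 42, syncCommitteeBits: 512))
  withStateSketch(state):
    # `state` still names the forked wrapper; fork-specific fields are read
    # through the injected `forkyState`, guarded by the injected `stateFork`.
    echo forkyState.slot
    when stateFork >= forkAltair:
      echo forkyState.syncCommitteeBits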