From e6c2317322d3abfe4530509c9fd68272def0585f Mon Sep 17 00:00:00 2001 From: Pedro Miranda Date: Fri, 18 Oct 2024 19:12:49 +0100 Subject: [PATCH 01/32] initial commit: simple project structure with makefile, readme and build configs --- Makefile | 7 +++++ nimbus.nimble | 7 +++++ nimbus_unified/README.md | 48 +++++++++++++++++++++++++++++++ nimbus_unified/nimbus_unified.nim | 9 ++++++ 4 files changed, 71 insertions(+) create mode 100644 nimbus_unified/README.md create mode 100644 nimbus_unified/nimbus_unified.nim diff --git a/Makefile b/Makefile index 6ca25bd4c..fea105e67 100644 --- a/Makefile +++ b/Makefile @@ -105,6 +105,7 @@ VERIF_PROXY_OUT_PATH ?= build/libverifproxy/ deps \ update \ nimbus \ + nimbus_unified \ fluffy \ nimbus_verified_proxy \ libverifproxy \ @@ -366,6 +367,12 @@ ifneq ($(USE_LIBBACKTRACE), 0) + $(MAKE) -C vendor/nim-libbacktrace clean $(HANDLE_OUTPUT) endif +# Nimbus unified related targets + +# builds the unified client +nimbus_unified: | build deps + echo -e $(BUILD_MSG) "build/$@" && \ + $(ENV_SCRIPT) nim c $(NIM_PARAMS) -d:chronicles_log_level=TRACE -o:build/$@ "nimbus_unified/$@.nim" # Note about building Nimbus as a library: # # There were `wrappers`, `wrappers-static`, `libnimbus.so` and `libnimbus.a` diff --git a/nimbus.nimble b/nimbus.nimble index 47408a3bc..a0824887c 100644 --- a/nimbus.nimble +++ b/nimbus.nimble @@ -40,6 +40,7 @@ when declared(namedBin): "nimbus/nimbus_execution_client": "nimbus_execution_client", "fluffy/fluffy": "fluffy", "nimbus_verified_proxy/nimbus_verified_proxy": "nimbus_verified_proxy", + "nimbus_unified":"nimbus_unified" }.toTable() import std/os @@ -129,3 +130,9 @@ task nimbus_verified_proxy, "Build Nimbus verified proxy": task nimbus_verified_proxy_test, "Run Nimbus verified proxy tests": test "nimbus_verified_proxy/tests", "test_proof_validation", "-d:chronicles_log_level=ERROR -d:nimbus_db_backend=sqlite" + + +## nimbus unified tasks + +task nimbus_unified, "Build nimbus unified": + buildBinary "nimbus_unified", "nimbus_unified/", "-d:chronicles_log_level=TRACE" \ No newline at end of file diff --git a/nimbus_unified/README.md b/nimbus_unified/README.md new file mode 100644 index 000000000..1b883adfb --- /dev/null +++ b/nimbus_unified/README.md @@ -0,0 +1,48 @@ +# Nimbus Unified + + +[![License: Apache](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](https://opensource.org/licenses/Apache-2.0) +[![License: MIT](https://img.shields.io/badge/License-MIT-blue.svg)](https://opensource.org/licenses/MIT) + +[![Discord: Nimbus](https://img.shields.io/badge/discord-nimbus-orange.svg)](https://discord.gg/XRxWahP) +[![Status: #nimbus-general](https://img.shields.io/badge/status-nimbus--general-orange.svg)](https://join.status.im/nimbus-general) + + +# description +Nimbus Unified combines Ethereum execution and consensus layer functionalities, featuring a fully integrated beacon node), validator duties, and execution layer support. This setup allows the Nimbus client to handle both Ethereum consensus (Eth2) and execution (Eth1) tasks within a single package. 
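A minimal build-and-run sketch, assuming only the Makefile target and nimble task added in this patch (flags and paths may evolve):

```sh
# from the repository root, after the usual update/deps step
make nimbus_unified          # or: nimble nimbus_unified
./build/nimbus_unified       # at this stage the binary only prints "hello Nimbus"
```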
+ +--> meh, requires improvement +# documentation +For in-depth configuration and functionality of Nimbus Eth1 and Nimbus Eth2, refer to: + +- [Nimbus-eth1 - Execution layer client](https://github.com/status-im/nimbus-eth1) Documentation +- [Nimbus-eth2 - Consensus layer client](https://github.com/status-im/nimbus-eth2) Documentation + +--- to be concluded +# dependencies +tbd +# how to +## configuration + todo +## commands + todo +## compile +tbd + - mac os, windows, and linux +## colaborate +We welcome contributions to Nimbus Unified! Please adhere to the following guidelines: + +- Follow the [Nimbus Code of Conduct](https://github.com/status-im/nimbus-eth2/blob/master/CODE_OF_CONDUCT.md). +- Use the [Nimbus Code Style Guide](https://github.com/status-im/nimbus-eth2/blob/master/docs/code_style.md) to maintain code consistency. +- Format your code using the [Nim Pretty Printer (nph)](https://github.com/nim-lang/nimpretty) to ensure consistency across the codebase. Run it as part of your pull request process. +## License + +Licensed and distributed under either of + +* MIT license: [LICENSE-MIT](LICENSE-MIT) or https://opensource.org/licenses/MIT + +or + +* Apache License, Version 2.0: [LICENSE-APACHEv2](LICENSE-APACHEv2) or https://www.apache.org/licenses/LICENSE-2.0 + +at your option. These files may not be copied, modified, or distributed except according to those terms. diff --git a/nimbus_unified/nimbus_unified.nim b/nimbus_unified/nimbus_unified.nim new file mode 100644 index 000000000..9317d0375 --- /dev/null +++ b/nimbus_unified/nimbus_unified.nim @@ -0,0 +1,9 @@ +# nimbus_unified +# Copyright (c) 2024 Status Research & Development GmbH +# Licensed and distributed under either of +# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). +# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). +# at your option. This file may not be copied, modified, or distributed except according to those terms. + +when isMainModule: + echo "hello Nimbus" From fa43a03d0cd714f02b179e26473d6d2fa3dbfdc4 Mon Sep 17 00:00:00 2001 From: Pedro Miranda Date: Fri, 18 Oct 2024 19:14:35 +0100 Subject: [PATCH 02/32] setting control-c handler --- nimbus_unified/nimbus_unified.nim | 33 ++++++++++++++++++++++++++++++- 1 file changed, 32 insertions(+), 1 deletion(-) diff --git a/nimbus_unified/nimbus_unified.nim b/nimbus_unified/nimbus_unified.nim index 9317d0375..704b66afe 100644 --- a/nimbus_unified/nimbus_unified.nim +++ b/nimbus_unified/nimbus_unified.nim @@ -5,5 +5,36 @@ # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). # at your option. This file may not be copied, modified, or distributed except according to those terms. 
+import os + when isMainModule: - echo "hello Nimbus" + echo "Starting Nimbus" + ## TODO + ## - make banner and config + ## - file limits + ## - check if we have permissions to create data folder if needed + ## - setup logging + + ## this code snippet requires a conf.nim file (eg: beacon_lc_bridge_conf.nim) + # var config = makeBannerAndConfig("Nimbus client ", NimbusConfig) + # setupLogging(config.logLevel, config.logStdout, config.logFile) + + ## Graceful shutdown by handling of Ctrl+C signal + proc controlCHandler() {.noconv.} = + when defined(windows): + # workaround for https://github.com/nim-lang/Nim/issues/4057 + try: + setupForeignThreadGc() + except NimbusTasksError as exc: + raiseAssert exc.msg # shouldn't happen + + echo "\nCtrl+C pressed. Shutting down working tasks" + + echo "Shutting down now" + quit(0) + + setControlCHook(controlCHandler) + + while true: + echo "looping" + sleep(2000) \ No newline at end of file From afdcc07e338a02a5191444d437e9a8aa71e5047e Mon Sep 17 00:00:00 2001 From: Pedro Miranda Date: Sat, 19 Oct 2024 01:54:59 +0100 Subject: [PATCH 03/32] types and initial Monitor-Worker threading pattern --- nimbus_unified/nimbus_unified.nim | 137 +++++++++++++++++++++++++++++- 1 file changed, 135 insertions(+), 2 deletions(-) diff --git a/nimbus_unified/nimbus_unified.nim b/nimbus_unified/nimbus_unified.nim index 704b66afe..22698dc21 100644 --- a/nimbus_unified/nimbus_unified.nim +++ b/nimbus_unified/nimbus_unified.nim @@ -5,7 +5,130 @@ # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). # at your option. This file may not be copied, modified, or distributed except according to those terms. -import os +import std/[atomics, os, tables], beacon_chain/nimbus_binary_common + +## Exceptions +type NimbusTasksError* = object of CatchableError + +#task shutdown flag +var isShutDownRequired*: Atomic[bool] +isShutDownRequired.store(false) + +## Configuration +## TODO: implement a json (or other format like yaml) config reader for config reading (file config scenarios) +## or extract from other nimbus components +## TODO: implement a command line reader to read arguments +type NimbusConfig* = object + configTable: Table[string, string] + +## Nimbus workers arguments (thread arguments) +type TaskParameters* = object + name*: string + configs*: string + # TODO: replace this with the extracted configs from NimbusConfig needed by the worker + +## Constants +const cNimbusMaxTasks* = 5 +const cNimbusTaskTimeoutMs* = 5000 + +## Task and associated task information +type NimbusTask* = ref object + name*: string + timeoutMs*: uint32 + threadHandler*: Thread[TaskParameters] + +## Task scheduler and manager +type NimbusTasks* = ref object + taskList*: array[cNimbusMaxTasks, NimbusTask] + +# ------------------------------------------------------------------------------ +# Private and helper functions +# ------------------------------------------------------------------------------ + +## Execution Layer handler +proc executionLayerHandler(parameters: TaskParameters) {.thread.} = + echo "Started task:" + while true: + sleep(3000) + echo "exec" + if isShutDownRequired.load() == true: + break + echo "\tExiting task" + +## Consensus Layer handler +proc consensusLayerHandler(parameters: TaskParameters) {.thread.} = + echo "Started task:" + while true: + sleep(3000) + echo "exec" + if isShutDownRequired.load() == true: + break + echo "\tExiting task" + +## Waits for tasks to finish +proc joinTasks(tasks: var NimbusTasks) = + for i in 0 .. 
cNimbusMaxTasks - 1: + if not tasks.taskList[i].isNil: + joinThread(tasks.taskList[i].threadHandler) + + echo "\tAll tasks finished" + +# ---- + +# ------------------------------------------------------------------------------ +# Public functions +# ------------------------------------------------------------------------------ + +## adds a new task to nimbus Tasks. +## Note that thread handler passed by argument needs to have the signature: proc foobar(NimbusParameters) +proc addNewTask*( + tasks: var NimbusTasks, + name: string, + timeout: uint32, + taskHandler: proc(config: TaskParameters) {.thread.}, + parameters: var TaskParameters, +) = + #search next available worker + var currentIndex = -1 + for i in 0 .. cNimbusMaxTasks - 1: + if tasks.taskList[i].isNil: + tasks.taskList[i] = NimbusTask.new + tasks.taskList[i].name = name + tasks.taskList[i].timeoutMs = timeout + currentIndex = i + parameters.name = name + break + + if currentIndex < 0: + raise newException(NimbusTasksError, "No free slots on Nimbus Tasks") + + createThread(tasks.taskList[currentIndex].threadHandler, taskHandler, parameters) + echo "Created task:" + +## Task monitoring +proc monitor*(tasksList: var NimbusTasks, config: NimbusConfig) = + echo "monitoring tasks" + + while true: + echo "nothing new" + sleep(5000) + +## create running workers +proc startTasks*(tasksList: var NimbusTasks, configs: NimbusConfig) = + # TODO: extract configs for each task from NimbusConfig + # or extract them somewhere else and passs them here + var + paramsExecution: TaskParameters = + TaskParameters(configs: "task configs extracted from NimbusConfig go here") + paramsConsensus: TaskParameters = + TaskParameters(configs: "task configs extracted from NimbusConfig go here") + + tasksList.addNewTask( + "Execution Layer", cNimbusTaskTimeoutMs, executionLayerHandler, paramsExecution + ) + tasksList.addNewTask( + "Consensus Layer", cNimbusTaskTimeoutMs, consensusLayerHandler, paramsConsensus + ) when isMainModule: echo "Starting Nimbus" @@ -15,10 +138,18 @@ when isMainModule: ## - check if we have permissions to create data folder if needed ## - setup logging - ## this code snippet requires a conf.nim file (eg: beacon_lc_bridge_conf.nim) + # TODO - read configuration + # TODO - implement config reader for all components + let nimbusConfigs = NimbusConfig() + var tasksList: NimbusTasks = NimbusTasks.new + + + ## next code snippet requires a conf.nim file (eg: beacon_lc_bridge_conf.nim) # var config = makeBannerAndConfig("Nimbus client ", NimbusConfig) # setupLogging(config.logLevel, config.logStdout, config.logFile) + tasksList.startTasks(nimbusConfigs) + ## Graceful shutdown by handling of Ctrl+C signal proc controlCHandler() {.noconv.} = when defined(windows): @@ -30,6 +161,8 @@ when isMainModule: echo "\nCtrl+C pressed. 
Shutting down working tasks" + isShutDownRequired.store(true) + tasksList.joinTasks() echo "Shutting down now" quit(0) From 70ad6c71cf48f78f4aad06094a15e41b348f46ae Mon Sep 17 00:00:00 2001 From: Pedro Miranda Date: Sat, 19 Oct 2024 02:01:07 +0100 Subject: [PATCH 04/32] added chronicles support --- nimbus_unified/nimbus_unified.cfg | 14 ++++++++++++ nimbus_unified/nimbus_unified.nim | 37 +++++++++++++++++-------------- 2 files changed, 34 insertions(+), 17 deletions(-) create mode 100644 nimbus_unified/nimbus_unified.cfg diff --git a/nimbus_unified/nimbus_unified.cfg b/nimbus_unified/nimbus_unified.cfg new file mode 100644 index 000000000..a7f0bad4e --- /dev/null +++ b/nimbus_unified/nimbus_unified.cfg @@ -0,0 +1,14 @@ +# nimbus_unified +# Copyright (c) 2024 Status Research & Development GmbH +# Licensed and distributed under either of +# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). +# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). +# at your option. This file may not be copied, modified, or distributed except according to those terms. + +-d:"chronicles_sinks=textlines[dynamic],json[dynamic]" +-d:"chronicles_runtime_filtering=on" +-d:"chronicles_disable_thread_id" + +@if release: + -d:"chronicles_line_numbers:0" +@end \ No newline at end of file diff --git a/nimbus_unified/nimbus_unified.nim b/nimbus_unified/nimbus_unified.nim index 22698dc21..27ab23205 100644 --- a/nimbus_unified/nimbus_unified.nim +++ b/nimbus_unified/nimbus_unified.nim @@ -5,7 +5,7 @@ # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). # at your option. This file may not be copied, modified, or distributed except according to those terms. 
-import std/[atomics, os, tables], beacon_chain/nimbus_binary_common +import std/[atomics, os, tables], chronicles, beacon_chain/nimbus_binary_common ## Exceptions type NimbusTasksError* = object of CatchableError @@ -41,29 +41,33 @@ type NimbusTask* = ref object type NimbusTasks* = ref object taskList*: array[cNimbusMaxTasks, NimbusTask] +## log +logScope: + topics = "Task manager" + # ------------------------------------------------------------------------------ # Private and helper functions # ------------------------------------------------------------------------------ ## Execution Layer handler proc executionLayerHandler(parameters: TaskParameters) {.thread.} = - echo "Started task:" + info "Started task:", task = parameters.name while true: sleep(3000) - echo "exec" + info "exec" if isShutDownRequired.load() == true: break - echo "\tExiting task" + info "\tExiting task;", task = parameters.name ## Consensus Layer handler proc consensusLayerHandler(parameters: TaskParameters) {.thread.} = - echo "Started task:" + info "Started task:", task = parameters.name while true: sleep(3000) - echo "exec" + info "exec" if isShutDownRequired.load() == true: break - echo "\tExiting task" + info "\tExiting task:", task = parameters.name ## Waits for tasks to finish proc joinTasks(tasks: var NimbusTasks) = @@ -71,7 +75,7 @@ proc joinTasks(tasks: var NimbusTasks) = if not tasks.taskList[i].isNil: joinThread(tasks.taskList[i].threadHandler) - echo "\tAll tasks finished" + info "\tAll tasks finished" # ---- @@ -103,14 +107,14 @@ proc addNewTask*( raise newException(NimbusTasksError, "No free slots on Nimbus Tasks") createThread(tasks.taskList[currentIndex].threadHandler, taskHandler, parameters) - echo "Created task:" + info "Created task:", task = tasks.taskList[currentIndex].name ## Task monitoring proc monitor*(tasksList: var NimbusTasks, config: NimbusConfig) = - echo "monitoring tasks" + info "monitoring tasks" while true: - echo "nothing new" + info "nothing new" sleep(5000) ## create running workers @@ -131,7 +135,7 @@ proc startTasks*(tasksList: var NimbusTasks, configs: NimbusConfig) = ) when isMainModule: - echo "Starting Nimbus" + info "Starting Nimbus" ## TODO ## - make banner and config ## - file limits @@ -143,7 +147,6 @@ when isMainModule: let nimbusConfigs = NimbusConfig() var tasksList: NimbusTasks = NimbusTasks.new - ## next code snippet requires a conf.nim file (eg: beacon_lc_bridge_conf.nim) # var config = makeBannerAndConfig("Nimbus client ", NimbusConfig) # setupLogging(config.logLevel, config.logStdout, config.logFile) @@ -159,15 +162,15 @@ when isMainModule: except NimbusTasksError as exc: raiseAssert exc.msg # shouldn't happen - echo "\nCtrl+C pressed. Shutting down working tasks" + notice "\nCtrl+C pressed. 
Shutting down working tasks" isShutDownRequired.store(true) tasksList.joinTasks() - echo "Shutting down now" + notice "Shutting down now" quit(0) setControlCHook(controlCHandler) while true: - echo "looping" - sleep(2000) \ No newline at end of file + info "looping" + sleep(2000) From 05f24b39475673677499162e2da2a42dfd645c9d Mon Sep 17 00:00:00 2001 From: Pedro Miranda Date: Sat, 19 Oct 2024 02:24:46 +0100 Subject: [PATCH 05/32] moved configs to dedicated directory --- nimbus_unified/configs/nimbus_configs.nim | 34 +++++++++++++++++++++++ 1 file changed, 34 insertions(+) create mode 100644 nimbus_unified/configs/nimbus_configs.nim diff --git a/nimbus_unified/configs/nimbus_configs.nim b/nimbus_unified/configs/nimbus_configs.nim new file mode 100644 index 000000000..55034b275 --- /dev/null +++ b/nimbus_unified/configs/nimbus_configs.nim @@ -0,0 +1,34 @@ +# nimbus_unified +# Copyright (c) 2024 Status Research & Development GmbH +# Licensed and distributed under either of +# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). +# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). +# at your option. This file may not be copied, modified, or distributed except according to those terms. + +import std/[atomics, tables], beacon_chain/nimbus_binary_common + +## Exceptions +type NimbusTasksError* = object of CatchableError + +## Configuration +## TODO: implement a json (or other format like yaml) config reader for config reading (file config scenarios) +## TODO: implement a command line reader to read arguments +type NimbusConfig* = object + configTable: Table[string, string] + +## Nimbus workers arguments (thread arguments) +type TaskParameters* = object + name*: string + configs*: string + # TODO: replace this with the extracted configs from NimbusConfig needed by the worker + +## Task shutdown flag +## The behaviour required: this thread needs to atomically change the flag value when +## a shutdown is required or when detects a stopped thread. +## Given the behaviour wanted, atomic operations are sufficient without barriers or fences. 
Compilers +## may reorder instructions, but given that the order is not important, this does not affect +## the semantic wanted: If instructions are reordered, the worker will fail to read on the current iteration +## but will read it correctly on the next iteration ( this thread is the only on which changes the flag behaviour, +## and will always change it to true) +var isShutDownRequired*: Atomic[bool] +isShutDownRequired.store(false) From 53f4bbcac7e212166715533dbd092af89f3dde7f Mon Sep 17 00:00:00 2001 From: Pedro Miranda Date: Sat, 19 Oct 2024 02:31:37 +0100 Subject: [PATCH 06/32] added wrappers for execution an consensus layers --- .../consensus/consensus_wrapper.nim | 23 ++++++++++ .../execution/execution_wrapper.nim | 24 +++++++++++ nimbus_unified/nimbus_unified.nim | 42 ++++++------------- 3 files changed, 59 insertions(+), 30 deletions(-) create mode 100644 nimbus_unified/consensus/consensus_wrapper.nim create mode 100644 nimbus_unified/execution/execution_wrapper.nim diff --git a/nimbus_unified/consensus/consensus_wrapper.nim b/nimbus_unified/consensus/consensus_wrapper.nim new file mode 100644 index 000000000..0182ffb5e --- /dev/null +++ b/nimbus_unified/consensus/consensus_wrapper.nim @@ -0,0 +1,23 @@ +# nimbus_unified +# Copyright (c) 2024 Status Research & Development GmbH +# Licensed and distributed under either of +# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). +# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). +# at your option. This file may not be copied, modified, or distributed except according to those terms. + +import chronicles, std/[os, atomics], ../configs/nimbus_configs +export nimbus_configs + +## log +logScope: + topics = "Consensus layer" + +proc consensusWrapper*(parameters: TaskParameters) = + info "Consensus wrapper:", worker = parameters.name + while true: + sleep(3000) + info "looping consensus" + if isShutDownRequired.load() == true: + break + + warn "\tExiting consensus wrapper" diff --git a/nimbus_unified/execution/execution_wrapper.nim b/nimbus_unified/execution/execution_wrapper.nim new file mode 100644 index 000000000..21522849c --- /dev/null +++ b/nimbus_unified/execution/execution_wrapper.nim @@ -0,0 +1,24 @@ +# nimbus_unified +# Copyright (c) 2024 Status Research & Development GmbH +# Licensed and distributed under either of +# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). +# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). +# at your option. This file may not be copied, modified, or distributed except according to those terms. + +import chronicles, std/[os, atomics], ../configs/nimbus_configs +export nimbus_configs + +## log +logScope: + topics = "Execution layer" + +proc executionWrapper*(parameters: TaskParameters) = + info "Execution wrapper:", worker = parameters.name + + while true: + sleep(2000) + info "looping execution" + if isShutDownRequired.load() == true: + break + + warn "\tExiting execution:", worker = parameters.name diff --git a/nimbus_unified/nimbus_unified.nim b/nimbus_unified/nimbus_unified.nim index 27ab23205..4271cc602 100644 --- a/nimbus_unified/nimbus_unified.nim +++ b/nimbus_unified/nimbus_unified.nim @@ -5,39 +5,26 @@ # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). # at your option. 
This file may not be copied, modified, or distributed except according to those terms. -import std/[atomics, os, tables], chronicles, beacon_chain/nimbus_binary_common - -## Exceptions -type NimbusTasksError* = object of CatchableError - -#task shutdown flag -var isShutDownRequired*: Atomic[bool] -isShutDownRequired.store(false) - -## Configuration -## TODO: implement a json (or other format like yaml) config reader for config reading (file config scenarios) -## or extract from other nimbus components -## TODO: implement a command line reader to read arguments -type NimbusConfig* = object - configTable: Table[string, string] - -## Nimbus workers arguments (thread arguments) -type TaskParameters* = object - name*: string - configs*: string - # TODO: replace this with the extracted configs from NimbusConfig needed by the worker +import + std/[atomics, os], + chronicles, + consensus/consensus_wrapper, + execution/execution_wrapper ## Constants const cNimbusMaxTasks* = 5 const cNimbusTaskTimeoutMs* = 5000 +## Exceptions +type NimbusTasksError* = object of CatchableError + ## Task and associated task information type NimbusTask* = ref object name*: string timeoutMs*: uint32 threadHandler*: Thread[TaskParameters] -## Task scheduler and manager +## Task manager type NimbusTasks* = ref object taskList*: array[cNimbusMaxTasks, NimbusTask] @@ -53,8 +40,7 @@ logScope: proc executionLayerHandler(parameters: TaskParameters) {.thread.} = info "Started task:", task = parameters.name while true: - sleep(3000) - info "exec" + executionWrapper(parameters) if isShutDownRequired.load() == true: break info "\tExiting task;", task = parameters.name @@ -62,14 +48,10 @@ proc executionLayerHandler(parameters: TaskParameters) {.thread.} = ## Consensus Layer handler proc consensusLayerHandler(parameters: TaskParameters) {.thread.} = info "Started task:", task = parameters.name - while true: - sleep(3000) - info "exec" - if isShutDownRequired.load() == true: - break + consensusWrapper(parameters) info "\tExiting task:", task = parameters.name -## Waits for tasks to finish +## Waits for tasks to finish (joinThreads) proc joinTasks(tasks: var NimbusTasks) = for i in 0 .. cNimbusMaxTasks - 1: if not tasks.taskList[i].isNil: From caec70c17f164e60e979ba68628b127ffe3b7b16 Mon Sep 17 00:00:00 2001 From: Pedro Miranda Date: Sat, 19 Oct 2024 02:44:53 +0100 Subject: [PATCH 07/32] added monitoring skeleton --- nimbus_unified/nimbus_unified.nim | 20 +++++++++++++------- 1 file changed, 13 insertions(+), 7 deletions(-) diff --git a/nimbus_unified/nimbus_unified.nim b/nimbus_unified/nimbus_unified.nim index 4271cc602..0ad900741 100644 --- a/nimbus_unified/nimbus_unified.nim +++ b/nimbus_unified/nimbus_unified.nim @@ -96,8 +96,14 @@ proc monitor*(tasksList: var NimbusTasks, config: NimbusConfig) = info "monitoring tasks" while true: - info "nothing new" - sleep(5000) + info "checking tasks ... 
" + + # -check an atomic (to be created when needed) if it s required to shutdown + # this will atomic flag solves: + # - non responding thread + # - thread that required shutdown + + sleep(cNimbusTaskTimeoutMs) ## create running workers proc startTasks*(tasksList: var NimbusTasks, configs: NimbusConfig) = @@ -116,6 +122,8 @@ proc startTasks*(tasksList: var NimbusTasks, configs: NimbusConfig) = "Consensus Layer", cNimbusTaskTimeoutMs, consensusLayerHandler, paramsConsensus ) +# ------ + when isMainModule: info "Starting Nimbus" ## TODO @@ -129,7 +137,7 @@ when isMainModule: let nimbusConfigs = NimbusConfig() var tasksList: NimbusTasks = NimbusTasks.new - ## next code snippet requires a conf.nim file (eg: beacon_lc_bridge_conf.nim) + ## this code snippet requires a conf.nim file (eg: beacon_lc_bridge_conf.nim) # var config = makeBannerAndConfig("Nimbus client ", NimbusConfig) # setupLogging(config.logLevel, config.logStdout, config.logFile) @@ -150,9 +158,7 @@ when isMainModule: tasksList.joinTasks() notice "Shutting down now" quit(0) - setControlCHook(controlCHandler) - while true: - info "looping" - sleep(2000) + #start monitoring + tasksList.monitor(nimbusConfigs) From fcb962d4489d11d96e55eb2d33fd4be1ec24d58f Mon Sep 17 00:00:00 2001 From: Pedro Miranda Date: Mon, 21 Oct 2024 11:24:23 +0100 Subject: [PATCH 08/32] copy&paste beacon node from eth2 --- .../consensus/consensus_wrapper.nim | 178 +++++++++++++++++- 1 file changed, 170 insertions(+), 8 deletions(-) diff --git a/nimbus_unified/consensus/consensus_wrapper.nim b/nimbus_unified/consensus/consensus_wrapper.nim index 0182ffb5e..956de97a9 100644 --- a/nimbus_unified/consensus/consensus_wrapper.nim +++ b/nimbus_unified/consensus/consensus_wrapper.nim @@ -5,19 +5,181 @@ # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). # at your option. This file may not be copied, modified, or distributed except according to those terms. -import chronicles, std/[os, atomics], ../configs/nimbus_configs +import + std/[os, exitprocs], + beacon_chain/nimbus_binary_common, + beacon_chain/spec/forks, + beacon_chain/[beacon_chain_db, trusted_node_sync], + beacon_chain/networking/network_metadata_downloads, + chronos, + chronicles, + stew/io2, + ../configs/nimbus_configs + export nimbus_configs ## log logScope: topics = "Consensus layer" -proc consensusWrapper*(parameters: TaskParameters) = - info "Consensus wrapper:", worker = parameters.name - while true: - sleep(3000) - info "looping consensus" - if isShutDownRequired.load() == true: - break +## Copy paste from nimbus_beacon_node.nim Copied due to the fact that nimbus_beacon_node +## contains the programMain. 
+## TODO: extract from that file into a common file +var gPidFile: string + +#TODO: Investigate why the commented code triggers GC violation +proc createPidFile(filename: string) {.raises: [IOError].} = + # writeFile filename, $os.getCurrentProcessId() + writeFile filename, "222" + # gPidFile = filename + # addExitProc proc() {.noconv.} = + # ## TODO: changed from original file, fixes dprecation warning + # discard io2.removeFile(gPidFile) + +## runs beacon node +## adpated from nimbus-eth2 +proc doRunBeaconNode( + config: var BeaconNodeConf, rng: ref HmacDrbgContext +) {.raises: [CatchableError].} = + info "Launching beacon node", + version = "fullVersionStr", #TODO:changed from original version + bls_backend = $BLS_BACKEND, + const_preset, + cmdParams = commandLineParams(), + config + + template ignoreDeprecatedOption(option: untyped): untyped = + if config.option.isSome: + warn "Config option is deprecated", option = config.option.get + + ignoreDeprecatedOption requireEngineAPI + ignoreDeprecatedOption safeSlotsToImportOptimistically + ignoreDeprecatedOption terminalTotalDifficultyOverride + ignoreDeprecatedOption optimistic + ignoreDeprecatedOption validatorMonitorTotals + ignoreDeprecatedOption web3ForcePolling + + createPidFile(config.dataDir.string / "beacon_node.pid") + + config.createDumpDirs() + + #TODO: We might need to split this on the same file + # if config.metricsEnabled: + # let metricsAddress = config.metricsAddress + # notice "Starting metrics HTTP server", + # url = "http://" & $metricsAddress & ":" & $config.metricsPort & "/metrics" + # try: + # startMetricsHttpServer($metricsAddress, config.metricsPort) + # except CatchableError as exc: + # raise exc + # except Exception as exc: + # raiseAssert exc.msg # TODO fix metrics + +proc fetchGenesisState( + metadata: Eth2NetworkMetadata, + genesisState = none(InputFile), + genesisStateUrl = none(Uri), +): Future[ref ForkedHashedBeaconState] {.async: (raises: []).} = + let genesisBytes = + if metadata.genesis.kind != BakedIn and genesisState.isSome: + let res = io2.readAllBytes(genesisState.get.string) + res.valueOr: + error "Failed to read genesis state file", err = res.error.ioErrorMsg + quit 1 + elif metadata.hasGenesis: + try: + if metadata.genesis.kind == BakedInUrl: + info "Obtaining genesis state", + sourceUrl = $genesisStateUrl.get(parseUri metadata.genesis.url) + await metadata.fetchGenesisBytes(genesisStateUrl) + except CatchableError as err: + error "Failed to obtain genesis state", + source = metadata.genesis.sourceDesc, err = err.msg + quit 1 + else: + @[] + + if genesisBytes.len > 0: + try: + newClone readSszForkedHashedBeaconState(metadata.cfg, genesisBytes) + except CatchableError as err: + error "Invalid genesis state", + size = genesisBytes.len, digest = eth2digest(genesisBytes), err = err.msg + quit 1 + else: + nil + +proc doRunTrustedNodeSync( + db: BeaconChainDB, + metadata: Eth2NetworkMetadata, + databaseDir: string, + eraDir: string, + restUrl: string, + stateId: Option[string], + trustedBlockRoot: Option[Eth2Digest], + backfill: bool, + reindex: bool, + downloadDepositSnapshot: bool, + genesisState: ref ForkedHashedBeaconState, +) {.async.} = + let syncTarget = + if stateId.isSome: + if trustedBlockRoot.isSome: + warn "Ignoring `trustedBlockRoot`, `stateId` is set", stateId, trustedBlockRoot + TrustedNodeSyncTarget(kind: TrustedNodeSyncKind.StateId, stateId: stateId.get) + elif trustedBlockRoot.isSome: + TrustedNodeSyncTarget( + kind: TrustedNodeSyncKind.TrustedBlockRoot, + trustedBlockRoot: 
trustedBlockRoot.get, + ) + else: + TrustedNodeSyncTarget(kind: TrustedNodeSyncKind.StateId, stateId: "finalized") + + await db.doTrustedNodeSync( + metadata.cfg, databaseDir, eraDir, restUrl, syncTarget, backfill, reindex, + downloadDepositSnapshot, genesisState, + ) + +## --end copy paste file from nimbus-eth2/nimbus_beacon_node.nim + +## Consensus wrapper +proc consensusWrapper*(parameters: TaskParameters) {.raises: [CatchableError].} = + let rng = HmacDrbgContext.new() + var config = parameters.beaconNodeConfigs + try: + doRunBeaconNode(config, rng) + except CatchableError as e: + fatal "error", message = e.msg + quit 1 + + let + metadata = loadEth2Network(config) + db = BeaconChainDB.new(config.databaseDir, metadata.cfg, inMemory = false) + genesisState = waitFor fetchGenesisState(metadata) + + try: + waitFor( + db.doRunTrustedNodeSync( + metadata, config.databaseDir, config.eraDir, "http://127.0.0.1:5052", + config.stateId, config.lcTrustedBlockRoot, config.backfillBlocks, config.reindex, + config.downloadDepositSnapshot, genesisState, + ) + ) + except CatchableError as e: + fatal "error", message = e.msg + quit 1 + + db.close() + + # --web3-url=http://127.0.0.1:8551 --jwt-secret=/tmp/jwtsecret --log-level=TRACE + # --network=${NETWORK} \ + # --data-dir="${DATA_DIR}" \ + # --tcp-port=$(( ${BASE_P2P_PORT} + ${NODE_ID} )) \ + # --udp-port=$(( ${BASE_P2P_PORT} + ${NODE_ID} )) \ + # --rest \ + # --rest-port=$(( ${BASE_REST_PORT} + ${NODE_ID} )) \ + # --metrics \ + # ${WEB3_URL_ARG} ${EXTRA_ARGS} \ + # "$@" warn "\tExiting consensus wrapper" From 5f3ee680ca5c2dfded5e2dcad48ef445ebd44c05 Mon Sep 17 00:00:00 2001 From: Pedro Miranda Date: Mon, 21 Oct 2024 11:30:43 +0100 Subject: [PATCH 09/32] added secp256k1 support --- Makefile | 2 +- nimbus.nimble | 3 ++- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/Makefile b/Makefile index fea105e67..04d973519 100644 --- a/Makefile +++ b/Makefile @@ -372,7 +372,7 @@ endif # builds the unified client nimbus_unified: | build deps echo -e $(BUILD_MSG) "build/$@" && \ - $(ENV_SCRIPT) nim c $(NIM_PARAMS) -d:chronicles_log_level=TRACE -o:build/$@ "nimbus_unified/$@.nim" + $(ENV_SCRIPT) nim c $(NIM_PARAMS) -d:libp2p_pki_schemes=secp256k1 -d:chronicles_log_level=TRACE -o:build/$@ "nimbus_unified/$@.nim" # Note about building Nimbus as a library: # # There were `wrappers`, `wrappers-static`, `libnimbus.so` and `libnimbus.a` diff --git a/nimbus.nimble b/nimbus.nimble index a0824887c..9017c7d61 100644 --- a/nimbus.nimble +++ b/nimbus.nimble @@ -31,7 +31,8 @@ requires "nim >= 1.6.0", "blscurve", "evmc", "web3", - "minilru" + "minilru", + "secp256k1" binDir = "build" From 1b89a835ebd6f5c614ca4a501be251b2141b28f0 Mon Sep 17 00:00:00 2001 From: Pedro Miranda Date: Mon, 21 Oct 2024 11:32:29 +0100 Subject: [PATCH 10/32] refactored types --- nimbus_unified/configs/nimbus_configs.nim | 1 + nimbus_unified/nimbus_unified.cfg | 5 +- nimbus_unified/nimbus_unified.nim | 71 ++++++++++++++--------- 3 files changed, 48 insertions(+), 29 deletions(-) diff --git a/nimbus_unified/configs/nimbus_configs.nim b/nimbus_unified/configs/nimbus_configs.nim index 55034b275..c4ff9502b 100644 --- a/nimbus_unified/configs/nimbus_configs.nim +++ b/nimbus_unified/configs/nimbus_configs.nim @@ -20,6 +20,7 @@ type NimbusConfig* = object type TaskParameters* = object name*: string configs*: string + beaconNodeConfigs*: BeaconNodeConf # TODO: replace this with the extracted configs from NimbusConfig needed by the worker ## Task shutdown flag diff --git 
a/nimbus_unified/nimbus_unified.cfg b/nimbus_unified/nimbus_unified.cfg index a7f0bad4e..87359f110 100644 --- a/nimbus_unified/nimbus_unified.cfg +++ b/nimbus_unified/nimbus_unified.cfg @@ -5,10 +5,13 @@ # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). # at your option. This file may not be copied, modified, or distributed except according to those terms. +-d:"libp2p_pki_schemes=secp256k1" + -d:"chronicles_sinks=textlines[dynamic],json[dynamic]" -d:"chronicles_runtime_filtering=on" -d:"chronicles_disable_thread_id" @if release: -d:"chronicles_line_numbers:0" -@end \ No newline at end of file +@end + diff --git a/nimbus_unified/nimbus_unified.nim b/nimbus_unified/nimbus_unified.nim index 0ad900741..61f5dc6c6 100644 --- a/nimbus_unified/nimbus_unified.nim +++ b/nimbus_unified/nimbus_unified.nim @@ -9,7 +9,9 @@ import std/[atomics, os], chronicles, consensus/consensus_wrapper, - execution/execution_wrapper + execution/execution_wrapper, + beacon_chain/[conf, conf_common], + beacon_chain/[beacon_chain_db] ## Constants const cNimbusMaxTasks* = 5 @@ -69,7 +71,6 @@ proc joinTasks(tasks: var NimbusTasks) = ## Note that thread handler passed by argument needs to have the signature: proc foobar(NimbusParameters) proc addNewTask*( tasks: var NimbusTasks, - name: string, timeout: uint32, taskHandler: proc(config: TaskParameters) {.thread.}, parameters: var TaskParameters, @@ -79,10 +80,10 @@ proc addNewTask*( for i in 0 .. cNimbusMaxTasks - 1: if tasks.taskList[i].isNil: tasks.taskList[i] = NimbusTask.new - tasks.taskList[i].name = name + tasks.taskList[i].name = parameters.name tasks.taskList[i].timeoutMs = timeout currentIndex = i - parameters.name = name + parameters.name = parameters.name break if currentIndex < 0: @@ -100,48 +101,61 @@ proc monitor*(tasksList: var NimbusTasks, config: NimbusConfig) = # -check an atomic (to be created when needed) if it s required to shutdown # this will atomic flag solves: - # - non responding thread - # - thread that required shutdown + # - non responding thread + # - thread that required shutdown sleep(cNimbusTaskTimeoutMs) ## create running workers -proc startTasks*(tasksList: var NimbusTasks, configs: NimbusConfig) = - # TODO: extract configs for each task from NimbusConfig - # or extract them somewhere else and passs them here +proc startTasks*( + tasksList: var NimbusTasks, configs: NimbusConfig, beaconConfigs: var BeaconNodeConf +) = + let + + # TODO: extract configs for each task from NimbusConfig + # or extract them somewhere else and passs them here + execName = "Execution Layer" + consName = "Consensus Layer" var - paramsExecution: TaskParameters = - TaskParameters(configs: "task configs extracted from NimbusConfig go here") - paramsConsensus: TaskParameters = - TaskParameters(configs: "task configs extracted from NimbusConfig go here") - - tasksList.addNewTask( - "Execution Layer", cNimbusTaskTimeoutMs, executionLayerHandler, paramsExecution - ) - tasksList.addNewTask( - "Consensus Layer", cNimbusTaskTimeoutMs, consensusLayerHandler, paramsConsensus - ) + paramsExecution: TaskParameters = TaskParameters( + name: execName, + configs: "task configs extracted from NimbusConfig go here", + beaconNodeConfigs: beaconConfigs, + ) + paramsConsensus: TaskParameters = TaskParameters( + name: execName, + configs: "task configs extracted from NimbusConfig go here", + beaconNodeConfigs: beaconConfigs, + ) + + tasksList.addNewTask(cNimbusTaskTimeoutMs, executionLayerHandler, paramsExecution) + 
tasksList.addNewTask(cNimbusTaskTimeoutMs, consensusLayerHandler, paramsConsensus) # ------ when isMainModule: info "Starting Nimbus" ## TODO - ## - make banner and config ## - file limits ## - check if we have permissions to create data folder if needed ## - setup logging - - # TODO - read configuration - # TODO - implement config reader for all components + ## - read configuration + ## - implement config reader for all components let nimbusConfigs = NimbusConfig() var tasksList: NimbusTasks = NimbusTasks.new - ## this code snippet requires a conf.nim file (eg: beacon_lc_bridge_conf.nim) - # var config = makeBannerAndConfig("Nimbus client ", NimbusConfig) - # setupLogging(config.logLevel, config.logStdout, config.logFile) + ##TODO: this is an adapted call os the vars required by makeBannerAndConfig + ##these values need to be read from some config file + const SPEC_VERSION = "1.5.0-alpha.8" + const copyrights = "status" + const nimBanner = "nimbus" + const clientId = "beacon node" + var beaconNodeConfig = makeBannerAndConfig( + clientId, copyrights, nimBanner, SPEC_VERSION, [], BeaconNodeConf + ).valueOr: + quit(0) - tasksList.startTasks(nimbusConfigs) + tasksList.startTasks(nimbusConfigs, beaconNodeConfig) ## Graceful shutdown by handling of Ctrl+C signal proc controlCHandler() {.noconv.} = @@ -158,6 +172,7 @@ when isMainModule: tasksList.joinTasks() notice "Shutting down now" quit(0) + setControlCHook(controlCHandler) #start monitoring From a526d01ab812311b8ca7a67a87dac809409aebae Mon Sep 17 00:00:00 2001 From: Pedro Miranda Date: Mon, 21 Oct 2024 15:45:45 +0100 Subject: [PATCH 11/32] fixed gcsafe error on createPidFile --- .../consensus/consensus_wrapper.nim | 20 ++++--------------- nimbus_unified/nimbus_unified.nim | 18 +++++++++++++---- 2 files changed, 18 insertions(+), 20 deletions(-) diff --git a/nimbus_unified/consensus/consensus_wrapper.nim b/nimbus_unified/consensus/consensus_wrapper.nim index 956de97a9..815c3a1e1 100644 --- a/nimbus_unified/consensus/consensus_wrapper.nim +++ b/nimbus_unified/consensus/consensus_wrapper.nim @@ -6,7 +6,7 @@ # at your option. This file may not be copied, modified, or distributed except according to those terms. import - std/[os, exitprocs], + std/os, beacon_chain/nimbus_binary_common, beacon_chain/spec/forks, beacon_chain/[beacon_chain_db, trusted_node_sync], @@ -22,22 +22,11 @@ export nimbus_configs logScope: topics = "Consensus layer" -## Copy paste from nimbus_beacon_node.nim Copied due to the fact that nimbus_beacon_node -## contains the programMain. +## following procedures are copies from nimbus_beacon_node.nim. 
## TODO: extract from that file into a common file -var gPidFile: string - -#TODO: Investigate why the commented code triggers GC violation -proc createPidFile(filename: string) {.raises: [IOError].} = - # writeFile filename, $os.getCurrentProcessId() - writeFile filename, "222" - # gPidFile = filename - # addExitProc proc() {.noconv.} = - # ## TODO: changed from original file, fixes dprecation warning - # discard io2.removeFile(gPidFile) ## runs beacon node -## adpated from nimbus-eth2 +## adapted from nimbus-eth2 proc doRunBeaconNode( config: var BeaconNodeConf, rng: ref HmacDrbgContext ) {.raises: [CatchableError].} = @@ -59,8 +48,6 @@ proc doRunBeaconNode( ignoreDeprecatedOption validatorMonitorTotals ignoreDeprecatedOption web3ForcePolling - createPidFile(config.dataDir.string / "beacon_node.pid") - config.createDumpDirs() #TODO: We might need to split this on the same file @@ -146,6 +133,7 @@ proc doRunTrustedNodeSync( proc consensusWrapper*(parameters: TaskParameters) {.raises: [CatchableError].} = let rng = HmacDrbgContext.new() var config = parameters.beaconNodeConfigs + try: doRunBeaconNode(config, rng) except CatchableError as e: diff --git a/nimbus_unified/nimbus_unified.nim b/nimbus_unified/nimbus_unified.nim index 61f5dc6c6..9b33f9362 100644 --- a/nimbus_unified/nimbus_unified.nim +++ b/nimbus_unified/nimbus_unified.nim @@ -6,8 +6,9 @@ # at your option. This file may not be copied, modified, or distributed except according to those terms. import - std/[atomics, os], + std/[atomics, os, exitprocs], chronicles, + stew/io2, consensus/consensus_wrapper, execution/execution_wrapper, beacon_chain/[conf, conf_common], @@ -61,6 +62,16 @@ proc joinTasks(tasks: var NimbusTasks) = info "\tAll tasks finished" +#TODO: Investigate if this is really needed? and for what purpose? 
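+# (context: the PID file records the process id of the running client so external scripts and service managers can locate and signal it; the exit proc below removes it on shutdown)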
+var gPidFile: string +proc createPidFile(filename: string) {.raises: [IOError].} = + writeFile filename, $os.getCurrentProcessId() + gPidFile = filename + addExitProc ( + proc() = + discard io2.removeFile(filename) + ) + # ---- # ------------------------------------------------------------------------------ @@ -88,7 +99,6 @@ proc addNewTask*( if currentIndex < 0: raise newException(NimbusTasksError, "No free slots on Nimbus Tasks") - createThread(tasks.taskList[currentIndex].threadHandler, taskHandler, parameters) info "Created task:", task = tasks.taskList[currentIndex].name @@ -109,7 +119,7 @@ proc monitor*(tasksList: var NimbusTasks, config: NimbusConfig) = ## create running workers proc startTasks*( tasksList: var NimbusTasks, configs: NimbusConfig, beaconConfigs: var BeaconNodeConf -) = +) {.raises: [CatchableError].} = let # TODO: extract configs for each task from NimbusConfig @@ -174,6 +184,6 @@ when isMainModule: quit(0) setControlCHook(controlCHandler) - + createPidFile(beaconNodeConfig.databaseDir.string / "unified.pid") #start monitoring tasksList.monitor(nimbusConfigs) From e5e2a88e9a3bba9ea5bfa189bc2d02adab318217 Mon Sep 17 00:00:00 2001 From: Pedro Miranda Date: Wed, 23 Oct 2024 12:10:35 +0100 Subject: [PATCH 12/32] small repairs and additions --- nimbus_unified/README.md | 6 ++++-- nimbus_unified/configs/nimbus_configs.nim | 10 ++++++--- .../consensus/consensus_wrapper.nim | 16 ++++++++------ nimbus_unified/nimbus_unified.nim | 21 ++++++++++++------- 4 files changed, 34 insertions(+), 19 deletions(-) diff --git a/nimbus_unified/README.md b/nimbus_unified/README.md index 1b883adfb..d79513139 100644 --- a/nimbus_unified/README.md +++ b/nimbus_unified/README.md @@ -8,8 +8,10 @@ [![Status: #nimbus-general](https://img.shields.io/badge/status-nimbus--general-orange.svg)](https://join.status.im/nimbus-general) +# NOTE - whole document to be concluded... + # description -Nimbus Unified combines Ethereum execution and consensus layer functionalities, featuring a fully integrated beacon node), validator duties, and execution layer support. This setup allows the Nimbus client to handle both Ethereum consensus (Eth2) and execution (Eth1) tasks within a single package. +Nimbus Unified combines Ethereum execution and consensus layer functionalities, featuring a fully integrated beacon node, validator duties, and execution layer support. This setup allows the Nimbus client to handle both Ethereum consensus (Eth2) and execution (Eth1) tasks within a single package. --> meh, requires improvement # documentation @@ -45,4 +47,4 @@ or * Apache License, Version 2.0: [LICENSE-APACHEv2](LICENSE-APACHEv2) or https://www.apache.org/licenses/LICENSE-2.0 -at your option. These files may not be copied, modified, or distributed except according to those terms. +These files may not be copied, modified, or distributed except according to those terms. 
diff --git a/nimbus_unified/configs/nimbus_configs.nim b/nimbus_unified/configs/nimbus_configs.nim index c4ff9502b..f9521ce31 100644 --- a/nimbus_unified/configs/nimbus_configs.nim +++ b/nimbus_unified/configs/nimbus_configs.nim @@ -12,7 +12,8 @@ type NimbusTasksError* = object of CatchableError ## Configuration ## TODO: implement a json (or other format like yaml) config reader for config reading (file config scenarios) -## TODO: implement a command line reader to read arguments +## 1) implement a command line reader to read arguments +## 2) good option to adhere to other projects conventions and use the in place support to read and load type NimbusConfig* = object configTable: Table[string, string] @@ -20,16 +21,19 @@ type NimbusConfig* = object type TaskParameters* = object name*: string configs*: string - beaconNodeConfigs*: BeaconNodeConf - # TODO: replace this with the extracted configs from NimbusConfig needed by the worker + beaconNodeConfigs*: BeaconNodeConf # ## Task shutdown flag +## ## The behaviour required: this thread needs to atomically change the flag value when ## a shutdown is required or when detects a stopped thread. +## ## Given the behaviour wanted, atomic operations are sufficient without barriers or fences. Compilers ## may reorder instructions, but given that the order is not important, this does not affect ## the semantic wanted: If instructions are reordered, the worker will fail to read on the current iteration ## but will read it correctly on the next iteration ( this thread is the only on which changes the flag behaviour, ## and will always change it to true) +## +## With this we avoid the overhead of locks var isShutDownRequired*: Atomic[bool] isShutDownRequired.store(false) diff --git a/nimbus_unified/consensus/consensus_wrapper.nim b/nimbus_unified/consensus/consensus_wrapper.nim index 815c3a1e1..425bd4770 100644 --- a/nimbus_unified/consensus/consensus_wrapper.nim +++ b/nimbus_unified/consensus/consensus_wrapper.nim @@ -23,7 +23,7 @@ logScope: topics = "Consensus layer" ## following procedures are copies from nimbus_beacon_node.nim. 
-## TODO: extract from that file into a common file +## TODO: if possible, extract from that file into a common file ## runs beacon node ## adapted from nimbus-eth2 @@ -62,6 +62,7 @@ proc doRunBeaconNode( # except Exception as exc: # raiseAssert exc.msg # TODO fix metrics +## adapted/copied from nimbus-eth2 proc fetchGenesisState( metadata: Eth2NetworkMetadata, genesisState = none(InputFile), @@ -96,6 +97,7 @@ proc fetchGenesisState( else: nil +## adapted/copied from nimbus-eth2 proc doRunTrustedNodeSync( db: BeaconChainDB, metadata: Eth2NetworkMetadata, @@ -138,7 +140,7 @@ proc consensusWrapper*(parameters: TaskParameters) {.raises: [CatchableError].} doRunBeaconNode(config, rng) except CatchableError as e: fatal "error", message = e.msg - quit 1 + # TODO: we need to create an dedicated atomic asking task manager to join threads let metadata = loadEth2Network(config) @@ -149,16 +151,18 @@ proc consensusWrapper*(parameters: TaskParameters) {.raises: [CatchableError].} waitFor( db.doRunTrustedNodeSync( metadata, config.databaseDir, config.eraDir, "http://127.0.0.1:5052", - config.stateId, config.lcTrustedBlockRoot, config.backfillBlocks, config.reindex, - config.downloadDepositSnapshot, genesisState, + config.stateId, config.lcTrustedBlockRoot, config.backfillBlocks, + config.reindex, config.downloadDepositSnapshot, genesisState, ) ) except CatchableError as e: - fatal "error", message = e.msg - quit 1 + # TODO: we need to create an dedicated atomic asking task manager to join threads + fatal "error", message = e.MsgSource db.close() + # TODO: nice to start creating some binary launch scripts + # --web3-url=http://127.0.0.1:8551 --jwt-secret=/tmp/jwtsecret --log-level=TRACE # --network=${NETWORK} \ # --data-dir="${DATA_DIR}" \ diff --git a/nimbus_unified/nimbus_unified.nim b/nimbus_unified/nimbus_unified.nim index 9b33f9362..3b1b40c14 100644 --- a/nimbus_unified/nimbus_unified.nim +++ b/nimbus_unified/nimbus_unified.nim @@ -15,12 +15,10 @@ import beacon_chain/[beacon_chain_db] ## Constants +## TODO: evaluate the proposed timeouts with team const cNimbusMaxTasks* = 5 const cNimbusTaskTimeoutMs* = 5000 -## Exceptions -type NimbusTasksError* = object of CatchableError - ## Task and associated task information type NimbusTask* = ref object name*: string @@ -99,7 +97,12 @@ proc addNewTask*( if currentIndex < 0: raise newException(NimbusTasksError, "No free slots on Nimbus Tasks") - createThread(tasks.taskList[currentIndex].threadHandler, taskHandler, parameters) + try: + createThread(tasks.taskList[currentIndex].threadHandler, taskHandler, parameters) + except CatchableError as e: + # TODO: joinThreads + fatal "error creating task (thread)", msg=e.msg + info "Created task:", task = tasks.taskList[currentIndex].name ## Task monitoring @@ -109,8 +112,8 @@ proc monitor*(tasksList: var NimbusTasks, config: NimbusConfig) = while true: info "checking tasks ... " - # -check an atomic (to be created when needed) if it s required to shutdown - # this will atomic flag solves: + # -check tasks flag (to be created when needed) if it's required to shutdown + # this atomic flag solves: # - non responding thread # - thread that required shutdown @@ -123,7 +126,8 @@ proc startTasks*( let # TODO: extract configs for each task from NimbusConfig - # or extract them somewhere else and passs them here + # or extract them somewhere else and passs them here. + # check nimbus_configs annotations. 
execName = "Execution Layer" consName = "Consensus Layer" var @@ -149,7 +153,7 @@ when isMainModule: ## - file limits ## - check if we have permissions to create data folder if needed ## - setup logging - ## - read configuration + ## - read configuration (check nimbus_configs file anottations) ## - implement config reader for all components let nimbusConfigs = NimbusConfig() var tasksList: NimbusTasks = NimbusTasks.new @@ -168,6 +172,7 @@ when isMainModule: tasksList.startTasks(nimbusConfigs, beaconNodeConfig) ## Graceful shutdown by handling of Ctrl+C signal + ## TODO: we might need to declare it per thread proc controlCHandler() {.noconv.} = when defined(windows): # workaround for https://github.com/nim-lang/Nim/issues/4057 From 7fad6d4f8dfeaf050bb98aceb9b0a02d69af8b87 Mon Sep 17 00:00:00 2001 From: Pedro Miranda Date: Wed, 23 Oct 2024 12:35:12 +0100 Subject: [PATCH 13/32] inital exec run script --- nimbus_unified/run_nimbus_unified.sh | 26 ++++++++++++++++++++++++++ 1 file changed, 26 insertions(+) create mode 100755 nimbus_unified/run_nimbus_unified.sh diff --git a/nimbus_unified/run_nimbus_unified.sh b/nimbus_unified/run_nimbus_unified.sh new file mode 100755 index 000000000..3e96a7e33 --- /dev/null +++ b/nimbus_unified/run_nimbus_unified.sh @@ -0,0 +1,26 @@ +#!/usr/bin/env bash + +# nimbus_unified +# Copyright (c) 2024 Status Research & Development GmbH +# Licensed and distributed under either of +# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). +# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). +# at your option. This file may not be copied, modified, or distributed except according to those terms. + + +#TODO: a lot to do on this exec script. improve/refactor as the project proceeds + +#Execution +# tbd +#Consensus +# tbd +#unified +exec build/nimbus_unified \ +--network=holesky \ +--data-dir=build/data/shared_holesky_0 \ +--tcp-port=9000 \ +--udp-port=9000 \ +--rest-port=5052 \ +--web3-url=http://127.0.0.1:8551 \ +--jwt-secret=/tmp/jwtsecret \ +--log-level=TRACE \ No newline at end of file From a0336df19dad3471cfb5b639a4bea2cf402e9440 Mon Sep 17 00:00:00 2001 From: Pedro Miranda Date: Mon, 28 Oct 2024 23:57:10 +0000 Subject: [PATCH 14/32] changes: Makefile: Disabled libbacktrace: for some reason was given a strange error DW_FORM_addrx. Requires further investigation, given that it can be a macos issue, or related to the fact tathatth nimbus-eth2 is a submodule. moved getPid to thread worker it self. might required further investigation regarding thread pattern. 
small typos --- Makefile | 2 +- nimbus_unified/configs/nimbus_configs.nim | 2 +- nimbus_unified/nimbus_unified.nim | 36 +++++++++++++---------- nimbus_unified/run_nimbus_unified.sh | 3 -- 4 files changed, 23 insertions(+), 20 deletions(-) diff --git a/Makefile b/Makefile index 04d973519..4db7d42cf 100644 --- a/Makefile +++ b/Makefile @@ -372,7 +372,7 @@ endif # builds the unified client nimbus_unified: | build deps echo -e $(BUILD_MSG) "build/$@" && \ - $(ENV_SCRIPT) nim c $(NIM_PARAMS) -d:libp2p_pki_schemes=secp256k1 -d:chronicles_log_level=TRACE -o:build/$@ "nimbus_unified/$@.nim" + $(ENV_SCRIPT) nim c $(NIM_PARAMS) --verbosity:3 -d:debug -d:disable_libbacktrace -d:libp2p_pki_schemes=secp256k1 -d:chronicles_log_level=TRACE -o:build/$@ "nimbus_unified/$@.nim" # Note about building Nimbus as a library: # # There were `wrappers`, `wrappers-static`, `libnimbus.so` and `libnimbus.a` diff --git a/nimbus_unified/configs/nimbus_configs.nim b/nimbus_unified/configs/nimbus_configs.nim index f9521ce31..a42574c20 100644 --- a/nimbus_unified/configs/nimbus_configs.nim +++ b/nimbus_unified/configs/nimbus_configs.nim @@ -21,7 +21,7 @@ type NimbusConfig* = object type TaskParameters* = object name*: string configs*: string - beaconNodeConfigs*: BeaconNodeConf # + beaconNodeConfigs*: BeaconNodeConf ## Task shutdown flag ## diff --git a/nimbus_unified/nimbus_unified.nim b/nimbus_unified/nimbus_unified.nim index 3b1b40c14..9ea7e9950 100644 --- a/nimbus_unified/nimbus_unified.nim +++ b/nimbus_unified/nimbus_unified.nim @@ -12,7 +12,8 @@ import consensus/consensus_wrapper, execution/execution_wrapper, beacon_chain/[conf, conf_common], - beacon_chain/[beacon_chain_db] + beacon_chain/[beacon_chain_db], + beacon_chain/validators/keystore_management ## Constants ## TODO: evaluate the proposed timeouts with team @@ -60,15 +61,14 @@ proc joinTasks(tasks: var NimbusTasks) = info "\tAll tasks finished" -#TODO: Investigate if this is really needed? and for what purpose? 
-var gPidFile: string -proc createPidFile(filename: string) {.raises: [IOError].} = - writeFile filename, $os.getCurrentProcessId() - gPidFile = filename - addExitProc ( - proc() = - discard io2.removeFile(filename) - ) +# var gPidFile: string +# proc createPidFile(filename: string) {.raises: [IOError].} = +# writeFile filename, $os.getCurrentProcessId() +# gPidFile = filename +# addExitProc ( +# proc() = +# discard io2.removeFile(filename) +# ) # ---- @@ -101,7 +101,7 @@ proc addNewTask*( createThread(tasks.taskList[currentIndex].threadHandler, taskHandler, parameters) except CatchableError as e: # TODO: joinThreads - fatal "error creating task (thread)", msg=e.msg + fatal "error creating task (thread)", msg = e.msg info "Created task:", task = tasks.taskList[currentIndex].name @@ -151,7 +151,6 @@ when isMainModule: info "Starting Nimbus" ## TODO ## - file limits - ## - check if we have permissions to create data folder if needed ## - setup logging ## - read configuration (check nimbus_configs file anottations) ## - implement config reader for all components @@ -163,12 +162,19 @@ when isMainModule: const SPEC_VERSION = "1.5.0-alpha.8" const copyrights = "status" const nimBanner = "nimbus" - const clientId = "beacon node" + const clientId = "nimbus unified" var beaconNodeConfig = makeBannerAndConfig( clientId, copyrights, nimBanner, SPEC_VERSION, [], BeaconNodeConf ).valueOr: - quit(0) + stderr.write error + quit QuitFailure + + if not (checkAndCreateDataDir(string(beaconNodeConfig.dataDir))): + # We are unable to access/create data folder or data folder's + # permissions are insecure. + quit QuitFailure + # create and start tasks tasksList.startTasks(nimbusConfigs, beaconNodeConfig) ## Graceful shutdown by handling of Ctrl+C signal @@ -189,6 +195,6 @@ when isMainModule: quit(0) setControlCHook(controlCHandler) - createPidFile(beaconNodeConfig.databaseDir.string / "unified.pid") + # createPidFile(beaconNodeConfig.databaseDir.string / "unified.pid") #start monitoring tasksList.monitor(nimbusConfigs) diff --git a/nimbus_unified/run_nimbus_unified.sh b/nimbus_unified/run_nimbus_unified.sh index 3e96a7e33..a2edd8bc9 100755 --- a/nimbus_unified/run_nimbus_unified.sh +++ b/nimbus_unified/run_nimbus_unified.sh @@ -20,7 +20,4 @@ exec build/nimbus_unified \ --data-dir=build/data/shared_holesky_0 \ --tcp-port=9000 \ --udp-port=9000 \ ---rest-port=5052 \ ---web3-url=http://127.0.0.1:8551 \ ---jwt-secret=/tmp/jwtsecret \ --log-level=TRACE \ No newline at end of file From 00e705669f714b84128031f3f5db46150b741dcd Mon Sep 17 00:00:00 2001 From: Pedro Miranda Date: Tue, 29 Oct 2024 22:46:25 +0000 Subject: [PATCH 15/32] Improved synchronization mechanism --- .../consensus/consensus_wrapper.nim | 45 ++++------------ .../execution/execution_wrapper.nim | 4 +- nimbus_unified/nimbus_unified.nim | 52 ++++++++++--------- nimbus_unified/run_nimbus_unified.sh | 4 +- 4 files changed, 42 insertions(+), 63 deletions(-) diff --git a/nimbus_unified/consensus/consensus_wrapper.nim b/nimbus_unified/consensus/consensus_wrapper.nim index 425bd4770..b80153edc 100644 --- a/nimbus_unified/consensus/consensus_wrapper.nim +++ b/nimbus_unified/consensus/consensus_wrapper.nim @@ -6,7 +6,7 @@ # at your option. This file may not be copied, modified, or distributed except according to those terms. 
import - std/os, + std/[os, atomics], beacon_chain/nimbus_binary_common, beacon_chain/spec/forks, beacon_chain/[beacon_chain_db, trusted_node_sync], @@ -133,45 +133,20 @@ proc doRunTrustedNodeSync( ## Consensus wrapper proc consensusWrapper*(parameters: TaskParameters) {.raises: [CatchableError].} = + # Single RNG instance for the application - will be seeded on construction + # and avoid using system resources (such as urandom) after that let rng = HmacDrbgContext.new() var config = parameters.beaconNodeConfigs + setupFileLimits() + + #TODO: Another FC unsafe procedure + # setupLogging(config.logLevel, config.logStdout, config.logFile) + try: doRunBeaconNode(config, rng) except CatchableError as e: fatal "error", message = e.msg - # TODO: we need to create an dedicated atomic asking task manager to join threads - - let - metadata = loadEth2Network(config) - db = BeaconChainDB.new(config.databaseDir, metadata.cfg, inMemory = false) - genesisState = waitFor fetchGenesisState(metadata) - try: - waitFor( - db.doRunTrustedNodeSync( - metadata, config.databaseDir, config.eraDir, "http://127.0.0.1:5052", - config.stateId, config.lcTrustedBlockRoot, config.backfillBlocks, - config.reindex, config.downloadDepositSnapshot, genesisState, - ) - ) - except CatchableError as e: - # TODO: we need to create an dedicated atomic asking task manager to join threads - fatal "error", message = e.MsgSource - - db.close() - - # TODO: nice to start creating some binary launch scripts - - # --web3-url=http://127.0.0.1:8551 --jwt-secret=/tmp/jwtsecret --log-level=TRACE - # --network=${NETWORK} \ - # --data-dir="${DATA_DIR}" \ - # --tcp-port=$(( ${BASE_P2P_PORT} + ${NODE_ID} )) \ - # --udp-port=$(( ${BASE_P2P_PORT} + ${NODE_ID} )) \ - # --rest \ - # --rest-port=$(( ${BASE_REST_PORT} + ${NODE_ID} )) \ - # --metrics \ - # ${WEB3_URL_ARG} ${EXTRA_ARGS} \ - # "$@" - - warn "\tExiting consensus wrapper" + isShutDownRequired.store(true) + warn "\tExiting consensus wrapper" \ No newline at end of file diff --git a/nimbus_unified/execution/execution_wrapper.nim b/nimbus_unified/execution/execution_wrapper.nim index 21522849c..a8dede67d 100644 --- a/nimbus_unified/execution/execution_wrapper.nim +++ b/nimbus_unified/execution/execution_wrapper.nim @@ -12,13 +12,15 @@ export nimbus_configs logScope: topics = "Execution layer" +const cTempExecutionTimeoutMs = 5000 proc executionWrapper*(parameters: TaskParameters) = info "Execution wrapper:", worker = parameters.name while true: - sleep(2000) + sleep(cTempExecutionTimeoutMs) info "looping execution" if isShutDownRequired.load() == true: break + isShutDownRequired.store(true) warn "\tExiting execution:", worker = parameters.name diff --git a/nimbus_unified/nimbus_unified.nim b/nimbus_unified/nimbus_unified.nim index 9ea7e9950..d3dcb67bf 100644 --- a/nimbus_unified/nimbus_unified.nim +++ b/nimbus_unified/nimbus_unified.nim @@ -41,10 +41,7 @@ logScope: ## Execution Layer handler proc executionLayerHandler(parameters: TaskParameters) {.thread.} = info "Started task:", task = parameters.name - while true: - executionWrapper(parameters) - if isShutDownRequired.load() == true: - break + executionWrapper(parameters) info "\tExiting task;", task = parameters.name ## Consensus Layer handler @@ -55,20 +52,21 @@ proc consensusLayerHandler(parameters: TaskParameters) {.thread.} = ## Waits for tasks to finish (joinThreads) proc joinTasks(tasks: var NimbusTasks) = + warn "Waiting all tasks to finish ... " for i in 0 .. 
cNimbusMaxTasks - 1: if not tasks.taskList[i].isNil: joinThread(tasks.taskList[i].threadHandler) - info "\tAll tasks finished" + notice "All tasks finished correctly" -# var gPidFile: string -# proc createPidFile(filename: string) {.raises: [IOError].} = -# writeFile filename, $os.getCurrentProcessId() -# gPidFile = filename -# addExitProc ( -# proc() = -# discard io2.removeFile(filename) -# ) +var gPidFile: string +proc createPidFile(filename: string) {.raises: [IOError].} = + writeFile filename, $os.getCurrentProcessId() + gPidFile = filename + addExitProc ( + proc() = + discard io2.removeFile(filename) + ) # ---- @@ -107,18 +105,16 @@ proc addNewTask*( ## Task monitoring proc monitor*(tasksList: var NimbusTasks, config: NimbusConfig) = - info "monitoring tasks" + info "started task monitoring" while true: info "checking tasks ... " - - # -check tasks flag (to be created when needed) if it's required to shutdown - # this atomic flag solves: - # - non responding thread - # - thread that required shutdown - + if isShutDownRequired.load() == true: + break sleep(cNimbusTaskTimeoutMs) + tasksList.joinTasks() + ## create running workers proc startTasks*( tasksList: var NimbusTasks, configs: NimbusConfig, beaconConfigs: var BeaconNodeConf @@ -159,7 +155,7 @@ when isMainModule: ##TODO: this is an adapted call os the vars required by makeBannerAndConfig ##these values need to be read from some config file - const SPEC_VERSION = "1.5.0-alpha.8" + const SPEC_VERSION = "tbd" const copyrights = "status" const nimBanner = "nimbus" const clientId = "nimbus unified" @@ -169,13 +165,16 @@ when isMainModule: stderr.write error quit QuitFailure - if not (checkAndCreateDataDir(string(beaconNodeConfig.dataDir))): + #TODO: if we don't add the "db" program crashes on + if not(checkAndCreateDataDir(string(beaconNodeConfig.dataDir/"db"))): # We are unable to access/create data folder or data folder's # permissions are insecure. 
quit QuitFailure - # create and start tasks - tasksList.startTasks(nimbusConfigs, beaconNodeConfig) + # TODO: data directory is not created(build/data/shared_holesky_0/db/) + # and "createPidFile" throws an exception + # solution: manually create the directory + createPidFile(beaconNodeConfig.databaseDir.string / "unified.pid") ## Graceful shutdown by handling of Ctrl+C signal ## TODO: we might need to declare it per thread @@ -195,6 +194,9 @@ when isMainModule: quit(0) setControlCHook(controlCHandler) - # createPidFile(beaconNodeConfig.databaseDir.string / "unified.pid") + + #create and start tasks + tasksList.startTasks(nimbusConfigs, beaconNodeConfig) + #start monitoring tasksList.monitor(nimbusConfigs) diff --git a/nimbus_unified/run_nimbus_unified.sh b/nimbus_unified/run_nimbus_unified.sh index a2edd8bc9..561dbf217 100755 --- a/nimbus_unified/run_nimbus_unified.sh +++ b/nimbus_unified/run_nimbus_unified.sh @@ -17,7 +17,7 @@ #unified exec build/nimbus_unified \ --network=holesky \ ---data-dir=build/data/shared_holesky_0 \ +--data-dir="build/data/shared_holesky_0" \ --tcp-port=9000 \ --udp-port=9000 \ ---log-level=TRACE \ No newline at end of file +--log-level=TRACE From d5f7a6eb546df6a15676889e7cea15669f328d34 Mon Sep 17 00:00:00 2001 From: Pedro Miranda Date: Wed, 30 Oct 2024 11:38:21 +0000 Subject: [PATCH 16/32] synch achieved: There are currently 2 issues: - fetch genesis state on empty data folder: error msg "The downloaded genesis state cannot be verified (checksum mismatch)\" The error arises from here: fetchGenesisState->network_metadata_downloads.fetchGenesisBytes.L58 something to do with the readssz or withState, investigations point to the data downloaded or some config missing WA: comment lines 58-62, compile and run until the fetch genesis state is completed, then you can uncomment, it works from here - spam of error messages: \"metrics error:New label values must be added from same thread as the metric was created from\": This happens due to the fact that libp2p declares some gauges, and given that they are created inside a thread, metrics library starts to complain. 
(no WA/correction so far)" --- Makefile | 3 +- .../consensus/adapted_block_chain_dag.nim | 2884 +++++++++++++++++ .../consensus/consensus_wrapper.nim | 2413 +++++++++++++- 3 files changed, 5232 insertions(+), 68 deletions(-) create mode 100644 nimbus_unified/consensus/adapted_block_chain_dag.nim diff --git a/Makefile b/Makefile index 4db7d42cf..8f99d6fd1 100644 --- a/Makefile +++ b/Makefile @@ -370,9 +370,10 @@ endif # Nimbus unified related targets # builds the unified client +# NIM_PARAMS := -d:release --parallelBuild:1 -d:libp2p_agents_metrics -d:KnownLibP2PAgents=nimbus,lighthouse,lodestar,prysm,teku,grandine $(NIM_PARAMS) nimbus_unified: | build deps echo -e $(BUILD_MSG) "build/$@" && \ - $(ENV_SCRIPT) nim c $(NIM_PARAMS) --verbosity:3 -d:debug -d:disable_libbacktrace -d:libp2p_pki_schemes=secp256k1 -d:chronicles_log_level=TRACE -o:build/$@ "nimbus_unified/$@.nim" + $(ENV_SCRIPT) nim c $(NIM_PARAMS) -d:disable_libbacktrace -d:libp2p_pki_schemes=secp256k1 -o:build/$@ "nimbus_unified/$@.nim" # Note about building Nimbus as a library: # # There were `wrappers`, `wrappers-static`, `libnimbus.so` and `libnimbus.a` diff --git a/nimbus_unified/consensus/adapted_block_chain_dag.nim b/nimbus_unified/consensus/adapted_block_chain_dag.nim new file mode 100644 index 000000000..e342480dc --- /dev/null +++ b/nimbus_unified/consensus/adapted_block_chain_dag.nim @@ -0,0 +1,2884 @@ +# nimbus_unified +# Copyright (c) 2024 Status Research & Development GmbH +# Licensed and distributed under either of +# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). +# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). +# at your option. This file may not be copied, modified, or distributed except according to those terms. 
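The second issue described in the commit message above (the "New label values must be added from same thread as the metric was created from" spam) is presumably also why the gauge and counter declarations are commented out in the adapted module that follows. A rough illustration of the pattern involved, with an invented gauge name: collectors are declared once at module scope and later updated from code paths that may run on other threads; per the commit message, it is that cross-thread use which makes the metrics library complain.

import metrics

# a module-scope collector, created when this module initialises
declareGauge unified_head_slot, "head slot as seen by the unified client"

proc onNewHead(slot: int64) =
  # updating from the thread that created the collector works as expected;
  # per the commit message, updates that cross threads are what produce
  # the "same thread" metrics errors.
  unified_head_slot.set(slot)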
+ +{.push raises: [].} + +import + std/[algorithm, sequtils, tables, sets], + stew/[arrayops, assign2, byteutils], + chronos, metrics, results, snappy, chronicles, + beacon_chain/spec/[beaconstate, eth2_merkleization, eth2_ssz_serialization, helpers, + state_transition, validator], + beacon_chain/spec/forks, + beacon_chain/[beacon_chain_db, beacon_clock, era_db], + beacon_chain/consensus_object_pools/[block_pools_types, block_quarantine] + +export + eth2_merkleization, eth2_ssz_serialization, + block_pools_types, results, beacon_chain_db + +logScope: topics = "chaindag" +# adapted from nimbus-eth2 + +# # https://github.com/ethereum/beacon-metrics/blob/master/metrics.md#interop-metrics +# declareGauge beacon_head_root, "Root of the head block of the beacon chain" +# declareGauge beacon_head_slot, "Slot of the head block of the beacon chain" + +# # https://github.com/ethereum/beacon-metrics/blob/master/metrics.md#interop-metrics +# declareGauge beacon_finalized_epoch, "Current finalized epoch" # On epoch transition +# declareGauge beacon_finalized_root, "Current finalized root" # On epoch transition +# declareGauge beacon_current_justified_epoch, "Current justified epoch" # On epoch transition +# declareGauge beacon_current_justified_root, "Current justified root" # On epoch transition +# declareGauge beacon_previous_justified_epoch, "Current previously justified epoch" # On epoch transition +# declareGauge beacon_previous_justified_root, "Current previously justified root" # On epoch transition + +# declareGauge beacon_reorgs_total_total, "Total occurrences of reorganizations of the chain" # On fork choice; backwards-compat name (used to be a counter) +# declareGauge beacon_reorgs_total, "Total occurrences of reorganizations of the chain" # Interop copy +# declareCounter beacon_state_data_cache_hits, "EpochRef hits" +# declareCounter beacon_state_data_cache_misses, "EpochRef misses" +# declareCounter beacon_state_rewinds, "State database rewinds" + +# declareGauge beacon_active_validators, "Number of validators in the active validator set" +# declareGauge beacon_current_active_validators, "Number of validators in the active validator set" # Interop copy +# declareGauge beacon_pending_deposits, "Number of pending deposits (state.eth1_data.deposit_count - state.eth1_deposit_index)" # On block +# declareGauge beacon_processed_deposits_total, "Number of total deposits included on chain" # On block + +declareCounter beacon_dag_state_replay_seconds, "Time spent replaying states" + +const + EPOCHS_PER_STATE_SNAPSHOT* = 32 + ## When finality happens, we prune historical states from the database except + ## for a snapshot every 32 epochs from which replays can happen - there's a + ## balance here between making long replays and saving on disk space + MAX_SLOTS_PER_PRUNE* = SLOTS_PER_EPOCH + ## We prune the database incrementally so as not to introduce long + ## processing breaks - this number is the maximum number of blocks we allow + ## to be pruned every time the prune call is made (once per slot typically) + ## unless head is moving faster (ie during sync) + + +proc putBlock*( + dag: ChainDAGRef, signedBlock: ForkyTrustedSignedBeaconBlock) = + dag.db.putBlock(signedBlock) + +proc updateState*( + dag: ChainDAGRef, state: var ForkedHashedBeaconState, bsi: BlockSlotId, + save: bool, cache: var StateCache): bool {.gcsafe.} + +template withUpdatedState*( + dag: ChainDAGRef, stateParam: var ForkedHashedBeaconState, + bsiParam: BlockSlotId, okBody: untyped, failureBody: untyped): untyped = + ## Helper 
template that updates stateData to a particular BlockSlot - usage of + ## stateData is unsafe outside of block, or across `await` boundaries + + block: + let bsi {.inject.} = bsiParam + var cache {.inject.} = StateCache() + if updateState(dag, stateParam, bsi, false, cache): + template bid(): BlockId {.inject, used.} = bsi.bid + template updatedState(): ForkedHashedBeaconState {.inject, used.} = stateParam + okBody + else: + failureBody + +func get_effective_balances( + validators: openArray[Validator], epoch: Epoch): seq[Gwei] = + ## Get the balances from a state as counted for fork choice + result.newSeq(validators.len) # zero-init + + for i in 0 ..< result.len: + # All non-active validators have a 0 balance + let validator = unsafeAddr validators[i] + if validator[].is_active_validator(epoch) and not validator[].slashed: + result[i] = validator[].effective_balance + +proc updateValidatorKeys*(dag: ChainDAGRef, validators: openArray[Validator]) = + # Update validator key cache - must be called every time a valid block is + # applied to the state - this is important to ensure that when we sync blocks + # without storing a state (non-epoch blocks essentially), the deposits from + # those blocks are persisted to the in-database cache of immutable validator + # data (but no earlier than that the whole block as been validated) + dag.db.updateImmutableValidators(validators) + +proc updateFinalizedBlocks*(db: BeaconChainDB, newFinalized: openArray[BlockId]) = + if db.db.readOnly: return # TODO abstraction leak - where to put this? + + db.withManyWrites: + for bid in newFinalized: + db.finalizedBlocks.insert(bid.slot, bid.root) + +proc updateFrontfillBlocks*(dag: ChainDAGRef) = + # When backfilling is done and manages to reach the frontfill point, we can + # write the frontfill index knowing that the block information in the + # era files match the chain + if dag.db.db.readOnly: return # TODO abstraction leak - where to put this? + + if dag.frontfillBlocks.len == 0 or dag.backfill.slot > GENESIS_SLOT: + return + + info "Writing frontfill index", slots = dag.frontfillBlocks.len + + dag.db.withManyWrites: + let low = dag.db.finalizedBlocks.low.expect( + "wrote at least tailRef during init") + let blocks = min(low.int, dag.frontfillBlocks.len - 1) + var parent: Eth2Digest + for i in 0..blocks: + let root = dag.frontfillBlocks[i] + if not isZero(root): + dag.db.finalizedBlocks.insert(Slot(i), root) + dag.db.putBeaconBlockSummary( + root, BeaconBlockSummary(slot: Slot(i), parent_root: parent)) + parent = root + + reset(dag.frontfillBlocks) + +func validatorKey*( + dag: ChainDAGRef, index: ValidatorIndex or uint64): Opt[CookedPubKey] = + ## Returns the validator pubkey for the index, assuming it's been observed + ## at any point in time - this function may return pubkeys for indicies that + ## are not (yet) part of the head state (if the key has been observed on a + ## non-head branch)! 
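# Illustrative usage of the `withUpdatedState` template defined above - it is
# used in exactly this shape further down, in `currentSyncCommitteeForPeriod`.
# The first `do:` block runs with the state brought to `bsi` (with
# `updatedState` and `bid` injected), the second one runs if that fails:
#
#   dag.withUpdatedState(tmpState, bsi) do:
#     withState(updatedState):
#       ok forkyState.data.slot
#   do:
#     err()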
+ dag.db.immutableValidators.load(index) + +template is_merge_transition_complete*( + stateParam: ForkedHashedBeaconState): bool = + withState(stateParam): + when consensusFork >= ConsensusFork.Bellatrix: + is_merge_transition_complete(forkyState.data) + else: + false + +func effective_balances*(epochRef: EpochRef): seq[Gwei] = + try: + SSZ.decode(snappy.decode(epochRef.effective_balances_bytes, uint32.high), + List[Gwei, Limit VALIDATOR_REGISTRY_LIMIT]).toSeq() + except CatchableError as exc: + raiseAssert exc.msg + +func getBlockRef*(dag: ChainDAGRef, root: Eth2Digest): Opt[BlockRef] = + ## Retrieve a resolved block reference, if available - this function does + ## not return historical finalized blocks, see `getBlockIdAtSlot` for a + ## function that covers the entire known history + let key = KeyedBlockRef.asLookupKey(root) + # HashSet lacks the api to do check-and-get in one lookup - `[]` will return + # the copy of the instance in the set which has more fields than `root` set! + if key in dag.forkBlocks: + try: ok(dag.forkBlocks[key].blockRef()) + except KeyError: raiseAssert "contains" + else: + err() + +func getBlockIdAtSlot*( + state: ForkyHashedBeaconState, slot: Slot): Opt[BlockSlotId] = + ## Use given state to attempt to find a historical `BlockSlotId`. + if slot > state.data.slot: + return Opt.none(BlockSlotId) # State does not know about requested slot + if state.data.slot > slot + SLOTS_PER_HISTORICAL_ROOT: + return Opt.none(BlockSlotId) # Cache has expired + + var idx = slot mod SLOTS_PER_HISTORICAL_ROOT + let root = + if slot == state.data.slot: + state.latest_block_root + else: + state.data.block_roots[idx] + var bid = BlockId(slot: slot, root: root) + + let availableSlots = + min(slot.uint64, slot + SLOTS_PER_HISTORICAL_ROOT - state.data.slot) + for i in 0 ..< availableSlots: + if idx == 0: + idx = SLOTS_PER_HISTORICAL_ROOT + dec idx + if state.data.block_roots[idx] != root: + return Opt.some BlockSlotId.init(bid, slot) + dec bid.slot + + if bid.slot == GENESIS_SLOT: + return Opt.some BlockSlotId.init(bid, slot) + Opt.none(BlockSlotId) # Unknown if there are more empty slots before + +func getBlockIdAtSlot*(dag: ChainDAGRef, slot: Slot): Opt[BlockSlotId] = + ## Retrieve the canonical block at the given slot, or the last block that + ## comes before - similar to atSlot, but without the linear scan - may hit + ## the database to look up early indices. + if slot > dag.finalizedHead.slot: + return dag.head.atSlot(slot).toBlockSlotId() # iterate to the given slot + + if dag.finalizedHead.blck == nil: + # Not initialized yet (in init) + return Opt.none(BlockSlotId) + + if slot >= dag.finalizedHead.blck.slot: + # finalized head is still in memory + return dag.finalizedHead.blck.atSlot(slot).toBlockSlotId() + + # Load from memory, if the block ID is sufficiently recent. + # For checkpoint sync, this is the only available of historical block IDs + # until sufficient blocks have been backfilled. 
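# Concretely (illustrative numbers): if the canonical chain has a block at
# slot 99 and slots 100..101 are empty, `getBlockIdAtSlot(dag, 101)` returns
# `BlockSlotId(bid: <block at slot 99>, slot: 101)` - the last block at or
# before the requested slot, paired with the requested slot itself.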
+ template tryWithState(state: ForkedHashedBeaconState) = + block: + withState(state): + # State must be a descendent of the finalized chain to be viable + let finBsi = forkyState.getBlockIdAtSlot(dag.finalizedHead.slot) + if finBsi.isSome and # DAG finalized bid slot wrong if CP not @ epoch + finBsi.unsafeGet.bid.root == dag.finalizedHead.blck.bid.root: + let bsi = forkyState.getBlockIdAtSlot(slot) + if bsi.isSome: + return bsi + tryWithState dag.headState + tryWithState dag.epochRefState + tryWithState dag.clearanceState + + # Fallback to database, this only works for backfilled blocks + let finlow = dag.db.finalizedBlocks.low.expect("at least tailRef written") + if slot >= finlow: + var pos = slot + while true: + let root = dag.db.finalizedBlocks.get(pos) + + if root.isSome(): + return ok BlockSlotId.init( + BlockId(root: root.get(), slot: pos), slot) + + doAssert pos > finlow, "We should have returned the finlow" + + pos = pos - 1 + + if slot == GENESIS_SLOT and dag.genesis.isSome(): + return ok dag.genesis.get().atSlot() + + err() # not backfilled yet + +proc containsBlock( + cfg: RuntimeConfig, db: BeaconChainDB, slot: Slot, root: Eth2Digest): bool = + db.containsBlock(root, cfg.consensusForkAtEpoch(slot.epoch)) + +proc containsBlock*(dag: ChainDAGRef, bid: BlockId): bool = + dag.cfg.containsBlock(dag.db, bid.slot, bid.root) + +proc getForkedBlock*(db: BeaconChainDB, root: Eth2Digest): + Opt[ForkedTrustedSignedBeaconBlock] = + # When we only have a digest, we don't know which fork it's from so we try + # them one by one - this should be used sparingly + static: doAssert high(ConsensusFork) == ConsensusFork.Electra + if (let blck = db.getBlock(root, electra.TrustedSignedBeaconBlock); + blck.isSome()): + ok(ForkedTrustedSignedBeaconBlock.init(blck.get())) + elif (let blck = db.getBlock(root, deneb.TrustedSignedBeaconBlock); + blck.isSome()): + ok(ForkedTrustedSignedBeaconBlock.init(blck.get())) + elif (let blck = db.getBlock(root, capella.TrustedSignedBeaconBlock); + blck.isSome()): + ok(ForkedTrustedSignedBeaconBlock.init(blck.get())) + elif (let blck = db.getBlock(root, bellatrix.TrustedSignedBeaconBlock); + blck.isSome()): + ok(ForkedTrustedSignedBeaconBlock.init(blck.get())) + elif (let blck = db.getBlock(root, altair.TrustedSignedBeaconBlock); + blck.isSome()): + ok(ForkedTrustedSignedBeaconBlock.init(blck.get())) + elif (let blck = db.getBlock(root, phase0.TrustedSignedBeaconBlock); + blck.isSome()): + ok(ForkedTrustedSignedBeaconBlock.init(blck.get())) + else: + err() + +proc getBlock*( + dag: ChainDAGRef, bid: BlockId, + T: type ForkyTrustedSignedBeaconBlock): Opt[T] = + dag.db.getBlock(bid.root, T) or + getBlock( + dag.era, getStateField(dag.headState, historical_roots).asSeq, + dag.headState.historical_summaries().asSeq, + bid.slot, Opt[Eth2Digest].ok(bid.root), T) + +proc getBlockSSZ*(dag: ChainDAGRef, bid: BlockId, bytes: var seq[byte]): bool = + # Load the SSZ-encoded data of a block into `bytes`, overwriting the existing + # content + let fork = dag.cfg.consensusForkAtEpoch(bid.slot.epoch) + dag.db.getBlockSSZ(bid.root, bytes, fork) or + (bid.slot <= dag.finalizedHead.slot and + getBlockSSZ( + dag.era, getStateField(dag.headState, historical_roots).asSeq, + dag.headState.historical_summaries().asSeq, + bid.slot, bytes).isOk() and bytes.len > 0) + +proc getBlockSZ*(dag: ChainDAGRef, bid: BlockId, bytes: var seq[byte]): bool = + # Load the snappy-frame-compressed ("SZ") SSZ-encoded data of a block into + # `bytes`, overwriting the existing content + # careful: there are two 
snappy encodings in use, with and without framing! + # Returns true if the block is found, false if not + let fork = dag.cfg.consensusForkAtEpoch(bid.slot.epoch) + dag.db.getBlockSZ(bid.root, bytes, fork) or + (bid.slot <= dag.finalizedHead.slot and + getBlockSZ( + dag.era, getStateField(dag.headState, historical_roots).asSeq, + dag.headState.historical_summaries().asSeq, + bid.slot, bytes).isOk and bytes.len > 0) + +proc getForkedBlock*( + dag: ChainDAGRef, bid: BlockId): Opt[ForkedTrustedSignedBeaconBlock] = + + let fork = dag.cfg.consensusForkAtEpoch(bid.slot.epoch) + result.ok(ForkedTrustedSignedBeaconBlock(kind: fork)) + withBlck(result.get()): + type T = type(forkyBlck) + forkyBlck = getBlock(dag, bid, T).valueOr: + getBlock( + dag.era, getStateField(dag.headState, historical_roots).asSeq, + dag.headState.historical_summaries().asSeq, + bid.slot, Opt[Eth2Digest].ok(bid.root), T).valueOr: + result.err() + return + +proc getBlockId*(db: BeaconChainDB, root: Eth2Digest): Opt[BlockId] = + block: # We might have a summary in the database + let summary = db.getBeaconBlockSummary(root) + if summary.isOk(): + return ok(BlockId(root: root, slot: summary.get().slot)) + + block: + # We might have a block without having written a summary - this can happen + # if there was a crash between writing the block and writing the summary, + # specially in databases written by older nimbus versions + let forked = db.getForkedBlock(root) + if forked.isSome(): + # Shouldn't happen too often but.. + let + blck = forked.get() + summary = withBlck(blck): forkyBlck.message.toBeaconBlockSummary() + debug "Writing summary", blck = shortLog(blck) + db.putBeaconBlockSummary(root, summary) + return ok(BlockId(root: root, slot: summary.slot)) + + err() + +proc getBlockId*(dag: ChainDAGRef, root: Eth2Digest): Opt[BlockId] = + ## Look up block id by root in history - useful for turning a root into a + ## slot - may hit the database, may return blocks that have since become + ## unviable - use `getBlockIdAtSlot` to check that the block is still viable + ## if used in a sensitive context + block: # If we have a BlockRef, this is the fastest way to get a block id + let blck = dag.getBlockRef(root) + if blck.isOk(): + return ok(blck.get().bid) + + dag.db.getBlockId(root) + +proc getForkedBlock*( + dag: ChainDAGRef, root: Eth2Digest): Opt[ForkedTrustedSignedBeaconBlock] = + let bid = dag.getBlockId(root) + if bid.isSome(): + dag.getForkedBlock(bid.get()) + else: + # In case we didn't have a summary - should be rare, but .. + dag.db.getForkedBlock(root) + +func isCanonical*(dag: ChainDAGRef, bid: BlockId): bool = + ## Returns `true` if the given `bid` is part of the history selected by + ## `dag.head`. + let current = dag.getBlockIdAtSlot(bid.slot).valueOr: + return false # We don't know, so .. + return current.bid == bid + +func isFinalized*(dag: ChainDAGRef, bid: BlockId): bool = + ## Returns `true` if the given `bid` is part of the finalized history + ## selected by `dag.finalizedHead`. + dag.isCanonical(bid) and (bid.slot <= dag.finalizedHead.slot) + +func parent*(dag: ChainDAGRef, bid: BlockId): Opt[BlockId] = + if bid.slot == 0: + return err() + + if bid.slot > dag.finalizedHead.slot: + # Make sure we follow the correct history as there may be forks + let blck = ? dag.getBlockRef(bid.root) + + doAssert not isNil(blck.parent), "should reach finalized head" + return ok blck.parent.bid + + let bids = ? 
dag.getBlockIdAtSlot(bid.slot - 1) + ok(bids.bid) + +func parentOrSlot*(dag: ChainDAGRef, bsi: BlockSlotId): Opt[BlockSlotId] = + if bsi.slot == 0: + return err() + + if bsi.isProposed: + let parent = ? dag.parent(bsi.bid) + ok BlockSlotId.init(parent, bsi.slot) + else: + ok BlockSlotId.init(bsi.bid, bsi.slot - 1) + +func atSlot*(dag: ChainDAGRef, bid: BlockId, slot: Slot): Opt[BlockSlotId] = + if bid.slot > dag.finalizedHead.slot: + let blck = ? dag.getBlockRef(bid.root) + + if slot > dag.finalizedHead.slot: + return blck.atSlot(slot).toBlockSlotId() + else: + # Check if the given `bid` is still part of history - it might hail from an + # orphaned fork + let existing = ? dag.getBlockIdAtSlot(bid.slot) + if existing.bid != bid: + return err() # Not part of known / relevant history + + if existing.slot == slot: # and bid.slot == slot + return ok existing + + if bid.slot <= slot: + ok BlockSlotId.init(bid, slot) + else: + dag.getBlockIdAtSlot(slot) + +func nextTimestamp[I, T](cache: var LRUCache[I, T]): uint32 = + if cache.timestamp == uint32.high: + for i in 0 ..< I: + template e: untyped = cache.entries[i] + if e.lastUsed != 0: + e.lastUsed = 1 + cache.timestamp = 1 + inc cache.timestamp + cache.timestamp + +template peekIt[I, T](cache: var LRUCache[I, T], predicate: untyped): Opt[T] = + block: + var res: Opt[T] + for i in 0 ..< I: + template e: untyped = cache.entries[i] + template it: untyped {.inject, used.} = e.value + if e.lastUsed != 0 and predicate: + res.ok it + break + res + +template findIt[I, T](cache: var LRUCache[I, T], predicate: untyped): Opt[T] = + block: + var res: Opt[T] + for i in 0 ..< I: + template e: untyped = cache.entries[i] + template it: untyped {.inject, used.} = e.value + if e.lastUsed != 0 and predicate: + e.lastUsed = cache.nextTimestamp + res.ok it + break + res + +template delIt[I, T](cache: var LRUCache[I, T], predicate: untyped) = + block: + for i in 0 ..< I: + template e: untyped = cache.entries[i] + template it: untyped {.inject, used.} = e.value + if e.lastUsed != 0 and predicate: + e.reset() + +func put[I, T](cache: var LRUCache[I, T], value: T) = + var lru = 0 + block: + var min = uint32.high + for i in 0 ..< I: + template e: untyped = cache.entries[i] + if e.lastUsed < min: + min = e.lastUsed + lru = i + if min == 0: + break + + template e: untyped = cache.entries[lru] + e.value = value + e.lastUsed = cache.nextTimestamp + +func epochAncestor(dag: ChainDAGRef, bid: BlockId, epoch: Epoch): + Opt[BlockSlotId] = + ## The epoch ancestor is the last block that has an effect on the epoch- + ## related state data, as updated in `process_epoch` - this block determines + ## effective balances, validator addtions and removals etc and serves as a + ## base for `EpochRef` construction. + if epoch < dag.tail.slot.epoch or bid.slot < dag.tail.slot: + # Not enough information in database to meaningfully process pre-tail epochs + return Opt.none BlockSlotId + + let + dependentSlot = + if epoch == dag.tail.slot.epoch: + # Use the tail as "dependent block" - this may be the genesis block, or, + # in the case of checkpoint sync, the checkpoint block + dag.tail.slot + else: + epoch.start_slot() - 1 + bsi = ? 
dag.atSlot(bid, dependentSlot) + epochSlot = + if epoch == dag.tail.slot.epoch: + dag.tail.slot + else: + epoch.start_slot() + ok BlockSlotId(bid: bsi.bid, slot: epochSlot) + +func epochKey(dag: ChainDAGRef, bid: BlockId, epoch: Epoch): Opt[EpochKey] = + ## The state transition works by storing information from blocks in a + ## "working" area until the epoch transition, then batching work collected + ## during the epoch. Thus, last block in the ancestor epochs is the block + ## that has an impact on epoch currently considered. + ## + ## This function returns an epoch key pointing to that epoch boundary, i.e. the + ## boundary where the last block has been applied to the state and epoch + ## processing has been done. + let bsi = dag.epochAncestor(bid, epoch).valueOr: + return Opt.none(EpochKey) + + Opt.some(EpochKey(bid: bsi.bid, epoch: epoch)) + +func putShufflingRef*(dag: ChainDAGRef, shufflingRef: ShufflingRef) = + ## Store shuffling in the cache + if shufflingRef.epoch < dag.finalizedHead.slot.epoch(): + # Only cache epoch information for unfinalized blocks - earlier states + # are seldomly used (ie RPC), so no need to cache + return + + dag.shufflingRefs.put shufflingRef + +func findShufflingRef*( + dag: ChainDAGRef, bid: BlockId, epoch: Epoch): Opt[ShufflingRef] = + ## Lookup a shuffling in the cache, returning `none` if it's not present - see + ## `getShufflingRef` for a version that creates a new instance if it's missing + let + dependent_slot = epoch.attester_dependent_slot() + dependent_bsi = ? dag.atSlot(bid, dependent_slot) + + # Check `ShufflingRef` cache + let shufflingRef = dag.shufflingRefs.findIt( + it.epoch == epoch and it.attester_dependent_root == dependent_bsi.bid.root) + if shufflingRef.isOk: + return shufflingRef + + # Check `EpochRef` cache + let epochRef = dag.epochRefs.peekIt( + it.shufflingRef.epoch == epoch and + it.shufflingRef.attester_dependent_root == dependent_bsi.bid.root) + if epochRef.isOk: + dag.putShufflingRef(epochRef.get.shufflingRef) + return ok epochRef.get.shufflingRef + + err() + +func findEpochRef*( + dag: ChainDAGRef, bid: BlockId, epoch: Epoch): Opt[EpochRef] = + ## Lookup an EpochRef in the cache, returning `none` if it's not present - see + ## `getEpochRef` for a version that creates a new instance if it's missing + let key = ? 
dag.epochKey(bid, epoch) + + dag.epochRefs.findIt(it.key == key) + +func putEpochRef(dag: ChainDAGRef, epochRef: EpochRef) = + if epochRef.epoch < dag.finalizedHead.slot.epoch(): + # Only cache epoch information for unfinalized blocks - earlier states + # are seldomly used (ie RPC), so no need to cache + return + + dag.epochRefs.put epochRef + +func init*( + T: type ShufflingRef, state: ForkedHashedBeaconState, + cache: var StateCache, epoch: Epoch): T = + let attester_dependent_root = + withState(state): forkyState.dependent_root(epoch.get_previous_epoch) + + ShufflingRef( + epoch: epoch, + attester_dependent_root: attester_dependent_root, + shuffled_active_validator_indices: + cache.get_shuffled_active_validator_indices(state, epoch), + ) + +func init*( + T: type EpochRef, dag: ChainDAGRef, state: ForkedHashedBeaconState, + cache: var StateCache): T = + let + epoch = state.get_current_epoch() + proposer_dependent_root = withState(state): + forkyState.proposer_dependent_root + shufflingRef = dag.findShufflingRef(state.latest_block_id, epoch).valueOr: + let tmp = ShufflingRef.init(state, cache, epoch) + dag.putShufflingRef(tmp) + tmp + + total_active_balance = withState(state): + get_total_active_balance(forkyState.data, cache) + epochRef = EpochRef( + key: dag.epochKey(state.latest_block_id, epoch).expect( + "Valid epoch ancestor when processing state"), + + eth1_data: + getStateField(state, eth1_data), + eth1_deposit_index: + getStateField(state, eth1_deposit_index), + + checkpoints: + FinalityCheckpoints( + justified: getStateField(state, current_justified_checkpoint), + finalized: getStateField(state, finalized_checkpoint)), + + # beacon_proposers: Separately filled below + proposer_dependent_root: proposer_dependent_root, + + shufflingRef: shufflingRef, + total_active_balance: total_active_balance + ) + epochStart = epoch.start_slot() + + for i in 0'u64.. 0: + load(epoch - 1) + + if dag.head != nil: # nil during init.. sigh + let period = dag.head.slot.sync_committee_period + if period == epoch.sync_committee_period and + period notin cache.sync_committees and + period > dag.cfg.ALTAIR_FORK_EPOCH.sync_committee_period(): + # If the block we're aiming for shares ancestry with head, we can reuse + # the cached head committee - this accounts for most "live" cases like + # syncing and checking blocks since the committees rarely change + let periodBsi = dag.atSlot(bid, period.start_slot) + if periodBsi.isSome and periodBsi == + dag.atSlot(dag.head.bid, period.start_slot): + # We often end up sharing sync committees with head during sync / gossip + # validation / head updates + cache.sync_committees[period] = dag.headSyncCommittees + +func containsForkBlock*(dag: ChainDAGRef, root: Eth2Digest): bool = + ## Checks for blocks at the finalized checkpoint or newer + KeyedBlockRef.asLookupKey(root) in dag.forkBlocks + +func isFinalizedStateSnapshot(slot: Slot): bool = + slot.is_epoch and slot.epoch mod EPOCHS_PER_STATE_SNAPSHOT == 0 + +func isStateCheckpoint(dag: ChainDAGRef, bsi: BlockSlotId): bool = + ## State checkpoints are the points in time for which we store full state + ## snapshots, which later serve as rewind starting points when replaying state + ## transitions from database, for example during reorgs. + ## + # As a policy, we only store epoch boundary states without the epoch block + # (if it exists) applied - the rest can be reconstructed by loading an epoch + # boundary state and applying the missing blocks. 
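# For example (assuming mainnet SLOTS_PER_EPOCH = 32): if the newest applied
# block sits at slot 40 (epoch 1), the stored checkpoint is the empty-slot
# state at slot 64 (epoch 2) - the `bsi.slot.epoch == bid.slot.epoch + 1`
# case tested below - while slot 96 and later boundaries built on top of the
# same block are not stored.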
+ # We also avoid states that were produced with empty slots only - as such, + # there is only a checkpoint for the first epoch after a block. + + # The tail block also counts as a state checkpoint! + (bsi.isProposed and bsi.bid == dag.tail) or + (bsi.slot.is_epoch and bsi.slot.epoch == (bsi.bid.slot.epoch + 1)) + +proc getState( + db: BeaconChainDB, cfg: RuntimeConfig, block_root: Eth2Digest, slot: Slot, + state: var ForkedHashedBeaconState, rollback: RollbackProc): bool = + let state_root = db.getStateRoot(block_root, slot).valueOr: + return false + + db.getState(cfg.consensusForkAtEpoch(slot.epoch), state_root, state, rollback) + +proc containsState*( + db: BeaconChainDB, cfg: RuntimeConfig, block_root: Eth2Digest, + slots: Slice[Slot], legacy = true): bool = + var slot = slots.b + while slot >= slots.a: + let state_root = db.getStateRoot(block_root, slot) + if state_root.isSome() and + db.containsState( + cfg.consensusForkAtEpoch(slot.epoch), state_root.get(), legacy): + return true + + if slot == slots.a: # avoid underflow at genesis + break + slot -= 1 + false + +proc getState*( + db: BeaconChainDB, cfg: RuntimeConfig, block_root: Eth2Digest, + slots: Slice[Slot], state: var ForkedHashedBeaconState, + rollback: RollbackProc): bool = + var slot = slots.b + while slot >= slots.a: + let state_root = db.getStateRoot(block_root, slot) + if state_root.isSome() and + db.getState( + cfg.consensusForkAtEpoch(slot.epoch), state_root.get(), state, + rollback): + return true + + if slot == slots.a: # avoid underflow at genesis + break + slot -= 1 + false + +proc getState( + dag: ChainDAGRef, bsi: BlockSlotId, state: var ForkedHashedBeaconState): bool = + ## Load a state from the database given a block and a slot - this will first + ## lookup the state root in the state root table then load the corresponding + ## state, if it exists + if not dag.isStateCheckpoint(bsi): + return false + + let rollbackAddr = + # Any restore point will do as long as it's not the object being updated + if unsafeAddr(state) == unsafeAddr(dag.headState): + unsafeAddr dag.clearanceState + else: + unsafeAddr dag.headState + + let v = addr state + func rollback() = + assign(v[], rollbackAddr[]) + + dag.db.getState(dag.cfg, bsi.bid.root, bsi.slot, state, rollback) + +proc getStateByParent( + dag: ChainDAGRef, bid: BlockId, state: var ForkedHashedBeaconState): bool = + ## Try to load the state referenced by the parent of the given `bid` - this + ## state can be used to advance to the `bid` state itself. + let slot = bid.slot + + let + summary = dag.db.getBeaconBlockSummary(bid.root).valueOr: + return false + parentMinSlot = + dag.db.getBeaconBlockSummary(summary.parent_root). 
+ map(proc(x: auto): auto = x.slot).valueOr: + # in the cases that we don't have slot information, we'll search for the + # state for a few back from the `bid` slot - if there are gaps of empty + # slots larger than this, we will not be able to load the state using this + # trick + if slot.uint64 >= (EPOCHS_PER_STATE_SNAPSHOT * 2) * SLOTS_PER_EPOCH: + slot - (EPOCHS_PER_STATE_SNAPSHOT * 2) * SLOTS_PER_EPOCH + else: + Slot(0) + + let rollbackAddr = + # Any restore point will do as long as it's not the object being updated + if unsafeAddr(state) == unsafeAddr(dag.headState): + unsafeAddr dag.clearanceState + else: + unsafeAddr dag.headState + + let v = addr state + func rollback() = + assign(v[], rollbackAddr[]) + + dag.db.getState( + dag.cfg, summary.parent_root, parentMinSlot..slot, state, rollback) + +proc getNearbyState( + dag: ChainDAGRef, state: var ForkedHashedBeaconState, bid: BlockId, + lowSlot: Slot): Opt[void] = + ## Load state from DB that is close to `bid` and has at least slot `lowSlot`. + var + e = bid.slot.epoch + b = bid + while true: + let stateSlot = e.start_slot + if stateSlot < lowSlot: + return err() + b = (? dag.atSlot(b, max(stateSlot, 1.Slot) - 1)).bid + let bsi = BlockSlotId.init(b, stateSlot) + if not dag.getState(bsi, state): + if e == GENESIS_EPOCH: + return err() + dec e + continue + return ok() + +proc currentSyncCommitteeForPeriod*( + dag: ChainDAGRef, + tmpState: var ForkedHashedBeaconState, + period: SyncCommitteePeriod): Opt[SyncCommittee] = + ## Fetch a `SyncCommittee` for a given sync committee period. + ## For non-finalized periods, follow the chain as selected by fork choice. + let lowSlot = max(dag.tail.slot, dag.cfg.ALTAIR_FORK_EPOCH.start_slot) + if period < lowSlot.sync_committee_period: + return err() + let + periodStartSlot = period.start_slot + syncCommitteeSlot = max(periodStartSlot, lowSlot) + bsi = ? 
dag.getBlockIdAtSlot(syncCommitteeSlot) + dag.withUpdatedState(tmpState, bsi) do: + withState(updatedState): + when consensusFork >= ConsensusFork.Altair: + ok forkyState.data.current_sync_committee + else: err() + do: err() + +proc getBlockIdAtSlot*( + dag: ChainDAGRef, state: ForkyHashedBeaconState, slot: Slot): Opt[BlockId] = + if slot >= state.data.slot: + Opt.some state.latest_block_id + elif state.data.slot <= slot + SLOTS_PER_HISTORICAL_ROOT: + dag.getBlockId(state.data.get_block_root_at_slot(slot)) + else: + Opt.none(BlockId) + +# adapted from nimbus-eth2 + +# proc updateBeaconMetrics( +# state: ForkedHashedBeaconState, bid: BlockId, cache: var StateCache) = + # https://github.com/ethereum/beacon-metrics/blob/master/metrics.md#additional-metrics + # both non-negative, so difference can't overflow or underflow int64 + + # beacon_head_root.set(bid.root.toGaugeValue) + # beacon_head_slot.set(bid.slot.toGaugeValue) + + # withState(state): + # beacon_pending_deposits.set( + # (forkyState.data.eth1_data.deposit_count - + # forkyState.data.eth1_deposit_index).toGaugeValue) + # beacon_processed_deposits_total.set( + # forkyState.data.eth1_deposit_index.toGaugeValue) + + # beacon_current_justified_epoch.set( + # forkyState.data.current_justified_checkpoint.epoch.toGaugeValue) + # beacon_current_justified_root.set( + # forkyState.data.current_justified_checkpoint.root.toGaugeValue) + # beacon_previous_justified_epoch.set( + # forkyState.data.previous_justified_checkpoint.epoch.toGaugeValue) + # beacon_previous_justified_root.set( + # forkyState.data.previous_justified_checkpoint.root.toGaugeValue) + # beacon_finalized_epoch.set( + # forkyState.data.finalized_checkpoint.epoch.toGaugeValue) + # beacon_finalized_root.set( + # forkyState.data.finalized_checkpoint.root.toGaugeValue) + + # let active_validators = count_active_validators( + # forkyState.data, forkyState.data.slot.epoch, cache).toGaugeValue + # beacon_active_validators.set(active_validators) + # beacon_current_active_validators.set(active_validators) + +# import blockchain_dag_light_client + +# export +# blockchain_dag_light_client.getLightClientBootstrap, +# blockchain_dag_light_client.getLightClientUpdateForPeriod, +# blockchain_dag_light_client.getLightClientFinalityUpdate, +# blockchain_dag_light_client.getLightClientOptimisticUpdate + +proc putState(dag: ChainDAGRef, state: ForkedHashedBeaconState, bid: BlockId) = + # Store a state and its root + let slot = getStateField(state, slot) + logScope: + blck = shortLog(bid) + stateSlot = shortLog(slot) + stateRoot = shortLog(getStateRoot(state)) + + if not dag.isStateCheckpoint(BlockSlotId.init(bid, slot)): + return + + # Don't consider legacy tables here, they are slow to read so we'll want to + # rewrite things in the new table anyway. 
+ if dag.db.containsState( + dag.cfg.consensusForkAtEpoch(slot.epoch), getStateRoot(state), + legacy = false): + return + + let startTick = Moment.now() + # Ideally we would save the state and the root lookup cache in a single + # transaction to prevent database inconsistencies, but the state loading code + # is resilient against one or the other going missing + withState(state): + dag.db.putState(forkyState) + + debug "Stored state", putStateDur = Moment.now() - startTick + +proc advanceSlots*( + dag: ChainDAGRef, state: var ForkedHashedBeaconState, slot: Slot, save: bool, + cache: var StateCache, info: var ForkedEpochInfo) = + # Given a state, advance it zero or more slots by applying empty slot + # processing - the state must be positioned at or before `slot` + doAssert getStateField(state, slot) <= slot + + let stateBid = state.latest_block_id + while getStateField(state, slot) < slot: + let + preEpoch = getStateField(state, slot).epoch + + loadStateCache(dag, cache, stateBid, getStateField(state, slot).epoch) + + process_slots( + dag.cfg, state, getStateField(state, slot) + 1, cache, info, + dag.updateFlags).expect("process_slots shouldn't fail when state slot is correct") + if save: + dag.putState(state, stateBid) + + # The reward information in the state transition is computed for epoch + # transitions - when transitioning into epoch N, the activities in epoch + # N-2 are translated into balance updates, and this is what we capture + # in the monitor. This may be inaccurate during a deep reorg (>1 epoch) + # which is an acceptable tradeoff for monitoring. + withState(state): + let postEpoch = forkyState.data.slot.epoch + if preEpoch != postEpoch and postEpoch >= 2: + var proposers: array[SLOTS_PER_EPOCH, Opt[ValidatorIndex]] + let epochRef = dag.findEpochRef(stateBid, postEpoch - 2) + if epochRef.isSome(): + proposers = epochRef[][].beacon_proposers + + dag.validatorMonitor[].registerEpochInfo( + forkyState.data, proposers, info) + +proc applyBlock( + dag: ChainDAGRef, state: var ForkedHashedBeaconState, bid: BlockId, + cache: var StateCache, info: var ForkedEpochInfo): Result[void, cstring] = + loadStateCache(dag, cache, bid, getStateField(state, slot).epoch) + + discard case dag.cfg.consensusForkAtEpoch(bid.slot.epoch) + of ConsensusFork.Phase0: + let data = getBlock(dag, bid, phase0.TrustedSignedBeaconBlock).valueOr: + return err("Block load failed") + ? state_transition( + dag.cfg, state, data, cache, info, + dag.updateFlags + {slotProcessed}, noRollback) + of ConsensusFork.Altair: + let data = getBlock(dag, bid, altair.TrustedSignedBeaconBlock).valueOr: + return err("Block load failed") + ? state_transition( + dag.cfg, state, data, cache, info, + dag.updateFlags + {slotProcessed}, noRollback) + of ConsensusFork.Bellatrix: + let data = getBlock(dag, bid, bellatrix.TrustedSignedBeaconBlock).valueOr: + return err("Block load failed") + ? state_transition( + dag.cfg, state, data, cache, info, + dag.updateFlags + {slotProcessed}, noRollback) + of ConsensusFork.Capella: + let data = getBlock(dag, bid, capella.TrustedSignedBeaconBlock).valueOr: + return err("Block load failed") + ? state_transition( + dag.cfg, state, data, cache, info, + dag.updateFlags + {slotProcessed}, noRollback) + of ConsensusFork.Deneb: + let data = getBlock(dag, bid, deneb.TrustedSignedBeaconBlock).valueOr: + return err("Block load failed") + ? 
state_transition( + dag.cfg, state, data, cache, info, + dag.updateFlags + {slotProcessed}, noRollback) + of ConsensusFork.Electra: + let data = getBlock(dag, bid, electra.TrustedSignedBeaconBlock).valueOr: + return err("Block load failed") + ? state_transition( + dag.cfg, state, data, cache, info, + dag.updateFlags + {slotProcessed}, noRollback) + + ok() + +## NOTE: Adapted from nimbus-eth2/beacon_chain/consensus_object_pools/blockchain_dag.nim +## removed lightclient initialization +proc init*(T: type ChainDAGRef, cfg: RuntimeConfig, db: BeaconChainDB, + validatorMonitor: ref ValidatorMonitor, updateFlags: UpdateFlags, + eraPath = ".", + onBlockCb: OnBlockCallback = nil, onHeadCb: OnHeadCallback = nil, + onReorgCb: OnReorgCallback = nil, onFinCb: OnFinalizedCallback = nil, + vanityLogs = default(VanityLogs) + # lcDataConfig = default(LightClientDataConfig) + ): ChainDAGRef = + cfg.checkForkConsistency() + + doAssert updateFlags - {strictVerification} == {}, + "Other flags not supported in ChainDAG" + + # TODO we require that the db contains both a head and a tail block - + # asserting here doesn't seem like the right way to go about it however.. + + # Tail is the first block for which we can construct a state - either + # genesis or a checkpoint + let + startTick = Moment.now() + genesisRoot = db.getGenesisBlock() + tailRoot = db.getTailBlock().expect( + "preInit should have initialized the database with a tail block root") + tail = db.getBlockId(tailRoot).expect( + "tail block summary in database, database corrupt?") + headRoot = db.getHeadBlock().expect("head root, database corrupt?") + head = db.getBlockId(headRoot).expect("head block id, database corrupt?") + + # Have to be careful with this instance, it is not yet fully initialized so + # as to avoid having to allocate a separate "init" state + dag = ChainDAGRef( + db: db, + validatorMonitor: validatorMonitor, + genesis: genesisRoot.map( + proc(x: auto): auto = BlockId(root: x, slot: GENESIS_SLOT)), + tail: tail, + + # The only allowed flag right now is strictVerification, as the others all + # allow skipping some validation. + updateFlags: updateFlags * {strictVerification}, + cfg: cfg, + + vanityLogs: vanityLogs, + + # NOTE: commented from original file + # lcDataStore: initLightClientDataStore( + # lcDataConfig, cfg, db.getLightClientDataDB()), + + onBlockAdded: onBlockCb, + onHeadChanged: onHeadCb, + onReorgHappened: onReorgCb, + onFinHappened: onFinCb, + ) + loadTick = Moment.now() + + var + headRef, curRef: BlockRef + + # When starting from a checkpoint with an empty block, we'll store the state + # "ahead" of the head slot - this slot would be considered finalized + slot = max(head.slot, (tail.slot.epoch + 1).start_slot) + # To know the finalized checkpoint of the head, we need to recreate its + # state - the tail is implicitly finalized, and if we have a finalized block + # table, that provides another hint + finalizedSlot = db.finalizedBlocks.high.get(tail.slot) + cache: StateCache + foundHeadState = false + headBlocks: seq[BlockRef] + + # Load head -> finalized, or all summaries in case the finalized block table + # hasn't been written yet + for blck in db.getAncestorSummaries(head.root): + # The execution block root gets filled in as needed. Nonfinalized Bellatrix + # and later blocks are loaded as optimistic, which gets adjusted that first + # `VALID` fcU from an EL plus markBlockVerified. Pre-merge blocks still get + # marked as `VALID`. 
+ let newRef = BlockRef.init( + blck.root, Opt.none Eth2Digest, executionValid = false, + blck.summary.slot) + if headRef == nil: + headRef = newRef + + if curRef != nil: + link(newRef, curRef) + + curRef = newRef + + dag.forkBlocks.incl(KeyedBlockRef.init(curRef)) + + if not foundHeadState: + foundHeadState = db.getState( + cfg, blck.root, blck.summary.slot..slot, dag.headState, noRollback) + slot = blck.summary.slot + + if not foundHeadState: + # When the database has been written with a pre-fork version of the + # software, it may happen that blocks produced using an "unforked" + # chain get written to the database - we need to skip such blocks + # when loading the database with a fork-compatible version + if containsBlock(cfg, db, curRef.slot, curRef.root): + headBlocks.add curRef + else: + if headBlocks.len > 0: + fatal "Missing block needed to create head state, database corrupt?", + curRef = shortLog(curRef) + quit 1 + # Without the block data we can't form a state for this root, so + # we'll need to move the head back + headRef = nil + dag.forkBlocks.excl(KeyedBlockRef.init(curRef)) + + if curRef.slot <= finalizedSlot: + # Only non-finalized slots get a `BlockRef` + break + + let summariesTick = Moment.now() + + if not foundHeadState: + if not dag.getStateByParent(curRef.bid, dag.headState): + fatal "Could not load head state, database corrupt?", + head = shortLog(head), tail = shortLog(dag.tail) + quit 1 + + block: + # EpochRef needs an epoch boundary state + assign(dag.epochRefState, dag.headState) + + var info: ForkedEpochInfo + + while headBlocks.len > 0: + dag.applyBlock( + dag.headState, headBlocks.pop().bid, cache, + info).expect("head blocks should apply") + + dag.head = headRef + dag.heads = @[headRef] + + withState(dag.headState): + when consensusFork >= ConsensusFork.Altair: + dag.headSyncCommittees = forkyState.data.get_sync_committee_cache(cache) + + assign(dag.clearanceState, dag.headState) + + if dag.headState.latest_block_root == tail.root: + # In case we started from a checkpoint with an empty slot + finalizedSlot = getStateField(dag.headState, slot) + + finalizedSlot = + max( + finalizedSlot, + getStateField(dag.headState, finalized_checkpoint).epoch.start_slot) + + let + configFork = case dag.headState.kind + of ConsensusFork.Phase0: genesisFork(cfg) + of ConsensusFork.Altair: altairFork(cfg) + of ConsensusFork.Bellatrix: bellatrixFork(cfg) + of ConsensusFork.Capella: capellaFork(cfg) + of ConsensusFork.Deneb: denebFork(cfg) + of ConsensusFork.Electra: electraFork(cfg) + stateFork = getStateField(dag.headState, fork) + + # Here, we check only the `current_version` field because the spec + # mandates that testnets starting directly from a particular fork + # should have `previous_version` set to `current_version` while + # this doesn't happen to be the case in network that go through + # regular hard-fork upgrades. 
See for example: + # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/specs/bellatrix/beacon-chain.md#testing + if stateFork.current_version != configFork.current_version: + error "State from database does not match network, check --network parameter", + tail = dag.tail, headRef, stateFork, configFork + quit 1 + + # Need to load state to find genesis validators root, before loading era db + dag.era = EraDB.new( + cfg, eraPath, getStateField(dag.headState, genesis_validators_root)) + + # We used an interim finalizedHead while loading the head state above - now + # that we have loaded the dag up to the finalized slot, we can also set + # finalizedHead to its real value + dag.finalizedHead = headRef.atSlot(finalizedSlot) + dag.lastPrunePoint = dag.finalizedHead.toBlockSlotId().expect("not nil") + + doAssert dag.finalizedHead.blck != nil, + "The finalized head should exist at the slot" + + block: # Top up finalized blocks + if db.finalizedBlocks.high.isNone or + db.finalizedBlocks.high.get() < dag.finalizedHead.blck.slot: + # Versions prior to 1.7.0 did not store finalized blocks in the + # database, and / or the application might have crashed between the head + # and finalized blocks updates. + info "Loading finalized blocks", + finHigh = db.finalizedBlocks.high, + finalizedHead = shortLog(dag.finalizedHead) + + var + newFinalized: seq[BlockId] + tmp = dag.finalizedHead.blck + while tmp.parent != nil: + newFinalized.add(tmp.bid) + let p = tmp.parent + tmp.parent = nil + tmp = p + + for blck in db.getAncestorSummaries(tmp.root): + if db.finalizedBlocks.high.isSome and + blck.summary.slot <= db.finalizedBlocks.high.get: + break + + newFinalized.add(BlockId(slot: blck.summary.slot, root: blck.root)) + + db.updateFinalizedBlocks(newFinalized) + + doAssert dag.finalizedHead.blck.parent == nil, + "The finalized head is the last BlockRef with a parent" + + block: + let finalized = db.finalizedBlocks.get(db.finalizedBlocks.high.get()).expect( + "tail at least") + if finalized != dag.finalizedHead.blck.root: + error "Head does not lead to finalized block, database corrupt?", + head = shortLog(head), finalizedHead = shortLog(dag.finalizedHead), + tail = shortLog(dag.tail), finalized = shortLog(finalized) + quit 1 + + dag.backfill = block: + let backfillSlot = db.finalizedBlocks.low.expect("tail at least") + if backfillSlot <= dag.horizon: + # Backfill done, no need to load anything + BeaconBlockSummary() + elif backfillSlot < dag.tail.slot: + let backfillRoot = db.finalizedBlocks.get(backfillSlot).expect( + "low to be loadable") + + db.getBeaconBlockSummary(backfillRoot).expect( + "Backfill block must have a summary: " & $backfillRoot) + elif dag.containsBlock(dag.tail): + db.getBeaconBlockSummary(dag.tail.root).expect( + "Tail block must have a summary: " & $dag.tail.root) + else: + # Checkpoint sync, checkpoint block unavailable + BeaconBlockSummary( + slot: dag.tail.slot + 1, + parent_root: dag.tail.root) + + dag.forkDigests = newClone ForkDigests.init( + cfg, getStateField(dag.headState, genesis_validators_root)) + + withState(dag.headState): + dag.validatorMonitor[].registerState(forkyState.data) + + # updateBeaconMetrics(dag.headState, dag.head.bid, cache) + + let finalizedTick = Moment.now() + + if dag.backfill.slot > GENESIS_SLOT: # Try frontfill from era files + let backfillSlot = dag.backfill.slot - 1 + dag.frontfillBlocks = newSeqOfCap[Eth2Digest](backfillSlot.int) + + let + historical_roots = getStateField(dag.headState, historical_roots).asSeq() + historical_summaries = 
dag.headState.historical_summaries.asSeq() + + var + blocks = 0 + + # Here, we'll build up the slot->root mapping in memory for the range of + # blocks from genesis to backfill, if possible. + for bid in dag.era.getBlockIds( + historical_roots, historical_summaries, Slot(0), Eth2Digest()): + # If backfill has not yet started, the backfill slot itself also needs + # to be served from era files. Checkpoint sync starts from state only + if bid.slot > backfillSlot or + (bid.slot == backfillSlot and bid.root != dag.tail.root): + # If we end up in here, we failed the root comparison just below in + # an earlier iteration + fatal "Era summaries don't lead up to backfill, database or era files corrupt?", + bid, backfillSlot + quit 1 + + # In BeaconState.block_roots, empty slots are filled with the root of + # the previous block - in our data structure, we use a zero hash instead + dag.frontfillBlocks.setLen(bid.slot.int + 1) + dag.frontfillBlocks[bid.slot.int] = bid.root + + if bid.root == dag.backfill.parent_root: + # We've reached the backfill point, meaning blocks are available + # in the sqlite database from here onwards - remember this point in + # time so that we can write summaries to the database - it's a lot + # faster to load from database than to iterate over era files with + # the current naive era file reader. + reset(dag.backfill) + + dag.updateFrontfillBlocks() + + break + + blocks += 1 + + if blocks > 0: + info "Front-filled blocks from era files", blocks, backfillSlot + + let frontfillTick = Moment.now() + + # Fill validator key cache in case we're loading an old database that doesn't + # have a cache + dag.updateValidatorKeys(getStateField(dag.headState, validators).asSeq()) + + # Initialize pruning such that when starting with a database that hasn't been + # pruned, we work our way from the tail to the horizon in incremental steps + dag.lastHistoryPruneHorizon = dag.horizon() + dag.lastHistoryPruneBlockHorizon = block: + let boundary = min(dag.tail.slot, dag.horizon()) + if boundary.epoch() >= EPOCHS_PER_STATE_SNAPSHOT: + start_slot(boundary.epoch() - EPOCHS_PER_STATE_SNAPSHOT) + else: + Slot(0) + + info "Block DAG initialized", + head = shortLog(dag.head), + finalizedHead = shortLog(dag.finalizedHead), + tail = shortLog(dag.tail), + backfill = shortLog(dag.backfill), + + loadDur = loadTick - startTick, + summariesDur = summariesTick - loadTick, + finalizedDur = finalizedTick - summariesTick, + frontfillDur = frontfillTick - finalizedTick, + keysDur = Moment.now() - frontfillTick + + dag.initLightClientDataCache() + + dag + +template genesis_validators_root*(dag: ChainDAGRef): Eth2Digest = + getStateField(dag.headState, genesis_validators_root) + +proc genesisBlockRoot*(dag: ChainDAGRef): Eth2Digest = + dag.db.getGenesisBlock().expect("DB must be initialized with genesis block") + +func getEpochRef*( + dag: ChainDAGRef, state: ForkedHashedBeaconState, cache: var StateCache): EpochRef = + ## Get a cached `EpochRef` or construct one based on the given state - always + ## returns an EpochRef instance + let + bid = state.latest_block_id + epoch = state.get_current_epoch() + + dag.findEpochRef(bid, epoch).valueOr: + let res = EpochRef.init(dag, state, cache) + dag.putEpochRef(res) + res + +proc getEpochRef*( + dag: ChainDAGRef, bid: BlockId, epoch: Epoch, + preFinalized: bool): Result[EpochRef, cstring] = + ## Return a cached EpochRef or construct one from the database, if possible - + ## returns `none` on failure. 
+ ## + ## When `preFinalized` is true, include epochs from before the finalized + ## checkpoint in the search - this potentially can result in long processing + ## times due to state replays. + ## + ## Requests for epochs >= dag.finalizedHead.slot.epoch always return an + ## instance. One must be careful to avoid race conditions in `async` code + ## where the finalized head might change during an `await`. + ## + ## Requests for epochs < dag.finalizedHead.slot.epoch may fail, either because + ## the search was limited by the `preFinalized` flag or because state history + ## has been pruned - `none` will be returned in this case. + if not preFinalized and epoch < dag.finalizedHead.slot.epoch: + return err("Requesting pre-finalized EpochRef") + + if bid.slot < dag.tail.slot or epoch < dag.tail.slot.epoch: + return err("Requesting EpochRef for pruned state") + + let epochRef = dag.findEpochRef(bid, epoch) + if epochRef.isOk(): + # adapted from nimbus-eth2 + # beacon_state_data_cache_hits.inc + return ok epochRef.get() + + # beacon_state_data_cache_misses.inc + + let + ancestor = dag.epochAncestor(bid, epoch).valueOr: + # If we got in here, the bid must be unknown or we would have gotten + # _some_ ancestor (like the tail) + return err("Requesting EpochRef for non-canonical block") + + var cache: StateCache + if not updateState(dag, dag.epochRefState, ancestor, false, cache): + return err("Could not load requested state") + + ok(dag.getEpochRef(dag.epochRefState, cache)) + +proc getEpochRef*( + dag: ChainDAGRef, blck: BlockRef, epoch: Epoch, + preFinalized: bool): Result[EpochRef, cstring] = + dag.getEpochRef(blck.bid, epoch, preFinalized) + +proc getFinalizedEpochRef*(dag: ChainDAGRef): EpochRef = + dag.getEpochRef( + dag.finalizedHead.blck, dag.finalizedHead.slot.epoch, false).expect( + "getEpochRef for finalized head should always succeed") + +proc ancestorSlot*( + dag: ChainDAGRef, state: ForkyHashedBeaconState, bid: BlockId, + lowSlot: Slot): Opt[Slot] = + ## Return common ancestor slot of `bid` and `state`, if at least `lowSlot`. + ## Return `none` if no common ancestor is found with slot >= `lowSlot`. + if state.data.slot < lowSlot or bid.slot < lowSlot: + return Opt.none(Slot) + + var stateBid = ? dag.getBlockIdAtSlot(state, bid.slot) + if stateBid.slot < lowSlot: + return Opt.none(Slot) + + var blockBid = (? dag.atSlot(bid, stateBid.slot)).bid + if blockBid.slot < lowSlot: + return Opt.none(Slot) + + while stateBid != blockBid: + if stateBid.slot >= blockBid.slot: + stateBid = ? dag.getBlockIdAtSlot( + state, min(blockBid.slot, stateBid.slot - 1)) + if stateBid.slot < lowSlot: + return Opt.none(Slot) + else: + blockBid = ? dag.parent(blockBid) + if blockBid.slot < lowSlot: + return Opt.none(Slot) + + Opt.some stateBid.slot + +proc computeRandaoMix( + bdata: ForkedTrustedSignedBeaconBlock): Opt[Eth2Digest] = + ## Compute the requested RANDAO mix for `bdata` without `state`, if possible. + withBlck(bdata): + when consensusFork >= ConsensusFork.Bellatrix: + if forkyBlck.message.is_execution_block: + var mix = eth2digest(forkyBlck.message.body.randao_reveal.toRaw()) + mix.data.mxor forkyBlck.message.body.execution_payload.prev_randao.data + return ok mix + Opt.none(Eth2Digest) + +proc computeRandaoMix*( + dag: ChainDAGRef, state: ForkyHashedBeaconState, bid: BlockId, + lowSlot: Slot): Opt[Eth2Digest] = + ## Compute the requested RANDAO mix for `bid` based on `state`. + ## Return `none` if `state` and `bid` do not share a common ancestor + ## with slot >= `lowSlot`. + let ancestorSlot = ? 
dag.ancestorSlot(state, bid, lowSlot) + doAssert ancestorSlot <= state.data.slot + doAssert ancestorSlot <= bid.slot + + # If `blck` is post merge, RANDAO information is immediately available + let + bdata = ? dag.getForkedBlock(bid) + fullMix = computeRandaoMix(bdata) + if fullMix.isSome: + return fullMix + + # RANDAO mix has to be recomputed from `bid` and `state` + var mix {.noinit.}: Eth2Digest + proc mixToAncestor(highBid: BlockId): Opt[void] = + ## Mix in/out RANDAO reveals back to `ancestorSlot` + var bid = highBid + while bid.slot > ancestorSlot: + let bdata = ? dag.getForkedBlock(bid) + withBlck(bdata): # See `process_randao` / `process_randao_mixes_reset` + mix.data.mxor eth2digest( + forkyBlck.message.body.randao_reveal.toRaw()).data + bid = ? dag.parent(bid) + ok() + + # Mix in RANDAO from `bid` + if ancestorSlot < bid.slot: + withBlck(bdata): + mix = eth2digest(forkyBlck.message.body.randao_reveal.toRaw()) + ? mixToAncestor(? dag.parent(bid)) + else: + mix.reset() + + # Mix in RANDAO from `state` + let ancestorEpoch = ancestorSlot.epoch + if ancestorEpoch + EPOCHS_PER_HISTORICAL_VECTOR <= state.data.slot.epoch: + return Opt.none(Eth2Digest) + let mixRoot = state.dependent_root(ancestorEpoch + 1) + if mixRoot.isZero: + return Opt.none(Eth2Digest) + ? mixToAncestor(? dag.getBlockId(mixRoot)) + mix.data.mxor state.data.get_randao_mix(ancestorEpoch).data + + ok mix + +proc computeRandaoMixFromMemory*( + dag: ChainDAGRef, bid: BlockId, lowSlot: Slot): Opt[Eth2Digest] = + ## Compute requested RANDAO mix for `bid` from available states (~5 ms). + template tryWithState(state: ForkedHashedBeaconState) = + block: + withState(state): + let mix = dag.computeRandaoMix(forkyState, bid, lowSlot) + if mix.isSome: + return mix + tryWithState dag.headState + tryWithState dag.epochRefState + tryWithState dag.clearanceState + +proc computeRandaoMixFromDatabase*( + dag: ChainDAGRef, bid: BlockId, lowSlot: Slot): Opt[Eth2Digest] = + ## Compute requested RANDAO mix for `bid` using closest DB state (~500 ms). + let state = assignClone(dag.headState) + ? dag.getNearbyState(state[], bid, lowSlot) + withState(state[]): + dag.computeRandaoMix(forkyState, bid, lowSlot) + +proc computeRandaoMix( + dag: ChainDAGRef, bid: BlockId, lowSlot: Slot): Opt[Eth2Digest] = + # Try to compute from states available in memory + let mix = dag.computeRandaoMixFromMemory(bid, lowSlot) + if mix.isSome: + return mix + + # If `blck` is post merge, RANDAO information is immediately available + let + bdata = ? dag.getForkedBlock(bid) + fullMix = computeRandaoMix(bdata) + if fullMix.isSome: + return fullMix + + # Fall back to database + dag.computeRandaoMixFromDatabase(bid, lowSlot) + +proc computeRandaoMix*(dag: ChainDAGRef, bid: BlockId): Opt[Eth2Digest] = + ## Compute requested RANDAO mix for `bid`. + const maxSlotDistance = SLOTS_PER_HISTORICAL_ROOT + let lowSlot = max(bid.slot, maxSlotDistance.Slot) - maxSlotDistance + dag.computeRandaoMix(bid, lowSlot) + +proc lowSlotForAttesterShuffling*(epoch: Epoch): Slot = + ## Return minimum slot that a state must share ancestry with a block history + ## so that RANDAO at `epoch.attester_dependent_slot` can be computed. + + # A state must be somewhat recent so that `get_active_validator_indices` + # for the queried `epoch` cannot be affected by any such skipped processing. 
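+  # Illustrative arithmetic (editor's note; assumes mainnet presets where
+  # MAX_SEED_LOOKAHEAD = 4 and SLOTS_PER_EPOCH = 32): per the spec,
+  # compute_activation_exit_epoch(GENESIS_EPOCH) = 0 + 1 + MAX_SEED_LOOKAHEAD = 5,
+  # so numDelayEpochs = 5 below and the result is start_slot(max(epoch, 4) - 4).
+  # For example:
+  #   doAssert lowSlotForAttesterShuffling(Epoch(100)) == Slot(96 * 32)  # 3072
+  #   doAssert lowSlotForAttesterShuffling(Epoch(3)) == Slot(0)          # clamped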
+ const numDelayEpochs = compute_activation_exit_epoch(GENESIS_EPOCH).uint64 + let lowEpoch = max(epoch, (numDelayEpochs - 1).Epoch) - (numDelayEpochs - 1) + lowEpoch.start_slot + +proc computeShufflingRef*( + dag: ChainDAGRef, state: ForkyHashedBeaconState, + blck: BlockRef, epoch: Epoch): Opt[ShufflingRef] = + ## Compute `ShufflingRef` for `blck@epoch` based on `state`. + ## If `state` has unviable `get_active_validator_indices`, return `none`. + + let + dependentBid = (? dag.atSlot(blck.bid, epoch.attester_dependent_slot)).bid + lowSlot = epoch.lowSlotForAttesterShuffling + mix = ? dag.computeRandaoMix(state, dependentBid, lowSlot) + + return ok ShufflingRef( + epoch: epoch, + attester_dependent_root: dependentBid.root, + shuffled_active_validator_indices: + state.data.get_shuffled_active_validator_indices(epoch, mix)) + +proc computeShufflingRefFromMemory*( + dag: ChainDAGRef, blck: BlockRef, epoch: Epoch): Opt[ShufflingRef] = + ## Compute `ShufflingRef` from available states (~5 ms). + template tryWithState(state: ForkedHashedBeaconState) = + block: + withState(state): + let shufflingRef = dag.computeShufflingRef(forkyState, blck, epoch) + if shufflingRef.isOk: + return shufflingRef + tryWithState dag.headState + tryWithState dag.epochRefState + tryWithState dag.clearanceState + +proc getShufflingRef*( + dag: ChainDAGRef, blck: BlockRef, epoch: Epoch, + preFinalized: bool): Opt[ShufflingRef] = + ## Return the shuffling in the given history and epoch - this potentially is + ## faster than returning a full EpochRef because the shuffling is determined + ## an epoch in advance and therefore is less sensitive to reorgs + var shufflingRef = dag.findShufflingRef(blck.bid, epoch) + if shufflingRef.isSome: + return shufflingRef + + # Use existing states to quickly compute the shuffling + shufflingRef = dag.computeShufflingRefFromMemory(blck, epoch) + if shufflingRef.isSome: + dag.putShufflingRef(shufflingRef.get) + return shufflingRef + + # Last resort, this can take several seconds as this may replay states + let epochRef = dag.getEpochRef(blck, epoch, preFinalized).valueOr: + return Opt.none ShufflingRef + dag.putShufflingRef(epochRef.shufflingRef) + Opt.some epochRef.shufflingRef + +func stateCheckpoint*(dag: ChainDAGRef, bsi: BlockSlotId): BlockSlotId = + ## The first ancestor BlockSlot that is a state checkpoint + var bsi = bsi + while not dag.isStateCheckpoint(bsi): + if bsi.isProposed: + bsi.bid = dag.parent(bsi.bid).valueOr: + break + else: + bsi.slot = bsi.slot - 1 + bsi + +template forkAtEpoch*(dag: ChainDAGRef, epoch: Epoch): Fork = + forkAtEpoch(dag.cfg, epoch) + +proc getBlockRange*( + dag: ChainDAGRef, startSlot: Slot, skipStep: uint64, + output: var openArray[BlockId]): Natural = + ## This function populates an `output` buffer of blocks + ## with a slots ranging from `startSlot` up to, but not including, + ## `startSlot + skipStep * output.len`, skipping any slots that don't have + ## a block. + ## + ## Blocks will be written to `output` from the end without gaps, even if + ## a block is missing in a particular slot. The return value shows how + ## many slots were missing blocks - to iterate over the result, start + ## at this index. + ## + ## If there were no blocks in the range, `output.len` will be returned. 
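+  # Worked example (editor's note, not part of the upstream doc comment):
+  # with startSlot = 100, skipStep = 2 and a 3-element buffer, slots 100, 102
+  # and 104 are examined (assuming the head is at or past slot 104); if only
+  # slots 102 and 104 hold blocks they land in output[1] and output[2] and 1
+  # is returned, so the caller iterates from that index:
+  #   var buf = newSeq[BlockId](3)
+  #   let firstIdx = dag.getBlockRange(Slot(100), 2, buf)
+  #   for bid in buf[firstIdx ..^ 1]: discard # process the returned blocks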
+ let + requestedCount = output.lenu64 + headSlot = dag.head.slot + + trace "getBlockRange entered", + head = shortLog(dag.head.root), requestedCount, startSlot, skipStep, headSlot + + if startSlot < dag.backfill.slot: + debug "Got request for pre-backfill slot", + startSlot, backfillSlot = dag.backfill.slot, horizonSlot = dag.horizon + return output.len + + if headSlot <= startSlot or requestedCount == 0: + return output.len # Identical to returning an empty set of block as indicated above + + let + runway = uint64(headSlot - startSlot) + + # This is the number of blocks that will follow the start block + extraSlots = min(runway div skipStep, requestedCount - 1) + + # If `skipStep` is very large, `extraSlots` should be 0 from + # the previous line, so `endSlot` will be equal to `startSlot`: + endSlot = startSlot + extraSlots * skipStep + + var + curSlot = endSlot + o = output.len + + # Process all blocks that follow the start block (may be zero blocks) + while curSlot > startSlot: + let bs = dag.getBlockIdAtSlot(curSlot) + if bs.isSome and bs.get().isProposed(): + o -= 1 + output[o] = bs.get().bid + curSlot -= skipStep + + # Handle start slot separately (to avoid underflow when computing curSlot) + let bs = dag.getBlockIdAtSlot(startSlot) + if bs.isSome and bs.get().isProposed(): + o -= 1 + output[o] = bs.get().bid + + o # Return the index of the first non-nil item in the output + +proc updateState*( + dag: ChainDAGRef, state: var ForkedHashedBeaconState, bsi: BlockSlotId, + save: bool, cache: var StateCache): bool = + ## Rewind or advance state such that it matches the given block and slot - + ## this may include replaying from an earlier snapshot if blck is on a + ## different branch or has advanced to a higher slot number than slot + ## If `bs.slot` is higher than `bs.blck.slot`, `updateState` will fill in + ## with empty/non-block slots + + # First, see if we're already at the requested block. 
If we are, also check + # that the state has not been advanced past the desired block - if it has, + # an earlier state must be loaded since there's no way to undo the slot + # transitions + + let + startTick = Moment.now() + current {.used.} = withState(state): + BlockSlotId.init(forkyState.latest_block_id, forkyState.data.slot) + + var + ancestors: seq[BlockId] + found = false + + template exactMatch(state: ForkedHashedBeaconState, bsi: BlockSlotId): bool = + # The block is the same and we're at an early enough slot - the state can + # be used to arrive at the desired blockslot + state.matches_block_slot(bsi.bid.root, bsi.slot) + + template canAdvance(state: ForkedHashedBeaconState, bsi: BlockSlotId): bool = + # The block is the same and we're at an early enough slot - the state can + # be used to arrive at the desired blockslot + state.can_advance_slots(bsi.bid.root, bsi.slot) + + # Fast path: check all caches for an exact match - this is faster than + # advancing a state where there's epoch processing to do, by a wide margin - + # it also avoids `hash_tree_root` for slot processing + if exactMatch(state, bsi): + found = true + elif not save: + # When required to save states, we cannot rely on the caches because that + # would skip the extra processing that save does - not all information that + # goes into the database is cached + if exactMatch(dag.headState, bsi): + assign(state, dag.headState) + found = true + elif exactMatch(dag.clearanceState, bsi): + assign(state, dag.clearanceState) + found = true + elif exactMatch(dag.epochRefState, bsi): + assign(state, dag.epochRefState) + found = true + + const RewindBlockThreshold = 64 + + if not found: + # No exact match found - see if any in-memory state can be used as a base + # onto which we can apply a few blocks - there's a tradeoff here between + # loading the state from disk and performing the block applications + var cur = bsi + while ancestors.len < RewindBlockThreshold: + if isZero(cur.bid.root): # tail reached + break + + if canAdvance(state, cur): # Typical case / fast path when there's no reorg + found = true + break + + if not save: # see above + if canAdvance(dag.headState, cur): + assign(state, dag.headState) + found = true + break + + if canAdvance(dag.clearanceState, cur): + assign(state, dag.clearanceState) + found = true + break + + if canAdvance(dag.epochRefState, cur): + assign(state, dag.epochRefState) + found = true + break + + if cur.isProposed(): + # This is not an empty slot, so the block will need to be applied to + # eventually reach bs + ancestors.add(cur.bid) + + # Move slot by slot to capture epoch boundary states + cur = dag.parentOrSlot(cur).valueOr: + break + + if not found: + debug "UpdateStateData cache miss", + current = shortLog(current), target = shortLog(bsi) + + # Either the state is too new or was created by applying a different block. + # We'll now resort to loading the state from the database then reapplying + # blocks until we reach the desired point in time. + + var cur = bsi + ancestors.setLen(0) + + # Look for a state in the database and load it - as long as it cannot be + # found, keep track of the blocks that are needed to reach it from the + # state that eventually will be found. 
+ # If we hit the tail, it means that we've reached a point for which we can + # no longer recreate history - this happens for example when starting from + # a checkpoint block + let startEpoch = bsi.slot.epoch + while not canAdvance(state, cur) and + not dag.db.getState(dag.cfg, cur.bid.root, cur.slot, state, noRollback): + # There's no state saved for this particular BlockSlot combination, and + # the state we have can't trivially be advanced (in case it was older than + # RewindBlockThreshold), keep looking.. + if cur.isProposed(): + # This is not an empty slot, so the block will need to be applied to + # eventually reach bs + ancestors.add(cur.bid) + + if cur.slot == GENESIS_SLOT or (cur.slot < dag.finalizedHead.slot and + cur.slot.epoch + uint64(EPOCHS_PER_STATE_SNAPSHOT) * 2 < startEpoch): + # We've either walked two full state snapshot lengths or hit the tail + # and still can't find a matching state: this can happen when + # starting the node from an arbitrary finalized checkpoint and not + # backfilling the states + notice "Request for pruned historical state", + request = shortLog(bsi), tail = shortLog(dag.tail), + cur = shortLog(cur), finalized = shortLog(dag.finalizedHead) + return false + + # Move slot by slot to capture epoch boundary states + cur = dag.parentOrSlot(cur).valueOr: + if not dag.getStateByParent(cur.bid, state): + notice "Request for pruned historical state", + request = shortLog(bsi), tail = shortLog(dag.tail), + cur = shortLog(cur) + return false + break + + # beacon_state_rewinds.inc() + + # Starting state has been assigned, either from memory or database + let + assignTick = Moment.now() + ancestor {.used.} = withState(state): + BlockSlotId.init(forkyState.latest_block_id, forkyState.data.slot) + ancestorRoot {.used.} = getStateRoot(state) + + var info: ForkedEpochInfo + # Time to replay all the blocks between then and now + for i in countdown(ancestors.len - 1, 0): + # Because the ancestors are in the database, there's no need to persist them + # again. Also, because we're applying blocks that were loaded from the + # database, we can skip certain checks that have already been performed + # before adding the block to the database. 
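+    # (Editor's note: each iteration below performs a full state transition
+    # for one block that was previously accepted into the database, which is
+    # why checks already done at acceptance time - such as signature
+    # verification - can be skipped here.)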
+ if (let res = dag.applyBlock(state, ancestors[i], cache, info); res.isErr): + warn "Failed to apply block from database", + blck = shortLog(ancestors[i]), + state_bid = shortLog(state.latest_block_id), + error = res.error() + + return false + + # ...and make sure to process empty slots as requested + dag.advanceSlots(state, bsi.slot, save, cache, info) + + # ...and make sure to load the state cache, if it exists + loadStateCache(dag, cache, bsi.bid, getStateField(state, slot).epoch) + + let + assignDur = assignTick - startTick + replayDur = Moment.now() - assignTick + beacon_dag_state_replay_seconds.inc(replayDur.toFloatSeconds) + + # TODO https://github.com/status-im/nim-chronicles/issues/108 + if (assignDur + replayDur) >= MinSignificantProcessingDuration: + # This might indicate there's a cache that's not in order or a disk that is + # too slow - for now, it's here for investigative purposes and the cutoff + # time might need tuning + info "State replayed", + blocks = ancestors.len, + slots = getStateField(state, slot) - ancestor.slot, + current = shortLog(current), + ancestor = shortLog(ancestor), + target = shortLog(bsi), + ancestorStateRoot = shortLog(ancestorRoot), + targetStateRoot = shortLog(getStateRoot(state)), + found, + assignDur, + replayDur + elif ancestors.len > 0: + debug "State replayed", + blocks = ancestors.len, + slots = getStateField(state, slot) - ancestor.slot, + current = shortLog(current), + ancestor = shortLog(ancestor), + target = shortLog(bsi), + ancestorStateRoot = shortLog(ancestorRoot), + targetStateRoot = shortLog(getStateRoot(state)), + found, + assignDur, + replayDur + else: # Normal case! + trace "State advanced", + blocks = ancestors.len, + slots = getStateField(state, slot) - ancestor.slot, + current = shortLog(current), + ancestor = shortLog(ancestor), + target = shortLog(bsi), + ancestorStateRoot = shortLog(ancestorRoot), + targetStateRoot = shortLog(getStateRoot(state)), + found, + assignDur, + replayDur + + true + +proc delState(dag: ChainDAGRef, bsi: BlockSlotId) = + # Delete state and mapping for a particular block+slot + if not dag.isStateCheckpoint(bsi): + return # We only ever save epoch states + + if (let root = dag.db.getStateRoot(bsi.bid.root, bsi.slot); root.isSome()): + dag.db.withManyWrites: + dag.db.delStateRoot(bsi.bid.root, bsi.slot) + dag.db.delState( + dag.cfg.consensusForkAtEpoch(bsi.slot.epoch), root.get()) + +proc pruneBlockSlot(dag: ChainDAGRef, bs: BlockSlot) = + # TODO: should we move that disk I/O to `onSlotEnd` + dag.delState(bs.toBlockSlotId().expect("not nil")) + + if bs.isProposed(): + # Update light client data + # dag.deleteLightClientData(bs.blck.bid) + + bs.blck.executionValid = true + dag.forkBlocks.excl(KeyedBlockRef.init(bs.blck)) + discard dag.db.delBlock( + dag.cfg.consensusForkAtEpoch(bs.blck.slot.epoch), bs.blck.root) + +proc pruneBlocksDAG(dag: ChainDAGRef) = + ## This prunes the block DAG + ## This does NOT prune the cached state checkpoints and EpochRef + ## This must be done after a new finalization point is reached + ## to invalidate pending blocks or attestations referring + ## to a now invalid fork. + ## + ## This does NOT update the `dag.lastPrunePoint` field. + ## as the caches and fork choice can be pruned at a later time. 
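+  # (Editor's note: within this file the pruning sequence is driven by
+  # `updateHead` further below - it calls `pruneBlocksDAG` whenever the
+  # finalized head advances, while the state-checkpoint and EpochRef caches
+  # are pruned separately via `pruneStateCachesDAG`.)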
+ + # Clean up block refs, walking block by block + let startTick = Moment.now() + + # Finalization means that we choose a single chain as the canonical one - + # it also means we're no longer interested in any branches from that chain + # up to the finalization point + let hlen = dag.heads.len + for i in 0..= ConsensusFork.Altair: + let + period = sync_committee_period(slot) + curPeriod = sync_committee_period(forkyState.data.slot) + + if period == curPeriod: + @(dag.headSyncCommittees.current_sync_committee) + elif period == curPeriod + 1: + @(dag.headSyncCommittees.next_sync_committee) + else: @[] + else: + @[] + +func getSubcommitteePositionsAux( + dag: ChainDAGRef, + syncCommittee: openArray[ValidatorIndex], + subcommitteeIdx: SyncSubcommitteeIndex, + validatorIdx: uint64): seq[uint64] = + var pos = 0'u64 + for valIdx in syncCommittee.syncSubcommittee(subcommitteeIdx): + if validatorIdx == uint64(valIdx): + result.add pos + inc pos + +func getSubcommitteePositions*( + dag: ChainDAGRef, + slot: Slot, + subcommitteeIdx: SyncSubcommitteeIndex, + validatorIdx: uint64): seq[uint64] = + withState(dag.headState): + when consensusFork >= ConsensusFork.Altair: + let + period = sync_committee_period(slot) + curPeriod = sync_committee_period(forkyState.data.slot) + + template search(syncCommittee: openArray[ValidatorIndex]): seq[uint64] = + dag.getSubcommitteePositionsAux( + syncCommittee, subcommitteeIdx, validatorIdx) + + if period == curPeriod: + search(dag.headSyncCommittees.current_sync_committee) + elif period == curPeriod + 1: + search(dag.headSyncCommittees.next_sync_committee) + else: @[] + else: + @[] + +template syncCommitteeParticipants*( + dag: ChainDAGRef, + slot: Slot, + subcommitteeIdx: SyncSubcommitteeIndex): seq[ValidatorIndex] = + toSeq(syncSubcommittee(dag.syncCommitteeParticipants(slot), subcommitteeIdx)) + +iterator syncCommitteeParticipants*( + dag: ChainDAGRef, + slot: Slot, + subcommitteeIdx: SyncSubcommitteeIndex, + aggregationBits: SyncCommitteeAggregationBits): ValidatorIndex = + for pos, valIdx in dag.syncCommitteeParticipants(slot, subcommitteeIdx): + if pos < aggregationBits.bits and aggregationBits[pos]: + yield valIdx + +func needStateCachesAndForkChoicePruning*(dag: ChainDAGRef): bool = + dag.lastPrunePoint != dag.finalizedHead.toBlockSlotId().expect("not nil") + +proc pruneStateCachesDAG*(dag: ChainDAGRef) = + ## This prunes the cached state checkpoints and EpochRef + ## This does NOT prune the state associated with invalidated blocks on a fork + ## They are pruned via `pruneBlocksDAG` + ## + ## This updates the `dag.lastPrunePoint` variable + doAssert dag.needStateCachesAndForkChoicePruning() + let startTick = Moment.now() + block: # Remove states, walking slot by slot + # We remove all state checkpoints that come _before_ the current finalized + # head, as we might frequently be asked to replay states from the + # finalized checkpoint and onwards (for example when validating blocks and + # attestations) + var + finPoint = dag.finalizedHead.toBlockSlotId().expect("not nil") + cur = dag.parentOrSlot(dag.stateCheckpoint(finPoint)) + prev = dag.parentOrSlot(dag.stateCheckpoint(dag.lastPrunePoint)) + + while cur.isSome and prev.isSome and cur.get() != prev.get(): + let bs = cur.get() + if not isFinalizedStateSnapshot(bs.slot) and + bs.slot != dag.tail.slot: + dag.delState(bs) + let tmp = cur.get() + cur = dag.parentOrSlot(tmp) + + let statePruneTick = Moment.now() + + block: # Clean up old EpochRef instances + # After finalization, we can clear up the epoch cache 
and save memory - + # it will be recomputed if needed + dag.epochRefs.delIt(it.epoch < dag.finalizedHead.slot.epoch) + dag.shufflingRefs.delIt(it.epoch < dag.finalizedHead.slot.epoch) + + let epochRefPruneTick = Moment.now() + + dag.lastPrunePoint = dag.finalizedHead.toBlockSlotId().expect("not nil") + + debug "Pruned the state checkpoints and DAG caches.", + statePruneDur = statePruneTick - startTick, + epochRefPruneDur = epochRefPruneTick - statePruneTick + +func pruneStep(horizon, lastHorizon, lastBlockHorizon: Slot): + tuple[stateHorizon, blockHorizon: Slot] = + ## Compute a reasonable incremental pruning step considering the current + ## horizon, how far the database has been pruned already and where we want the + ## tail to be - the return value shows the first state and block that we + ## should _keep_ (inclusive). + + const SLOTS_PER_STATE_SNAPSHOT = + uint64(EPOCHS_PER_STATE_SNAPSHOT * SLOTS_PER_EPOCH) + + let + blockHorizon = block: + let + # Keep up with horizon if it's moving fast, ie if we're syncing + maxSlots = max(horizon - lastHorizon, MAX_SLOTS_PER_PRUNE) + + # Move the block horizon cap with a lag so that it moves slot-by-slot + # instead of a big jump every time we prune a state - assuming we + # prune every slot, this makes us prune one slot at a time instead of + # a burst of prunes (as computed by maxSlots) around every snapshot + # change followed by no pruning for the rest of the period + maxBlockHorizon = + if horizon + 1 >= SLOTS_PER_STATE_SNAPSHOT: + horizon + 1 - SLOTS_PER_STATE_SNAPSHOT + else: + Slot(0) + + # `lastBlockHorizon` captures the case where we're incrementally + # pruning a database that hasn't been pruned for a while: it's + # initialized to a pre-tail value on startup and moves to approach + # `maxBlockHorizon`. + min(maxBlockHorizon, lastBlockHorizon + maxSlots) + + # Round up such that we remove state only once blocks have been removed + stateHorizon = + ((blockHorizon + SLOTS_PER_STATE_SNAPSHOT - 1) div + SLOTS_PER_STATE_SNAPSHOT) * SLOTS_PER_STATE_SNAPSHOT + + (Slot(stateHorizon), blockHorizon) + +proc pruneHistory*(dag: ChainDAGRef, startup = false) = + ## Perform an incremental pruning step of the history + if dag.db.db.readOnly: + return + + let + horizon = dag.horizon() + (stateHorizon, blockHorizon) = pruneStep( + horizon, dag.lastHistoryPruneHorizon, dag.lastHistoryPruneBlockHorizon) + + doAssert blockHorizon <= stateHorizon, + "we must never prune blocks while leaving the state" + + debug "Pruning history", + horizon, blockHorizon, stateHorizon, + lastHorizon = dag.lastHistoryPruneHorizon, + lastBlockHorizon = dag.lastHistoryPruneBlockHorizon, + tail = dag.tail, head = dag.head + + dag.lastHistoryPruneHorizon = horizon + dag.lastHistoryPruneBlockHorizon = blockHorizon + + dag.db.withManyWrites: + if stateHorizon > dag.tail.slot: + # First, we want to see if it's possible to prune any states - we store one + # state every EPOCHS_PER_STATE_SNAPSHOT, so this happens infrequently. + + var + cur = dag.getBlockIdAtSlot(stateHorizon) + + var first = true + while cur.isSome(): + let bs = cur.get() + # We don't delete legacy states because the legacy database is openend + # in read-only and slow to delete from due to its sub-optimal structure + if dag.db.containsState( + dag.cfg, bs.bid.root, bs.slot..bs.slot, legacy = first): + if first: + # We leave the state on the prune horizon intact and update the tail + # to point to this state, indicating the new point in time from + # which we can load states in general. 
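+            # (Editor's note: after this update `dag.tail` no longer refers
+            # to the original tail block but to the snapshot state kept on
+            # the pruning horizon - states can only be loaded from this slot
+            # onwards.)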
+ debug "Updating tail", bs + dag.db.putTailBlock(bs.bid.root) + dag.tail = bs.bid + first = false + else: + debug "Pruning historical state", bs + dag.delState(bs) + elif not bs.isProposed: + trace "Reached already-pruned slot, done pruning states", bs + break + + if bs.isProposed: + # We store states either at the same slot at the block (checkpoint) or + # by advancing the slot to the nearest epoch start - check both when + # pruning + cur = dag.parentOrSlot(bs) + elif bs.slot.epoch > EPOCHS_PER_STATE_SNAPSHOT: + # Jump one snapshot interval at a time, but don't prune genesis + cur = dag.getBlockIdAtSlot(start_slot(bs.slot.epoch() - EPOCHS_PER_STATE_SNAPSHOT)) + else: + break + + # Prune blocks after sanity-checking that we don't prune post-tail blocks - + # this could happen if a state is missing at the expected state horizon and + # would indicate a partially inconsistent database since the base + # invariant is that there exists a state at the snapshot slot - better not + # further mess things up regardless + if blockHorizon > GENESIS_SLOT and blockHorizon <= dag.tail.slot: + var + # Leave the horizon block itself + cur = dag.getBlockIdAtSlot(blockHorizon - 1).map(proc(x: auto): auto = x.bid) + + while cur.isSome: + let + bid = cur.get() + fork = dag.cfg.consensusForkAtEpoch(bid.slot.epoch) + + if bid.slot == GENESIS_SLOT: + # Leave genesis block for nostalgia and the REST API + break + + if not dag.db.delBlock(fork, bid.root): + # Stop at the first gap - this is typically the pruning point of the + # previous call to pruneHistory. An inconsistent DB might have more + # blocks beyond that point but we have no efficient way of detecting + # that. + break + + cur = dag.parent(bid) + + # TODO There have been varied reports of startup pruning causing long + # startup times - an incremental approach would be needed here also + if false and + startup and + dag.cfg.consensusForkAtEpoch(blockHorizon.epoch) > ConsensusFork.Phase0: + # Once during start, we'll clear all "old fork" data - this ensures we get + # rid of any leftover junk in the tables - we do so after linear pruning + # so as to "mostly" clean up the phase0 tables as well (which cannot be + # pruned easily by fork) - one fork at a time, so as not to take too long + + let stateFork = dag.cfg.consensusForkAtEpoch(dag.tail.slot.epoch) + var clearedStates = false + if stateFork > ConsensusFork.Phase0: + for fork in ConsensusFork.Phase0.. ConsensusFork.Phase0: + for fork in ConsensusFork.Phase0..= ConsensusFork.Bellatrix: + Opt.some forkyBlck.message.body.execution_payload.block_hash + else: + Opt.some ZERO_HASH + +proc loadExecutionBlockHash*( + dag: ChainDAGRef, blck: BlockRef): Opt[Eth2Digest] = + if blck.executionBlockHash.isNone: + blck.executionBlockHash = dag.loadExecutionBlockHash(blck.bid) + blck.executionBlockHash + +from std/packedsets import PackedSet, incl, items + +func getValidatorChangeStatuses( + state: ForkedHashedBeaconState, vis: openArray[ValidatorIndex]): + PackedSet[ValidatorIndex] = + var res: PackedSet[ValidatorIndex] + withState(state): + for vi in vis: + if forkyState.data.validators[vi].withdrawal_credentials.data[0] == + BLS_WITHDRAWAL_PREFIX: + res.incl vi + res + +func checkBlsToExecutionChanges( + state: ForkedHashedBeaconState, vis: PackedSet[ValidatorIndex]): bool = + # Within each fork, BLS_WITHDRAWAL_PREFIX to ETH1_ADDRESS_WITHDRAWAL_PREFIX + # and never ETH1_ADDRESS_WITHDRAWAL_PREFIX to BLS_WITHDRAWAL_PREFIX. Latter + # can still happen via reorgs. 
+ # Cases: + # 1) unchanged (BLS_WITHDRAWAL_PREFIX or ETH1_ADDRESS_WITHDRAWAL_PREFIX) from + # old to new head. + # 2) ETH1_ADDRESS_WITHDRAWAL_PREFIX to BLS_WITHDRAWAL_PREFIX + # 3) BLS_WITHDRAWAL_PREFIX to ETH1_ADDRESS_WITHDRAWAL_PREFIX + # + # Only report (3), i.e. whether there were validator indices with withdrawal + # credentials previously using BLS_WITHDRAWAL_PREFIX now using, instead, the + # ETH1_ADDRESS_WITHDRAWAL_PREFIX prefix indicating a BLS to execution change + # went through. + # + # Since it tracks head, it's possible reorgs trigger reporting the same + # validator indices multiple times; this is fine. + withState(state): + anyIt( vis, forkyState.data.validators[it].has_eth1_withdrawal_credential) + +proc updateHead*( + dag: ChainDAGRef, newHead: BlockRef, quarantine: var Quarantine, + knownValidators: openArray[ValidatorIndex]) = + ## Update what we consider to be the current head, as given by the fork + ## choice. + ## + ## The choice of head affects the choice of finalization point - the order + ## of operations naturally becomes important here - after updating the head, + ## blocks that were once considered potential candidates for a tree will + ## now fall from grace, or no longer be considered resolved. + doAssert not newHead.isNil() + + # Could happen if enough blocks get invalidated and would corrupt database - + # When finalized checkpoint is empty, the slot may also be smaller + doAssert newHead.slot >= dag.finalizedHead.slot or + newHead == dag.finalizedHead.blck + + let lastHead = dag.head + + logScope: + newHead = shortLog(newHead) + lastHead = shortLog(lastHead) + + if lastHead == newHead: + trace "No head block update" + return + + if newHead.parent.isNil: + # The new head should always have the finalizedHead as ancestor - thus, + # this should not happen except in a race condition where the selected + # `BlockRef` had its parent set to nil as happens during finalization - + # notably, resetting the head to be the finalizedHead is not allowed + error "Cannot update head to block without parent" + return + + let + lastHeadStateRoot = getStateRoot(dag.headState) + lastHeadMergeComplete = dag.headState.is_merge_transition_complete() + lastHeadKind = dag.headState.kind + lastKnownValidatorsChangeStatuses = getValidatorChangeStatuses( + dag.headState, knownValidators) + + # Start off by making sure we have the right state - updateState will try + # to use existing in-memory states to make this smooth + var cache: StateCache + if not updateState( + dag, dag.headState, newHead.bid.atSlot(), false, cache): + # Advancing the head state should never fail, given that the tail is + # implicitly finalised, the head is an ancestor of the tail and we always + # store the tail state in the database, as well as every epoch slot state in + # between + fatal "Unable to load head state during head update, database corrupt?", + lastHead = shortLog(lastHead) + quit 1 + + dag.head = newHead + + if dag.headState.is_merge_transition_complete() and not + lastHeadMergeComplete and + dag.vanityLogs.onMergeTransitionBlock != nil: + dag.vanityLogs.onMergeTransitionBlock() + + if dag.headState.kind > lastHeadKind: + case dag.headState.kind + of ConsensusFork.Phase0 .. 
ConsensusFork.Bellatrix: + discard + of ConsensusFork.Capella: + if dag.vanityLogs.onUpgradeToCapella != nil: + dag.vanityLogs.onUpgradeToCapella() + of ConsensusFork.Deneb: + if dag.vanityLogs.onUpgradeToDeneb != nil: + dag.vanityLogs.onUpgradeToDeneb() + of ConsensusFork.Electra: + if dag.vanityLogs.onUpgradeToElectra != nil: + dag.vanityLogs.onUpgradeToElectra() + + if dag.vanityLogs.onKnownBlsToExecutionChange != nil and + checkBlsToExecutionChanges( + dag.headState, lastKnownValidatorsChangeStatuses): + dag.vanityLogs.onKnownBlsToExecutionChange() + + dag.db.putHeadBlock(newHead.root) + + # updateBeaconMetrics(dag.headState, dag.head.bid, cache) + + withState(dag.headState): + when consensusFork >= ConsensusFork.Altair: + dag.headSyncCommittees = forkyState.data.get_sync_committee_cache(cache) + + let + finalized_checkpoint = + getStateField(dag.headState, finalized_checkpoint) + finalizedSlot = + # finalized checkpoint may move back in the head state compared to what + # we've seen in other forks - it does not move back in fork choice + # however, so we'll use the last-known-finalized in that case + max(finalized_checkpoint.epoch.start_slot(), dag.finalizedHead.slot) + finalizedHead = newHead.atSlot(finalizedSlot) + + doAssert (not finalizedHead.blck.isNil), + "Block graph should always lead to a finalized block" + + # Update light client data + # dag.processHeadChangeForLightClient() + + let (isAncestor, ancestorDepth) = lastHead.getDepth(newHead) + if not(isAncestor): + notice "Updated head block with chain reorg", + headParent = shortLog(newHead.parent), + stateRoot = shortLog(getStateRoot(dag.headState)), + justified = shortLog(getStateField( + dag.headState, current_justified_checkpoint)), + finalized = shortLog(getStateField(dag.headState, finalized_checkpoint)), + isOptHead = not newHead.executionValid + + if not(isNil(dag.onReorgHappened)): + let + # TODO (cheatfate): Proper implementation required + data = ReorgInfoObject.init(dag.head.slot, uint64(ancestorDepth), + lastHead.root, newHead.root, + lastHeadStateRoot, + getStateRoot(dag.headState)) + dag.onReorgHappened(data) + + # A reasonable criterion for "reorganizations of the chain" + quarantine.clearAfterReorg() + + # beacon_reorgs_total_total.inc() + # beacon_reorgs_total.inc() + else: + debug "Updated head block", + stateRoot = shortLog(getStateRoot(dag.headState)), + justified = shortLog(getStateField( + dag.headState, current_justified_checkpoint)), + finalized = shortLog(getStateField(dag.headState, finalized_checkpoint)), + isOptHead = not newHead.executionValid + + if not(isNil(dag.onHeadChanged)): + let + depRoot = withState(dag.headState): forkyState.proposer_dependent_root + prevDepRoot = withState(dag.headState): + forkyState.attester_dependent_root + epochTransition = (finalizedHead != dag.finalizedHead) + # TODO (cheatfate): Proper implementation required + data = HeadChangeInfoObject.init(dag.head.slot, dag.head.root, + getStateRoot(dag.headState), + epochTransition, prevDepRoot, + depRoot) + dag.onHeadChanged(data) + + withState(dag.headState): + # Every time the head changes, the "canonical" view of balances and other + # state-related metrics change - notify the validator monitor. + # Doing this update during head update ensures there's a reasonable number + # of such updates happening - at most once per valid block. 
+ dag.validatorMonitor[].registerState(forkyState.data) + + if finalizedHead != dag.finalizedHead: + debug "Reached new finalization checkpoint", + stateRoot = shortLog(getStateRoot(dag.headState)), + justified = shortLog(getStateField( + dag.headState, current_justified_checkpoint)), + finalized = shortLog(getStateField(dag.headState, finalized_checkpoint)) + let oldFinalizedHead = dag.finalizedHead + + block: + # Update `dag.finalizedBlocks` with all newly finalized blocks (those + # newer than the previous finalized head), then update `dag.finalizedHead` + var newFinalized: seq[BlockId] + var tmp = finalizedHead.blck + while not isNil(tmp) and tmp.slot >= dag.finalizedHead.slot: + newFinalized.add(tmp.bid) + if tmp != finalizedHead.blck: + # The newly finalized block itself should remain in here so that fork + # choice still can find it via root + dag.forkBlocks.excl(KeyedBlockRef.init(tmp)) + + let p = tmp.parent + tmp.parent = nil # Reset all parent links to release memory + tmp = p + + dag.finalizedHead = finalizedHead + + dag.db.updateFinalizedBlocks(newFinalized) + + let oldBlockHash = dag.loadExecutionBlockHash(oldFinalizedHead.blck) + if oldBlockHash.isSome and oldBlockHash.unsafeGet.isZero: + let newBlockHash = dag.loadExecutionBlockHash(dag.finalizedHead.blck) + if newBlockHash.isSome and not newBlockHash.unsafeGet.isZero: + if dag.vanityLogs.onFinalizedMergeTransitionBlock != nil: + dag.vanityLogs.onFinalizedMergeTransitionBlock() + + # Pruning the block dag is required every time the finalized head changes + # in order to clear out blocks that are no longer viable and should + # therefore no longer be considered as part of the chain we're following + dag.pruneBlocksDAG() + + # Update light client data + # dag.processFinalizationForLightClient(oldFinalizedHead) + + # Send notification about new finalization point via callback. + if not(isNil(dag.onFinHappened)): + let stateRoot = + if dag.finalizedHead.slot == dag.head.slot: getStateRoot(dag.headState) + elif dag.finalizedHead.slot + SLOTS_PER_HISTORICAL_ROOT > dag.head.slot: + getStateField(dag.headState, state_roots).data[ + int(dag.finalizedHead.slot mod SLOTS_PER_HISTORICAL_ROOT)] + else: + Eth2Digest() # The thing that finalized was >8192 blocks old? + # TODO (cheatfate): Proper implementation required + let data = FinalizationInfoObject.init( + dag.finalizedHead.blck.root, stateRoot, dag.finalizedHead.slot.epoch) + dag.onFinHappened(dag, data) + +proc isInitialized*(T: type ChainDAGRef, db: BeaconChainDB): Result[void, cstring] = + ## Lightweight check to see if it is likely that the given database has been + ## initialized + let + tailBlockRoot = db.getTailBlock() + if not tailBlockRoot.isSome(): + return err("Tail block root missing") + + let + tailBlock = db.getBlockId(tailBlockRoot.get()) + if not tailBlock.isSome(): + return err("Tail block information missing") + + ok() + +proc preInit*( + T: type ChainDAGRef, db: BeaconChainDB, state: ForkedHashedBeaconState) = + ## Initialize a database using the given state, which potentially may be a + ## non-genesis state. + ## + ## When used with a non-genesis state, the resulting database will not be + ## compatible with pre-22.11 versions. 
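+  # (Editor's note: this is the path used when seeding a database from a
+  # trusted, possibly non-genesis state - as the code below notes, only a
+  # block summary for the state's latest block is written and the block
+  # bodies are expected to be backfilled from the network afterwards.)
+  # Minimal usage sketch, assuming `db` and `checkpointState` already exist:
+  #   ChainDAGRef.preInit(db, checkpointState)
+  #   doAssert ChainDAGRef.isInitialized(db).isOk()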
+ logScope: + stateRoot = $getStateRoot(state) + stateSlot = getStateField(state, slot) + + doAssert getStateField(state, slot).is_epoch, + "Can only initialize database from epoch states" + + withState(state): + db.putState(forkyState) + + if forkyState.data.slot == GENESIS_SLOT: + let blck = get_initial_beacon_block(forkyState) + db.putBlock(blck) + db.putGenesisBlock(blck.root) + db.putHeadBlock(blck.root) + db.putTailBlock(blck.root) + + notice "Database initialized from genesis", + blockRoot = $blck.root + else: + let blockRoot = forkyState.latest_block_root() + # We write a summary but not the block contents - these will have to be + # backfilled from the network + db.putBeaconBlockSummary(blockRoot, BeaconBlockSummary( + slot: forkyState.data.latest_block_header.slot, + parent_root: forkyState.data.latest_block_header.parent_root + )) + db.putHeadBlock(blockRoot) + db.putTailBlock(blockRoot) + + if db.getGenesisBlock().isSome(): + notice "Checkpoint written to database", blockRoot = $blockRoot + else: + notice "Database initialized from checkpoint", blockRoot = $blockRoot + +proc getProposer*( + dag: ChainDAGRef, head: BlockRef, slot: Slot): Opt[ValidatorIndex] = + let + epochRef = dag.getEpochRef(head.bid, slot.epoch(), false).valueOr: + notice "Cannot load EpochRef for given head", head, slot, error + return Opt.none(ValidatorIndex) + + slotInEpoch = slot.since_epoch_start() + + let proposer = epochRef.beacon_proposers[slotInEpoch] + if proposer.isSome(): + if proposer.get().uint64 >= dag.db.immutableValidators.lenu64(): + # Sanity check - it should never happen that the key cache doesn't contain + # a key for the selected proposer - that would mean that we somehow + # created validators in the state without updating the cache! + warn "Proposer key not found", + keys = dag.db.immutableValidators.lenu64(), proposer = proposer.get() + return Opt.none(ValidatorIndex) + + proposer + +proc getProposalState*( + dag: ChainDAGRef, head: BlockRef, slot: Slot, cache: var StateCache): + Result[ref ForkedHashedBeaconState, cstring] = + ## Return a state suitable for making proposals for the given head and slot - + ## in particular, the state can be discarded after use and does not have a + ## state root set + + # Start with the clearance state, since this one typically has been advanced + # and thus has a hot hash tree cache + let state = assignClone(dag.clearanceState) + + var + info = ForkedEpochInfo() + if not state[].can_advance_slots(head.root, slot): + # The last state root will be computed as part of block production, so skip + # it now + if not dag.updateState( + state[], head.atSlot(slot - 1).toBlockSlotId().expect("not nil"), + false, cache): + error "Cannot get proposal state - skipping block production, database corrupt?", + head = shortLog(head), + slot + return err("Cannot create proposal state") + else: + loadStateCache(dag, cache, head.bid, slot.epoch) + + if getStateField(state[], slot) < slot: + process_slots( + dag.cfg, state[], slot, cache, info, + {skipLastStateRootCalculation}).expect("advancing 1 slot should not fail") + + ok state + +func aggregateAll*( + dag: ChainDAGRef, + validator_indices: openArray[ValidatorIndex]): Result[CookedPubKey, cstring] = + if validator_indices.len == 0: + # Aggregation spec requires non-empty collection + # - https://tools.ietf.org/html/draft-irtf-cfrg-bls-signature-04 + # Consensus specs require at least one attesting index in attestation + # - 
https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/specs/phase0/beacon-chain.md#is_valid_indexed_attestation + return err("aggregate: no attesting keys") + + let + firstKey = dag.validatorKey(validator_indices[0]).valueOr: + return err("aggregate: invalid validator index") + + var aggregateKey{.noinit.}: AggregatePublicKey + + aggregateKey.init(firstKey) + + for i in 1 ..< validator_indices.len: + let key = dag.validatorKey(validator_indices[i]).valueOr: + return err("aggregate: invalid validator index") + aggregateKey.aggregate(key) + + ok(finish(aggregateKey)) + +func aggregateAll*( + dag: ChainDAGRef, + validator_indices: openArray[ValidatorIndex|uint64], + bits: BitSeq | BitArray): Result[CookedPubKey, cstring] = + if validator_indices.len() != bits.len(): + return err("aggregateAll: mismatch in bits length") + + var + aggregateKey{.noinit.}: AggregatePublicKey + inited = false + + for i in 0.. dag.horizon + +proc rebuildIndex*(dag: ChainDAGRef) = + ## After a checkpoint sync, we lack intermediate states to replay from - this + ## function rebuilds them so that historical replay can take place again + ## TODO the pruning of junk states could be moved to a separate function that + ## runs either on startup + # First, we check what states we already have in the database - that allows + # resuming the operation at any time + let + roots = dag.db.loadStateRoots() + historicalRoots = getStateField(dag.headState, historical_roots).asSeq() + historicalSummaries = dag.headState.historical_summaries.asSeq() + + var + canonical = newSeq[Eth2Digest]( + (dag.finalizedHead.slot.epoch + EPOCHS_PER_STATE_SNAPSHOT - 1) div + EPOCHS_PER_STATE_SNAPSHOT) + # `junk` puts in place some infrastructure to prune unnecessary states - it + # will be more useful in the future as a base for pruning + junk: seq[((Slot, Eth2Digest), Eth2Digest)] + + for k, v in roots: + if k[0] >= dag.finalizedHead.slot: + continue # skip newer stuff + if k[0] < dag.backfill.slot: + continue # skip stuff for which we have no blocks + + if not isFinalizedStateSnapshot(k[0]): + # `tail` will move at the end of the process, so we won't need any + # intermediate states + junk.add((k, v)) + + continue # skip non-snapshot slots + + if k[0] > 0: + let bs = dag.getBlockIdAtSlot(k[0] - 1) + if bs.isNone or bs.get().bid.root != k[1]: + # remove things that are no longer a canonical part of the chain or + # cannot be reached via a block + junk.add((k, v)) + continue + + if not dag.db.containsState(dag.cfg.consensusForkAtEpoch(k[0].epoch), v): + continue # If it's not in the database.. 
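+    # (Editor's note: reaching this point means `v` is a canonical snapshot
+    # state present in the database, so it is recorded at its snapshot index;
+    # zero entries remaining in `canonical` mark snapshots that still need to
+    # be recreated below.)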
+ + canonical[k[0].epoch div EPOCHS_PER_STATE_SNAPSHOT] = v + + let + state = (ref ForkedHashedBeaconState)() + + var + cache: StateCache + info: ForkedEpochInfo + tailBid: Opt[BlockId] + states: int + + # `canonical` holds all slots at which a state is expected to appear, using a + # zero root whenever a particular state is missing - this way, if there's + # partial progress or gaps, they will be dealt with correctly + for i, state_root in canonical.mpairs(): + let + slot = Epoch(i * EPOCHS_PER_STATE_SNAPSHOT).start_slot + + if slot < dag.backfill.slot: + # TODO if we have era files, we could try to load blocks from them at + # this point + # TODO if we don't do the above, we can of course compute the starting `i` + continue + + if tailBid.isNone(): + if state_root.isZero: + # If we can find an era file with this state, use it as an alternative + # starting point - ignore failures for now + if dag.era.getState( + historicalRoots, historicalSummaries, slot, state[]).isOk(): + state_root = getStateRoot(state[]) + + withState(state[]): dag.db.putState(forkyState) + tailBid = Opt.some state[].latest_block_id() + + else: + if not dag.db.getState( + dag.cfg.consensusForkAtEpoch(slot.epoch), state_root, state[], + noRollback): + fatal "Cannot load state, database corrupt or created for a different network?", + state_root, slot + quit 1 + tailBid = Opt.some state[].latest_block_id() + + continue + + if i == 0 or canonical[i - 1].isZero: + reset(tailBid) # No unbroken history! + continue + + if not state_root.isZero: + states += 1 + continue + + let + startSlot = Epoch((i - 1) * EPOCHS_PER_STATE_SNAPSHOT).start_slot + + info "Recreating state snapshot", + slot, startStateRoot = canonical[i - 1], startSlot + + if getStateRoot(state[]) != canonical[i - 1]: + if not dag.db.getState( + dag.cfg.consensusForkAtEpoch(startSlot.epoch), canonical[i - 1], + state[], noRollback): + error "Can't load start state, database corrupt?", + startStateRoot = shortLog(canonical[i - 1]), slot = startSlot + return + + for slot in startSlot.. 0: + info "Dropping redundant states", states, redundant = junk.len + + for i in junk: + dag.db.delStateRoot(i[0][1], i[0][0]) + dag.db.delState(dag.cfg.consensusForkAtEpoch(i[0][0].epoch), i[1]) diff --git a/nimbus_unified/consensus/consensus_wrapper.nim b/nimbus_unified/consensus/consensus_wrapper.nim index b80153edc..8fc648997 100644 --- a/nimbus_unified/consensus/consensus_wrapper.nim +++ b/nimbus_unified/consensus/consensus_wrapper.nim @@ -5,8 +5,10 @@ # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). # at your option. This file may not be copied, modified, or distributed except according to those terms. 
+#TODO: Clean these imports import - std/[os, atomics], + std/[os, atomics, random, terminal, times, exitprocs, sequtils], + metrics, beacon_chain/nimbus_binary_common, beacon_chain/spec/forks, beacon_chain/[beacon_chain_db, trusted_node_sync], @@ -14,55 +16,1161 @@ import chronos, chronicles, stew/io2, - ../configs/nimbus_configs + eth/p2p/discoveryv5/[enr, random2], + ../configs/nimbus_configs, + beacon_chain/consensus_object_pools/vanity_logs/vanity_logs, + beacon_chain/statusbar, + beacon_chain/nimbus_binary_common, + beacon_chain/spec/[forks, digest, helpers], + beacon_chain/spec/datatypes/base, + beacon_chain/[beacon_chain_db, trusted_node_sync, beacon_node], + beacon_chain/spec/weak_subjectivity, + beacon_chain/rpc/[rest_beacon_api, rest_api, state_ttl_cache], + beacon_chain/consensus_object_pools/blob_quarantine, + beacon_chain/networking/[topic_params, network_metadata, network_metadata_downloads], + beacon_chain/spec/datatypes/[bellatrix], + beacon_chain/sync/[sync_protocol], + beacon_chain/validators/[keystore_management, beacon_validators], + beacon_chain/consensus_object_pools/[blockchain_dag], + beacon_chain/spec/ + [beaconstate, state_transition, state_transition_epoch, validator, ssz_codec] export nimbus_configs +when defined(posix): + import system/ansi_c + +from beacon_chain/spec/datatypes/deneb import SignedBeaconBlock +from beacon_chain/beacon_node_light_client + import shouldSyncOptimistically, initLightClient, updateLightClientFromDag +from libp2p/protocols/pubsub/gossipsub + import TopicParams, validateParameters, init + ## log logScope: topics = "Consensus layer" -## following procedures are copies from nimbus_beacon_node.nim. -## TODO: if possible, extract from that file into a common file +# adapted from nimbus-eth2 +# # https://github.com/ethereum/eth2.0-metrics/blob/master/metrics.md#interop-metrics +# declareGauge beacon_slot, "Latest slot of the beacon chain state" +# declareGauge beacon_current_epoch, "Current epoch" -## runs beacon node -## adapted from nimbus-eth2 -proc doRunBeaconNode( - config: var BeaconNodeConf, rng: ref HmacDrbgContext -) {.raises: [CatchableError].} = - info "Launching beacon node", - version = "fullVersionStr", #TODO:changed from original version - bls_backend = $BLS_BACKEND, - const_preset, - cmdParams = commandLineParams(), - config +# # Finalization tracking +# declareGauge finalization_delay, +# "Epoch delay between scheduled epoch and finalized epoch" - template ignoreDeprecatedOption(option: untyped): untyped = - if config.option.isSome: - warn "Config option is deprecated", option = config.option.get +# declareGauge ticks_delay, +# "How long does to take to run the onSecond loop" - ignoreDeprecatedOption requireEngineAPI - ignoreDeprecatedOption safeSlotsToImportOptimistically - ignoreDeprecatedOption terminalTotalDifficultyOverride - ignoreDeprecatedOption optimistic - ignoreDeprecatedOption validatorMonitorTotals - ignoreDeprecatedOption web3ForcePolling +# declareGauge next_action_wait, +# "Seconds until the next attestation will be sent" - config.createDumpDirs() +# declareGauge next_proposal_wait, +# "Seconds until the next proposal will be sent, or Inf if not known" - #TODO: We might need to split this on the same file - # if config.metricsEnabled: - # let metricsAddress = config.metricsAddress - # notice "Starting metrics HTTP server", - # url = "http://" & $metricsAddress & ":" & $config.metricsPort & "/metrics" - # try: - # startMetricsHttpServer($metricsAddress, config.metricsPort) - # except CatchableError as exc: - 
# raise exc - # except Exception as exc: - # raiseAssert exc.msg # TODO fix metrics +# declareGauge sync_committee_active, +# "1 if there are current sync committee duties, 0 otherwise" + +# declareCounter db_checkpoint_seconds, +# "Time spent checkpointing the database to clear the WAL file" + +const SlashingDbName = "slashing_protection" +# changing this requires physical file rename as well or history is lost + + +## NOTE +## following procedures are copies/adaptations from nimbus_beacon_node.nim. +## TODO: Extract do adequate structures and files + + +# TODO: need to figure out behaviour on threaded patterns +# Using this function here is signaled as non GC SAFE given +# that gPidFile might be accessed concurrently with no guards + +# var gPidFile: string +# proc createPidFile(filename: string) {.raises: [IOError].} = +# writeFile filename, $os.getCurrentProcessId() +# gPidFile = filename +# addExitProc ( +# proc() = +# discard io2.removeFile(filename) +# ) + +proc initFullNode( + node: BeaconNode, + rng: ref HmacDrbgContext, + dag: ChainDAGRef, + taskpool: TaskPoolPtr, + getBeaconTime: GetBeaconTimeFn +) {.async.} = + template config(): auto = + node.config + + proc onPhase0AttestationReceived(data: phase0.Attestation) = + node.eventBus.attestQueue.emit(data) + + proc onElectraAttestationReceived(data: electra.Attestation) = + debugComment "electra attestation queue" + + proc onSyncContribution(data: SignedContributionAndProof) = + node.eventBus.contribQueue.emit(data) + + proc onVoluntaryExitAdded(data: SignedVoluntaryExit) = + node.eventBus.exitQueue.emit(data) + + proc onBLSToExecutionChangeAdded(data: SignedBLSToExecutionChange) = + node.eventBus.blsToExecQueue.emit(data) + + proc onProposerSlashingAdded(data: ProposerSlashing) = + node.eventBus.propSlashQueue.emit(data) + + proc onPhase0AttesterSlashingAdded(data: phase0.AttesterSlashing) = + node.eventBus.attSlashQueue.emit(data) + + proc onElectraAttesterSlashingAdded(data: electra.AttesterSlashing) = + debugComment "electra att slasher queue" + + proc onBlobSidecarAdded(data: BlobSidecarInfoObject) = + node.eventBus.blobSidecarQueue.emit(data) + + proc onBlockAdded(data: ForkedTrustedSignedBeaconBlock) = + let optimistic = + if node.currentSlot().epoch() >= dag.cfg.BELLATRIX_FORK_EPOCH: + some node.dag.is_optimistic(data.toBlockId()) + else: + none[bool]() + node.eventBus.blocksQueue.emit( + EventBeaconBlockObject.init(data, optimistic)) + + proc onHeadChanged(data: HeadChangeInfoObject) = + let eventData = + if node.currentSlot().epoch() >= dag.cfg.BELLATRIX_FORK_EPOCH: + var res = data + res.optimistic = + some node.dag.is_optimistic(BlockId(slot: data.slot, root: data.block_root)) + res + else: + data + node.eventBus.headQueue.emit(eventData) + + proc onChainReorg(data: ReorgInfoObject) = + let eventData = + if node.currentSlot().epoch() >= dag.cfg.BELLATRIX_FORK_EPOCH: + var res = data + res.optimistic = some node.dag.is_optimistic( + BlockId(slot: data.slot, root: data.new_head_block) + ) + res + else: + data + node.eventBus.reorgQueue.emit(eventData) + + proc makeOnFinalizationCb( + # This `nimcall` functions helps for keeping track of what + # needs to be captured by the onFinalization closure. 
+ eventBus: EventBus, + elManager: ELManager, + ): OnFinalizedCallback {.nimcall.} = + static: + doAssert (elManager is ref) + return proc(dag: ChainDAGRef, data: FinalizationInfoObject) = + if elManager != nil: + let finalizedEpochRef = dag.getFinalizedEpochRef() + discard trackFinalizedState( + elManager, finalizedEpochRef.eth1_data, finalizedEpochRef.eth1_deposit_index + ) + # node.updateLightClientFromDag() + let eventData = + if node.currentSlot().epoch() >= dag.cfg.BELLATRIX_FORK_EPOCH: + var res = data + # `slot` in this `BlockId` may be higher than block's actual slot, + # this is alright for the purpose of calling `is_optimistic`. + res.optimistic = some node.dag.is_optimistic( + BlockId(slot: data.epoch.start_slot, root: data.block_root) + ) + res + else: + data + eventBus.finalQueue.emit(eventData) + + func getLocalHeadSlot(): Slot = + dag.head.slot + + proc getLocalWallSlot(): Slot = + node.beaconClock.now.slotOrZero + + func getFirstSlotAtFinalizedEpoch(): Slot = + dag.finalizedHead.slot + + func getBackfillSlot(): Slot = + if dag.backfill.parent_root != dag.tail.root: dag.backfill.slot else: dag.tail.slot + + func getFrontfillSlot(): Slot = + max(dag.frontfill.get(BlockId()).slot, dag.horizon) + + proc isWithinWeakSubjectivityPeriod(): bool = + let + currentSlot = node.beaconClock.now().slotOrZero() + checkpoint = Checkpoint( + epoch: epoch(getStateField(node.dag.headState, slot)), + root: getStateField(node.dag.headState, latest_block_header).state_root, + ) + is_within_weak_subjectivity_period( + node.dag.cfg, currentSlot, node.dag.headState, checkpoint + ) + + proc eventWaiter(): Future[void] {.async: (raises: [CancelledError]).} = + await node.shutdownEvent.wait() + bnStatus = BeaconNodeStatus.Stopping + + asyncSpawn eventWaiter() + + let + quarantine = newClone(Quarantine.init()) + attestationPool = newClone( + AttestationPool.init( + dag, quarantine, onPhase0AttestationReceived, onElectraAttestationReceived + ) + ) + syncCommitteeMsgPool = + newClone(SyncCommitteeMsgPool.init(rng, dag.cfg, onSyncContribution)) + # adapted from nimbus-eth2 + # lightClientPool = newClone(LightClientPool()) + validatorChangePool = newClone( + ValidatorChangePool.init( + dag, attestationPool, onVoluntaryExitAdded, onBLSToExecutionChangeAdded, + onProposerSlashingAdded, onPhase0AttesterSlashingAdded, + onElectraAttesterSlashingAdded, + ) + ) + blobQuarantine = newClone(BlobQuarantine.init(onBlobSidecarAdded)) + consensusManager = ConsensusManager.new( + dag, + attestationPool, + quarantine, + node.elManager, + ActionTracker.init(node.network.nodeId, config.subscribeAllSubnets), + node.dynamicFeeRecipientsStore, + config.validatorsDir, + config.defaultFeeRecipient, + config.suggestedGasLimit, + ) + blockProcessor = BlockProcessor.new( + config.dumpEnabled, config.dumpDirInvalid, config.dumpDirIncoming, rng, taskpool, + consensusManager, node.validatorMonitor, blobQuarantine, getBeaconTime, + ) + blockVerifier = proc( + signedBlock: ForkedSignedBeaconBlock, + blobs: Opt[BlobSidecars], + maybeFinalized: bool, + ): Future[Result[void, VerifierError]] {. + async: (raises: [CancelledError], raw: true) + .} = + # The design with a callback for block verification is unusual compared + # to the rest of the application, but fits with the general approach + # taken in the sync/request managers - this is an architectural compromise + # that should probably be reimagined more holistically in the future. 
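+      # (Editor's note: `addBlock` enqueues the block for the processing loop
+      # and the returned future completes once the block has been accepted or
+      # rejected, so the sync manager can treat verification as an async
+      # call.)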
+ blockProcessor[].addBlock( + MsgSource.gossip, signedBlock, blobs, maybeFinalized = maybeFinalized + ) + rmanBlockVerifier = proc( + signedBlock: ForkedSignedBeaconBlock, maybeFinalized: bool + ): Future[Result[void, VerifierError]] {.async: (raises: [CancelledError]).} = + withBlck(signedBlock): + when consensusFork >= ConsensusFork.Deneb: + if not blobQuarantine[].hasBlobs(forkyBlck): + # We don't have all the blobs for this block, so we have + # to put it in blobless quarantine. + if not quarantine[].addBlobless(dag.finalizedHead.slot, forkyBlck): + err(VerifierError.UnviableFork) + else: + err(VerifierError.MissingParent) + else: + let blobs = blobQuarantine[].popBlobs(forkyBlck.root, forkyBlck) + await blockProcessor[].addBlock( + MsgSource.gossip, + signedBlock, + Opt.some(blobs), + maybeFinalized = maybeFinalized, + ) + else: + await blockProcessor[].addBlock( + MsgSource.gossip, + signedBlock, + Opt.none(BlobSidecars), + maybeFinalized = maybeFinalized, + ) + rmanBlockLoader = proc(blockRoot: Eth2Digest): Opt[ForkedTrustedSignedBeaconBlock] = + dag.getForkedBlock(blockRoot) + rmanBlobLoader = proc(blobId: BlobIdentifier): Opt[ref BlobSidecar] = + var blob_sidecar = BlobSidecar.new() + if dag.db.getBlobSidecar(blobId.block_root, blobId.index, blob_sidecar[]): + Opt.some blob_sidecar + else: + Opt.none(ref BlobSidecar) + + #TODO: + # removing this light client var + lightClientPool = newClone( + LightClientPool()) + + processor = Eth2Processor.new( + config.doppelgangerDetection, blockProcessor, node.validatorMonitor, dag, + attestationPool, validatorChangePool, node.attachedValidators, + syncCommitteeMsgPool, lightClientPool, quarantine, blobQuarantine, rng, + getBeaconTime, taskpool, + ) + syncManagerFlags = + if node.config.longRangeSync != LongRangeSyncMode.Lenient: + {SyncManagerFlag.NoGenesisSync} + else: + {} + syncManager = newSyncManager[Peer, PeerId]( + node.network.peerPool, + dag.cfg.DENEB_FORK_EPOCH, + dag.cfg.MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS, + SyncQueueKind.Forward, + getLocalHeadSlot, + getLocalWallSlot, + getFirstSlotAtFinalizedEpoch, + getBackfillSlot, + getFrontfillSlot, + isWithinWeakSubjectivityPeriod, + dag.tail.slot, + blockVerifier, + shutdownEvent = node.shutdownEvent, + flags = syncManagerFlags, + ) + backfiller = newSyncManager[Peer, PeerId]( + node.network.peerPool, + dag.cfg.DENEB_FORK_EPOCH, + dag.cfg.MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS, + SyncQueueKind.Backward, + getLocalHeadSlot, + getLocalWallSlot, + getFirstSlotAtFinalizedEpoch, + getBackfillSlot, + getFrontfillSlot, + isWithinWeakSubjectivityPeriod, + dag.backfill.slot, + blockVerifier, + maxHeadAge = 0, + shutdownEvent = node.shutdownEvent, + flags = syncManagerFlags, + ) + router = (ref MessageRouter)(processor: processor, network: node.network) + requestManager = RequestManager.init( + node.network, + dag.cfg.DENEB_FORK_EPOCH, + getBeaconTime, + ( + proc(): bool = + syncManager.inProgress + ), + quarantine, + blobQuarantine, + rmanBlockVerifier, + rmanBlockLoader, + rmanBlobLoader, + ) + # adapted from nimbus-eth2 + # if node.config.lightClientDataServe: + # proc scheduleSendingLightClientUpdates(slot: Slot) = + # if node.lightClientPool[].broadcastGossipFut != nil: + # return + # if slot <= node.lightClientPool[].latestBroadcastedSlot: + # return + # node.lightClientPool[].latestBroadcastedSlot = slot + + # template fut(): auto = + # node.lightClientPool[].broadcastGossipFut + + # fut = node.handleLightClientUpdates(slot) + # fut.addCallback do(p: pointer) {.gcsafe.}: + # fut = nil + + 
# router.onSyncCommitteeMessage = scheduleSendingLightClientUpdates + + dag.setFinalizationCb makeOnFinalizationCb(node.eventBus, node.elManager) + dag.setBlockCb(onBlockAdded) + dag.setHeadCb(onHeadChanged) + dag.setReorgCb(onChainReorg) + + node.dag = dag + node.blobQuarantine = blobQuarantine + node.quarantine = quarantine + node.attestationPool = attestationPool + node.syncCommitteeMsgPool = syncCommitteeMsgPool + # node.lightClientPool = lightClientPool + node.validatorChangePool = validatorChangePool + node.processor = processor + node.blockProcessor = blockProcessor + node.consensusManager = consensusManager + node.requestManager = requestManager + node.syncManager = syncManager + node.backfiller = backfiller + node.router = router + + await node.addValidators() + + block: + # Add in-process validators to the list of "known" validators such that + # we start with a reasonable ENR + let wallSlot = node.beaconClock.now().slotOrZero() + for validator in node.attachedValidators[].validators.values(): + if config.validatorMonitorAuto: + node.validatorMonitor[].addMonitor(validator.pubkey, validator.index) + + if validator.index.isSome(): + withState(dag.headState): + let idx = validator.index.get() + if distinctBase(idx) <= forkyState.data.validators.lenu64: + template v(): auto = + forkyState.data.validators.item(idx) + + if is_active_validator(v, wallSlot.epoch) or + is_active_validator(v, wallSlot.epoch + 1): + node.consensusManager[].actionTracker.knownValidators[idx] = wallSlot + elif is_exited_validator(v, wallSlot.epoch): + notice "Ignoring exited validator", + index = idx, pubkey = shortLog(v.pubkey) + let stabilitySubnets = + node.consensusManager[].actionTracker.stabilitySubnets(wallSlot) + # Here, we also set the correct ENR should we be in all subnets mode! 
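+    # (When `config.subscribeAllSubnets` is set, as passed to ActionTracker.init
+    # above, the tracker should report every attestation subnet as a stability
+    # subnet, so the ENR ends up advertising all attnets.)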
+ node.network.updateStabilitySubnetMetadata(stabilitySubnets) + + node.network.registerProtocol( + PeerSync, PeerSync.NetworkState.init(node.dag, node.beaconClock.getBeaconTimeFn()) + ) + + node.network.registerProtocol(BeaconSync, BeaconSync.NetworkState.init(node.dag)) + # adapted from nimbus-eth2 + + # if node.dag.lcDataStore.serve: + # node.network.registerProtocol( + # LightClientSync, LightClientSync.NetworkState.init(node.dag) + # ) + + # node.updateValidatorMetrics() + +func getVanityLogs(stdoutKind: StdoutLogKind): VanityLogs = + case stdoutKind + of StdoutLogKind.Auto: raiseAssert "inadmissable here" + of StdoutLogKind.Colors: + VanityLogs( + onMergeTransitionBlock: bellatrixColor, + onFinalizedMergeTransitionBlock: bellatrixBlink, + onUpgradeToCapella: capellaColor, + onKnownBlsToExecutionChange: capellaBlink, + onUpgradeToDeneb: denebColor, + onUpgradeToElectra: electraColor) + of StdoutLogKind.NoColors: + VanityLogs( + onMergeTransitionBlock: bellatrixMono, + onFinalizedMergeTransitionBlock: bellatrixMono, + onUpgradeToCapella: capellaMono, + onKnownBlsToExecutionChange: capellaMono, + onUpgradeToDeneb: denebMono, + onUpgradeToElectra: electraMono) + of StdoutLogKind.Json, StdoutLogKind.None: + VanityLogs( + onMergeTransitionBlock: + (proc() = notice "🐼 Proof of Stake Activated 🐼"), + onFinalizedMergeTransitionBlock: + (proc() = notice "🐼 Proof of Stake Finalized 🐼"), + onUpgradeToCapella: + (proc() = notice "🦉 Withdrowls now available 🦉"), + onKnownBlsToExecutionChange: + (proc() = notice "🦉 BLS to execution changed 🦉"), + onUpgradeToDeneb: + (proc() = notice "🐟 Proto-Danksharding is ON 🐟"), + onUpgradeToElectra: + (proc() = notice "🦒 [PH] Electra 🦒")) + +func getVanityMascot(consensusFork: ConsensusFork): string = + case consensusFork + of ConsensusFork.Electra: + "🦒" + of ConsensusFork.Deneb: + "🐟" + of ConsensusFork.Capella: + "🦉" + of ConsensusFork.Bellatrix: + "🐼" + of ConsensusFork.Altair: + "✨" + of ConsensusFork.Phase0: + "🦏" + +# NOTE: light client related code commented +proc loadChainDag( + config: BeaconNodeConf, + cfg: RuntimeConfig, + db: BeaconChainDB, + eventBus: EventBus, + validatorMonitor: ref ValidatorMonitor, + networkGenesisValidatorsRoot: Opt[Eth2Digest], +): ChainDAGRef = + info "Loading block DAG from database", path = config.databaseDir + + var dag: ChainDAGRef + proc onLightClientFinalityUpdate(data: ForkedLightClientFinalityUpdate) = + if dag == nil: + return + withForkyFinalityUpdate(data): + when lcDataFork > LightClientDataFork.None: + let contextFork = dag.cfg.consensusForkAtEpoch(forkyFinalityUpdate.contextEpoch) + eventBus.finUpdateQueue.emit( + RestVersioned[ForkedLightClientFinalityUpdate]( + data: data, + jsonVersion: contextFork, + sszContext: dag.forkDigests[].atConsensusFork(contextFork), + ) + ) + + proc onLightClientOptimisticUpdate(data: ForkedLightClientOptimisticUpdate) = + if dag == nil: + return + withForkyOptimisticUpdate(data): + when lcDataFork > LightClientDataFork.None: + let contextFork = + dag.cfg.consensusForkAtEpoch(forkyOptimisticUpdate.contextEpoch) + eventBus.optUpdateQueue.emit( + RestVersioned[ForkedLightClientOptimisticUpdate]( + data: data, + jsonVersion: contextFork, + sszContext: dag.forkDigests[].atConsensusFork(contextFork), + ) + ) + + let + chainDagFlags = + if config.strictVerification: + {strictVerification} + else: + {} + onLightClientFinalityUpdateCb = + if config.lightClientDataServe: onLightClientFinalityUpdate else: nil + onLightClientOptimisticUpdateCb = + if config.lightClientDataServe: 
onLightClientOptimisticUpdate else: nil + + dag = ChainDAGRef.init( + cfg, db, validatorMonitor, chainDagFlags, config.eraDir, + vanityLogs = getVanityLogs(detectTTY(config.logStdout)), + lcDataConfig = LightClientDataConfig( + serve: config.lightClientDataServe, + importMode: config.lightClientDataImportMode, + maxPeriods: config.lightClientDataMaxPeriods, + onLightClientFinalityUpdate: onLightClientFinalityUpdateCb, + onLightClientOptimisticUpdate: onLightClientOptimisticUpdateCb)) + + if networkGenesisValidatorsRoot.isSome: + let databaseGenesisValidatorsRoot = + getStateField(dag.headState, genesis_validators_root) + if networkGenesisValidatorsRoot.get != databaseGenesisValidatorsRoot: + fatal "The specified --data-dir contains data for a different network", + networkGenesisValidatorsRoot = networkGenesisValidatorsRoot.get, + databaseGenesisValidatorsRoot, + dataDir = config.dataDir + quit 1 + + # The first pruning after restart may take a while.. + if config.historyMode == HistoryMode.Prune: + dag.pruneHistory(true) + + dag + +proc doRunTrustedNodeSync( + db: BeaconChainDB, + metadata: Eth2NetworkMetadata, + databaseDir: string, + eraDir: string, + restUrl: string, + stateId: Option[string], + trustedBlockRoot: Option[Eth2Digest], + backfill: bool, + reindex: bool, + downloadDepositSnapshot: bool, + genesisState: ref ForkedHashedBeaconState, +) {.async.} = + let syncTarget = + if stateId.isSome: + if trustedBlockRoot.isSome: + warn "Ignoring `trustedBlockRoot`, `stateId` is set", stateId, trustedBlockRoot + TrustedNodeSyncTarget(kind: TrustedNodeSyncKind.StateId, stateId: stateId.get) + elif trustedBlockRoot.isSome: + TrustedNodeSyncTarget( + kind: TrustedNodeSyncKind.TrustedBlockRoot, + trustedBlockRoot: trustedBlockRoot.get, + ) + else: + TrustedNodeSyncTarget(kind: TrustedNodeSyncKind.StateId, stateId: "finalized") + + await db.doTrustedNodeSync( + metadata.cfg, databaseDir, eraDir, restUrl, syncTarget, backfill, reindex, + downloadDepositSnapshot, genesisState, + ) + +proc initBeaconNode*(T: type BeaconNode, + rng: ref HmacDrbgContext, + config: BeaconNodeConf, + metadata: Eth2NetworkMetadata): Future[BeaconNode] + {.async.} = + var + taskpool: TaskPoolPtr + genesisState: ref ForkedHashedBeaconState = nil + + template cfg: auto = metadata.cfg + template eth1Network: auto = metadata.eth1Network + + if not(isDir(config.databaseDir)): + # If database directory missing, we going to use genesis state to check + # for weak_subjectivity_period. + genesisState = + await fetchGenesisState( + metadata, config.genesisState, config.genesisStateUrl) + let + genesisTime = getStateField(genesisState[], genesis_time) + beaconClock = BeaconClock.init(genesisTime).valueOr: + fatal "Invalid genesis time in genesis state", genesisTime + quit 1 + currentSlot = beaconClock.now().slotOrZero() + checkpoint = Checkpoint( + epoch: epoch(getStateField(genesisState[], slot)), + root: getStateField(genesisState[], latest_block_header).state_root) + # adapted from nimbus-eth2 + # if config.longRangeSync == LongRangeSyncMode.Light: + # if not is_within_weak_subjectivity_period(metadata.cfg, currentSlot, + # genesisState[], checkpoint): + # fatal WeakSubjectivityLogMessage, current_slot = currentSlot + # quit 1 + + try: + if config.numThreads < 0: + fatal "The number of threads --numThreads cannot be negative." 
+ quit 1 + elif config.numThreads == 0: + taskpool = TaskPoolPtr.new(numThreads = min(countProcessors(), 16)) + else: + taskpool = TaskPoolPtr.new(numThreads = config.numThreads) + + info "Threadpool started", numThreads = taskpool.numThreads + except Exception: + raise newException(Defect, "Failure in taskpool initialization.") + + if metadata.genesis.kind == BakedIn: + if config.genesisState.isSome: + warn "The --genesis-state option has no effect on networks with built-in genesis state" + + if config.genesisStateUrl.isSome: + warn "The --genesis-state-url option has no effect on networks with built-in genesis state" + + let + eventBus = EventBus( + headQueue: newAsyncEventQueue[HeadChangeInfoObject](), + blocksQueue: newAsyncEventQueue[EventBeaconBlockObject](), + attestQueue: newAsyncEventQueue[phase0.Attestation](), + exitQueue: newAsyncEventQueue[SignedVoluntaryExit](), + blsToExecQueue: newAsyncEventQueue[SignedBLSToExecutionChange](), + propSlashQueue: newAsyncEventQueue[ProposerSlashing](), + attSlashQueue: newAsyncEventQueue[AttesterSlashing](), + blobSidecarQueue: newAsyncEventQueue[BlobSidecarInfoObject](), + finalQueue: newAsyncEventQueue[FinalizationInfoObject](), + reorgQueue: newAsyncEventQueue[ReorgInfoObject](), + contribQueue: newAsyncEventQueue[SignedContributionAndProof](), + finUpdateQueue: newAsyncEventQueue[ + RestVersioned[ForkedLightClientFinalityUpdate]](), + optUpdateQueue: newAsyncEventQueue[ + RestVersioned[ForkedLightClientOptimisticUpdate]]()) + db = BeaconChainDB.new(config.databaseDir, cfg, inMemory = false) + + if config.externalBeaconApiUrl.isSome and ChainDAGRef.isInitialized(db).isErr: + let trustedBlockRoot = + if config.trustedStateRoot.isSome or config.trustedBlockRoot.isSome: + config.trustedBlockRoot + elif cfg.ALTAIR_FORK_EPOCH == GENESIS_EPOCH: + # Sync can be bootstrapped from the genesis block root + if genesisState.isNil: + genesisState = await fetchGenesisState( + metadata, config.genesisState, config.genesisStateUrl) + if not genesisState.isNil: + let genesisBlockRoot = get_initial_beacon_block(genesisState[]).root + notice "Neither `--trusted-block-root` nor `--trusted-state-root` " & + "provided with `--external-beacon-api-url`, " & + "falling back to genesis block root", + externalBeaconApiUrl = config.externalBeaconApiUrl.get, + trustedBlockRoot = config.trustedBlockRoot, + trustedStateRoot = config.trustedStateRoot, + genesisBlockRoot = $genesisBlockRoot + some genesisBlockRoot + else: + none[Eth2Digest]() + else: + none[Eth2Digest]() + if config.trustedStateRoot.isNone and trustedBlockRoot.isNone: + warn "Ignoring `--external-beacon-api-url`, neither " & + "`--trusted-block-root` nor `--trusted-state-root` provided", + externalBeaconApiUrl = config.externalBeaconApiUrl.get, + trustedBlockRoot = config.trustedBlockRoot, + trustedStateRoot = config.trustedStateRoot + else: + if genesisState.isNil: + genesisState = await fetchGenesisState( + metadata, config.genesisState, config.genesisStateUrl) + await db.doRunTrustedNodeSync( + metadata, + config.databaseDir, + config.eraDir, + config.externalBeaconApiUrl.get, + config.trustedStateRoot.map do (x: Eth2Digest) -> string: + "0x" & x.data.toHex, + trustedBlockRoot, + backfill = false, + reindex = false, + downloadDepositSnapshot = false, + genesisState) + + if config.finalizedCheckpointBlock.isSome: + warn "--finalized-checkpoint-block has been deprecated, ignoring" + + let checkpointState = if config.finalizedCheckpointState.isSome: + let checkpointStatePath = 
config.finalizedCheckpointState.get.string + let tmp = try: + newClone(readSszForkedHashedBeaconState( + cfg, readAllBytes(checkpointStatePath).tryGet())) + except SszError as err: + fatal "Checkpoint state loading failed", + err = formatMsg(err, checkpointStatePath) + quit 1 + except CatchableError as err: + fatal "Failed to read checkpoint state file", err = err.msg + quit 1 + + if not getStateField(tmp[], slot).is_epoch: + fatal "--finalized-checkpoint-state must point to a state for an epoch slot", + slot = getStateField(tmp[], slot) + quit 1 + tmp + else: + nil + + if config.finalizedDepositTreeSnapshot.isSome: + let + depositTreeSnapshotPath = config.finalizedDepositTreeSnapshot.get.string + snapshot = + try: + SSZ.loadFile(depositTreeSnapshotPath, DepositTreeSnapshot) + except SszError as err: + fatal "Deposit tree snapshot loading failed", + err = formatMsg(err, depositTreeSnapshotPath) + quit 1 + except CatchableError as err: + fatal "Failed to read deposit tree snapshot file", err = err.msg + quit 1 + depositContractSnapshot = DepositContractSnapshot.init(snapshot).valueOr: + fatal "Invalid deposit tree snapshot file" + quit 1 + db.putDepositContractSnapshot(depositContractSnapshot) + + let engineApiUrls = config.engineApiUrls + + if engineApiUrls.len == 0: + notice "Running without execution client - validator features disabled (see https://nimbus.guide/eth1.html)" + + var networkGenesisValidatorsRoot = metadata.bakedGenesisValidatorsRoot + + if not ChainDAGRef.isInitialized(db).isOk(): + genesisState = + if not checkpointState.isNil and + getStateField(checkpointState[], slot) == 0: + checkpointState + else: + if genesisState.isNil: + await fetchGenesisState( + metadata, config.genesisState, config.genesisStateUrl) + else: + genesisState + + if genesisState.isNil and checkpointState.isNil: + fatal "No database and no genesis snapshot found. 
Please supply a genesis.ssz " & + "with the network configuration" + quit 1 + + if not genesisState.isNil and not checkpointState.isNil: + if getStateField(genesisState[], genesis_validators_root) != + getStateField(checkpointState[], genesis_validators_root): + fatal "Checkpoint state does not match genesis - check the --network parameter", + rootFromGenesis = getStateField( + genesisState[], genesis_validators_root), + rootFromCheckpoint = getStateField( + checkpointState[], genesis_validators_root) + quit 1 + + try: + # Always store genesis state if we have it - this allows reindexing and + # answering genesis queries + if not genesisState.isNil: + ChainDAGRef.preInit(db, genesisState[]) + networkGenesisValidatorsRoot = + Opt.some(getStateField(genesisState[], genesis_validators_root)) + + if not checkpointState.isNil: + if genesisState.isNil or + getStateField(checkpointState[], slot) != GENESIS_SLOT: + ChainDAGRef.preInit(db, checkpointState[]) + + doAssert ChainDAGRef.isInitialized(db).isOk(), "preInit should have initialized db" + except CatchableError as exc: + error "Failed to initialize database", err = exc.msg + quit 1 + else: + if not checkpointState.isNil: + fatal "A database already exists, cannot start from given checkpoint", + dataDir = config.dataDir + quit 1 + + # Doesn't use std/random directly, but dependencies might + randomize(rng[].rand(high(int))) + + # The validatorMonitorTotals flag has been deprecated and should eventually be + # removed - until then, it's given priority if set so as not to needlessly + # break existing setups + let + validatorMonitor = newClone(ValidatorMonitor.init( + config.validatorMonitorAuto, + config.validatorMonitorTotals.get( + not config.validatorMonitorDetails))) + + for key in config.validatorMonitorPubkeys: + validatorMonitor[].addMonitor(key, Opt.none(ValidatorIndex)) + + let + dag = loadChainDag( + config, cfg, db, eventBus, + validatorMonitor, networkGenesisValidatorsRoot) + genesisTime = getStateField(dag.headState, genesis_time) + beaconClock = BeaconClock.init(genesisTime).valueOr: + fatal "Invalid genesis time in state", genesisTime + quit 1 + + getBeaconTime = beaconClock.getBeaconTimeFn() + + if config.weakSubjectivityCheckpoint.isSome: + dag.checkWeakSubjectivityCheckpoint( + config.weakSubjectivityCheckpoint.get, beaconClock) + + let elManager = ELManager.new( + cfg, + metadata.depositContractBlock, + metadata.depositContractBlockHash, + db, + engineApiUrls, + eth1Network) + + if config.rpcEnabled.isSome: + warn "Nimbus's JSON-RPC server has been removed. This includes the --rpc, --rpc-port, and --rpc-address configuration options. https://nimbus.guide/rest-api.html shows how to enable and configure the REST Beacon API server which replaces it." 
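+  # The REST Beacon API server below replaces the removed JSON-RPC server: it
+  # is only created when `config.restEnabled` is set and is started later in
+  # `run`.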
+ + let restServer = if config.restEnabled: + RestServerRef.init(config.restAddress, config.restPort, + config.restAllowedOrigin, + validateBeaconApiQueries, + nimbusAgentStr, + config) + else: + nil + + let + netKeys = getPersistentNetKeys(rng[], config) + nickname = if config.nodeName == "auto": shortForm(netKeys) + else: config.nodeName + network = createEth2Node( + rng, config, netKeys, cfg, dag.forkDigests, getBeaconTime, + getStateField(dag.headState, genesis_validators_root)) + + case config.slashingDbKind + of SlashingDbKind.v2: + discard + of SlashingDbKind.v1: + error "Slashing DB v1 is no longer supported for writing" + quit 1 + of SlashingDbKind.both: + warn "Slashing DB v1 deprecated, writing only v2" + + info "Loading slashing protection database (v2)", + path = config.validatorsDir() + + proc getValidatorAndIdx(pubkey: ValidatorPubKey): Opt[ValidatorAndIndex] = + withState(dag.headState): + getValidator(forkyState().data.validators.asSeq(), pubkey) + + func getCapellaForkVersion(): Opt[Version] = + Opt.some(cfg.CAPELLA_FORK_VERSION) + + func getDenebForkEpoch(): Opt[Epoch] = + Opt.some(cfg.DENEB_FORK_EPOCH) + + proc getForkForEpoch(epoch: Epoch): Opt[Fork] = + Opt.some(dag.forkAtEpoch(epoch)) + + proc getGenesisRoot(): Eth2Digest = + getStateField(dag.headState, genesis_validators_root) + + let + keystoreCache = KeystoreCacheRef.init() + slashingProtectionDB = + SlashingProtectionDB.init( + getStateField(dag.headState, genesis_validators_root), + config.validatorsDir(), SlashingDbName) + validatorPool = newClone(ValidatorPool.init( + slashingProtectionDB, config.doppelgangerDetection)) + + keymanagerInitResult = initKeymanagerServer(config, restServer) + keymanagerHost = if keymanagerInitResult.server != nil: + newClone KeymanagerHost.init( + validatorPool, + keystoreCache, + rng, + keymanagerInitResult.token, + config.validatorsDir, + config.secretsDir, + config.defaultFeeRecipient, + config.suggestedGasLimit, + config.defaultGraffitiBytes, + config.getPayloadBuilderAddress, + getValidatorAndIdx, + getBeaconTime, + getCapellaForkVersion, + getDenebForkEpoch, + getForkForEpoch, + getGenesisRoot) + else: nil + + stateTtlCache = + if config.restCacheSize > 0: + StateTtlCache.init( + cacheSize = config.restCacheSize, + cacheTtl = chronos.seconds(config.restCacheTtl)) + else: + nil + + if config.payloadBuilderEnable: + info "Using external payload builder", + payloadBuilderUrl = config.payloadBuilderUrl + + let node = BeaconNode( + nickname: nickname, + graffitiBytes: if config.graffiti.isSome: config.graffiti.get + else: defaultGraffitiBytes(), + network: network, + netKeys: netKeys, + db: db, + config: config, + attachedValidators: validatorPool, + elManager: elManager, + restServer: restServer, + keymanagerHost: keymanagerHost, + keymanagerServer: keymanagerInitResult.server, + keystoreCache: keystoreCache, + eventBus: eventBus, + gossipState: {}, + blocksGossipState: {}, + beaconClock: beaconClock, + validatorMonitor: validatorMonitor, + stateTtlCache: stateTtlCache, + shutdownEvent: newAsyncEvent(), + dynamicFeeRecipientsStore: newClone(DynamicFeeRecipientsStore.init())) + + # TODO: we are initializing the light client given that it has a function + # to validate if the sync should be done optimistically or not, and it used + # along beacon node + node.initLightClient( + rng, cfg, dag.forkDigests, getBeaconTime, dag.genesis_validators_root) + + await node.initFullNode(rng, dag, taskpool, getBeaconTime) + + node.updateLightClientFromDag() + + node + +proc 
installMessageValidators(node: BeaconNode) = + # These validators stay around the whole time, regardless of which specific + # subnets are subscribed to during any given epoch. + let forkDigests = node.dag.forkDigests + + for fork in ConsensusFork: + withConsensusFork(fork): + let digest = forkDigests[].atConsensusFork(consensusFork) + + # beacon_block + # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/specs/phase0/p2p-interface.md#beacon_block + node.network.addValidator( + getBeaconBlocksTopic(digest), proc ( + signedBlock: consensusFork.SignedBeaconBlock + ): ValidationResult = + if node.shouldSyncOptimistically(node.currentSlot): + toValidationResult( + node.optimisticProcessor.processSignedBeaconBlock( + signedBlock)) + else: + toValidationResult( + node.processor[].processSignedBeaconBlock( + MsgSource.gossip, signedBlock))) + + # beacon_attestation_{subnet_id} + # https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.5/specs/phase0/p2p-interface.md#beacon_attestation_subnet_id + when consensusFork >= ConsensusFork.Electra: + for it in SubnetId: + closureScope: # Needed for inner `proc`; don't lift it out of loop. + let subnet_id = it + node.network.addAsyncValidator( + getAttestationTopic(digest, subnet_id), proc ( + attestation: electra.Attestation + ): Future[ValidationResult] {. + async: (raises: [CancelledError]).} = + return toValidationResult( + await node.processor.processAttestation( + MsgSource.gossip, attestation, subnet_id, + checkSignature = true, checkValidator = false))) + else: + for it in SubnetId: + closureScope: # Needed for inner `proc`; don't lift it out of loop. + let subnet_id = it + node.network.addAsyncValidator( + getAttestationTopic(digest, subnet_id), proc ( + attestation: phase0.Attestation + ): Future[ValidationResult] {. 
+ async: (raises: [CancelledError]).} = + return toValidationResult( + await node.processor.processAttestation( + MsgSource.gossip, attestation, subnet_id, + checkSignature = true, checkValidator = false))) + + # beacon_aggregate_and_proof + # https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/phase0/p2p-interface.md#beacon_aggregate_and_proof + when consensusFork >= ConsensusFork.Electra: + node.network.addAsyncValidator( + getAggregateAndProofsTopic(digest), proc ( + signedAggregateAndProof: electra.SignedAggregateAndProof + ): Future[ValidationResult] {.async: (raises: [CancelledError]).} = + return toValidationResult( + await node.processor.processSignedAggregateAndProof( + MsgSource.gossip, signedAggregateAndProof))) + else: + node.network.addAsyncValidator( + getAggregateAndProofsTopic(digest), proc ( + signedAggregateAndProof: phase0.SignedAggregateAndProof + ): Future[ValidationResult] {.async: (raises: [CancelledError]).} = + return toValidationResult( + await node.processor.processSignedAggregateAndProof( + MsgSource.gossip, signedAggregateAndProof))) + + # attester_slashing + # https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.5/specs/phase0/p2p-interface.md#attester_slashing + # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.6/specs/electra/p2p-interface.md#modifications-in-electra + when consensusFork >= ConsensusFork.Electra: + node.network.addValidator( + getAttesterSlashingsTopic(digest), proc ( + attesterSlashing: electra.AttesterSlashing + ): ValidationResult = + toValidationResult( + node.processor[].processAttesterSlashing( + MsgSource.gossip, attesterSlashing))) + else: + node.network.addValidator( + getAttesterSlashingsTopic(digest), proc ( + attesterSlashing: phase0.AttesterSlashing + ): ValidationResult = + toValidationResult( + node.processor[].processAttesterSlashing( + MsgSource.gossip, attesterSlashing))) + + # proposer_slashing + # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/specs/phase0/p2p-interface.md#proposer_slashing + node.network.addValidator( + getProposerSlashingsTopic(digest), proc ( + proposerSlashing: ProposerSlashing + ): ValidationResult = + toValidationResult( + node.processor[].processProposerSlashing( + MsgSource.gossip, proposerSlashing))) + + # voluntary_exit + # https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.5/specs/phase0/p2p-interface.md#voluntary_exit + node.network.addValidator( + getVoluntaryExitsTopic(digest), proc ( + signedVoluntaryExit: SignedVoluntaryExit + ): ValidationResult = + toValidationResult( + node.processor[].processSignedVoluntaryExit( + MsgSource.gossip, signedVoluntaryExit))) + + when consensusFork >= ConsensusFork.Altair: + # sync_committee_{subnet_id} + # https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/altair/p2p-interface.md#sync_committee_subnet_id + for subcommitteeIdx in SyncSubcommitteeIndex: + closureScope: # Needed for inner `proc`; don't lift it out of loop. 
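+            # Without closureScope each per-subnet gossip validator would
+            # capture the same loop variable and observe only its final value;
+            # the local copy below gives each registered validator its own
+            # `idx`.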
+ let idx = subcommitteeIdx + node.network.addAsyncValidator( + getSyncCommitteeTopic(digest, idx), proc ( + msg: SyncCommitteeMessage + ): Future[ValidationResult] {.async: (raises: [CancelledError]).} = + return toValidationResult( + await node.processor.processSyncCommitteeMessage( + MsgSource.gossip, msg, idx))) + + # sync_committee_contribution_and_proof + # https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/altair/p2p-interface.md#sync_committee_contribution_and_proof + node.network.addAsyncValidator( + getSyncCommitteeContributionAndProofTopic(digest), proc ( + msg: SignedContributionAndProof + ): Future[ValidationResult] {.async: (raises: [CancelledError]).} = + return toValidationResult( + await node.processor.processSignedContributionAndProof( + MsgSource.gossip, msg))) + + when consensusFork >= ConsensusFork.Capella: + # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/specs/capella/p2p-interface.md#bls_to_execution_change + node.network.addAsyncValidator( + getBlsToExecutionChangeTopic(digest), proc ( + msg: SignedBLSToExecutionChange + ): Future[ValidationResult] {.async: (raises: [CancelledError]).} = + return toValidationResult( + await node.processor.processBlsToExecutionChange( + MsgSource.gossip, msg))) + + when consensusFork >= ConsensusFork.Deneb: + # blob_sidecar_{subnet_id} + # https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.5/specs/deneb/p2p-interface.md#blob_sidecar_subnet_id + for it in BlobId: + closureScope: # Needed for inner `proc`; don't lift it out of loop. + let subnet_id = it + node.network.addValidator( + getBlobSidecarTopic(digest, subnet_id), proc ( + blobSidecar: deneb.BlobSidecar + ): ValidationResult = + toValidationResult( + node.processor[].processBlobSidecar( + MsgSource.gossip, blobSidecar, subnet_id))) + + # node.installLightClientMessageValidators() + +proc checkWeakSubjectivityCheckpoint( + dag: ChainDAGRef, wsCheckpoint: Checkpoint, beaconClock: BeaconClock +) = + let + currentSlot = beaconClock.now.slotOrZero + isCheckpointStale = + not is_within_weak_subjectivity_period( + dag.cfg, currentSlot, dag.headState, wsCheckpoint + ) + + if isCheckpointStale: + error "Weak subjectivity checkpoint is stale", + currentSlot, + checkpoint = wsCheckpoint, + headStateSlot = getStateField(dag.headState, slot) + quit 1 -## adapted/copied from nimbus-eth2 proc fetchGenesisState( metadata: Eth2NetworkMetadata, genesisState = none(InputFile), @@ -97,37 +1205,1208 @@ proc fetchGenesisState( else: nil -## adapted/copied from nimbus-eth2 -proc doRunTrustedNodeSync( - db: BeaconChainDB, - metadata: Eth2NetworkMetadata, - databaseDir: string, - eraDir: string, - restUrl: string, - stateId: Option[string], - trustedBlockRoot: Option[Eth2Digest], - backfill: bool, - reindex: bool, - downloadDepositSnapshot: bool, - genesisState: ref ForkedHashedBeaconState, -) {.async.} = - let syncTarget = - if stateId.isSome: - if trustedBlockRoot.isSome: - warn "Ignoring `trustedBlockRoot`, `stateId` is set", stateId, trustedBlockRoot - TrustedNodeSyncTarget(kind: TrustedNodeSyncKind.StateId, stateId: stateId.get) - elif trustedBlockRoot.isSome: - TrustedNodeSyncTarget( - kind: TrustedNodeSyncKind.TrustedBlockRoot, - trustedBlockRoot: trustedBlockRoot.get, - ) +proc pruneBlobs(node: BeaconNode, slot: Slot) = + let blobPruneEpoch = (slot.epoch - + node.dag.cfg.MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS - 1) + if slot.is_epoch() and blobPruneEpoch >= node.dag.cfg.DENEB_FORK_EPOCH: + var blocks: array[SLOTS_PER_EPOCH.int, BlockId] + var count = 0 + 
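+    # Blob sidecars only need to be retained for
+    # MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS epochs (4096 on mainnet, roughly
+    # 18 days); once per epoch the epoch that has just left that window is
+    # looked up and its blob sidecars are deleted below.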
let startIndex = node.dag.getBlockRange( + blobPruneEpoch.start_slot, 1, blocks.toOpenArray(0, SLOTS_PER_EPOCH - 1)) + for i in startIndex..= ConsensusFork.Altair: + forkyState.data.current_sync_committee + else: + return static(default(SyncnetBits)) + + getSyncSubnets(node.hasSyncPubKey(epoch), syncCommittee) + +func getNextSyncCommitteeSubnets(node: BeaconNode, epoch: Epoch): SyncnetBits = + let syncCommittee = withState(node.dag.headState): + when consensusFork >= ConsensusFork.Altair: + forkyState.data.next_sync_committee + else: + return static(default(SyncnetBits)) + + getSyncSubnets( + node.hasSyncPubKey((epoch.sync_committee_period + 1).start_slot().epoch), + syncCommittee) + +func getSyncCommitteeSubnets(node: BeaconNode, epoch: Epoch): SyncnetBits = + let + subnets = node.getCurrentSyncCommiteeSubnets(epoch) + epochsToSyncPeriod = nearSyncCommitteePeriod(epoch) + + # The end-slot tracker might call this when it's theoretically applicable, + # but more than SYNC_COMMITTEE_SUBNET_COUNT epochs from when the next sync + # committee period begins, in which case `epochsToNextSyncPeriod` is none. + if epochsToSyncPeriod.isNone or + node.dag.cfg.consensusForkAtEpoch(epoch + epochsToSyncPeriod.get) < + ConsensusFork.Altair: + return subnets + + subnets + node.getNextSyncCommitteeSubnets(epoch) + +func forkDigests(node: BeaconNode): auto = + let forkDigestsArray: array[ConsensusFork, auto] = [ + node.dag.forkDigests.phase0, + node.dag.forkDigests.altair, + node.dag.forkDigests.bellatrix, + node.dag.forkDigests.capella, + node.dag.forkDigests.deneb, + node.dag.forkDigests.electra] + forkDigestsArray + +proc updateSyncCommitteeTopics(node: BeaconNode, slot: Slot) = + template lastSyncUpdate: untyped = + node.consensusManager[].actionTracker.lastSyncUpdate + if lastSyncUpdate == Opt.some(slot.sync_committee_period()) and + nearSyncCommitteePeriod(slot.epoch).isNone(): + # No need to update unless we're close to the next sync committee period or + # new validators were registered with the action tracker + # TODO we _could_ skip running this in some of the "near" slots, but.. + return + + lastSyncUpdate = Opt.some(slot.sync_committee_period()) + + let syncnets = node.getSyncCommitteeSubnets(slot.epoch) + + debug "Updating sync committee subnets", + syncnets, + metadata_syncnets = node.network.metadata.syncnets, + gossipState = node.gossipState + + # Assume that different gossip fork sync committee setups are in sync; this + # only remains relevant, currently, for one gossip transition epoch, so the + # consequences of this not being true aren't exceptionally dire, while this + # allows for bookkeeping simplication. 
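+  # The update below is plain set arithmetic: subnets we want but do not yet
+  # advertise get subscribed, subnets we advertise but no longer want get
+  # unsubscribed. E.g. current = {0, 2}, wanted = {1, 2} -> subscribe {1},
+  # unsubscribe {0}.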
+ if syncnets == node.network.metadata.syncnets: + return + + let + newSyncnets = + syncnets - node.network.metadata.syncnets + oldSyncnets = + node.network.metadata.syncnets - syncnets + forkDigests = node.forkDigests() + + for subcommitteeIdx in SyncSubcommitteeIndex: + doAssert not (newSyncnets[subcommitteeIdx] and + oldSyncnets[subcommitteeIdx]) + for gossipFork in node.gossipState: + template topic(): auto = + getSyncCommitteeTopic(forkDigests[gossipFork], subcommitteeIdx) + if oldSyncnets[subcommitteeIdx]: + node.network.unsubscribe(topic) + elif newSyncnets[subcommitteeIdx]: + node.network.subscribe(topic, basicParams) + + node.network.updateSyncnetsMetadata(syncnets) + +proc removePhase0MessageHandlers(node: BeaconNode, forkDigest: ForkDigest) = + node.network.unsubscribe(getVoluntaryExitsTopic(forkDigest)) + node.network.unsubscribe(getProposerSlashingsTopic(forkDigest)) + node.network.unsubscribe(getAttesterSlashingsTopic(forkDigest)) + node.network.unsubscribe(getAggregateAndProofsTopic(forkDigest)) + + for subnet_id in SubnetId: + node.network.unsubscribe(getAttestationTopic(forkDigest, subnet_id)) + + node.consensusManager[].actionTracker.subscribedSubnets = default(AttnetBits) + +# updateAttestationSubnetHandlers subscribes attestation subnets +proc addPhase0MessageHandlers( + node: BeaconNode, forkDigest: ForkDigest, slot: Slot) = + node.network.subscribe(getAttesterSlashingsTopic(forkDigest), basicParams) + node.network.subscribe(getProposerSlashingsTopic(forkDigest), basicParams) + node.network.subscribe(getVoluntaryExitsTopic(forkDigest), basicParams) + node.network.subscribe( + getAggregateAndProofsTopic(forkDigest), aggregateTopicParams, + enableTopicMetrics = true) + +proc addAltairMessageHandlers( + node: BeaconNode, forkDigest: ForkDigest, slot: Slot) = + node.addPhase0MessageHandlers(forkDigest, slot) + + # If this comes online near sync committee period, it'll immediately get + # replaced as usual by trackSyncCommitteeTopics, which runs at slot end. 
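+  # Handler installation is layered by fork: Altair adds the sync-committee
+  # topics on top of the phase0 handlers, Capella adds bls_to_execution_change,
+  # Deneb adds the blob-sidecar topics, and Electra currently adds nothing new
+  # (see the procs that follow).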
+ let syncnets = node.getSyncCommitteeSubnets(slot.epoch) + + for subcommitteeIdx in SyncSubcommitteeIndex: + if syncnets[subcommitteeIdx]: + node.network.subscribe( + getSyncCommitteeTopic(forkDigest, subcommitteeIdx), basicParams) + + node.network.subscribe( + getSyncCommitteeContributionAndProofTopic(forkDigest), basicParams) + + node.network.updateSyncnetsMetadata(syncnets) + +proc addCapellaMessageHandlers( + node: BeaconNode, forkDigest: ForkDigest, slot: Slot) = + node.addAltairMessageHandlers(forkDigest, slot) + node.network.subscribe(getBlsToExecutionChangeTopic(forkDigest), basicParams) + +proc addDenebMessageHandlers( + node: BeaconNode, forkDigest: ForkDigest, slot: Slot) = + node.addCapellaMessageHandlers(forkDigest, slot) + for topic in blobSidecarTopics(forkDigest): + node.network.subscribe(topic, basicParams) + +proc addElectraMessageHandlers( + node: BeaconNode, forkDigest: ForkDigest, slot: Slot) = + node.addDenebMessageHandlers(forkDigest, slot) + +proc removeAltairMessageHandlers(node: BeaconNode, forkDigest: ForkDigest) = + node.removePhase0MessageHandlers(forkDigest) + + for subcommitteeIdx in SyncSubcommitteeIndex: + closureScope: + let idx = subcommitteeIdx + node.network.unsubscribe(getSyncCommitteeTopic(forkDigest, idx)) + + node.network.unsubscribe( + getSyncCommitteeContributionAndProofTopic(forkDigest)) + +proc removeCapellaMessageHandlers(node: BeaconNode, forkDigest: ForkDigest) = + node.removeAltairMessageHandlers(forkDigest) + node.network.unsubscribe(getBlsToExecutionChangeTopic(forkDigest)) + +proc removeDenebMessageHandlers(node: BeaconNode, forkDigest: ForkDigest) = + node.removeCapellaMessageHandlers(forkDigest) + for topic in blobSidecarTopics(forkDigest): + node.network.unsubscribe(topic) + +proc removeElectraMessageHandlers(node: BeaconNode, forkDigest: ForkDigest) = + node.removeDenebMessageHandlers(forkDigest) + +proc doppelgangerChecked(node: BeaconNode, epoch: Epoch) = + if not node.processor[].doppelgangerDetectionEnabled: + return + + # broadcastStartEpoch is set to FAR_FUTURE_EPOCH when we're not monitoring + # gossip - it is only viable to assert liveness in epochs where gossip is + # active + if epoch > node.processor[].doppelgangerDetection.broadcastStartEpoch: + for validator in node.attachedValidators[]: + validator.doppelgangerChecked(epoch - 1) + + +proc updateBlocksGossipStatus*( + node: BeaconNode, slot: Slot, dagIsBehind: bool) = + template cfg(): auto = node.dag.cfg + + let + isBehind = + if node.shouldSyncOptimistically(slot): + # If optimistic sync is active, always subscribe to blocks gossip + false + else: + # Use DAG status to determine whether to subscribe for blocks gossip + dagIsBehind + + targetGossipState = getTargetGossipState( + slot.epoch, cfg.ALTAIR_FORK_EPOCH, cfg.BELLATRIX_FORK_EPOCH, + cfg.CAPELLA_FORK_EPOCH, cfg.DENEB_FORK_EPOCH, cfg.ELECTRA_FORK_EPOCH, + isBehind) + + template currentGossipState(): auto = node.blocksGossipState + if currentGossipState == targetGossipState: + return + + if currentGossipState.card == 0 and targetGossipState.card > 0: + debug "Enabling blocks topic subscriptions", + wallSlot = slot, targetGossipState + elif currentGossipState.card > 0 and targetGossipState.card == 0: + debug "Disabling blocks topic subscriptions", + wallSlot = slot + else: + # Individual forks added / removed + discard + + let + newGossipForks = targetGossipState - currentGossipState + oldGossipForks = currentGossipState - targetGossipState + + for gossipFork in oldGossipForks: + let forkDigest = 
node.dag.forkDigests[].atConsensusFork(gossipFork) + node.network.unsubscribe(getBeaconBlocksTopic(forkDigest)) + + for gossipFork in newGossipForks: + let forkDigest = node.dag.forkDigests[].atConsensusFork(gossipFork) + node.network.subscribe( + getBeaconBlocksTopic(forkDigest), blocksTopicParams, + enableTopicMetrics = true) + + node.blocksGossipState = targetGossipState + +func subnetLog(v: BitArray): string = + $toSeq(v.oneIndices()) + +# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.5/specs/phase0/p2p-interface.md#attestation-subnet-subscription +proc updateAttestationSubnetHandlers(node: BeaconNode, slot: Slot) = + if node.gossipState.card == 0: + # When disconnected, updateBlocksGossipStatus is responsible for all things + # subnets - in particular, it will remove subscriptions on the edge where + # we enter the disconnected state. + return + + let + aggregateSubnets = + node.consensusManager[].actionTracker.aggregateSubnets(slot) + stabilitySubnets = + node.consensusManager[].actionTracker.stabilitySubnets(slot) + subnets = aggregateSubnets + stabilitySubnets + + node.network.updateStabilitySubnetMetadata(stabilitySubnets) + + # Now we know what we should be subscribed to - make it so + let + prevSubnets = node.consensusManager[].actionTracker.subscribedSubnets + unsubscribeSubnets = prevSubnets - subnets + subscribeSubnets = subnets - prevSubnets + + # Remember what we subscribed to, so we can unsubscribe later + node.consensusManager[].actionTracker.subscribedSubnets = subnets + + let forkDigests = node.forkDigests() + + for gossipFork in node.gossipState: + let forkDigest = forkDigests[gossipFork] + node.network.unsubscribeAttestationSubnets(unsubscribeSubnets, forkDigest) + node.network.subscribeAttestationSubnets(subscribeSubnets, forkDigest) + + debug "Attestation subnets", + slot, epoch = slot.epoch, gossipState = node.gossipState, + stabilitySubnets = subnetLog(stabilitySubnets), + aggregateSubnets = subnetLog(aggregateSubnets), + prevSubnets = subnetLog(prevSubnets), + subscribeSubnets = subnetLog(subscribeSubnets), + unsubscribeSubnets = subnetLog(unsubscribeSubnets), + gossipState = node.gossipState + +#TODO: overriden due to shadowing from +proc localUpdateGossipStatus(node: BeaconNode, slot: Slot) {.async.} = + ## Subscribe to subnets that we are providing stability for or aggregating + ## and unsubscribe from the ones that are no longer relevant. 
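+  ## The node is considered "behind" (and gossip stays disabled) while the head
+  ## is more than TOPIC_SUBSCRIBE_THRESHOLD_SLOTS + HYSTERESIS_BUFFER = 80 slots
+  ## behind the wall clock; the buffer avoids flapping around the threshold.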
+ + # Let the tracker know what duties are approaching - this will tell us how + # many stability subnets we need to be subscribed to and what subnets we'll + # soon be aggregating - in addition to the in-beacon-node duties, there may + # also be duties coming from the validator client, but we don't control when + # these arrive + await node.registerDuties(slot) + + # We start subscribing to gossip before we're fully synced - this allows time + # to subscribe before the sync end game + const + TOPIC_SUBSCRIBE_THRESHOLD_SLOTS = 64 + HYSTERESIS_BUFFER = 16 + + static: doAssert high(ConsensusFork) == ConsensusFork.Electra + + let + head = node.dag.head + headDistance = + if slot > head.slot: (slot - head.slot).uint64 + else: 0'u64 + isBehind = + headDistance > TOPIC_SUBSCRIBE_THRESHOLD_SLOTS + HYSTERESIS_BUFFER + targetGossipState = + getTargetGossipState( + slot.epoch, + node.dag.cfg.ALTAIR_FORK_EPOCH, + node.dag.cfg.BELLATRIX_FORK_EPOCH, + node.dag.cfg.CAPELLA_FORK_EPOCH, + node.dag.cfg.DENEB_FORK_EPOCH, + node.dag.cfg.ELECTRA_FORK_EPOCH, + isBehind) + + doAssert targetGossipState.card <= 2 + + let + newGossipForks = targetGossipState - node.gossipState + oldGossipForks = node.gossipState - targetGossipState + + doAssert newGossipForks.card <= 2 + doAssert oldGossipForks.card <= 2 + + func maxGossipFork(gossipState: GossipState): int = + var res = -1 + for gossipFork in gossipState: + res = max(res, gossipFork.int) + res + + if maxGossipFork(targetGossipState) < maxGossipFork(node.gossipState) and + targetGossipState != {}: + warn "Unexpected clock regression during transition", + targetGossipState, + gossipState = node.gossipState + + if node.gossipState.card == 0 and targetGossipState.card > 0: + # We are synced, so we will connect + debug "Enabling topic subscriptions", + wallSlot = slot, + headSlot = head.slot, + headDistance, targetGossipState + + node.processor[].setupDoppelgangerDetection(slot) + + # Specially when waiting for genesis, we'll already be synced on startup - + # it might also happen on a sufficiently fast restart + + # We "know" the actions for the current and the next epoch + withState(node.dag.headState): + if node.consensusManager[].actionTracker.needsUpdate( + forkyState, slot.epoch): + let epochRef = node.dag.getEpochRef(head, slot.epoch, false).expect( + "Getting head EpochRef should never fail") + node.consensusManager[].actionTracker.updateActions( + epochRef.shufflingRef, epochRef.beacon_proposers) + + node.maybeUpdateActionTrackerNextEpoch(forkyState, slot.epoch + 1) + + if node.gossipState.card > 0 and targetGossipState.card == 0: + debug "Disabling topic subscriptions", + wallSlot = slot, + headSlot = head.slot, + headDistance + + node.processor[].clearDoppelgangerProtection() + + let forkDigests = node.forkDigests() + + const removeMessageHandlers: array[ConsensusFork, auto] = [ + removePhase0MessageHandlers, + removeAltairMessageHandlers, + removeAltairMessageHandlers, # bellatrix (altair handlers, different forkDigest) + removeCapellaMessageHandlers, + removeDenebMessageHandlers, + removeElectraMessageHandlers + ] + + for gossipFork in oldGossipForks: + removeMessageHandlers[gossipFork](node, forkDigests[gossipFork]) + + const addMessageHandlers: array[ConsensusFork, auto] = [ + addPhase0MessageHandlers, + addAltairMessageHandlers, + addAltairMessageHandlers, # bellatrix (altair handlers, different forkDigest) + addCapellaMessageHandlers, + addDenebMessageHandlers, + addElectraMessageHandlers + ] + + for gossipFork in newGossipForks: + 
addMessageHandlers[gossipFork](node, forkDigests[gossipFork], slot) + + node.gossipState = targetGossipState + node.doppelgangerChecked(slot.epoch) + node.updateAttestationSubnetHandlers(slot) + node.updateBlocksGossipStatus(slot, isBehind) + # node.updateLightClientGossipStatus(slot, isBehind) + +proc onSlotEnd(node: BeaconNode, slot: Slot) {.async.} = + # Things we do when slot processing has ended and we're about to wait for the + # next slot + + # By waiting until close before slot end, ensure that preparation for next + # slot does not interfere with propagation of messages and with VC duties. + const endOffset = aggregateSlotOffset + nanos( + (NANOSECONDS_PER_SLOT - aggregateSlotOffset.nanoseconds.uint64).int64 div 2) + let endCutoff = node.beaconClock.fromNow(slot.start_beacon_time + endOffset) + if endCutoff.inFuture: + debug "Waiting for slot end", slot, endCutoff = shortLog(endCutoff.offset) + await sleepAsync(endCutoff.offset) + + if node.dag.needStateCachesAndForkChoicePruning(): + if node.attachedValidators[].validators.len > 0: + node.attachedValidators[] + .slashingProtection + # pruning is only done if the DB is set to pruning mode. + .pruneAfterFinalization( + node.dag.finalizedHead.slot.epoch() + ) + + # Delay part of pruning until latency critical duties are done. + # The other part of pruning, `pruneBlocksDAG`, is done eagerly. + # ---- + # This is the last pruning to do as it clears the "needPruning" condition. + node.consensusManager[].pruneStateCachesAndForkChoice() + + if node.config.historyMode == HistoryMode.Prune: + if not (slot + 1).is_epoch(): + # The epoch slot already is "heavy" due to the epoch processing, leave + # the pruning for later + node.dag.pruneHistory() + node.pruneBlobs(slot) + + when declared(GC_fullCollect): + # The slots in the beacon node work as frames in a game: we want to make + # sure that we're ready for the next one and don't get stuck in lengthy + # garbage collection tasks when time is of essence in the middle of a slot - + # while this does not guarantee that we'll never collect during a slot, it + # makes sure that all the scratch space we used during slot tasks (logging, + # temporary buffers etc) gets recycled for the next slot that is likely to + # need similar amounts of memory. + try: + GC_fullCollect() + except Defect as exc: + raise exc # Reraise to maintain call stack + except Exception: + # TODO upstream + raiseAssert "Unexpected exception during GC collection" + let gcCollectionTick = Moment.now() + + # Checkpoint the database to clear the WAL file and make sure changes in + # the database are synced with the filesystem. + node.db.checkpoint() + let + dbCheckpointTick = Moment.now() + dbCheckpointDur = dbCheckpointTick - gcCollectionTick + # db_checkpoint_seconds.inc(dbCheckpointDur.toFloatSeconds) + if dbCheckpointDur >= MinSignificantProcessingDuration: + info "Database checkpointed", dur = dbCheckpointDur + else: + debug "Database checkpointed", dur = dbCheckpointDur + + node.syncCommitteeMsgPool[].pruneData(slot) + if slot.is_epoch: + node.dynamicFeeRecipientsStore[].pruneOldMappings(slot.epoch) + + # Update upcoming actions - we do this every slot in case a reorg happens + let head = node.dag.head + if node.isSynced(head) and head.executionValid: + withState(node.dag.headState): + # maybeUpdateActionTrackerNextEpoch might not account for balance changes + # from the process_rewards_and_penalties() epoch transition but only from + # process_block() and other per-slot sources. 
This mainly matters insofar + # as it might trigger process_effective_balance_updates() changes in that + # same epoch transition, which function is therefore potentially blind to + # but which might then affect beacon proposers. + # + # Because this runs every slot, it can account naturally for slashings, + # which affect balances via slash_validator() when they happen, and any + # missed sync committee participation via process_sync_aggregate(), but + # attestation penalties for example, need, specific handling. + # checked by maybeUpdateActionTrackerNextEpoch. + node.maybeUpdateActionTrackerNextEpoch(forkyState, slot.epoch + 1) + + let + nextAttestationSlot = + node.consensusManager[].actionTracker.getNextAttestationSlot(slot) + nextProposalSlot = + node.consensusManager[].actionTracker.getNextProposalSlot(slot) + nextActionSlot = min(nextAttestationSlot, nextProposalSlot) + nextActionWaitTime = saturate(fromNow(node.beaconClock, nextActionSlot)) + + # -1 is a more useful output than 18446744073709551615 as an indicator of + # no future attestation/proposal known. + template formatInt64(x: Slot): int64 = + if x == high(uint64).Slot: + -1'i64 + else: + toGaugeValue(x) + + let + syncCommitteeSlot = slot + 1 + syncCommitteeEpoch = syncCommitteeSlot.epoch + inCurrentSyncCommittee = + not node.getCurrentSyncCommiteeSubnets(syncCommitteeEpoch).isZeros() + + template formatSyncCommitteeStatus(): string = + if inCurrentSyncCommittee: + "current" + elif not node.getNextSyncCommitteeSubnets(syncCommitteeEpoch).isZeros(): + let slotsToNextSyncCommitteePeriod = + SLOTS_PER_SYNC_COMMITTEE_PERIOD - + since_sync_committee_period_start(syncCommitteeSlot) + # int64 conversion is safe + doAssert slotsToNextSyncCommitteePeriod <= SLOTS_PER_SYNC_COMMITTEE_PERIOD + "in " & toTimeLeftString( + SECONDS_PER_SLOT.int64.seconds * slotsToNextSyncCommitteePeriod.int64) + else: + "none" + + info "Slot end", + slot = shortLog(slot), + nextActionWait = + if nextActionSlot == FAR_FUTURE_SLOT: + "n/a" + else: + shortLog(nextActionWaitTime), + nextAttestationSlot = formatInt64(nextAttestationSlot), + nextProposalSlot = formatInt64(nextProposalSlot), + syncCommitteeDuties = formatSyncCommitteeStatus(), + head = shortLog(head) + + # if nextActionSlot != FAR_FUTURE_SLOT: + # next_action_wait.set(nextActionWaitTime.toFloatSeconds) + + # next_proposal_wait.set( + # if nextProposalSlot != FAR_FUTURE_SLOT: + # saturate(fromNow(node.beaconClock, nextProposalSlot)).toFloatSeconds() + # else: + # Inf) + + # sync_committee_active.set(if inCurrentSyncCommittee: 1 else: 0) + + let epoch = slot.epoch + if epoch + 1 >= node.network.forkId.next_fork_epoch: + # Update 1 epoch early to block non-fork-ready peers + node.network.updateForkId(epoch, node.dag.genesis_validators_root) + + # When we're not behind schedule, we'll speculatively update the clearance + # state in anticipation of receiving the next block - we do it after + # logging slot end since the nextActionWaitTime can be short + let advanceCutoff = node.beaconClock.fromNow( + slot.start_beacon_time() + chronos.seconds(int(SECONDS_PER_SLOT - 1))) + if advanceCutoff.inFuture: + # We wait until there's only a second left before the next slot begins, then + # we advance the clearance state to the next slot - this gives us a high + # probability of being prepared for the block that will arrive and the + # epoch processing that follows + await sleepAsync(advanceCutoff.offset) + node.dag.advanceClearanceState() + + # Prepare action tracker for the next slot + 
node.consensusManager[].actionTracker.updateSlot(slot + 1) + + # The last thing we do is to perform the subscriptions and unsubscriptions for + # the next slot, just before that slot starts - because of the advance cuttoff + # above, this will be done just before the next slot starts + node.updateSyncCommitteeTopics(slot + 1) + + await node.localUpdateGossipStatus(slot + 1) + +func formatNextConsensusFork( + node: BeaconNode, withVanityArt = false): Opt[string] = + let consensusFork = + node.dag.cfg.consensusForkAtEpoch(node.dag.head.slot.epoch) + if consensusFork == ConsensusFork.high: + return Opt.none(string) + let + nextConsensusFork = consensusFork.succ() + nextForkEpoch = node.dag.cfg.consensusForkEpoch(nextConsensusFork) + if nextForkEpoch == FAR_FUTURE_EPOCH: + return Opt.none(string) + Opt.some( + (if withVanityArt: nextConsensusFork.getVanityMascot & " " else: "") & + $nextConsensusFork & ":" & $nextForkEpoch) + +func syncStatus(node: BeaconNode, wallSlot: Slot): string = + let optimisticHead = not node.dag.head.executionValid + if node.syncManager.inProgress: + let + optimisticSuffix = + if optimisticHead: + "/opt" + else: + "" + # lightClientSuffix = + # if node.consensusManager[].shouldSyncOptimistically(wallSlot): + # " - lc: " & $shortLog(node.consensusManager[].optimisticHead) + # else: + # "" + node.syncManager.syncStatus & optimisticSuffix #& lightClientSuffix + elif node.backfiller.inProgress: + "backfill: " & node.backfiller.syncStatus + elif optimisticHead: + "synced/opt" + else: + "synced" + +func connectedPeersCount(node: BeaconNode): int = + len(node.network.peerPool) + +func formatGwei(amount: Gwei): string = + # TODO This is implemented in a quite a silly way. + # Better routines for formatting decimal numbers + # should exists somewhere else. + let + eth = distinctBase(amount) div 1000000000 + remainder = distinctBase(amount) mod 1000000000 + + result = $eth + if remainder != 0: + result.add '.' + let remainderStr = $remainder + for i in remainderStr.len ..< 9: + result.add '0' + result.add remainderStr + while result[^1] == '0': + result.setLen(result.len - 1) + +when not defined(windows): + proc initStatusBar(node: BeaconNode) {.raises: [ValueError].} = + if not isatty(stdout): return + if not node.config.statusBarEnabled: return + + try: + enableTrueColors() + except Exception as exc: # TODO Exception + error "Couldn't enable colors", err = exc.msg + + proc dataResolver(expr: string): string {.raises: [].} = + template justified: untyped = node.dag.head.atEpochStart( + getStateField( + node.dag.headState, current_justified_checkpoint).epoch) + # TODO: + # We should introduce a general API for resolving dot expressions + # such as `db.latest_block.slot` or `metrics.connected_peers`. + # Such an API can be shared between the RPC back-end, CLI tools + # such as ncli, a potential GraphQL back-end and so on. + # The status bar feature would allow the user to specify an + # arbitrary expression that is resolvable through this API. 
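+      # For example, a status bar contents string such as
+      #   "peers: $connected_peers;slot: $head_slot"
+      # is resolved one identifier at a time through the case below (the
+      # leading '$' has already been stripped before dataResolver is called).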
+ case expr.toLowerAscii + of "version": + versionAsStr + + of "full_version": + fullVersionStr + + of "connected_peers": + $(node.connectedPeersCount) + + of "head_root": + shortLog(node.dag.head.root) + of "head_epoch": + $(node.dag.head.slot.epoch) + of "head_epoch_slot": + $(node.dag.head.slot.since_epoch_start) + of "head_slot": + $(node.dag.head.slot) + + of "justifed_root": + shortLog(justified.blck.root) + of "justifed_epoch": + $(justified.slot.epoch) + of "justifed_epoch_slot": + $(justified.slot.since_epoch_start) + of "justifed_slot": + $(justified.slot) + + of "finalized_root": + shortLog(node.dag.finalizedHead.blck.root) + of "finalized_epoch": + $(node.dag.finalizedHead.slot.epoch) + of "finalized_epoch_slot": + $(node.dag.finalizedHead.slot.since_epoch_start) + of "finalized_slot": + $(node.dag.finalizedHead.slot) + + of "epoch": + $node.currentSlot.epoch + + of "epoch_slot": + $(node.currentSlot.since_epoch_start) + + of "slot": + $node.currentSlot + + of "slots_per_epoch": + $SLOTS_PER_EPOCH + + of "slot_trailing_digits": + var slotStr = $node.currentSlot + if slotStr.len > 3: slotStr = slotStr[^3..^1] + slotStr + + of "attached_validators_balance": + formatGwei(node.attachedValidatorBalanceTotal) + + of "next_consensus_fork": + let nextConsensusForkDescription = + node.formatNextConsensusFork(withVanityArt = true) + if nextConsensusForkDescription.isNone: + "" + else: + " (scheduled " & nextConsensusForkDescription.get & ")" + + of "sync_status": + node.syncStatus(node.currentSlot) + else: + # We ignore typos for now and just render the expression + # as it was written. TODO: come up with a good way to show + # an error message to the user. + "$" & expr + + var statusBar = StatusBarView.init( + node.config.statusBarContents, + dataResolver) + + when compiles(defaultChroniclesStream.outputs[0].writer): + let tmp = defaultChroniclesStream.outputs[0].writer + + defaultChroniclesStream.outputs[0].writer = + proc (logLevel: LogLevel, msg: LogOutputStr) {.raises: [].} = + try: + # p.hidePrompt + erase statusBar + # p.writeLine msg + tmp(logLevel, msg) + render statusBar + # p.showPrompt + except Exception as e: # render raises Exception + logLoggingFailure(cstring(msg), e) + + proc statusBarUpdatesPollingLoop() {.async.} = + try: + while true: + update statusBar + erase statusBar + render statusBar + await sleepAsync(chronos.seconds(1)) + except CatchableError as exc: + warn "Failed to update status bar, no further updates", err = exc.msg + + asyncSpawn statusBarUpdatesPollingLoop() + +proc initializeNetworking(node: BeaconNode) {.async.} = + node.installMessageValidators() + + info "Listening to incoming network requests" + await node.network.startListening() + + let addressFile = node.config.dataDir / "beacon_node.enr" + writeFile(addressFile, node.network.announcedENR.toURI) + + await node.network.start() + +proc installRestHandlers(restServer: RestServerRef, node: BeaconNode) = + restServer.router.installBeaconApiHandlers(node) + restServer.router.installBuilderApiHandlers(node) + restServer.router.installConfigApiHandlers(node) + restServer.router.installDebugApiHandlers(node) + restServer.router.installEventApiHandlers(node) + restServer.router.installNimbusApiHandlers(node) + restServer.router.installNodeApiHandlers(node) + restServer.router.installValidatorApiHandlers(node) + restServer.router.installRewardsApiHandlers(node) + if node.dag.lcDataStore.serve: + restServer.router.installLightClientApiHandlers(node) + +from beacon_chain/spec/datatypes/capella import 
SignedBeaconBlock + +proc stop(node: BeaconNode) = + bnStatus = BeaconNodeStatus.Stopping + notice "Graceful shutdown" + if not node.config.inProcessValidators: + try: + node.vcProcess.close() + except Exception as exc: + warn "Couldn't close vc process", msg = exc.msg + try: + waitFor node.network.stop() + except CatchableError as exc: + warn "Couldn't stop network", msg = exc.msg + + node.attachedValidators[].slashingProtection.close() + node.attachedValidators[].close() + node.db.close() + notice "Databases closed" + +func verifyFinalization(node: BeaconNode, slot: Slot) = + # Epoch must be >= 4 to check finalization + const SETTLING_TIME_OFFSET = 1'u64 + let epoch = slot.epoch() + + # Don't static-assert this -- if this isn't called, don't require it + doAssert SLOTS_PER_EPOCH > SETTLING_TIME_OFFSET + + # Intentionally, loudly assert. Point is to fail visibly and unignorably + # during testing. + if epoch >= 4 and slot mod SLOTS_PER_EPOCH > SETTLING_TIME_OFFSET: + let finalizedEpoch = + node.dag.finalizedHead.slot.epoch() + # Finalization rule 234, that has the most lag slots among the cases, sets + # state.finalized_checkpoint = old_previous_justified_checkpoint.epoch + 3 + # and then state.slot gets incremented, to increase the maximum offset, if + # finalization occurs every slot, to 4 slots vs scheduledSlot. + doAssert finalizedEpoch + 4 >= epoch + +proc onSlotStart(node: BeaconNode, wallTime: BeaconTime, + lastSlot: Slot): Future[bool] {.async.} = + ## Called at the beginning of a slot - usually every slot, but sometimes might + ## skip a few in case we're running late. + ## wallTime: current system time - we will strive to perform all duties up + ## to this point in time + ## lastSlot: the last slot that we successfully processed, so we know where to + ## start work from - there might be jumps if processing is delayed + let + # The slot we should be at, according to the clock + wallSlot = wallTime.slotOrZero + # If everything was working perfectly, the slot that we should be processing + expectedSlot = lastSlot + 1 + finalizedEpoch = node.dag.finalizedHead.blck.slot.epoch() + delay = wallTime - expectedSlot.start_beacon_time() + + node.processingDelay = Opt.some(nanoseconds(delay.nanoseconds)) + + block: + logScope: + slot = shortLog(wallSlot) + epoch = shortLog(wallSlot.epoch) + sync = node.syncStatus(wallSlot) + peers = len(node.network.peerPool) + head = shortLog(node.dag.head) + finalized = shortLog(getStateField( + node.dag.headState, finalized_checkpoint)) + delay = shortLog(delay) + let nextConsensusForkDescription = node.formatNextConsensusFork() + if nextConsensusForkDescription.isNone: + info "Slot start" + else: + info "Slot start", nextFork = nextConsensusForkDescription.get + + # Check before any re-scheduling of onSlotStart() + if checkIfShouldStopAtEpoch(wallSlot, node.config.stopAtEpoch): + quit(0) + + when defined(windows): + if node.config.runAsService: + reportServiceStatusSuccess() + + # TODO: metrics + # beacon_slot.set wallSlot.toGaugeValue + # beacon_current_epoch.set wallSlot.epoch.toGaugeValue + + # both non-negative, so difference can't overflow or underflow int64 + # finalization_delay.set( + # wallSlot.epoch.toGaugeValue - finalizedEpoch.toGaugeValue) + + if node.config.strictVerification: + verifyFinalization(node, wallSlot) + + node.consensusManager[].updateHead(wallSlot) + + await node.handleValidatorDuties(lastSlot, wallSlot) + + await onSlotEnd(node, wallSlot) + + # 
https://github.com/ethereum/builder-specs/blob/v0.4.0/specs/bellatrix/validator.md#registration-dissemination + # This specification suggests validators re-submit to builder software every + # `EPOCHS_PER_VALIDATOR_REGISTRATION_SUBMISSION` epochs. + if wallSlot.is_epoch and + wallSlot.epoch mod EPOCHS_PER_VALIDATOR_REGISTRATION_SUBMISSION == 0: + asyncSpawn node.registerValidators(wallSlot.epoch) + + return false + +proc startBackfillTask(node: BeaconNode) {.async.} = + while node.dag.needsBackfill: + if not node.syncManager.inProgress: + # Only start the backfiller if it's needed _and_ head sync has completed - + # if we lose sync after having synced head, we could stop the backfilller, + # but this should be a fringe case - might as well keep the logic simple for + # now + node.backfiller.start() + return + + await sleepAsync(chronos.seconds(2)) + +proc onSecond(node: BeaconNode, time: Moment) = + # Nim GC metrics (for the main thread) + + # TODO: Collect metrics + # updateThreadMetrics() + + if node.config.stopAtSyncedEpoch != 0 and + node.dag.head.slot.epoch >= node.config.stopAtSyncedEpoch: + notice "Shutting down after having reached the target synced epoch" + bnStatus = BeaconNodeStatus.Stopping + +proc runOnSecondLoop(node: BeaconNode) {.async.} = + const + sleepTime = chronos.seconds(1) + nanosecondsIn1s = float(sleepTime.nanoseconds) + while true: + let start = chronos.now(chronos.Moment) + await chronos.sleepAsync(sleepTime) + let afterSleep = chronos.now(chronos.Moment) + let sleepTime = afterSleep - start + node.onSecond(start) + let finished = chronos.now(chronos.Moment) + let processingTime = finished - afterSleep + + # TODO: metrics + # ticks_delay.set(sleepTime.nanoseconds.float / nanosecondsIn1s) + trace "onSecond task completed", sleepTime, processingTime + +proc run(node: BeaconNode) {.raises: [CatchableError].} = + bnStatus = BeaconNodeStatus.Running + + if not isNil(node.restServer): + node.restServer.installRestHandlers(node) + node.restServer.start() + + if not isNil(node.keymanagerServer): + doAssert not isNil(node.keymanagerHost) + node.keymanagerServer.router.installKeymanagerHandlers(node.keymanagerHost[]) + if node.keymanagerServer != node.restServer: + node.keymanagerServer.start() + + let + wallTime = node.beaconClock.now() + wallSlot = wallTime.slotOrZero() + + # node.startLightClient() + node.requestManager.start() + node.syncManager.start() + + if node.dag.needsBackfill(): asyncSpawn node.startBackfillTask() + + waitFor node.localUpdateGossipStatus(wallSlot) + + for web3signerUrl in node.config.web3SignerUrls: + # TODO + # The current strategy polls all remote signers independently + # from each other which may lead to some race conditions of + # validators are migrated from one signer to another + # (because the updates to our validator pool are not atomic). + # Consider using different strategies that would detect such + # race conditions. 
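+    # Descriptive note on the call below: each configured web3signer URL gets
+    # its own polling task; `pollForDynamicValidators` re-queries that remote
+    # signer for its validator list at the configured
+    # `web3signerUpdateInterval`, so validators added to or removed from the
+    # signer are reflected in the local validator pool without a restart.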
+ asyncSpawn node.pollForDynamicValidators( + web3signerUrl, node.config.web3signerUpdateInterval) + + asyncSpawn runSlotLoop(node, wallTime, onSlotStart) + asyncSpawn runOnSecondLoop(node) + asyncSpawn runQueueProcessingLoop(node.blockProcessor) + asyncSpawn runKeystoreCachePruningLoop(node.keystoreCache) + + # main event loop + while bnStatus == BeaconNodeStatus.Running: + poll() # if poll fails, the network is broken + + # time to say goodbye + node.stop() + +proc start*(node: BeaconNode) {.raises: [CatchableError].} = + let + head = node.dag.head + finalizedHead = node.dag.finalizedHead + genesisTime = node.beaconClock.fromNow(start_beacon_time(Slot 0)) + + notice "Starting beacon node", + version = fullVersionStr, + nimVersion = NimVersion, + enr = node.network.announcedENR.toURI, + peerId = $node.network.switch.peerInfo.peerId, + timeSinceFinalization = + node.beaconClock.now() - finalizedHead.slot.start_beacon_time(), + head = shortLog(head), + justified = shortLog(getStateField( + node.dag.headState, current_justified_checkpoint)), + finalized = shortLog(getStateField( + node.dag.headState, finalized_checkpoint)), + finalizedHead = shortLog(finalizedHead), + SLOTS_PER_EPOCH, + SECONDS_PER_SLOT, + SPEC_VERSION, + dataDir = node.config.dataDir.string, + validators = node.attachedValidators[].count + + if genesisTime.inFuture: + notice "Waiting for genesis", genesisIn = genesisTime.offset + + waitFor node.initializeNetworking() + + node.elManager.start() + node.run() + +## runs beacon node +## adapted from nimbus-eth2 +proc doRunBeaconNode(config: var BeaconNodeConf, rng: ref HmacDrbgContext) {.raises: [CatchableError].} = + + # TODO: Define this varaibles somewhere + info "Launching beacon node", + version = fullVersionStr, + bls_backend = $BLS_BACKEND, + const_preset, + cmdParams = commandLineParams(), + config + + template ignoreDeprecatedOption(option: untyped): untyped = + if config.option.isSome: + warn "Config option is deprecated", + option = config.option.get + ignoreDeprecatedOption requireEngineAPI + ignoreDeprecatedOption safeSlotsToImportOptimistically + ignoreDeprecatedOption terminalTotalDifficultyOverride + ignoreDeprecatedOption optimistic + ignoreDeprecatedOption validatorMonitorTotals + ignoreDeprecatedOption web3ForcePolling + + #TODO: figure out the comment on createPidFile + # createPidFile(config.dataDir.string / "beacon_node.pid") + + config.createDumpDirs() + + # if config.metricsEnabled: + # let metricsAddress = config.metricsAddress + # notice "Starting metrics HTTP server", + # url = "http://" & $metricsAddress & ":" & $config.metricsPort & "/metrics" + # try: + # startMetricsHttpServer($metricsAddress, config.metricsPort) + # except CatchableError as exc: + # raise exc + # except Exception as exc: + # raiseAssert exc.msg # TODO fix metrics + + # Nim GC metrics (for the main thread) will be collected in onSecond(), but + # we disable piggy-backing on other metrics here. + setSystemMetricsAutomaticUpdate(false) + + # There are no managed event loops in here, to do a graceful shutdown, but + # letting the default Ctrl+C handler exit is safe, since we only read from + # the db. 
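+  # `loadEth2Network` resolves the Eth2NetworkMetadata (runtime config,
+  # genesis data and bootstrap nodes) for the network selected in `config`;
+  # its bootstrap nodes are merged into `config.bootstrapNodes` just below.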
+ let metadata = config.loadEth2Network() + + # Updating the config based on the metadata certainly is not beautiful but it + # works + for node in metadata.bootstrapNodes: + config.bootstrapNodes.add node + + ## Ctrl+C handling + proc controlCHandler() {.noconv.} = + when defined(windows): + # workaround for https://github.com/nim-lang/Nim/issues/4057 + try: + setupForeignThreadGc() + except Exception as exc: raiseAssert exc.msg # shouldn't happen + notice "Shutting down after having received SIGINT" + bnStatus = BeaconNodeStatus.Stopping + try: + setControlCHook(controlCHandler) + except Exception as exc: # TODO Exception + warn "Cannot set ctrl-c handler", msg = exc.msg + + # equivalent SIGTERM handler + when defined(posix): + proc SIGTERMHandler(signal: cint) {.noconv.} = + notice "Shutting down after having received SIGTERM" + bnStatus = BeaconNodeStatus.Stopping + c_signal(ansi_c.SIGTERM, SIGTERMHandler) + + block: + let res = + if config.trustedSetupFile.isNone: + conf.loadKzgTrustedSetup() + else: + conf.loadKzgTrustedSetup(config.trustedSetupFile.get) + if res.isErr(): + raiseAssert res.error() + + let node = waitFor BeaconNode.initBeaconNode(rng, config, metadata) + + if bnStatus == BeaconNodeStatus.Stopping: + return + + when not defined(windows): + # This status bar can lock a Windows terminal emulator, blocking the whole + # event loop (seen on Windows 10, with a default MSYS2 terminal). + initStatusBar(node) + + if node.nickname != "": + dynamicLogScope(node = node.nickname): node.start() + else: + node.start() ## --end copy paste file from nimbus-eth2/nimbus_beacon_node.nim @@ -149,4 +2428,4 @@ proc consensusWrapper*(parameters: TaskParameters) {.raises: [CatchableError].} fatal "error", message = e.msg isShutDownRequired.store(true) - warn "\tExiting consensus wrapper" \ No newline at end of file + warn "\tExiting consensus wrapper" From e72d10c0b3d6962b54ee68f58d845a6ff612a3af Mon Sep 17 00:00:00 2001 From: Pedro Miranda Date: Tue, 5 Nov 2024 18:30:40 +0000 Subject: [PATCH 17/32] Fixed metrics library warnings by deactivating metrics collection. This is acting as hot fix for now, given that some metrics are requirements. 
However we need to collect the ones we need / want and find a way to extract them from nimbus-eth2 (or make them reusable by both ) --- Makefile | 2 +- config.nims | 4 +++- nimbus_unified/configs/nimbus_configs.nim | 14 +++++++++++++- .../consensus/adapted_block_chain_dag.nim | 11 ++++++----- nimbus_unified/consensus/consensus_wrapper.nim | 4 +++- 5 files changed, 26 insertions(+), 9 deletions(-) diff --git a/Makefile b/Makefile index 8f99d6fd1..0d6189c13 100644 --- a/Makefile +++ b/Makefile @@ -373,7 +373,7 @@ endif # NIM_PARAMS := -d:release --parallelBuild:1 -d:libp2p_agents_metrics -d:KnownLibP2PAgents=nimbus,lighthouse,lodestar,prysm,teku,grandine $(NIM_PARAMS) nimbus_unified: | build deps echo -e $(BUILD_MSG) "build/$@" && \ - $(ENV_SCRIPT) nim c $(NIM_PARAMS) -d:disable_libbacktrace -d:libp2p_pki_schemes=secp256k1 -o:build/$@ "nimbus_unified/$@.nim" + $(ENV_SCRIPT) nim c $(NIM_PARAMS) -d:disable_libbacktrace -d:libp2p_pki_schemes=secp256k1 -d:unified -o:build/$@ "nimbus_unified/$@.nim" # Note about building Nimbus as a library: # # There were `wrappers`, `wrappers-static`, `libnimbus.so` and `libnimbus.a` diff --git a/config.nims b/config.nims index e8dc4dca3..6b5e63d7e 100644 --- a/config.nims +++ b/config.nims @@ -125,7 +125,9 @@ if not defined(windows): --opt:speed --excessiveStackTrace:on # enable metric collection ---define:metrics +when not defined(unified): + --define:metrics + # for heap-usage-by-instance-type metrics and object base-type strings --define:nimTypeNames --styleCheck:usages diff --git a/nimbus_unified/configs/nimbus_configs.nim b/nimbus_unified/configs/nimbus_configs.nim index a42574c20..f4488e09c 100644 --- a/nimbus_unified/configs/nimbus_configs.nim +++ b/nimbus_unified/configs/nimbus_configs.nim @@ -5,7 +5,7 @@ # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). # at your option. This file may not be copied, modified, or distributed except according to those terms. 
-import std/[atomics, tables], beacon_chain/nimbus_binary_common +import std/[os, atomics, tables], beacon_chain/nimbus_binary_common ## Exceptions type NimbusTasksError* = object of CatchableError @@ -37,3 +37,15 @@ type TaskParameters* = object ## With this we avoid the overhead of locks var isShutDownRequired*: Atomic[bool] isShutDownRequired.store(false) + +# TODO: move this into config.nim file once we have the file in place +proc defaultDataDir*(): string = + let dataDir = + when defined(windows): + "AppData" / "Roaming" / "Nimbus_unified" + elif defined(macosx): + "Library" / "Application Support" / "Nimbus_unified" + else: + ".cache" / "nimbus_unified" + + getHomeDir() / dataDir \ No newline at end of file diff --git a/nimbus_unified/consensus/adapted_block_chain_dag.nim b/nimbus_unified/consensus/adapted_block_chain_dag.nim index e342480dc..e5107dabb 100644 --- a/nimbus_unified/consensus/adapted_block_chain_dag.nim +++ b/nimbus_unified/consensus/adapted_block_chain_dag.nim @@ -47,7 +47,7 @@ logScope: topics = "chaindag" # declareGauge beacon_pending_deposits, "Number of pending deposits (state.eth1_data.deposit_count - state.eth1_deposit_index)" # On block # declareGauge beacon_processed_deposits_total, "Number of total deposits included on chain" # On block -declareCounter beacon_dag_state_replay_seconds, "Time spent replaying states" +# declareCounter beacon_dag_state_replay_seconds, "Time spent replaying states" const EPOCHS_PER_STATE_SNAPSHOT* = 32 @@ -1861,10 +1861,11 @@ proc updateState*( # ...and make sure to load the state cache, if it exists loadStateCache(dag, cache, bsi.bid, getStateField(state, slot).epoch) - let - assignDur = assignTick - startTick - replayDur = Moment.now() - assignTick - beacon_dag_state_replay_seconds.inc(replayDur.toFloatSeconds) + + # let + # assignDur = assignTick - startTick + # replayDur = Moment.now() - assignTick + # beacon_dag_state_replay_seconds.inc(replayDur.toFloatSeconds) # TODO https://github.com/status-im/nim-chronicles/issues/108 if (assignDur + replayDur) >= MinSignificantProcessingDuration: diff --git a/nimbus_unified/consensus/consensus_wrapper.nim b/nimbus_unified/consensus/consensus_wrapper.nim index 8fc648997..4c10584b6 100644 --- a/nimbus_unified/consensus/consensus_wrapper.nim +++ b/nimbus_unified/consensus/consensus_wrapper.nim @@ -2351,7 +2351,9 @@ proc doRunBeaconNode(config: var BeaconNodeConf, rng: ref HmacDrbgContext) {.rai # Nim GC metrics (for the main thread) will be collected in onSecond(), but # we disable piggy-backing on other metrics here. 
- setSystemMetricsAutomaticUpdate(false) + + #TODO: reactivate once we have metrics defined + # setSystemMetricsAutomaticUpdate(false) # There are no managed event loops in here, to do a graceful shutdown, but # letting the default Ctrl+C handler exit is safe, since we only read from From e508084003e1485e836fb94abd42ac0e32c85a73 Mon Sep 17 00:00:00 2001 From: Pedro Miranda Date: Tue, 19 Nov 2024 10:30:28 +0000 Subject: [PATCH 18/32] added software version --- nimbus_unified/nimbus_unified.nim | 18 +++------ nimbus_unified/version.nim | 62 +++++++++++++++++++++++++++++++ 2 files changed, 68 insertions(+), 12 deletions(-) create mode 100644 nimbus_unified/version.nim diff --git a/nimbus_unified/nimbus_unified.nim b/nimbus_unified/nimbus_unified.nim index d3dcb67bf..a7c9b276f 100644 --- a/nimbus_unified/nimbus_unified.nim +++ b/nimbus_unified/nimbus_unified.nim @@ -13,7 +13,8 @@ import execution/execution_wrapper, beacon_chain/[conf, conf_common], beacon_chain/[beacon_chain_db], - beacon_chain/validators/keystore_management + beacon_chain/validators/keystore_management, + version ## Constants ## TODO: evaluate the proposed timeouts with team @@ -144,29 +145,22 @@ proc startTasks*( # ------ when isMainModule: - info "Starting Nimbus" - ## TODO - ## - file limits - ## - setup logging - ## - read configuration (check nimbus_configs file anottations) + notice "Starting Nimbus" + ## - implement config reader for all components let nimbusConfigs = NimbusConfig() var tasksList: NimbusTasks = NimbusTasks.new ##TODO: this is an adapted call os the vars required by makeBannerAndConfig ##these values need to be read from some config file - const SPEC_VERSION = "tbd" - const copyrights = "status" - const nimBanner = "nimbus" - const clientId = "nimbus unified" var beaconNodeConfig = makeBannerAndConfig( - clientId, copyrights, nimBanner, SPEC_VERSION, [], BeaconNodeConf + clientName, copyrightBanner, nimBanner, versionAsStr, [], BeaconNodeConf ).valueOr: stderr.write error quit QuitFailure #TODO: if we don't add the "db" program crashes on - if not(checkAndCreateDataDir(string(beaconNodeConfig.dataDir/"db"))): + if not (checkAndCreateDataDir(string(beaconNodeConfig.dataDir / "db"))): # We are unable to access/create data folder or data folder's # permissions are insecure. quit QuitFailure diff --git a/nimbus_unified/version.nim b/nimbus_unified/version.nim new file mode 100644 index 000000000..463b32fcd --- /dev/null +++ b/nimbus_unified/version.nim @@ -0,0 +1,62 @@ +# Nimbus +# Copyright (c) 2024 Status Research & Development GmbH +# Licensed and distributed under either of +# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). +# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). +# at your option. This file may not be copied, modified, or distributed except according to those terms. + +{.push raises: [].} + +import std/strutils, stew/byteutils, metrics + +const + versionMajor* = 0 + versionMinor* = 1 + versionBuild* = 0 + + gitRevision* = strip(staticExec("git rev-parse --short HEAD"))[0 .. 5] + + versionAsStr* = $versionMajor & "." & $versionMinor & "." 
& $versionBuild + + fullVersionStr* = "v" & versionAsStr & "-" & gitRevision + + clientName* = "Nimbus" + + nimFullBanner = staticExec("nim --version") + nimBanner* = staticExec("nim --version | grep Version") + + # The web3_clientVersion + clientVersion* = + clientName & "/" & fullVersionStr & "/" & hostOS & "-" & hostCPU & "/" & "Nim" & + NimVersion + + compileYear = CompileDate[0 ..< 4] # YYYY-MM-DD (UTC) + copyrightBanner* = + "Copyright (c) 2021-" & compileYear & " Status Research & Development GmbH" + + # Short debugging identifier to be placed in the ENR + enrClientInfoShort* = toBytes("f") + +func getNimGitHash*(): string = + const gitPrefix = "git hash: " + let tmp = splitLines(nimFullBanner) + if tmp.len == 0: + return + for line in tmp: + if line.startsWith(gitPrefix) and line.len > 8 + gitPrefix.len: + result = line[gitPrefix.len ..< gitPrefix.len + 8] + +# TODO: Currently prefixing these metric names as the non prefixed names give +# a collector already registered conflict at runtime. This is due to the same +# names in nimbus-eth2 nimbus_binary_common.nim even though there are no direct +# imports of that file. + +declareGauge versionGauge, + "Nimbus version info (as metric labels)", + ["version", "commit"], + name = "nimbus_version" +versionGauge.set(1, labelValues = [fullVersionStr, gitRevision]) + +declareGauge nimVersionGauge, + "Nim version info", ["version", "nim_commit"], name = "nimbus_nim_version" +nimVersionGauge.set(1, labelValues = [NimVersion, getNimGitHash()]) From de42821b05c0491fa5b502a27507933dd6850c2b Mon Sep 17 00:00:00 2001 From: Pedro Miranda Date: Wed, 20 Nov 2024 11:45:04 +0000 Subject: [PATCH 19/32] fixed create databasedir and fil limits bug --- .../consensus/consensus_wrapper.nim | 7 ------ nimbus_unified/nimbus_unified.nim | 24 +++++++++---------- 2 files changed, 11 insertions(+), 20 deletions(-) diff --git a/nimbus_unified/consensus/consensus_wrapper.nim b/nimbus_unified/consensus/consensus_wrapper.nim index 4c10584b6..bd1bb9d8c 100644 --- a/nimbus_unified/consensus/consensus_wrapper.nim +++ b/nimbus_unified/consensus/consensus_wrapper.nim @@ -2414,16 +2414,9 @@ proc doRunBeaconNode(config: var BeaconNodeConf, rng: ref HmacDrbgContext) {.rai ## Consensus wrapper proc consensusWrapper*(parameters: TaskParameters) {.raises: [CatchableError].} = - # Single RNG instance for the application - will be seeded on construction - # and avoid using system resources (such as urandom) after that let rng = HmacDrbgContext.new() var config = parameters.beaconNodeConfigs - setupFileLimits() - - #TODO: Another FC unsafe procedure - # setupLogging(config.logLevel, config.logStdout, config.logFile) - try: doRunBeaconNode(config, rng) except CatchableError as e: diff --git a/nimbus_unified/nimbus_unified.nim b/nimbus_unified/nimbus_unified.nim index a7c9b276f..f28917fe0 100644 --- a/nimbus_unified/nimbus_unified.nim +++ b/nimbus_unified/nimbus_unified.nim @@ -145,33 +145,31 @@ proc startTasks*( # ------ when isMainModule: - notice "Starting Nimbus" - + info "Starting Nimbus" + ## TODO + ## - file limits + ## - setup logging + ## - read configuration (check nimbus_configs file anottations) ## - implement config reader for all components let nimbusConfigs = NimbusConfig() var tasksList: NimbusTasks = NimbusTasks.new - ##TODO: this is an adapted call os the vars required by makeBannerAndConfig - ##these values need to be read from some config file var beaconNodeConfig = makeBannerAndConfig( - clientName, copyrightBanner, nimBanner, versionAsStr, [], BeaconNodeConf + 
clientId, copyrights, nimBanner, SPEC_VERSION, [], BeaconNodeConf ).valueOr: stderr.write error quit QuitFailure - #TODO: if we don't add the "db" program crashes on - if not (checkAndCreateDataDir(string(beaconNodeConfig.dataDir / "db"))): - # We are unable to access/create data folder or data folder's - # permissions are insecure. + if not (checkAndCreateDataDir(string(beaconNodeConfig.dataDir))): quit QuitFailure - # TODO: data directory is not created(build/data/shared_holesky_0/db/) - # and "createPidFile" throws an exception - # solution: manually create the directory + setupFileLimits() + + setupLogging(config.logLevel, config.logStdout, config.logFile) + createPidFile(beaconNodeConfig.databaseDir.string / "unified.pid") ## Graceful shutdown by handling of Ctrl+C signal - ## TODO: we might need to declare it per thread proc controlCHandler() {.noconv.} = when defined(windows): # workaround for https://github.com/nim-lang/Nim/issues/4057 From 5056606f776e50f3671ec27e1a6cb495cff850ef Mon Sep 17 00:00:00 2001 From: Pedro Miranda Date: Thu, 21 Nov 2024 12:00:49 +0000 Subject: [PATCH 20/32] added startup command with trusted synch --- .../consensus/consensus_wrapper.nim | 33 +++++++++++++++++-- 1 file changed, 30 insertions(+), 3 deletions(-) diff --git a/nimbus_unified/consensus/consensus_wrapper.nim b/nimbus_unified/consensus/consensus_wrapper.nim index bd1bb9d8c..aa89a3c08 100644 --- a/nimbus_unified/consensus/consensus_wrapper.nim +++ b/nimbus_unified/consensus/consensus_wrapper.nim @@ -2412,15 +2412,42 @@ proc doRunBeaconNode(config: var BeaconNodeConf, rng: ref HmacDrbgContext) {.rai ## --end copy paste file from nimbus-eth2/nimbus_beacon_node.nim +proc handleStartingOption(config: var BeaconNodeConf) {.raises: [CatchableError].} = + let rng = HmacDrbgContext.new() + + # More options can be added, might be out of scope given that they exist in eth2 + case config.cmd + of BNSStartUpCmd.noCommand: doRunBeaconNode(config, rng) + of BNSStartUpCmd.trustedNodeSync: + if config.blockId.isSome(): + error "--blockId option has been removed - use --state-id instead!" 
+ quit 1 + + let + metadata = loadEth2Network(config) + db = BeaconChainDB.new(config.databaseDir, metadata.cfg, inMemory = false) + genesisState = waitFor fetchGenesisState(metadata) + waitFor db.doRunTrustedNodeSync( + config.databaseDir, + config.eraDir, + config.trustedNodeUrl, + config.stateId, + config.lcTrustedBlockRoot, + config.backfillBlocks, + config.reindex, + config.downloadDepositSnapshot, + genesisState) + db.close() + ## Consensus wrapper proc consensusWrapper*(parameters: TaskParameters) {.raises: [CatchableError].} = - let rng = HmacDrbgContext.new() - var config = parameters.beaconNodeConfigs - try: + var config = parameters.beaconNodeConfigs + try: doRunBeaconNode(config, rng) except CatchableError as e: fatal "error", message = e.msg + isShutDownRequired.store(true) isShutDownRequired.store(true) warn "\tExiting consensus wrapper" From 5cac51896de45bfcba92563ff5a0fd0505fcb414 Mon Sep 17 00:00:00 2001 From: Pedro Miranda Date: Mon, 25 Nov 2024 14:31:16 +0000 Subject: [PATCH 21/32] nph format --- .../consensus/consensus_wrapper.nim | 890 +++++++++--------- nimbus_unified/nimbus_unified.nim | 3 +- 2 files changed, 470 insertions(+), 423 deletions(-) diff --git a/nimbus_unified/consensus/consensus_wrapper.nim b/nimbus_unified/consensus/consensus_wrapper.nim index aa89a3c08..c77a6c2de 100644 --- a/nimbus_unified/consensus/consensus_wrapper.nim +++ b/nimbus_unified/consensus/consensus_wrapper.nim @@ -41,10 +41,9 @@ when defined(posix): import system/ansi_c from beacon_chain/spec/datatypes/deneb import SignedBeaconBlock -from beacon_chain/beacon_node_light_client - import shouldSyncOptimistically, initLightClient, updateLightClientFromDag -from libp2p/protocols/pubsub/gossipsub - import TopicParams, validateParameters, init +from beacon_chain/beacon_node_light_client import + shouldSyncOptimistically, initLightClient, updateLightClientFromDag +from libp2p/protocols/pubsub/gossipsub import TopicParams, validateParameters, init ## log logScope: @@ -77,12 +76,10 @@ logScope: const SlashingDbName = "slashing_protection" # changing this requires physical file rename as well or history is lost - ## NOTE ## following procedures are copies/adaptations from nimbus_beacon_node.nim. 
## TODO: Extract do adequate structures and files - # TODO: need to figure out behaviour on threaded patterns # Using this function here is signaled as non GC SAFE given # that gPidFile might be accessed concurrently with no guards @@ -101,7 +98,7 @@ proc initFullNode( rng: ref HmacDrbgContext, dag: ChainDAGRef, taskpool: TaskPoolPtr, - getBeaconTime: GetBeaconTimeFn + getBeaconTime: GetBeaconTimeFn, ) {.async.} = template config(): auto = node.config @@ -139,8 +136,7 @@ proc initFullNode( some node.dag.is_optimistic(data.toBlockId()) else: none[bool]() - node.eventBus.blocksQueue.emit( - EventBeaconBlockObject.init(data, optimistic)) + node.eventBus.blocksQueue.emit(EventBeaconBlockObject.init(data, optimistic)) proc onHeadChanged(data: HeadChangeInfoObject) = let eventData = @@ -311,8 +307,7 @@ proc initFullNode( #TODO: # removing this light client var - lightClientPool = newClone( - LightClientPool()) + lightClientPool = newClone(LightClientPool()) processor = Eth2Processor.new( config.doppelgangerDetection, blockProcessor, node.validatorMonitor, dag, @@ -455,52 +450,62 @@ proc initFullNode( func getVanityLogs(stdoutKind: StdoutLogKind): VanityLogs = case stdoutKind - of StdoutLogKind.Auto: raiseAssert "inadmissable here" + of StdoutLogKind.Auto: + raiseAssert "inadmissable here" of StdoutLogKind.Colors: VanityLogs( - onMergeTransitionBlock: bellatrixColor, + onMergeTransitionBlock: bellatrixColor, onFinalizedMergeTransitionBlock: bellatrixBlink, - onUpgradeToCapella: capellaColor, - onKnownBlsToExecutionChange: capellaBlink, - onUpgradeToDeneb: denebColor, - onUpgradeToElectra: electraColor) + onUpgradeToCapella: capellaColor, + onKnownBlsToExecutionChange: capellaBlink, + onUpgradeToDeneb: denebColor, + onUpgradeToElectra: electraColor, + ) of StdoutLogKind.NoColors: VanityLogs( - onMergeTransitionBlock: bellatrixMono, + onMergeTransitionBlock: bellatrixMono, onFinalizedMergeTransitionBlock: bellatrixMono, - onUpgradeToCapella: capellaMono, - onKnownBlsToExecutionChange: capellaMono, - onUpgradeToDeneb: denebMono, - onUpgradeToElectra: electraMono) + onUpgradeToCapella: capellaMono, + onKnownBlsToExecutionChange: capellaMono, + onUpgradeToDeneb: denebMono, + onUpgradeToElectra: electraMono, + ) of StdoutLogKind.Json, StdoutLogKind.None: VanityLogs( - onMergeTransitionBlock: - (proc() = notice "🐼 Proof of Stake Activated 🐼"), - onFinalizedMergeTransitionBlock: - (proc() = notice "🐼 Proof of Stake Finalized 🐼"), - onUpgradeToCapella: - (proc() = notice "🦉 Withdrowls now available 🦉"), - onKnownBlsToExecutionChange: - (proc() = notice "🦉 BLS to execution changed 🦉"), - onUpgradeToDeneb: - (proc() = notice "🐟 Proto-Danksharding is ON 🐟"), - onUpgradeToElectra: - (proc() = notice "🦒 [PH] Electra 🦒")) + onMergeTransitionBlock: ( + proc() = + notice "🐼 Proof of Stake Activated 🐼" + ), + onFinalizedMergeTransitionBlock: ( + proc() = + notice "🐼 Proof of Stake Finalized 🐼" + ), + onUpgradeToCapella: ( + proc() = + notice "🦉 Withdrowls now available 🦉" + ), + onKnownBlsToExecutionChange: ( + proc() = + notice "🦉 BLS to execution changed 🦉" + ), + onUpgradeToDeneb: ( + proc() = + notice "🐟 Proto-Danksharding is ON 🐟" + ), + onUpgradeToElectra: ( + proc() = + notice "🦒 [PH] Electra 🦒" + ), + ) func getVanityMascot(consensusFork: ConsensusFork): string = case consensusFork - of ConsensusFork.Electra: - "🦒" - of ConsensusFork.Deneb: - "🐟" - of ConsensusFork.Capella: - "🦉" - of ConsensusFork.Bellatrix: - "🐼" - of ConsensusFork.Altair: - "✨" - of ConsensusFork.Phase0: - "🦏" + of 
ConsensusFork.Electra: "🦒" + of ConsensusFork.Deneb: "🐟" + of ConsensusFork.Capella: "🦉" + of ConsensusFork.Bellatrix: "🐼" + of ConsensusFork.Altair: "✨" + of ConsensusFork.Phase0: "🦏" # NOTE: light client related code commented proc loadChainDag( @@ -555,14 +560,20 @@ proc loadChainDag( if config.lightClientDataServe: onLightClientOptimisticUpdate else: nil dag = ChainDAGRef.init( - cfg, db, validatorMonitor, chainDagFlags, config.eraDir, + cfg, + db, + validatorMonitor, + chainDagFlags, + config.eraDir, vanityLogs = getVanityLogs(detectTTY(config.logStdout)), lcDataConfig = LightClientDataConfig( serve: config.lightClientDataServe, importMode: config.lightClientDataImportMode, maxPeriods: config.lightClientDataMaxPeriods, onLightClientFinalityUpdate: onLightClientFinalityUpdateCb, - onLightClientOptimisticUpdate: onLightClientOptimisticUpdateCb)) + onLightClientOptimisticUpdate: onLightClientOptimisticUpdateCb, + ), + ) if networkGenesisValidatorsRoot.isSome: let databaseGenesisValidatorsRoot = @@ -611,24 +622,27 @@ proc doRunTrustedNodeSync( downloadDepositSnapshot, genesisState, ) -proc initBeaconNode*(T: type BeaconNode, - rng: ref HmacDrbgContext, - config: BeaconNodeConf, - metadata: Eth2NetworkMetadata): Future[BeaconNode] - {.async.} = +proc initBeaconNode*( + T: type BeaconNode, + rng: ref HmacDrbgContext, + config: BeaconNodeConf, + metadata: Eth2NetworkMetadata, +): Future[BeaconNode] {.async.} = var taskpool: TaskPoolPtr genesisState: ref ForkedHashedBeaconState = nil - template cfg: auto = metadata.cfg - template eth1Network: auto = metadata.eth1Network + template cfg(): auto = + metadata.cfg + + template eth1Network(): auto = + metadata.eth1Network - if not(isDir(config.databaseDir)): + if not (isDir(config.databaseDir)): # If database directory missing, we going to use genesis state to check # for weak_subjectivity_period. 
genesisState = - await fetchGenesisState( - metadata, config.genesisState, config.genesisStateUrl) + await fetchGenesisState(metadata, config.genesisState, config.genesisStateUrl) let genesisTime = getStateField(genesisState[], genesis_time) beaconClock = BeaconClock.init(genesisTime).valueOr: @@ -637,7 +651,8 @@ proc initBeaconNode*(T: type BeaconNode, currentSlot = beaconClock.now().slotOrZero() checkpoint = Checkpoint( epoch: epoch(getStateField(genesisState[], slot)), - root: getStateField(genesisState[], latest_block_header).state_root) + root: getStateField(genesisState[], latest_block_header).state_root, + ) # adapted from nimbus-eth2 # if config.longRangeSync == LongRangeSyncMode.Light: # if not is_within_weak_subjectivity_period(metadata.cfg, currentSlot, @@ -678,10 +693,11 @@ proc initBeaconNode*(T: type BeaconNode, finalQueue: newAsyncEventQueue[FinalizationInfoObject](), reorgQueue: newAsyncEventQueue[ReorgInfoObject](), contribQueue: newAsyncEventQueue[SignedContributionAndProof](), - finUpdateQueue: newAsyncEventQueue[ - RestVersioned[ForkedLightClientFinalityUpdate]](), - optUpdateQueue: newAsyncEventQueue[ - RestVersioned[ForkedLightClientOptimisticUpdate]]()) + finUpdateQueue: + newAsyncEventQueue[RestVersioned[ForkedLightClientFinalityUpdate]](), + optUpdateQueue: + newAsyncEventQueue[RestVersioned[ForkedLightClientOptimisticUpdate]](), + ) db = BeaconChainDB.new(config.databaseDir, cfg, inMemory = false) if config.externalBeaconApiUrl.isSome and ChainDAGRef.isInitialized(db).isErr: @@ -692,7 +708,8 @@ proc initBeaconNode*(T: type BeaconNode, # Sync can be bootstrapped from the genesis block root if genesisState.isNil: genesisState = await fetchGenesisState( - metadata, config.genesisState, config.genesisStateUrl) + metadata, config.genesisState, config.genesisStateUrl + ) if not genesisState.isNil: let genesisBlockRoot = get_initial_beacon_block(genesisState[]).root notice "Neither `--trusted-block-root` nor `--trusted-state-root` " & @@ -715,44 +732,50 @@ proc initBeaconNode*(T: type BeaconNode, trustedStateRoot = config.trustedStateRoot else: if genesisState.isNil: - genesisState = await fetchGenesisState( - metadata, config.genesisState, config.genesisStateUrl) + genesisState = + await fetchGenesisState(metadata, config.genesisState, config.genesisStateUrl) await db.doRunTrustedNodeSync( metadata, config.databaseDir, config.eraDir, config.externalBeaconApiUrl.get, - config.trustedStateRoot.map do (x: Eth2Digest) -> string: + config.trustedStateRoot.map do(x: Eth2Digest) -> string: "0x" & x.data.toHex, trustedBlockRoot, backfill = false, reindex = false, downloadDepositSnapshot = false, - genesisState) + genesisState, + ) if config.finalizedCheckpointBlock.isSome: warn "--finalized-checkpoint-block has been deprecated, ignoring" - let checkpointState = if config.finalizedCheckpointState.isSome: - let checkpointStatePath = config.finalizedCheckpointState.get.string - let tmp = try: - newClone(readSszForkedHashedBeaconState( - cfg, readAllBytes(checkpointStatePath).tryGet())) - except SszError as err: - fatal "Checkpoint state loading failed", + let checkpointState = + if config.finalizedCheckpointState.isSome: + let checkpointStatePath = config.finalizedCheckpointState.get.string + let tmp = + try: + newClone( + readSszForkedHashedBeaconState( + cfg, readAllBytes(checkpointStatePath).tryGet() + ) + ) + except SszError as err: + fatal "Checkpoint state loading failed", err = formatMsg(err, checkpointStatePath) - quit 1 - except CatchableError as err: - fatal "Failed to 
read checkpoint state file", err = err.msg - quit 1 + quit 1 + except CatchableError as err: + fatal "Failed to read checkpoint state file", err = err.msg + quit 1 - if not getStateField(tmp[], slot).is_epoch: - fatal "--finalized-checkpoint-state must point to a state for an epoch slot", - slot = getStateField(tmp[], slot) - quit 1 - tmp - else: - nil + if not getStateField(tmp[], slot).is_epoch: + fatal "--finalized-checkpoint-state must point to a state for an epoch slot", + slot = getStateField(tmp[], slot) + quit 1 + tmp + else: + nil if config.finalizedDepositTreeSnapshot.isSome: let @@ -762,7 +785,7 @@ proc initBeaconNode*(T: type BeaconNode, SSZ.loadFile(depositTreeSnapshotPath, DepositTreeSnapshot) except SszError as err: fatal "Deposit tree snapshot loading failed", - err = formatMsg(err, depositTreeSnapshotPath) + err = formatMsg(err, depositTreeSnapshotPath) quit 1 except CatchableError as err: fatal "Failed to read deposit tree snapshot file", err = err.msg @@ -781,29 +804,25 @@ proc initBeaconNode*(T: type BeaconNode, if not ChainDAGRef.isInitialized(db).isOk(): genesisState = - if not checkpointState.isNil and - getStateField(checkpointState[], slot) == 0: + if not checkpointState.isNil and getStateField(checkpointState[], slot) == 0: checkpointState else: if genesisState.isNil: - await fetchGenesisState( - metadata, config.genesisState, config.genesisStateUrl) + await fetchGenesisState(metadata, config.genesisState, config.genesisStateUrl) else: genesisState if genesisState.isNil and checkpointState.isNil: fatal "No database and no genesis snapshot found. Please supply a genesis.ssz " & - "with the network configuration" + "with the network configuration" quit 1 if not genesisState.isNil and not checkpointState.isNil: if getStateField(genesisState[], genesis_validators_root) != getStateField(checkpointState[], genesis_validators_root): fatal "Checkpoint state does not match genesis - check the --network parameter", - rootFromGenesis = getStateField( - genesisState[], genesis_validators_root), - rootFromCheckpoint = getStateField( - checkpointState[], genesis_validators_root) + rootFromGenesis = getStateField(genesisState[], genesis_validators_root), + rootFromCheckpoint = getStateField(checkpointState[], genesis_validators_root) quit 1 try: @@ -815,11 +834,11 @@ proc initBeaconNode*(T: type BeaconNode, Opt.some(getStateField(genesisState[], genesis_validators_root)) if not checkpointState.isNil: - if genesisState.isNil or - getStateField(checkpointState[], slot) != GENESIS_SLOT: + if genesisState.isNil or getStateField(checkpointState[], slot) != GENESIS_SLOT: ChainDAGRef.preInit(db, checkpointState[]) - doAssert ChainDAGRef.isInitialized(db).isOk(), "preInit should have initialized db" + doAssert ChainDAGRef.isInitialized(db).isOk(), + "preInit should have initialized db" except CatchableError as exc: error "Failed to initialize database", err = exc.msg quit 1 @@ -835,19 +854,20 @@ proc initBeaconNode*(T: type BeaconNode, # The validatorMonitorTotals flag has been deprecated and should eventually be # removed - until then, it's given priority if set so as not to needlessly # break existing setups - let - validatorMonitor = newClone(ValidatorMonitor.init( + let validatorMonitor = newClone( + ValidatorMonitor.init( config.validatorMonitorAuto, - config.validatorMonitorTotals.get( - not config.validatorMonitorDetails))) + config.validatorMonitorTotals.get(not config.validatorMonitorDetails), + ) + ) for key in config.validatorMonitorPubkeys: validatorMonitor[].addMonitor(key, 
Opt.none(ValidatorIndex)) let dag = loadChainDag( - config, cfg, db, eventBus, - validatorMonitor, networkGenesisValidatorsRoot) + config, cfg, db, eventBus, validatorMonitor, networkGenesisValidatorsRoot + ) genesisTime = getStateField(dag.headState, genesis_time) beaconClock = BeaconClock.init(genesisTime).valueOr: fatal "Invalid genesis time in state", genesisTime @@ -857,35 +877,42 @@ proc initBeaconNode*(T: type BeaconNode, if config.weakSubjectivityCheckpoint.isSome: dag.checkWeakSubjectivityCheckpoint( - config.weakSubjectivityCheckpoint.get, beaconClock) + config.weakSubjectivityCheckpoint.get, beaconClock + ) let elManager = ELManager.new( - cfg, - metadata.depositContractBlock, - metadata.depositContractBlockHash, - db, - engineApiUrls, - eth1Network) + cfg, metadata.depositContractBlock, metadata.depositContractBlockHash, db, + engineApiUrls, eth1Network, + ) if config.rpcEnabled.isSome: warn "Nimbus's JSON-RPC server has been removed. This includes the --rpc, --rpc-port, and --rpc-address configuration options. https://nimbus.guide/rest-api.html shows how to enable and configure the REST Beacon API server which replaces it." - let restServer = if config.restEnabled: - RestServerRef.init(config.restAddress, config.restPort, - config.restAllowedOrigin, - validateBeaconApiQueries, - nimbusAgentStr, - config) - else: - nil + let restServer = + if config.restEnabled: + RestServerRef.init( + config.restAddress, config.restPort, config.restAllowedOrigin, + validateBeaconApiQueries, nimbusAgentStr, config, + ) + else: + nil let netKeys = getPersistentNetKeys(rng[], config) - nickname = if config.nodeName == "auto": shortForm(netKeys) - else: config.nodeName + nickname = + if config.nodeName == "auto": + shortForm(netKeys) + else: + config.nodeName network = createEth2Node( - rng, config, netKeys, cfg, dag.forkDigests, getBeaconTime, - getStateField(dag.headState, genesis_validators_root)) + rng, + config, + netKeys, + cfg, + dag.forkDigests, + getBeaconTime, + getStateField(dag.headState, genesis_validators_root), + ) case config.slashingDbKind of SlashingDbKind.v2: @@ -896,8 +923,7 @@ proc initBeaconNode*(T: type BeaconNode, of SlashingDbKind.both: warn "Slashing DB v1 deprecated, writing only v2" - info "Loading slashing protection database (v2)", - path = config.validatorsDir() + info "Loading slashing protection database (v2)", path = config.validatorsDir() proc getValidatorAndIdx(pubkey: ValidatorPubKey): Opt[ValidatorAndIndex] = withState(dag.headState): @@ -917,50 +943,46 @@ proc initBeaconNode*(T: type BeaconNode, let keystoreCache = KeystoreCacheRef.init() - slashingProtectionDB = - SlashingProtectionDB.init( - getStateField(dag.headState, genesis_validators_root), - config.validatorsDir(), SlashingDbName) - validatorPool = newClone(ValidatorPool.init( - slashingProtectionDB, config.doppelgangerDetection)) + slashingProtectionDB = SlashingProtectionDB.init( + getStateField(dag.headState, genesis_validators_root), + config.validatorsDir(), + SlashingDbName, + ) + validatorPool = + newClone(ValidatorPool.init(slashingProtectionDB, config.doppelgangerDetection)) keymanagerInitResult = initKeymanagerServer(config, restServer) - keymanagerHost = if keymanagerInitResult.server != nil: - newClone KeymanagerHost.init( - validatorPool, - keystoreCache, - rng, - keymanagerInitResult.token, - config.validatorsDir, - config.secretsDir, - config.defaultFeeRecipient, - config.suggestedGasLimit, - config.defaultGraffitiBytes, - config.getPayloadBuilderAddress, - getValidatorAndIdx, - 
getBeaconTime, - getCapellaForkVersion, - getDenebForkEpoch, - getForkForEpoch, - getGenesisRoot) - else: nil + keymanagerHost = + if keymanagerInitResult.server != nil: + newClone KeymanagerHost.init( + validatorPool, keystoreCache, rng, keymanagerInitResult.token, + config.validatorsDir, config.secretsDir, config.defaultFeeRecipient, + config.suggestedGasLimit, config.defaultGraffitiBytes, + config.getPayloadBuilderAddress, getValidatorAndIdx, getBeaconTime, + getCapellaForkVersion, getDenebForkEpoch, getForkForEpoch, getGenesisRoot, + ) + else: + nil stateTtlCache = if config.restCacheSize > 0: StateTtlCache.init( cacheSize = config.restCacheSize, - cacheTtl = chronos.seconds(config.restCacheTtl)) + cacheTtl = chronos.seconds(config.restCacheTtl), + ) else: nil if config.payloadBuilderEnable: - info "Using external payload builder", - payloadBuilderUrl = config.payloadBuilderUrl + info "Using external payload builder", payloadBuilderUrl = config.payloadBuilderUrl let node = BeaconNode( nickname: nickname, - graffitiBytes: if config.graffiti.isSome: config.graffiti.get - else: defaultGraffitiBytes(), + graffitiBytes: + if config.graffiti.isSome: + config.graffiti.get + else: + defaultGraffitiBytes(), network: network, netKeys: netKeys, db: db, @@ -978,13 +1000,15 @@ proc initBeaconNode*(T: type BeaconNode, validatorMonitor: validatorMonitor, stateTtlCache: stateTtlCache, shutdownEvent: newAsyncEvent(), - dynamicFeeRecipientsStore: newClone(DynamicFeeRecipientsStore.init())) + dynamicFeeRecipientsStore: newClone(DynamicFeeRecipientsStore.init()), + ) # TODO: we are initializing the light client given that it has a function # to validate if the sync should be done optimistically or not, and it used # along beacon node node.initLightClient( - rng, cfg, dag.forkDigests, getBeaconTime, dag.genesis_validators_root) + rng, cfg, dag.forkDigests, getBeaconTime, dag.genesis_validators_root + ) await node.initFullNode(rng, dag, taskpool, getBeaconTime) @@ -1004,153 +1028,191 @@ proc installMessageValidators(node: BeaconNode) = # beacon_block # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/specs/phase0/p2p-interface.md#beacon_block node.network.addValidator( - getBeaconBlocksTopic(digest), proc ( - signedBlock: consensusFork.SignedBeaconBlock - ): ValidationResult = + getBeaconBlocksTopic(digest), + proc(signedBlock: consensusFork.SignedBeaconBlock): ValidationResult = if node.shouldSyncOptimistically(node.currentSlot): toValidationResult( - node.optimisticProcessor.processSignedBeaconBlock( - signedBlock)) + node.optimisticProcessor.processSignedBeaconBlock(signedBlock) + ) else: toValidationResult( - node.processor[].processSignedBeaconBlock( - MsgSource.gossip, signedBlock))) + node.processor[].processSignedBeaconBlock(MsgSource.gossip, signedBlock) + ), + ) # beacon_attestation_{subnet_id} # https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.5/specs/phase0/p2p-interface.md#beacon_attestation_subnet_id when consensusFork >= ConsensusFork.Electra: for it in SubnetId: - closureScope: # Needed for inner `proc`; don't lift it out of loop. + closureScope: let subnet_id = it node.network.addAsyncValidator( - getAttestationTopic(digest, subnet_id), proc ( - attestation: electra.Attestation - ): Future[ValidationResult] {. 
- async: (raises: [CancelledError]).} = + getAttestationTopic(digest, subnet_id), + proc( + attestation: electra.Attestation + ): Future[ValidationResult] {.async: (raises: [CancelledError]).} = return toValidationResult( await node.processor.processAttestation( - MsgSource.gossip, attestation, subnet_id, - checkSignature = true, checkValidator = false))) + MsgSource.gossip, + attestation, + subnet_id, + checkSignature = true, + checkValidator = false, + ) + ), + ) else: for it in SubnetId: - closureScope: # Needed for inner `proc`; don't lift it out of loop. + closureScope: let subnet_id = it node.network.addAsyncValidator( - getAttestationTopic(digest, subnet_id), proc ( - attestation: phase0.Attestation - ): Future[ValidationResult] {. - async: (raises: [CancelledError]).} = + getAttestationTopic(digest, subnet_id), + proc( + attestation: phase0.Attestation + ): Future[ValidationResult] {.async: (raises: [CancelledError]).} = return toValidationResult( await node.processor.processAttestation( - MsgSource.gossip, attestation, subnet_id, - checkSignature = true, checkValidator = false))) + MsgSource.gossip, + attestation, + subnet_id, + checkSignature = true, + checkValidator = false, + ) + ), + ) # beacon_aggregate_and_proof # https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/phase0/p2p-interface.md#beacon_aggregate_and_proof when consensusFork >= ConsensusFork.Electra: node.network.addAsyncValidator( - getAggregateAndProofsTopic(digest), proc ( - signedAggregateAndProof: electra.SignedAggregateAndProof + getAggregateAndProofsTopic(digest), + proc( + signedAggregateAndProof: electra.SignedAggregateAndProof ): Future[ValidationResult] {.async: (raises: [CancelledError]).} = return toValidationResult( await node.processor.processSignedAggregateAndProof( - MsgSource.gossip, signedAggregateAndProof))) + MsgSource.gossip, signedAggregateAndProof + ) + ), + ) else: node.network.addAsyncValidator( - getAggregateAndProofsTopic(digest), proc ( - signedAggregateAndProof: phase0.SignedAggregateAndProof + getAggregateAndProofsTopic(digest), + proc( + signedAggregateAndProof: phase0.SignedAggregateAndProof ): Future[ValidationResult] {.async: (raises: [CancelledError]).} = return toValidationResult( await node.processor.processSignedAggregateAndProof( - MsgSource.gossip, signedAggregateAndProof))) + MsgSource.gossip, signedAggregateAndProof + ) + ), + ) # attester_slashing # https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.5/specs/phase0/p2p-interface.md#attester_slashing # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.6/specs/electra/p2p-interface.md#modifications-in-electra when consensusFork >= ConsensusFork.Electra: node.network.addValidator( - getAttesterSlashingsTopic(digest), proc ( - attesterSlashing: electra.AttesterSlashing - ): ValidationResult = + getAttesterSlashingsTopic(digest), + proc(attesterSlashing: electra.AttesterSlashing): ValidationResult = toValidationResult( node.processor[].processAttesterSlashing( - MsgSource.gossip, attesterSlashing))) + MsgSource.gossip, attesterSlashing + ) + ), + ) else: node.network.addValidator( - getAttesterSlashingsTopic(digest), proc ( - attesterSlashing: phase0.AttesterSlashing - ): ValidationResult = + getAttesterSlashingsTopic(digest), + proc(attesterSlashing: phase0.AttesterSlashing): ValidationResult = toValidationResult( node.processor[].processAttesterSlashing( - MsgSource.gossip, attesterSlashing))) + MsgSource.gossip, attesterSlashing + ) + ), + ) # proposer_slashing # 
https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/specs/phase0/p2p-interface.md#proposer_slashing node.network.addValidator( - getProposerSlashingsTopic(digest), proc ( - proposerSlashing: ProposerSlashing - ): ValidationResult = + getProposerSlashingsTopic(digest), + proc(proposerSlashing: ProposerSlashing): ValidationResult = toValidationResult( - node.processor[].processProposerSlashing( - MsgSource.gossip, proposerSlashing))) + node.processor[].processProposerSlashing(MsgSource.gossip, proposerSlashing) + ), + ) # voluntary_exit # https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.5/specs/phase0/p2p-interface.md#voluntary_exit node.network.addValidator( - getVoluntaryExitsTopic(digest), proc ( - signedVoluntaryExit: SignedVoluntaryExit - ): ValidationResult = + getVoluntaryExitsTopic(digest), + proc(signedVoluntaryExit: SignedVoluntaryExit): ValidationResult = toValidationResult( node.processor[].processSignedVoluntaryExit( - MsgSource.gossip, signedVoluntaryExit))) + MsgSource.gossip, signedVoluntaryExit + ) + ), + ) when consensusFork >= ConsensusFork.Altair: # sync_committee_{subnet_id} # https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/altair/p2p-interface.md#sync_committee_subnet_id for subcommitteeIdx in SyncSubcommitteeIndex: - closureScope: # Needed for inner `proc`; don't lift it out of loop. + closureScope: let idx = subcommitteeIdx node.network.addAsyncValidator( - getSyncCommitteeTopic(digest, idx), proc ( - msg: SyncCommitteeMessage + getSyncCommitteeTopic(digest, idx), + proc( + msg: SyncCommitteeMessage ): Future[ValidationResult] {.async: (raises: [CancelledError]).} = return toValidationResult( await node.processor.processSyncCommitteeMessage( - MsgSource.gossip, msg, idx))) + MsgSource.gossip, msg, idx + ) + ), + ) # sync_committee_contribution_and_proof # https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/altair/p2p-interface.md#sync_committee_contribution_and_proof node.network.addAsyncValidator( - getSyncCommitteeContributionAndProofTopic(digest), proc ( - msg: SignedContributionAndProof + getSyncCommitteeContributionAndProofTopic(digest), + proc( + msg: SignedContributionAndProof ): Future[ValidationResult] {.async: (raises: [CancelledError]).} = return toValidationResult( await node.processor.processSignedContributionAndProof( - MsgSource.gossip, msg))) + MsgSource.gossip, msg + ) + ), + ) when consensusFork >= ConsensusFork.Capella: # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/specs/capella/p2p-interface.md#bls_to_execution_change node.network.addAsyncValidator( - getBlsToExecutionChangeTopic(digest), proc ( - msg: SignedBLSToExecutionChange + getBlsToExecutionChangeTopic(digest), + proc( + msg: SignedBLSToExecutionChange ): Future[ValidationResult] {.async: (raises: [CancelledError]).} = return toValidationResult( - await node.processor.processBlsToExecutionChange( - MsgSource.gossip, msg))) + await node.processor.processBlsToExecutionChange(MsgSource.gossip, msg) + ), + ) when consensusFork >= ConsensusFork.Deneb: # blob_sidecar_{subnet_id} # https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.5/specs/deneb/p2p-interface.md#blob_sidecar_subnet_id for it in BlobId: - closureScope: # Needed for inner `proc`; don't lift it out of loop. 
+ closureScope: let subnet_id = it node.network.addValidator( - getBlobSidecarTopic(digest, subnet_id), proc ( - blobSidecar: deneb.BlobSidecar - ): ValidationResult = + getBlobSidecarTopic(digest, subnet_id), + proc(blobSidecar: deneb.BlobSidecar): ValidationResult = toValidationResult( node.processor[].processBlobSidecar( - MsgSource.gossip, blobSidecar, subnet_id))) + MsgSource.gossip, blobSidecar, subnet_id + ) + ), + ) # node.installLightClientMessageValidators() @@ -1206,33 +1268,37 @@ proc fetchGenesisState( nil proc pruneBlobs(node: BeaconNode, slot: Slot) = - let blobPruneEpoch = (slot.epoch - - node.dag.cfg.MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS - 1) + let blobPruneEpoch = + (slot.epoch - node.dag.cfg.MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS - 1) if slot.is_epoch() and blobPruneEpoch >= node.dag.cfg.DENEB_FORK_EPOCH: var blocks: array[SLOTS_PER_EPOCH.int, BlockId] var count = 0 let startIndex = node.dag.getBlockRange( - blobPruneEpoch.start_slot, 1, blocks.toOpenArray(0, SLOTS_PER_EPOCH - 1)) - for i in startIndex.. 0: - debug "Enabling blocks topic subscriptions", - wallSlot = slot, targetGossipState + debug "Enabling blocks topic subscriptions", wallSlot = slot, targetGossipState elif currentGossipState.card > 0 and targetGossipState.card == 0: - debug "Disabling blocks topic subscriptions", - wallSlot = slot + debug "Disabling blocks topic subscriptions", wallSlot = slot else: # Individual forks added / removed discard @@ -1531,8 +1596,8 @@ proc updateBlocksGossipStatus*( for gossipFork in newGossipForks: let forkDigest = node.dag.forkDigests[].atConsensusFork(gossipFork) node.network.subscribe( - getBeaconBlocksTopic(forkDigest), blocksTopicParams, - enableTopicMetrics = true) + getBeaconBlocksTopic(forkDigest), blocksTopicParams, enableTopicMetrics = true + ) node.blocksGossipState = targetGossipState @@ -1548,10 +1613,8 @@ proc updateAttestationSubnetHandlers(node: BeaconNode, slot: Slot) = return let - aggregateSubnets = - node.consensusManager[].actionTracker.aggregateSubnets(slot) - stabilitySubnets = - node.consensusManager[].actionTracker.stabilitySubnets(slot) + aggregateSubnets = node.consensusManager[].actionTracker.aggregateSubnets(slot) + stabilitySubnets = node.consensusManager[].actionTracker.stabilitySubnets(slot) subnets = aggregateSubnets + stabilitySubnets node.network.updateStabilitySubnetMetadata(stabilitySubnets) @@ -1573,7 +1636,9 @@ proc updateAttestationSubnetHandlers(node: BeaconNode, slot: Slot) = node.network.subscribeAttestationSubnets(subscribeSubnets, forkDigest) debug "Attestation subnets", - slot, epoch = slot.epoch, gossipState = node.gossipState, + slot, + epoch = slot.epoch, + gossipState = node.gossipState, stabilitySubnets = subnetLog(stabilitySubnets), aggregateSubnets = subnetLog(aggregateSubnets), prevSubnets = subnetLog(prevSubnets), @@ -1599,24 +1664,22 @@ proc localUpdateGossipStatus(node: BeaconNode, slot: Slot) {.async.} = TOPIC_SUBSCRIBE_THRESHOLD_SLOTS = 64 HYSTERESIS_BUFFER = 16 - static: doAssert high(ConsensusFork) == ConsensusFork.Electra + static: + doAssert high(ConsensusFork) == ConsensusFork.Electra let head = node.dag.head headDistance = - if slot > head.slot: (slot - head.slot).uint64 - else: 0'u64 - isBehind = - headDistance > TOPIC_SUBSCRIBE_THRESHOLD_SLOTS + HYSTERESIS_BUFFER - targetGossipState = - getTargetGossipState( - slot.epoch, - node.dag.cfg.ALTAIR_FORK_EPOCH, - node.dag.cfg.BELLATRIX_FORK_EPOCH, - node.dag.cfg.CAPELLA_FORK_EPOCH, - node.dag.cfg.DENEB_FORK_EPOCH, - node.dag.cfg.ELECTRA_FORK_EPOCH, - isBehind) + 
if slot > head.slot: + (slot - head.slot).uint64 + else: + 0'u64 + isBehind = headDistance > TOPIC_SUBSCRIBE_THRESHOLD_SLOTS + HYSTERESIS_BUFFER + targetGossipState = getTargetGossipState( + slot.epoch, node.dag.cfg.ALTAIR_FORK_EPOCH, node.dag.cfg.BELLATRIX_FORK_EPOCH, + node.dag.cfg.CAPELLA_FORK_EPOCH, node.dag.cfg.DENEB_FORK_EPOCH, + node.dag.cfg.ELECTRA_FORK_EPOCH, isBehind, + ) doAssert targetGossipState.card <= 2 @@ -1633,18 +1696,15 @@ proc localUpdateGossipStatus(node: BeaconNode, slot: Slot) {.async.} = res = max(res, gossipFork.int) res - if maxGossipFork(targetGossipState) < maxGossipFork(node.gossipState) and + if maxGossipFork(targetGossipState) < maxGossipFork(node.gossipState) and targetGossipState != {}: warn "Unexpected clock regression during transition", - targetGossipState, - gossipState = node.gossipState + targetGossipState, gossipState = node.gossipState if node.gossipState.card == 0 and targetGossipState.card > 0: # We are synced, so we will connect debug "Enabling topic subscriptions", - wallSlot = slot, - headSlot = head.slot, - headDistance, targetGossipState + wallSlot = slot, headSlot = head.slot, headDistance, targetGossipState node.processor[].setupDoppelgangerDetection(slot) @@ -1653,20 +1713,19 @@ proc localUpdateGossipStatus(node: BeaconNode, slot: Slot) {.async.} = # We "know" the actions for the current and the next epoch withState(node.dag.headState): - if node.consensusManager[].actionTracker.needsUpdate( - forkyState, slot.epoch): + if node.consensusManager[].actionTracker.needsUpdate(forkyState, slot.epoch): let epochRef = node.dag.getEpochRef(head, slot.epoch, false).expect( - "Getting head EpochRef should never fail") + "Getting head EpochRef should never fail" + ) node.consensusManager[].actionTracker.updateActions( - epochRef.shufflingRef, epochRef.beacon_proposers) + epochRef.shufflingRef, epochRef.beacon_proposers + ) node.maybeUpdateActionTrackerNextEpoch(forkyState, slot.epoch + 1) if node.gossipState.card > 0 and targetGossipState.card == 0: debug "Disabling topic subscriptions", - wallSlot = slot, - headSlot = head.slot, - headDistance + wallSlot = slot, headSlot = head.slot, headDistance node.processor[].clearDoppelgangerProtection() @@ -1675,10 +1734,10 @@ proc localUpdateGossipStatus(node: BeaconNode, slot: Slot) {.async.} = const removeMessageHandlers: array[ConsensusFork, auto] = [ removePhase0MessageHandlers, removeAltairMessageHandlers, - removeAltairMessageHandlers, # bellatrix (altair handlers, different forkDigest) + removeAltairMessageHandlers, # bellatrix (altair handlers, different forkDigest) removeCapellaMessageHandlers, removeDenebMessageHandlers, - removeElectraMessageHandlers + removeElectraMessageHandlers, ] for gossipFork in oldGossipForks: @@ -1687,10 +1746,10 @@ proc localUpdateGossipStatus(node: BeaconNode, slot: Slot) {.async.} = const addMessageHandlers: array[ConsensusFork, auto] = [ addPhase0MessageHandlers, addAltairMessageHandlers, - addAltairMessageHandlers, # bellatrix (altair handlers, different forkDigest) + addAltairMessageHandlers, # bellatrix (altair handlers, different forkDigest) addCapellaMessageHandlers, addDenebMessageHandlers, - addElectraMessageHandlers + addElectraMessageHandlers, ] for gossipFork in newGossipForks: @@ -1708,8 +1767,9 @@ proc onSlotEnd(node: BeaconNode, slot: Slot) {.async.} = # By waiting until close before slot end, ensure that preparation for next # slot does not interfere with propagation of messages and with VC duties. 
- const endOffset = aggregateSlotOffset + nanos( - (NANOSECONDS_PER_SLOT - aggregateSlotOffset.nanoseconds.uint64).int64 div 2) + const endOffset = + aggregateSlotOffset + + nanos((NANOSECONDS_PER_SLOT - aggregateSlotOffset.nanoseconds.uint64).int64 div 2) let endCutoff = node.beaconClock.fromNow(slot.start_beacon_time + endOffset) if endCutoff.inFuture: debug "Waiting for slot end", slot, endCutoff = shortLog(endCutoff.offset) @@ -1717,12 +1777,9 @@ proc onSlotEnd(node: BeaconNode, slot: Slot) {.async.} = if node.dag.needStateCachesAndForkChoicePruning(): if node.attachedValidators[].validators.len > 0: - node.attachedValidators[] - .slashingProtection - # pruning is only done if the DB is set to pruning mode. - .pruneAfterFinalization( - node.dag.finalizedHead.slot.epoch() - ) + node.attachedValidators[].slashingProtection + # pruning is only done if the DB is set to pruning mode. + .pruneAfterFinalization(node.dag.finalizedHead.slot.epoch()) # Delay part of pruning until latency critical duties are done. # The other part of pruning, `pruneBlocksDAG`, is done eagerly. @@ -1791,8 +1848,7 @@ proc onSlotEnd(node: BeaconNode, slot: Slot) {.async.} = let nextAttestationSlot = node.consensusManager[].actionTracker.getNextAttestationSlot(slot) - nextProposalSlot = - node.consensusManager[].actionTracker.getNextProposalSlot(slot) + nextProposalSlot = node.consensusManager[].actionTracker.getNextProposalSlot(slot) nextActionSlot = min(nextAttestationSlot, nextProposalSlot) nextActionWaitTime = saturate(fromNow(node.beaconClock, nextActionSlot)) @@ -1819,8 +1875,10 @@ proc onSlotEnd(node: BeaconNode, slot: Slot) {.async.} = since_sync_committee_period_start(syncCommitteeSlot) # int64 conversion is safe doAssert slotsToNextSyncCommitteePeriod <= SLOTS_PER_SYNC_COMMITTEE_PERIOD - "in " & toTimeLeftString( - SECONDS_PER_SLOT.int64.seconds * slotsToNextSyncCommitteePeriod.int64) + "in " & + toTimeLeftString( + SECONDS_PER_SLOT.int64.seconds * slotsToNextSyncCommitteePeriod.int64 + ) else: "none" @@ -1856,7 +1914,8 @@ proc onSlotEnd(node: BeaconNode, slot: Slot) {.async.} = # state in anticipation of receiving the next block - we do it after # logging slot end since the nextActionWaitTime can be short let advanceCutoff = node.beaconClock.fromNow( - slot.start_beacon_time() + chronos.seconds(int(SECONDS_PER_SLOT - 1))) + slot.start_beacon_time() + chronos.seconds(int(SECONDS_PER_SLOT - 1)) + ) if advanceCutoff.inFuture: # We wait until there's only a second left before the next slot begins, then # we advance the clearance state to the next slot - this gives us a high @@ -1875,10 +1934,8 @@ proc onSlotEnd(node: BeaconNode, slot: Slot) {.async.} = await node.localUpdateGossipStatus(slot + 1) -func formatNextConsensusFork( - node: BeaconNode, withVanityArt = false): Opt[string] = - let consensusFork = - node.dag.cfg.consensusForkAtEpoch(node.dag.head.slot.epoch) +func formatNextConsensusFork(node: BeaconNode, withVanityArt = false): Opt[string] = + let consensusFork = node.dag.cfg.consensusForkAtEpoch(node.dag.head.slot.epoch) if consensusFork == ConsensusFork.high: return Opt.none(string) let @@ -1888,17 +1945,14 @@ func formatNextConsensusFork( return Opt.none(string) Opt.some( (if withVanityArt: nextConsensusFork.getVanityMascot & " " else: "") & - $nextConsensusFork & ":" & $nextForkEpoch) + $nextConsensusFork & ":" & $nextForkEpoch + ) func syncStatus(node: BeaconNode, wallSlot: Slot): string = let optimisticHead = not node.dag.head.executionValid if node.syncManager.inProgress: let - optimisticSuffix 
= - if optimisticHead: - "/opt" - else: - "" + optimisticSuffix = if optimisticHead: "/opt" else: "" # lightClientSuffix = # if node.consensusManager[].shouldSyncOptimistically(wallSlot): # " - lc: " & $shortLog(node.consensusManager[].optimisticHead) @@ -1935,8 +1989,10 @@ func formatGwei(amount: Gwei): string = when not defined(windows): proc initStatusBar(node: BeaconNode) {.raises: [ValueError].} = - if not isatty(stdout): return - if not node.config.statusBarEnabled: return + if not isatty(stdout): + return + if not node.config.statusBarEnabled: + return try: enableTrueColors() @@ -1944,9 +2000,11 @@ when not defined(windows): error "Couldn't enable colors", err = exc.msg proc dataResolver(expr: string): string {.raises: [].} = - template justified: untyped = node.dag.head.atEpochStart( - getStateField( - node.dag.headState, current_justified_checkpoint).epoch) + template justified(): untyped = + node.dag.head.atEpochStart( + getStateField(node.dag.headState, current_justified_checkpoint).epoch + ) + # TODO: # We should introduce a general API for resolving dot expressions # such as `db.latest_block.slot` or `metrics.connected_peers`. @@ -1957,13 +2015,10 @@ when not defined(windows): case expr.toLowerAscii of "version": versionAsStr - of "full_version": fullVersionStr - of "connected_peers": $(node.connectedPeersCount) - of "head_root": shortLog(node.dag.head.root) of "head_epoch": @@ -1972,7 +2027,6 @@ when not defined(windows): $(node.dag.head.slot.since_epoch_start) of "head_slot": $(node.dag.head.slot) - of "justifed_root": shortLog(justified.blck.root) of "justifed_epoch": @@ -1981,7 +2035,6 @@ when not defined(windows): $(justified.slot.since_epoch_start) of "justifed_slot": $(justified.slot) - of "finalized_root": shortLog(node.dag.finalizedHead.blck.root) of "finalized_epoch": @@ -1990,27 +2043,21 @@ when not defined(windows): $(node.dag.finalizedHead.slot.since_epoch_start) of "finalized_slot": $(node.dag.finalizedHead.slot) - of "epoch": $node.currentSlot.epoch - of "epoch_slot": $(node.currentSlot.since_epoch_start) - of "slot": $node.currentSlot - of "slots_per_epoch": $SLOTS_PER_EPOCH - of "slot_trailing_digits": var slotStr = $node.currentSlot - if slotStr.len > 3: slotStr = slotStr[^3..^1] + if slotStr.len > 3: + slotStr = slotStr[^3 ..^ 1] slotStr - of "attached_validators_balance": formatGwei(node.attachedValidatorBalanceTotal) - of "next_consensus_fork": let nextConsensusForkDescription = node.formatNextConsensusFork(withVanityArt = true) @@ -2018,7 +2065,6 @@ when not defined(windows): "" else: " (scheduled " & nextConsensusForkDescription.get & ")" - of "sync_status": node.syncStatus(node.currentSlot) else: @@ -2027,24 +2073,23 @@ when not defined(windows): # an error message to the user. 
"$" & expr - var statusBar = StatusBarView.init( - node.config.statusBarContents, - dataResolver) + var statusBar = StatusBarView.init(node.config.statusBarContents, dataResolver) when compiles(defaultChroniclesStream.outputs[0].writer): let tmp = defaultChroniclesStream.outputs[0].writer - defaultChroniclesStream.outputs[0].writer = - proc (logLevel: LogLevel, msg: LogOutputStr) {.raises: [].} = - try: - # p.hidePrompt - erase statusBar - # p.writeLine msg - tmp(logLevel, msg) - render statusBar - # p.showPrompt - except Exception as e: # render raises Exception - logLoggingFailure(cstring(msg), e) + defaultChroniclesStream.outputs[0].writer = proc( + logLevel: LogLevel, msg: LogOutputStr + ) {.raises: [].} = + try: + # p.hidePrompt + erase statusBar + # p.writeLine msg + tmp(logLevel, msg) + render statusBar + # p.showPrompt + except Exception as e: # render raises Exception + logLoggingFailure(cstring(msg), e) proc statusBarUpdatesPollingLoop() {.async.} = try: @@ -2113,16 +2158,16 @@ func verifyFinalization(node: BeaconNode, slot: Slot) = # Intentionally, loudly assert. Point is to fail visibly and unignorably # during testing. if epoch >= 4 and slot mod SLOTS_PER_EPOCH > SETTLING_TIME_OFFSET: - let finalizedEpoch = - node.dag.finalizedHead.slot.epoch() + let finalizedEpoch = node.dag.finalizedHead.slot.epoch() # Finalization rule 234, that has the most lag slots among the cases, sets # state.finalized_checkpoint = old_previous_justified_checkpoint.epoch + 3 # and then state.slot gets incremented, to increase the maximum offset, if # finalization occurs every slot, to 4 slots vs scheduledSlot. doAssert finalizedEpoch + 4 >= epoch -proc onSlotStart(node: BeaconNode, wallTime: BeaconTime, - lastSlot: Slot): Future[bool] {.async.} = +proc onSlotStart( + node: BeaconNode, wallTime: BeaconTime, lastSlot: Slot +): Future[bool] {.async.} = ## Called at the beginning of a slot - usually every slot, but sometimes might ## skip a few in case we're running late. ## wallTime: current system time - we will strive to perform all duties up @@ -2146,8 +2191,7 @@ proc onSlotStart(node: BeaconNode, wallTime: BeaconTime, sync = node.syncStatus(wallSlot) peers = len(node.network.peerPool) head = shortLog(node.dag.head) - finalized = shortLog(getStateField( - node.dag.headState, finalized_checkpoint)) + finalized = shortLog(getStateField(node.dag.headState, finalized_checkpoint)) delay = shortLog(delay) let nextConsensusForkDescription = node.formatNextConsensusFork() if nextConsensusForkDescription.isNone: @@ -2183,7 +2227,7 @@ proc onSlotStart(node: BeaconNode, wallTime: BeaconTime, # https://github.com/ethereum/builder-specs/blob/v0.4.0/specs/bellatrix/validator.md#registration-dissemination # This specification suggests validators re-submit to builder software every # `EPOCHS_PER_VALIDATOR_REGISTRATION_SUBMISSION` epochs. - if wallSlot.is_epoch and + if wallSlot.is_epoch and wallSlot.epoch mod EPOCHS_PER_VALIDATOR_REGISTRATION_SUBMISSION == 0: asyncSpawn node.registerValidators(wallSlot.epoch) @@ -2250,7 +2294,8 @@ proc run(node: BeaconNode) {.raises: [CatchableError].} = node.requestManager.start() node.syncManager.start() - if node.dag.needsBackfill(): asyncSpawn node.startBackfillTask() + if node.dag.needsBackfill(): + asyncSpawn node.startBackfillTask() waitFor node.localUpdateGossipStatus(wallSlot) @@ -2263,7 +2308,8 @@ proc run(node: BeaconNode) {.raises: [CatchableError].} = # Consider using different strategies that would detect such # race conditions. 
asyncSpawn node.pollForDynamicValidators( - web3signerUrl, node.config.web3signerUpdateInterval) + web3signerUrl, node.config.web3signerUpdateInterval + ) asyncSpawn runSlotLoop(node, wallTime, onSlotStart) asyncSpawn runOnSecondLoop(node) @@ -2291,10 +2337,9 @@ proc start*(node: BeaconNode) {.raises: [CatchableError].} = timeSinceFinalization = node.beaconClock.now() - finalizedHead.slot.start_beacon_time(), head = shortLog(head), - justified = shortLog(getStateField( - node.dag.headState, current_justified_checkpoint)), - finalized = shortLog(getStateField( - node.dag.headState, finalized_checkpoint)), + justified = + shortLog(getStateField(node.dag.headState, current_justified_checkpoint)), + finalized = shortLog(getStateField(node.dag.headState, finalized_checkpoint)), finalizedHead = shortLog(finalizedHead), SLOTS_PER_EPOCH, SECONDS_PER_SLOT, @@ -2312,20 +2357,21 @@ proc start*(node: BeaconNode) {.raises: [CatchableError].} = ## runs beacon node ## adapted from nimbus-eth2 -proc doRunBeaconNode(config: var BeaconNodeConf, rng: ref HmacDrbgContext) {.raises: [CatchableError].} = - +proc doRunBeaconNode( + config: var BeaconNodeConf, rng: ref HmacDrbgContext +) {.raises: [CatchableError].} = # TODO: Define this varaibles somewhere info "Launching beacon node", - version = fullVersionStr, - bls_backend = $BLS_BACKEND, - const_preset, - cmdParams = commandLineParams(), - config + version = fullVersionStr, + bls_backend = $BLS_BACKEND, + const_preset, + cmdParams = commandLineParams(), + config template ignoreDeprecatedOption(option: untyped): untyped = if config.option.isSome: - warn "Config option is deprecated", - option = config.option.get + warn "Config option is deprecated", option = config.option.get + ignoreDeprecatedOption requireEngineAPI ignoreDeprecatedOption safeSlotsToImportOptimistically ignoreDeprecatedOption terminalTotalDifficultyOverride @@ -2371,9 +2417,12 @@ proc doRunBeaconNode(config: var BeaconNodeConf, rng: ref HmacDrbgContext) {.rai # workaround for https://github.com/nim-lang/Nim/issues/4057 try: setupForeignThreadGc() - except Exception as exc: raiseAssert exc.msg # shouldn't happen + except Exception as exc: + raiseAssert exc.msg + # shouldn't happen notice "Shutting down after having received SIGINT" bnStatus = BeaconNodeStatus.Stopping + try: setControlCHook(controlCHandler) except Exception as exc: # TODO Exception @@ -2384,6 +2433,7 @@ proc doRunBeaconNode(config: var BeaconNodeConf, rng: ref HmacDrbgContext) {.rai proc SIGTERMHandler(signal: cint) {.noconv.} = notice "Shutting down after having received SIGTERM" bnStatus = BeaconNodeStatus.Stopping + c_signal(ansi_c.SIGTERM, SIGTERMHandler) block: @@ -2406,7 +2456,8 @@ proc doRunBeaconNode(config: var BeaconNodeConf, rng: ref HmacDrbgContext) {.rai initStatusBar(node) if node.nickname != "": - dynamicLogScope(node = node.nickname): node.start() + dynamicLogScope(node = node.nickname): + node.start() else: node.start() @@ -2417,9 +2468,10 @@ proc handleStartingOption(config: var BeaconNodeConf) {.raises: [CatchableError] # More options can be added, might be out of scope given that they exist in eth2 case config.cmd - of BNSStartUpCmd.noCommand: doRunBeaconNode(config, rng) + of BNSStartUpCmd.noCommand: + doRunBeaconNode(config, rng) of BNSStartUpCmd.trustedNodeSync: - if config.blockId.isSome(): + if config.blockId.isSome(): error "--blockId option has been removed - use --state-id instead!" 
quit 1 @@ -2428,22 +2480,16 @@ proc handleStartingOption(config: var BeaconNodeConf) {.raises: [CatchableError] db = BeaconChainDB.new(config.databaseDir, metadata.cfg, inMemory = false) genesisState = waitFor fetchGenesisState(metadata) waitFor db.doRunTrustedNodeSync( - config.databaseDir, - config.eraDir, - config.trustedNodeUrl, - config.stateId, - config.lcTrustedBlockRoot, - config.backfillBlocks, - config.reindex, - config.downloadDepositSnapshot, - genesisState) + config.databaseDir, config.eraDir, config.trustedNodeUrl, config.stateId, + config.lcTrustedBlockRoot, config.backfillBlocks, config.reindex, + config.downloadDepositSnapshot, genesisState, + ) db.close() ## Consensus wrapper proc consensusWrapper*(parameters: TaskParameters) {.raises: [CatchableError].} = - var config = parameters.beaconNodeConfigs - try: + try: doRunBeaconNode(config, rng) except CatchableError as e: fatal "error", message = e.msg diff --git a/nimbus_unified/nimbus_unified.nim b/nimbus_unified/nimbus_unified.nim index f28917fe0..acb277a02 100644 --- a/nimbus_unified/nimbus_unified.nim +++ b/nimbus_unified/nimbus_unified.nim @@ -48,7 +48,8 @@ proc executionLayerHandler(parameters: TaskParameters) {.thread.} = ## Consensus Layer handler proc consensusLayerHandler(parameters: TaskParameters) {.thread.} = info "Started task:", task = parameters.name - consensusWrapper(parameters) + {.gcsafe.}: + consensusWrapper(parameters) info "\tExiting task:", task = parameters.name ## Waits for tasks to finish (joinThreads) From 684d9188dee43f639b7ea09f74485940802cd40b Mon Sep 17 00:00:00 2001 From: Pedro Miranda Date: Tue, 26 Nov 2024 17:51:18 +0000 Subject: [PATCH 22/32] trusted sync fixes --- nimbus_unified/consensus/consensus_wrapper.nim | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/nimbus_unified/consensus/consensus_wrapper.nim b/nimbus_unified/consensus/consensus_wrapper.nim index c77a6c2de..1569ee0bf 100644 --- a/nimbus_unified/consensus/consensus_wrapper.nim +++ b/nimbus_unified/consensus/consensus_wrapper.nim @@ -2468,9 +2468,9 @@ proc handleStartingOption(config: var BeaconNodeConf) {.raises: [CatchableError] # More options can be added, might be out of scope given that they exist in eth2 case config.cmd - of BNSStartUpCmd.noCommand: + of BNStartUpCmd.noCommand: doRunBeaconNode(config, rng) - of BNSStartUpCmd.trustedNodeSync: + of BNStartUpCmd.trustedNodeSync: if config.blockId.isSome(): error "--blockId option has been removed - use --state-id instead!" 
quit 1 @@ -2480,17 +2480,20 @@ proc handleStartingOption(config: var BeaconNodeConf) {.raises: [CatchableError] db = BeaconChainDB.new(config.databaseDir, metadata.cfg, inMemory = false) genesisState = waitFor fetchGenesisState(metadata) waitFor db.doRunTrustedNodeSync( - config.databaseDir, config.eraDir, config.trustedNodeUrl, config.stateId, - config.lcTrustedBlockRoot, config.backfillBlocks, config.reindex, + metadata, config.databaseDir, config.eraDir, config.trustedNodeUrl, + config.stateId, config.lcTrustedBlockRoot, config.backfillBlocks, config.reindex, config.downloadDepositSnapshot, genesisState, ) db.close() + else: + notice("unknonw option") + isShutDownRequired.store(true) -## Consensus wrapper proc consensusWrapper*(parameters: TaskParameters) {.raises: [CatchableError].} = var config = parameters.beaconNodeConfigs + try: - doRunBeaconNode(config, rng) + handleStartingOption(config) except CatchableError as e: fatal "error", message = e.msg isShutDownRequired.store(true) From dfc0ebc7094de99a37970913f52f23b8ad047131 Mon Sep 17 00:00:00 2001 From: Pedro Miranda Date: Wed, 27 Nov 2024 10:04:01 +0000 Subject: [PATCH 23/32] startup fixes --- nimbus_unified/nimbus_unified.nim | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/nimbus_unified/nimbus_unified.nim b/nimbus_unified/nimbus_unified.nim index acb277a02..47b294225 100644 --- a/nimbus_unified/nimbus_unified.nim +++ b/nimbus_unified/nimbus_unified.nim @@ -6,7 +6,7 @@ # at your option. This file may not be copied, modified, or distributed except according to those terms. import - std/[atomics, os, exitprocs], + std/[atomics, os], chronicles, stew/io2, consensus/consensus_wrapper, @@ -14,6 +14,7 @@ import beacon_chain/[conf, conf_common], beacon_chain/[beacon_chain_db], beacon_chain/validators/keystore_management, + beacon_chain/nimbus_binary_common, version ## Constants @@ -156,7 +157,7 @@ when isMainModule: var tasksList: NimbusTasks = NimbusTasks.new var beaconNodeConfig = makeBannerAndConfig( - clientId, copyrights, nimBanner, SPEC_VERSION, [], BeaconNodeConf + clientName, versionAsStr, nimBanner, "", [], BeaconNodeConf ).valueOr: stderr.write error quit QuitFailure @@ -166,7 +167,7 @@ when isMainModule: setupFileLimits() - setupLogging(config.logLevel, config.logStdout, config.logFile) + # setupLogging(config.logLevel, config.logStdout, config.logFile) createPidFile(beaconNodeConfig.databaseDir.string / "unified.pid") From 4c2fc659d053ea17fa975fe2c5ed1e143f7814a3 Mon Sep 17 00:00:00 2001 From: Pedro Miranda Date: Thu, 28 Nov 2024 09:34:44 +0000 Subject: [PATCH 24/32] Fix pid file creation --- nimbus_unified/nimbus_unified.nim | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nimbus_unified/nimbus_unified.nim b/nimbus_unified/nimbus_unified.nim index 47b294225..a856c3077 100644 --- a/nimbus_unified/nimbus_unified.nim +++ b/nimbus_unified/nimbus_unified.nim @@ -169,7 +169,7 @@ when isMainModule: # setupLogging(config.logLevel, config.logStdout, config.logFile) - createPidFile(beaconNodeConfig.databaseDir.string / "unified.pid") + createPidFile(beaconNodeConfig.dataDir.string / "unified.pid") ## Graceful shutdown by handling of Ctrl+C signal proc controlCHandler() {.noconv.} = From 0caad596a335622920b30be12f24f9a1237a33c6 Mon Sep 17 00:00:00 2001 From: Pedro Miranda Date: Fri, 29 Nov 2024 11:36:39 +0000 Subject: [PATCH 25/32] Start using eth2 code. All possible constraints regarding eth2 are identified and tested for possible solutions. 
--- nimbus_unified/consensus/consensus_wrapper.nim | 4 ++-- nimbus_unified/nimbus_unified.nim | 6 ++---- 2 files changed, 4 insertions(+), 6 deletions(-) diff --git a/nimbus_unified/consensus/consensus_wrapper.nim b/nimbus_unified/consensus/consensus_wrapper.nim index 1569ee0bf..5d8ead41c 100644 --- a/nimbus_unified/consensus/consensus_wrapper.nim +++ b/nimbus_unified/consensus/consensus_wrapper.nim @@ -9,7 +9,7 @@ import std/[os, atomics, random, terminal, times, exitprocs, sequtils], metrics, - beacon_chain/nimbus_binary_common, + beacon_chain/[nimbus_beacon_node, nimbus_binary_common], beacon_chain/spec/forks, beacon_chain/[beacon_chain_db, trusted_node_sync], beacon_chain/networking/network_metadata_downloads, @@ -2357,7 +2357,7 @@ proc start*(node: BeaconNode) {.raises: [CatchableError].} = ## runs beacon node ## adapted from nimbus-eth2 -proc doRunBeaconNode( +proc doRunBeaconNodeLocal( config: var BeaconNodeConf, rng: ref HmacDrbgContext ) {.raises: [CatchableError].} = # TODO: Define this varaibles somewhere diff --git a/nimbus_unified/nimbus_unified.nim b/nimbus_unified/nimbus_unified.nim index a856c3077..f4b00d68d 100644 --- a/nimbus_unified/nimbus_unified.nim +++ b/nimbus_unified/nimbus_unified.nim @@ -6,15 +6,13 @@ # at your option. This file may not be copied, modified, or distributed except according to those terms. import - std/[atomics, os], + std/[atomics, os, exitprocs], chronicles, stew/io2, consensus/consensus_wrapper, execution/execution_wrapper, - beacon_chain/[conf, conf_common], - beacon_chain/[beacon_chain_db], + beacon_chain/[nimbus_binary_common, conf, conf_common], beacon_chain/validators/keystore_management, - beacon_chain/nimbus_binary_common, version ## Constants From 4ee02209c1a8e034fbd0a11792adfb244d5ab9f4 Mon Sep 17 00:00:00 2001 From: Pedro Miranda Date: Wed, 4 Dec 2024 13:02:39 +0000 Subject: [PATCH 26/32] activate known P2P agents on makefile --- Makefile | 2 +- nimbus_unified/nimbus_unified.nim | 8 ++------ 2 files changed, 3 insertions(+), 7 deletions(-) diff --git a/Makefile b/Makefile index 0d6189c13..5f08f1b12 100644 --- a/Makefile +++ b/Makefile @@ -370,7 +370,7 @@ endif # Nimbus unified related targets # builds the unified client -# NIM_PARAMS := -d:release --parallelBuild:1 -d:libp2p_agents_metrics -d:KnownLibP2PAgents=nimbus,lighthouse,lodestar,prysm,teku,grandine $(NIM_PARAMS) +NIM_PARAMS := -d:release --parallelBuild:1 -d:libp2p_agents_metrics -d:KnownLibP2PAgents=nimbus,lighthouse,lodestar,prysm,teku,grandine $(NIM_PARAMS) nimbus_unified: | build deps echo -e $(BUILD_MSG) "build/$@" && \ $(ENV_SCRIPT) nim c $(NIM_PARAMS) -d:disable_libbacktrace -d:libp2p_pki_schemes=secp256k1 -d:unified -o:build/$@ "nimbus_unified/$@.nim" diff --git a/nimbus_unified/nimbus_unified.nim b/nimbus_unified/nimbus_unified.nim index f4b00d68d..5daba33b3 100644 --- a/nimbus_unified/nimbus_unified.nim +++ b/nimbus_unified/nimbus_unified.nim @@ -145,12 +145,8 @@ proc startTasks*( # ------ when isMainModule: - info "Starting Nimbus" - ## TODO - ## - file limits - ## - setup logging - ## - read configuration (check nimbus_configs file anottations) - ## - implement config reader for all components + notice "Starting Nimbus" + let nimbusConfigs = NimbusConfig() var tasksList: NimbusTasks = NimbusTasks.new From 6d482923185d10bfe0e439ac46fce2e7e1b3397f Mon Sep 17 00:00:00 2001 From: Pedro Miranda Date: Thu, 5 Dec 2024 09:12:00 +0000 Subject: [PATCH 27/32] removed file adapted_block_chain_dag.nim --- .../consensus/adapted_block_chain_dag.nim | 2885 ----------------- 1 file 
changed, 2885 deletions(-) delete mode 100644 nimbus_unified/consensus/adapted_block_chain_dag.nim diff --git a/nimbus_unified/consensus/adapted_block_chain_dag.nim b/nimbus_unified/consensus/adapted_block_chain_dag.nim deleted file mode 100644 index e5107dabb..000000000 --- a/nimbus_unified/consensus/adapted_block_chain_dag.nim +++ /dev/null @@ -1,2885 +0,0 @@ -# nimbus_unified -# Copyright (c) 2024 Status Research & Development GmbH -# Licensed and distributed under either of -# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). -# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). -# at your option. This file may not be copied, modified, or distributed except according to those terms. - -{.push raises: [].} - -import - std/[algorithm, sequtils, tables, sets], - stew/[arrayops, assign2, byteutils], - chronos, metrics, results, snappy, chronicles, - beacon_chain/spec/[beaconstate, eth2_merkleization, eth2_ssz_serialization, helpers, - state_transition, validator], - beacon_chain/spec/forks, - beacon_chain/[beacon_chain_db, beacon_clock, era_db], - beacon_chain/consensus_object_pools/[block_pools_types, block_quarantine] - -export - eth2_merkleization, eth2_ssz_serialization, - block_pools_types, results, beacon_chain_db - -logScope: topics = "chaindag" -# adapted from nimbus-eth2 - -# # https://github.com/ethereum/beacon-metrics/blob/master/metrics.md#interop-metrics -# declareGauge beacon_head_root, "Root of the head block of the beacon chain" -# declareGauge beacon_head_slot, "Slot of the head block of the beacon chain" - -# # https://github.com/ethereum/beacon-metrics/blob/master/metrics.md#interop-metrics -# declareGauge beacon_finalized_epoch, "Current finalized epoch" # On epoch transition -# declareGauge beacon_finalized_root, "Current finalized root" # On epoch transition -# declareGauge beacon_current_justified_epoch, "Current justified epoch" # On epoch transition -# declareGauge beacon_current_justified_root, "Current justified root" # On epoch transition -# declareGauge beacon_previous_justified_epoch, "Current previously justified epoch" # On epoch transition -# declareGauge beacon_previous_justified_root, "Current previously justified root" # On epoch transition - -# declareGauge beacon_reorgs_total_total, "Total occurrences of reorganizations of the chain" # On fork choice; backwards-compat name (used to be a counter) -# declareGauge beacon_reorgs_total, "Total occurrences of reorganizations of the chain" # Interop copy -# declareCounter beacon_state_data_cache_hits, "EpochRef hits" -# declareCounter beacon_state_data_cache_misses, "EpochRef misses" -# declareCounter beacon_state_rewinds, "State database rewinds" - -# declareGauge beacon_active_validators, "Number of validators in the active validator set" -# declareGauge beacon_current_active_validators, "Number of validators in the active validator set" # Interop copy -# declareGauge beacon_pending_deposits, "Number of pending deposits (state.eth1_data.deposit_count - state.eth1_deposit_index)" # On block -# declareGauge beacon_processed_deposits_total, "Number of total deposits included on chain" # On block - -# declareCounter beacon_dag_state_replay_seconds, "Time spent replaying states" - -const - EPOCHS_PER_STATE_SNAPSHOT* = 32 - ## When finality happens, we prune historical states from the database except - ## for a snapshot every 32 epochs from which replays can happen - there's a - ## balance here between making 
long replays and saving on disk space - MAX_SLOTS_PER_PRUNE* = SLOTS_PER_EPOCH - ## We prune the database incrementally so as not to introduce long - ## processing breaks - this number is the maximum number of blocks we allow - ## to be pruned every time the prune call is made (once per slot typically) - ## unless head is moving faster (ie during sync) - - -proc putBlock*( - dag: ChainDAGRef, signedBlock: ForkyTrustedSignedBeaconBlock) = - dag.db.putBlock(signedBlock) - -proc updateState*( - dag: ChainDAGRef, state: var ForkedHashedBeaconState, bsi: BlockSlotId, - save: bool, cache: var StateCache): bool {.gcsafe.} - -template withUpdatedState*( - dag: ChainDAGRef, stateParam: var ForkedHashedBeaconState, - bsiParam: BlockSlotId, okBody: untyped, failureBody: untyped): untyped = - ## Helper template that updates stateData to a particular BlockSlot - usage of - ## stateData is unsafe outside of block, or across `await` boundaries - - block: - let bsi {.inject.} = bsiParam - var cache {.inject.} = StateCache() - if updateState(dag, stateParam, bsi, false, cache): - template bid(): BlockId {.inject, used.} = bsi.bid - template updatedState(): ForkedHashedBeaconState {.inject, used.} = stateParam - okBody - else: - failureBody - -func get_effective_balances( - validators: openArray[Validator], epoch: Epoch): seq[Gwei] = - ## Get the balances from a state as counted for fork choice - result.newSeq(validators.len) # zero-init - - for i in 0 ..< result.len: - # All non-active validators have a 0 balance - let validator = unsafeAddr validators[i] - if validator[].is_active_validator(epoch) and not validator[].slashed: - result[i] = validator[].effective_balance - -proc updateValidatorKeys*(dag: ChainDAGRef, validators: openArray[Validator]) = - # Update validator key cache - must be called every time a valid block is - # applied to the state - this is important to ensure that when we sync blocks - # without storing a state (non-epoch blocks essentially), the deposits from - # those blocks are persisted to the in-database cache of immutable validator - # data (but no earlier than that the whole block as been validated) - dag.db.updateImmutableValidators(validators) - -proc updateFinalizedBlocks*(db: BeaconChainDB, newFinalized: openArray[BlockId]) = - if db.db.readOnly: return # TODO abstraction leak - where to put this? - - db.withManyWrites: - for bid in newFinalized: - db.finalizedBlocks.insert(bid.slot, bid.root) - -proc updateFrontfillBlocks*(dag: ChainDAGRef) = - # When backfilling is done and manages to reach the frontfill point, we can - # write the frontfill index knowing that the block information in the - # era files match the chain - if dag.db.db.readOnly: return # TODO abstraction leak - where to put this? 
- - if dag.frontfillBlocks.len == 0 or dag.backfill.slot > GENESIS_SLOT: - return - - info "Writing frontfill index", slots = dag.frontfillBlocks.len - - dag.db.withManyWrites: - let low = dag.db.finalizedBlocks.low.expect( - "wrote at least tailRef during init") - let blocks = min(low.int, dag.frontfillBlocks.len - 1) - var parent: Eth2Digest - for i in 0..blocks: - let root = dag.frontfillBlocks[i] - if not isZero(root): - dag.db.finalizedBlocks.insert(Slot(i), root) - dag.db.putBeaconBlockSummary( - root, BeaconBlockSummary(slot: Slot(i), parent_root: parent)) - parent = root - - reset(dag.frontfillBlocks) - -func validatorKey*( - dag: ChainDAGRef, index: ValidatorIndex or uint64): Opt[CookedPubKey] = - ## Returns the validator pubkey for the index, assuming it's been observed - ## at any point in time - this function may return pubkeys for indicies that - ## are not (yet) part of the head state (if the key has been observed on a - ## non-head branch)! - dag.db.immutableValidators.load(index) - -template is_merge_transition_complete*( - stateParam: ForkedHashedBeaconState): bool = - withState(stateParam): - when consensusFork >= ConsensusFork.Bellatrix: - is_merge_transition_complete(forkyState.data) - else: - false - -func effective_balances*(epochRef: EpochRef): seq[Gwei] = - try: - SSZ.decode(snappy.decode(epochRef.effective_balances_bytes, uint32.high), - List[Gwei, Limit VALIDATOR_REGISTRY_LIMIT]).toSeq() - except CatchableError as exc: - raiseAssert exc.msg - -func getBlockRef*(dag: ChainDAGRef, root: Eth2Digest): Opt[BlockRef] = - ## Retrieve a resolved block reference, if available - this function does - ## not return historical finalized blocks, see `getBlockIdAtSlot` for a - ## function that covers the entire known history - let key = KeyedBlockRef.asLookupKey(root) - # HashSet lacks the api to do check-and-get in one lookup - `[]` will return - # the copy of the instance in the set which has more fields than `root` set! - if key in dag.forkBlocks: - try: ok(dag.forkBlocks[key].blockRef()) - except KeyError: raiseAssert "contains" - else: - err() - -func getBlockIdAtSlot*( - state: ForkyHashedBeaconState, slot: Slot): Opt[BlockSlotId] = - ## Use given state to attempt to find a historical `BlockSlotId`. - if slot > state.data.slot: - return Opt.none(BlockSlotId) # State does not know about requested slot - if state.data.slot > slot + SLOTS_PER_HISTORICAL_ROOT: - return Opt.none(BlockSlotId) # Cache has expired - - var idx = slot mod SLOTS_PER_HISTORICAL_ROOT - let root = - if slot == state.data.slot: - state.latest_block_root - else: - state.data.block_roots[idx] - var bid = BlockId(slot: slot, root: root) - - let availableSlots = - min(slot.uint64, slot + SLOTS_PER_HISTORICAL_ROOT - state.data.slot) - for i in 0 ..< availableSlots: - if idx == 0: - idx = SLOTS_PER_HISTORICAL_ROOT - dec idx - if state.data.block_roots[idx] != root: - return Opt.some BlockSlotId.init(bid, slot) - dec bid.slot - - if bid.slot == GENESIS_SLOT: - return Opt.some BlockSlotId.init(bid, slot) - Opt.none(BlockSlotId) # Unknown if there are more empty slots before - -func getBlockIdAtSlot*(dag: ChainDAGRef, slot: Slot): Opt[BlockSlotId] = - ## Retrieve the canonical block at the given slot, or the last block that - ## comes before - similar to atSlot, but without the linear scan - may hit - ## the database to look up early indices. 
- if slot > dag.finalizedHead.slot: - return dag.head.atSlot(slot).toBlockSlotId() # iterate to the given slot - - if dag.finalizedHead.blck == nil: - # Not initialized yet (in init) - return Opt.none(BlockSlotId) - - if slot >= dag.finalizedHead.blck.slot: - # finalized head is still in memory - return dag.finalizedHead.blck.atSlot(slot).toBlockSlotId() - - # Load from memory, if the block ID is sufficiently recent. - # For checkpoint sync, this is the only available of historical block IDs - # until sufficient blocks have been backfilled. - template tryWithState(state: ForkedHashedBeaconState) = - block: - withState(state): - # State must be a descendent of the finalized chain to be viable - let finBsi = forkyState.getBlockIdAtSlot(dag.finalizedHead.slot) - if finBsi.isSome and # DAG finalized bid slot wrong if CP not @ epoch - finBsi.unsafeGet.bid.root == dag.finalizedHead.blck.bid.root: - let bsi = forkyState.getBlockIdAtSlot(slot) - if bsi.isSome: - return bsi - tryWithState dag.headState - tryWithState dag.epochRefState - tryWithState dag.clearanceState - - # Fallback to database, this only works for backfilled blocks - let finlow = dag.db.finalizedBlocks.low.expect("at least tailRef written") - if slot >= finlow: - var pos = slot - while true: - let root = dag.db.finalizedBlocks.get(pos) - - if root.isSome(): - return ok BlockSlotId.init( - BlockId(root: root.get(), slot: pos), slot) - - doAssert pos > finlow, "We should have returned the finlow" - - pos = pos - 1 - - if slot == GENESIS_SLOT and dag.genesis.isSome(): - return ok dag.genesis.get().atSlot() - - err() # not backfilled yet - -proc containsBlock( - cfg: RuntimeConfig, db: BeaconChainDB, slot: Slot, root: Eth2Digest): bool = - db.containsBlock(root, cfg.consensusForkAtEpoch(slot.epoch)) - -proc containsBlock*(dag: ChainDAGRef, bid: BlockId): bool = - dag.cfg.containsBlock(dag.db, bid.slot, bid.root) - -proc getForkedBlock*(db: BeaconChainDB, root: Eth2Digest): - Opt[ForkedTrustedSignedBeaconBlock] = - # When we only have a digest, we don't know which fork it's from so we try - # them one by one - this should be used sparingly - static: doAssert high(ConsensusFork) == ConsensusFork.Electra - if (let blck = db.getBlock(root, electra.TrustedSignedBeaconBlock); - blck.isSome()): - ok(ForkedTrustedSignedBeaconBlock.init(blck.get())) - elif (let blck = db.getBlock(root, deneb.TrustedSignedBeaconBlock); - blck.isSome()): - ok(ForkedTrustedSignedBeaconBlock.init(blck.get())) - elif (let blck = db.getBlock(root, capella.TrustedSignedBeaconBlock); - blck.isSome()): - ok(ForkedTrustedSignedBeaconBlock.init(blck.get())) - elif (let blck = db.getBlock(root, bellatrix.TrustedSignedBeaconBlock); - blck.isSome()): - ok(ForkedTrustedSignedBeaconBlock.init(blck.get())) - elif (let blck = db.getBlock(root, altair.TrustedSignedBeaconBlock); - blck.isSome()): - ok(ForkedTrustedSignedBeaconBlock.init(blck.get())) - elif (let blck = db.getBlock(root, phase0.TrustedSignedBeaconBlock); - blck.isSome()): - ok(ForkedTrustedSignedBeaconBlock.init(blck.get())) - else: - err() - -proc getBlock*( - dag: ChainDAGRef, bid: BlockId, - T: type ForkyTrustedSignedBeaconBlock): Opt[T] = - dag.db.getBlock(bid.root, T) or - getBlock( - dag.era, getStateField(dag.headState, historical_roots).asSeq, - dag.headState.historical_summaries().asSeq, - bid.slot, Opt[Eth2Digest].ok(bid.root), T) - -proc getBlockSSZ*(dag: ChainDAGRef, bid: BlockId, bytes: var seq[byte]): bool = - # Load the SSZ-encoded data of a block into `bytes`, overwriting the existing - # content - 
let fork = dag.cfg.consensusForkAtEpoch(bid.slot.epoch) - dag.db.getBlockSSZ(bid.root, bytes, fork) or - (bid.slot <= dag.finalizedHead.slot and - getBlockSSZ( - dag.era, getStateField(dag.headState, historical_roots).asSeq, - dag.headState.historical_summaries().asSeq, - bid.slot, bytes).isOk() and bytes.len > 0) - -proc getBlockSZ*(dag: ChainDAGRef, bid: BlockId, bytes: var seq[byte]): bool = - # Load the snappy-frame-compressed ("SZ") SSZ-encoded data of a block into - # `bytes`, overwriting the existing content - # careful: there are two snappy encodings in use, with and without framing! - # Returns true if the block is found, false if not - let fork = dag.cfg.consensusForkAtEpoch(bid.slot.epoch) - dag.db.getBlockSZ(bid.root, bytes, fork) or - (bid.slot <= dag.finalizedHead.slot and - getBlockSZ( - dag.era, getStateField(dag.headState, historical_roots).asSeq, - dag.headState.historical_summaries().asSeq, - bid.slot, bytes).isOk and bytes.len > 0) - -proc getForkedBlock*( - dag: ChainDAGRef, bid: BlockId): Opt[ForkedTrustedSignedBeaconBlock] = - - let fork = dag.cfg.consensusForkAtEpoch(bid.slot.epoch) - result.ok(ForkedTrustedSignedBeaconBlock(kind: fork)) - withBlck(result.get()): - type T = type(forkyBlck) - forkyBlck = getBlock(dag, bid, T).valueOr: - getBlock( - dag.era, getStateField(dag.headState, historical_roots).asSeq, - dag.headState.historical_summaries().asSeq, - bid.slot, Opt[Eth2Digest].ok(bid.root), T).valueOr: - result.err() - return - -proc getBlockId*(db: BeaconChainDB, root: Eth2Digest): Opt[BlockId] = - block: # We might have a summary in the database - let summary = db.getBeaconBlockSummary(root) - if summary.isOk(): - return ok(BlockId(root: root, slot: summary.get().slot)) - - block: - # We might have a block without having written a summary - this can happen - # if there was a crash between writing the block and writing the summary, - # specially in databases written by older nimbus versions - let forked = db.getForkedBlock(root) - if forked.isSome(): - # Shouldn't happen too often but.. - let - blck = forked.get() - summary = withBlck(blck): forkyBlck.message.toBeaconBlockSummary() - debug "Writing summary", blck = shortLog(blck) - db.putBeaconBlockSummary(root, summary) - return ok(BlockId(root: root, slot: summary.slot)) - - err() - -proc getBlockId*(dag: ChainDAGRef, root: Eth2Digest): Opt[BlockId] = - ## Look up block id by root in history - useful for turning a root into a - ## slot - may hit the database, may return blocks that have since become - ## unviable - use `getBlockIdAtSlot` to check that the block is still viable - ## if used in a sensitive context - block: # If we have a BlockRef, this is the fastest way to get a block id - let blck = dag.getBlockRef(root) - if blck.isOk(): - return ok(blck.get().bid) - - dag.db.getBlockId(root) - -proc getForkedBlock*( - dag: ChainDAGRef, root: Eth2Digest): Opt[ForkedTrustedSignedBeaconBlock] = - let bid = dag.getBlockId(root) - if bid.isSome(): - dag.getForkedBlock(bid.get()) - else: - # In case we didn't have a summary - should be rare, but .. - dag.db.getForkedBlock(root) - -func isCanonical*(dag: ChainDAGRef, bid: BlockId): bool = - ## Returns `true` if the given `bid` is part of the history selected by - ## `dag.head`. - let current = dag.getBlockIdAtSlot(bid.slot).valueOr: - return false # We don't know, so .. 
- return current.bid == bid - -func isFinalized*(dag: ChainDAGRef, bid: BlockId): bool = - ## Returns `true` if the given `bid` is part of the finalized history - ## selected by `dag.finalizedHead`. - dag.isCanonical(bid) and (bid.slot <= dag.finalizedHead.slot) - -func parent*(dag: ChainDAGRef, bid: BlockId): Opt[BlockId] = - if bid.slot == 0: - return err() - - if bid.slot > dag.finalizedHead.slot: - # Make sure we follow the correct history as there may be forks - let blck = ? dag.getBlockRef(bid.root) - - doAssert not isNil(blck.parent), "should reach finalized head" - return ok blck.parent.bid - - let bids = ? dag.getBlockIdAtSlot(bid.slot - 1) - ok(bids.bid) - -func parentOrSlot*(dag: ChainDAGRef, bsi: BlockSlotId): Opt[BlockSlotId] = - if bsi.slot == 0: - return err() - - if bsi.isProposed: - let parent = ? dag.parent(bsi.bid) - ok BlockSlotId.init(parent, bsi.slot) - else: - ok BlockSlotId.init(bsi.bid, bsi.slot - 1) - -func atSlot*(dag: ChainDAGRef, bid: BlockId, slot: Slot): Opt[BlockSlotId] = - if bid.slot > dag.finalizedHead.slot: - let blck = ? dag.getBlockRef(bid.root) - - if slot > dag.finalizedHead.slot: - return blck.atSlot(slot).toBlockSlotId() - else: - # Check if the given `bid` is still part of history - it might hail from an - # orphaned fork - let existing = ? dag.getBlockIdAtSlot(bid.slot) - if existing.bid != bid: - return err() # Not part of known / relevant history - - if existing.slot == slot: # and bid.slot == slot - return ok existing - - if bid.slot <= slot: - ok BlockSlotId.init(bid, slot) - else: - dag.getBlockIdAtSlot(slot) - -func nextTimestamp[I, T](cache: var LRUCache[I, T]): uint32 = - if cache.timestamp == uint32.high: - for i in 0 ..< I: - template e: untyped = cache.entries[i] - if e.lastUsed != 0: - e.lastUsed = 1 - cache.timestamp = 1 - inc cache.timestamp - cache.timestamp - -template peekIt[I, T](cache: var LRUCache[I, T], predicate: untyped): Opt[T] = - block: - var res: Opt[T] - for i in 0 ..< I: - template e: untyped = cache.entries[i] - template it: untyped {.inject, used.} = e.value - if e.lastUsed != 0 and predicate: - res.ok it - break - res - -template findIt[I, T](cache: var LRUCache[I, T], predicate: untyped): Opt[T] = - block: - var res: Opt[T] - for i in 0 ..< I: - template e: untyped = cache.entries[i] - template it: untyped {.inject, used.} = e.value - if e.lastUsed != 0 and predicate: - e.lastUsed = cache.nextTimestamp - res.ok it - break - res - -template delIt[I, T](cache: var LRUCache[I, T], predicate: untyped) = - block: - for i in 0 ..< I: - template e: untyped = cache.entries[i] - template it: untyped {.inject, used.} = e.value - if e.lastUsed != 0 and predicate: - e.reset() - -func put[I, T](cache: var LRUCache[I, T], value: T) = - var lru = 0 - block: - var min = uint32.high - for i in 0 ..< I: - template e: untyped = cache.entries[i] - if e.lastUsed < min: - min = e.lastUsed - lru = i - if min == 0: - break - - template e: untyped = cache.entries[lru] - e.value = value - e.lastUsed = cache.nextTimestamp - -func epochAncestor(dag: ChainDAGRef, bid: BlockId, epoch: Epoch): - Opt[BlockSlotId] = - ## The epoch ancestor is the last block that has an effect on the epoch- - ## related state data, as updated in `process_epoch` - this block determines - ## effective balances, validator addtions and removals etc and serves as a - ## base for `EpochRef` construction. 
- if epoch < dag.tail.slot.epoch or bid.slot < dag.tail.slot: - # Not enough information in database to meaningfully process pre-tail epochs - return Opt.none BlockSlotId - - let - dependentSlot = - if epoch == dag.tail.slot.epoch: - # Use the tail as "dependent block" - this may be the genesis block, or, - # in the case of checkpoint sync, the checkpoint block - dag.tail.slot - else: - epoch.start_slot() - 1 - bsi = ? dag.atSlot(bid, dependentSlot) - epochSlot = - if epoch == dag.tail.slot.epoch: - dag.tail.slot - else: - epoch.start_slot() - ok BlockSlotId(bid: bsi.bid, slot: epochSlot) - -func epochKey(dag: ChainDAGRef, bid: BlockId, epoch: Epoch): Opt[EpochKey] = - ## The state transition works by storing information from blocks in a - ## "working" area until the epoch transition, then batching work collected - ## during the epoch. Thus, last block in the ancestor epochs is the block - ## that has an impact on epoch currently considered. - ## - ## This function returns an epoch key pointing to that epoch boundary, i.e. the - ## boundary where the last block has been applied to the state and epoch - ## processing has been done. - let bsi = dag.epochAncestor(bid, epoch).valueOr: - return Opt.none(EpochKey) - - Opt.some(EpochKey(bid: bsi.bid, epoch: epoch)) - -func putShufflingRef*(dag: ChainDAGRef, shufflingRef: ShufflingRef) = - ## Store shuffling in the cache - if shufflingRef.epoch < dag.finalizedHead.slot.epoch(): - # Only cache epoch information for unfinalized blocks - earlier states - # are seldomly used (ie RPC), so no need to cache - return - - dag.shufflingRefs.put shufflingRef - -func findShufflingRef*( - dag: ChainDAGRef, bid: BlockId, epoch: Epoch): Opt[ShufflingRef] = - ## Lookup a shuffling in the cache, returning `none` if it's not present - see - ## `getShufflingRef` for a version that creates a new instance if it's missing - let - dependent_slot = epoch.attester_dependent_slot() - dependent_bsi = ? dag.atSlot(bid, dependent_slot) - - # Check `ShufflingRef` cache - let shufflingRef = dag.shufflingRefs.findIt( - it.epoch == epoch and it.attester_dependent_root == dependent_bsi.bid.root) - if shufflingRef.isOk: - return shufflingRef - - # Check `EpochRef` cache - let epochRef = dag.epochRefs.peekIt( - it.shufflingRef.epoch == epoch and - it.shufflingRef.attester_dependent_root == dependent_bsi.bid.root) - if epochRef.isOk: - dag.putShufflingRef(epochRef.get.shufflingRef) - return ok epochRef.get.shufflingRef - - err() - -func findEpochRef*( - dag: ChainDAGRef, bid: BlockId, epoch: Epoch): Opt[EpochRef] = - ## Lookup an EpochRef in the cache, returning `none` if it's not present - see - ## `getEpochRef` for a version that creates a new instance if it's missing - let key = ? 
dag.epochKey(bid, epoch) - - dag.epochRefs.findIt(it.key == key) - -func putEpochRef(dag: ChainDAGRef, epochRef: EpochRef) = - if epochRef.epoch < dag.finalizedHead.slot.epoch(): - # Only cache epoch information for unfinalized blocks - earlier states - # are seldomly used (ie RPC), so no need to cache - return - - dag.epochRefs.put epochRef - -func init*( - T: type ShufflingRef, state: ForkedHashedBeaconState, - cache: var StateCache, epoch: Epoch): T = - let attester_dependent_root = - withState(state): forkyState.dependent_root(epoch.get_previous_epoch) - - ShufflingRef( - epoch: epoch, - attester_dependent_root: attester_dependent_root, - shuffled_active_validator_indices: - cache.get_shuffled_active_validator_indices(state, epoch), - ) - -func init*( - T: type EpochRef, dag: ChainDAGRef, state: ForkedHashedBeaconState, - cache: var StateCache): T = - let - epoch = state.get_current_epoch() - proposer_dependent_root = withState(state): - forkyState.proposer_dependent_root - shufflingRef = dag.findShufflingRef(state.latest_block_id, epoch).valueOr: - let tmp = ShufflingRef.init(state, cache, epoch) - dag.putShufflingRef(tmp) - tmp - - total_active_balance = withState(state): - get_total_active_balance(forkyState.data, cache) - epochRef = EpochRef( - key: dag.epochKey(state.latest_block_id, epoch).expect( - "Valid epoch ancestor when processing state"), - - eth1_data: - getStateField(state, eth1_data), - eth1_deposit_index: - getStateField(state, eth1_deposit_index), - - checkpoints: - FinalityCheckpoints( - justified: getStateField(state, current_justified_checkpoint), - finalized: getStateField(state, finalized_checkpoint)), - - # beacon_proposers: Separately filled below - proposer_dependent_root: proposer_dependent_root, - - shufflingRef: shufflingRef, - total_active_balance: total_active_balance - ) - epochStart = epoch.start_slot() - - for i in 0'u64.. 0: - load(epoch - 1) - - if dag.head != nil: # nil during init.. sigh - let period = dag.head.slot.sync_committee_period - if period == epoch.sync_committee_period and - period notin cache.sync_committees and - period > dag.cfg.ALTAIR_FORK_EPOCH.sync_committee_period(): - # If the block we're aiming for shares ancestry with head, we can reuse - # the cached head committee - this accounts for most "live" cases like - # syncing and checking blocks since the committees rarely change - let periodBsi = dag.atSlot(bid, period.start_slot) - if periodBsi.isSome and periodBsi == - dag.atSlot(dag.head.bid, period.start_slot): - # We often end up sharing sync committees with head during sync / gossip - # validation / head updates - cache.sync_committees[period] = dag.headSyncCommittees - -func containsForkBlock*(dag: ChainDAGRef, root: Eth2Digest): bool = - ## Checks for blocks at the finalized checkpoint or newer - KeyedBlockRef.asLookupKey(root) in dag.forkBlocks - -func isFinalizedStateSnapshot(slot: Slot): bool = - slot.is_epoch and slot.epoch mod EPOCHS_PER_STATE_SNAPSHOT == 0 - -func isStateCheckpoint(dag: ChainDAGRef, bsi: BlockSlotId): bool = - ## State checkpoints are the points in time for which we store full state - ## snapshots, which later serve as rewind starting points when replaying state - ## transitions from database, for example during reorgs. - ## - # As a policy, we only store epoch boundary states without the epoch block - # (if it exists) applied - the rest can be reconstructed by loading an epoch - # boundary state and applying the missing blocks. 
- # We also avoid states that were produced with empty slots only - as such, - # there is only a checkpoint for the first epoch after a block. - - # The tail block also counts as a state checkpoint! - (bsi.isProposed and bsi.bid == dag.tail) or - (bsi.slot.is_epoch and bsi.slot.epoch == (bsi.bid.slot.epoch + 1)) - -proc getState( - db: BeaconChainDB, cfg: RuntimeConfig, block_root: Eth2Digest, slot: Slot, - state: var ForkedHashedBeaconState, rollback: RollbackProc): bool = - let state_root = db.getStateRoot(block_root, slot).valueOr: - return false - - db.getState(cfg.consensusForkAtEpoch(slot.epoch), state_root, state, rollback) - -proc containsState*( - db: BeaconChainDB, cfg: RuntimeConfig, block_root: Eth2Digest, - slots: Slice[Slot], legacy = true): bool = - var slot = slots.b - while slot >= slots.a: - let state_root = db.getStateRoot(block_root, slot) - if state_root.isSome() and - db.containsState( - cfg.consensusForkAtEpoch(slot.epoch), state_root.get(), legacy): - return true - - if slot == slots.a: # avoid underflow at genesis - break - slot -= 1 - false - -proc getState*( - db: BeaconChainDB, cfg: RuntimeConfig, block_root: Eth2Digest, - slots: Slice[Slot], state: var ForkedHashedBeaconState, - rollback: RollbackProc): bool = - var slot = slots.b - while slot >= slots.a: - let state_root = db.getStateRoot(block_root, slot) - if state_root.isSome() and - db.getState( - cfg.consensusForkAtEpoch(slot.epoch), state_root.get(), state, - rollback): - return true - - if slot == slots.a: # avoid underflow at genesis - break - slot -= 1 - false - -proc getState( - dag: ChainDAGRef, bsi: BlockSlotId, state: var ForkedHashedBeaconState): bool = - ## Load a state from the database given a block and a slot - this will first - ## lookup the state root in the state root table then load the corresponding - ## state, if it exists - if not dag.isStateCheckpoint(bsi): - return false - - let rollbackAddr = - # Any restore point will do as long as it's not the object being updated - if unsafeAddr(state) == unsafeAddr(dag.headState): - unsafeAddr dag.clearanceState - else: - unsafeAddr dag.headState - - let v = addr state - func rollback() = - assign(v[], rollbackAddr[]) - - dag.db.getState(dag.cfg, bsi.bid.root, bsi.slot, state, rollback) - -proc getStateByParent( - dag: ChainDAGRef, bid: BlockId, state: var ForkedHashedBeaconState): bool = - ## Try to load the state referenced by the parent of the given `bid` - this - ## state can be used to advance to the `bid` state itself. - let slot = bid.slot - - let - summary = dag.db.getBeaconBlockSummary(bid.root).valueOr: - return false - parentMinSlot = - dag.db.getBeaconBlockSummary(summary.parent_root). 
- map(proc(x: auto): auto = x.slot).valueOr: - # in the cases that we don't have slot information, we'll search for the - # state for a few back from the `bid` slot - if there are gaps of empty - # slots larger than this, we will not be able to load the state using this - # trick - if slot.uint64 >= (EPOCHS_PER_STATE_SNAPSHOT * 2) * SLOTS_PER_EPOCH: - slot - (EPOCHS_PER_STATE_SNAPSHOT * 2) * SLOTS_PER_EPOCH - else: - Slot(0) - - let rollbackAddr = - # Any restore point will do as long as it's not the object being updated - if unsafeAddr(state) == unsafeAddr(dag.headState): - unsafeAddr dag.clearanceState - else: - unsafeAddr dag.headState - - let v = addr state - func rollback() = - assign(v[], rollbackAddr[]) - - dag.db.getState( - dag.cfg, summary.parent_root, parentMinSlot..slot, state, rollback) - -proc getNearbyState( - dag: ChainDAGRef, state: var ForkedHashedBeaconState, bid: BlockId, - lowSlot: Slot): Opt[void] = - ## Load state from DB that is close to `bid` and has at least slot `lowSlot`. - var - e = bid.slot.epoch - b = bid - while true: - let stateSlot = e.start_slot - if stateSlot < lowSlot: - return err() - b = (? dag.atSlot(b, max(stateSlot, 1.Slot) - 1)).bid - let bsi = BlockSlotId.init(b, stateSlot) - if not dag.getState(bsi, state): - if e == GENESIS_EPOCH: - return err() - dec e - continue - return ok() - -proc currentSyncCommitteeForPeriod*( - dag: ChainDAGRef, - tmpState: var ForkedHashedBeaconState, - period: SyncCommitteePeriod): Opt[SyncCommittee] = - ## Fetch a `SyncCommittee` for a given sync committee period. - ## For non-finalized periods, follow the chain as selected by fork choice. - let lowSlot = max(dag.tail.slot, dag.cfg.ALTAIR_FORK_EPOCH.start_slot) - if period < lowSlot.sync_committee_period: - return err() - let - periodStartSlot = period.start_slot - syncCommitteeSlot = max(periodStartSlot, lowSlot) - bsi = ? 
dag.getBlockIdAtSlot(syncCommitteeSlot) - dag.withUpdatedState(tmpState, bsi) do: - withState(updatedState): - when consensusFork >= ConsensusFork.Altair: - ok forkyState.data.current_sync_committee - else: err() - do: err() - -proc getBlockIdAtSlot*( - dag: ChainDAGRef, state: ForkyHashedBeaconState, slot: Slot): Opt[BlockId] = - if slot >= state.data.slot: - Opt.some state.latest_block_id - elif state.data.slot <= slot + SLOTS_PER_HISTORICAL_ROOT: - dag.getBlockId(state.data.get_block_root_at_slot(slot)) - else: - Opt.none(BlockId) - -# adapted from nimbus-eth2 - -# proc updateBeaconMetrics( -# state: ForkedHashedBeaconState, bid: BlockId, cache: var StateCache) = - # https://github.com/ethereum/beacon-metrics/blob/master/metrics.md#additional-metrics - # both non-negative, so difference can't overflow or underflow int64 - - # beacon_head_root.set(bid.root.toGaugeValue) - # beacon_head_slot.set(bid.slot.toGaugeValue) - - # withState(state): - # beacon_pending_deposits.set( - # (forkyState.data.eth1_data.deposit_count - - # forkyState.data.eth1_deposit_index).toGaugeValue) - # beacon_processed_deposits_total.set( - # forkyState.data.eth1_deposit_index.toGaugeValue) - - # beacon_current_justified_epoch.set( - # forkyState.data.current_justified_checkpoint.epoch.toGaugeValue) - # beacon_current_justified_root.set( - # forkyState.data.current_justified_checkpoint.root.toGaugeValue) - # beacon_previous_justified_epoch.set( - # forkyState.data.previous_justified_checkpoint.epoch.toGaugeValue) - # beacon_previous_justified_root.set( - # forkyState.data.previous_justified_checkpoint.root.toGaugeValue) - # beacon_finalized_epoch.set( - # forkyState.data.finalized_checkpoint.epoch.toGaugeValue) - # beacon_finalized_root.set( - # forkyState.data.finalized_checkpoint.root.toGaugeValue) - - # let active_validators = count_active_validators( - # forkyState.data, forkyState.data.slot.epoch, cache).toGaugeValue - # beacon_active_validators.set(active_validators) - # beacon_current_active_validators.set(active_validators) - -# import blockchain_dag_light_client - -# export -# blockchain_dag_light_client.getLightClientBootstrap, -# blockchain_dag_light_client.getLightClientUpdateForPeriod, -# blockchain_dag_light_client.getLightClientFinalityUpdate, -# blockchain_dag_light_client.getLightClientOptimisticUpdate - -proc putState(dag: ChainDAGRef, state: ForkedHashedBeaconState, bid: BlockId) = - # Store a state and its root - let slot = getStateField(state, slot) - logScope: - blck = shortLog(bid) - stateSlot = shortLog(slot) - stateRoot = shortLog(getStateRoot(state)) - - if not dag.isStateCheckpoint(BlockSlotId.init(bid, slot)): - return - - # Don't consider legacy tables here, they are slow to read so we'll want to - # rewrite things in the new table anyway. 
- if dag.db.containsState( - dag.cfg.consensusForkAtEpoch(slot.epoch), getStateRoot(state), - legacy = false): - return - - let startTick = Moment.now() - # Ideally we would save the state and the root lookup cache in a single - # transaction to prevent database inconsistencies, but the state loading code - # is resilient against one or the other going missing - withState(state): - dag.db.putState(forkyState) - - debug "Stored state", putStateDur = Moment.now() - startTick - -proc advanceSlots*( - dag: ChainDAGRef, state: var ForkedHashedBeaconState, slot: Slot, save: bool, - cache: var StateCache, info: var ForkedEpochInfo) = - # Given a state, advance it zero or more slots by applying empty slot - # processing - the state must be positioned at or before `slot` - doAssert getStateField(state, slot) <= slot - - let stateBid = state.latest_block_id - while getStateField(state, slot) < slot: - let - preEpoch = getStateField(state, slot).epoch - - loadStateCache(dag, cache, stateBid, getStateField(state, slot).epoch) - - process_slots( - dag.cfg, state, getStateField(state, slot) + 1, cache, info, - dag.updateFlags).expect("process_slots shouldn't fail when state slot is correct") - if save: - dag.putState(state, stateBid) - - # The reward information in the state transition is computed for epoch - # transitions - when transitioning into epoch N, the activities in epoch - # N-2 are translated into balance updates, and this is what we capture - # in the monitor. This may be inaccurate during a deep reorg (>1 epoch) - # which is an acceptable tradeoff for monitoring. - withState(state): - let postEpoch = forkyState.data.slot.epoch - if preEpoch != postEpoch and postEpoch >= 2: - var proposers: array[SLOTS_PER_EPOCH, Opt[ValidatorIndex]] - let epochRef = dag.findEpochRef(stateBid, postEpoch - 2) - if epochRef.isSome(): - proposers = epochRef[][].beacon_proposers - - dag.validatorMonitor[].registerEpochInfo( - forkyState.data, proposers, info) - -proc applyBlock( - dag: ChainDAGRef, state: var ForkedHashedBeaconState, bid: BlockId, - cache: var StateCache, info: var ForkedEpochInfo): Result[void, cstring] = - loadStateCache(dag, cache, bid, getStateField(state, slot).epoch) - - discard case dag.cfg.consensusForkAtEpoch(bid.slot.epoch) - of ConsensusFork.Phase0: - let data = getBlock(dag, bid, phase0.TrustedSignedBeaconBlock).valueOr: - return err("Block load failed") - ? state_transition( - dag.cfg, state, data, cache, info, - dag.updateFlags + {slotProcessed}, noRollback) - of ConsensusFork.Altair: - let data = getBlock(dag, bid, altair.TrustedSignedBeaconBlock).valueOr: - return err("Block load failed") - ? state_transition( - dag.cfg, state, data, cache, info, - dag.updateFlags + {slotProcessed}, noRollback) - of ConsensusFork.Bellatrix: - let data = getBlock(dag, bid, bellatrix.TrustedSignedBeaconBlock).valueOr: - return err("Block load failed") - ? state_transition( - dag.cfg, state, data, cache, info, - dag.updateFlags + {slotProcessed}, noRollback) - of ConsensusFork.Capella: - let data = getBlock(dag, bid, capella.TrustedSignedBeaconBlock).valueOr: - return err("Block load failed") - ? state_transition( - dag.cfg, state, data, cache, info, - dag.updateFlags + {slotProcessed}, noRollback) - of ConsensusFork.Deneb: - let data = getBlock(dag, bid, deneb.TrustedSignedBeaconBlock).valueOr: - return err("Block load failed") - ? 
state_transition( - dag.cfg, state, data, cache, info, - dag.updateFlags + {slotProcessed}, noRollback) - of ConsensusFork.Electra: - let data = getBlock(dag, bid, electra.TrustedSignedBeaconBlock).valueOr: - return err("Block load failed") - ? state_transition( - dag.cfg, state, data, cache, info, - dag.updateFlags + {slotProcessed}, noRollback) - - ok() - -## NOTE: Adapted from nimbus-eth2/beacon_chain/consensus_object_pools/blockchain_dag.nim -## removed lightclient initialization -proc init*(T: type ChainDAGRef, cfg: RuntimeConfig, db: BeaconChainDB, - validatorMonitor: ref ValidatorMonitor, updateFlags: UpdateFlags, - eraPath = ".", - onBlockCb: OnBlockCallback = nil, onHeadCb: OnHeadCallback = nil, - onReorgCb: OnReorgCallback = nil, onFinCb: OnFinalizedCallback = nil, - vanityLogs = default(VanityLogs) - # lcDataConfig = default(LightClientDataConfig) - ): ChainDAGRef = - cfg.checkForkConsistency() - - doAssert updateFlags - {strictVerification} == {}, - "Other flags not supported in ChainDAG" - - # TODO we require that the db contains both a head and a tail block - - # asserting here doesn't seem like the right way to go about it however.. - - # Tail is the first block for which we can construct a state - either - # genesis or a checkpoint - let - startTick = Moment.now() - genesisRoot = db.getGenesisBlock() - tailRoot = db.getTailBlock().expect( - "preInit should have initialized the database with a tail block root") - tail = db.getBlockId(tailRoot).expect( - "tail block summary in database, database corrupt?") - headRoot = db.getHeadBlock().expect("head root, database corrupt?") - head = db.getBlockId(headRoot).expect("head block id, database corrupt?") - - # Have to be careful with this instance, it is not yet fully initialized so - # as to avoid having to allocate a separate "init" state - dag = ChainDAGRef( - db: db, - validatorMonitor: validatorMonitor, - genesis: genesisRoot.map( - proc(x: auto): auto = BlockId(root: x, slot: GENESIS_SLOT)), - tail: tail, - - # The only allowed flag right now is strictVerification, as the others all - # allow skipping some validation. - updateFlags: updateFlags * {strictVerification}, - cfg: cfg, - - vanityLogs: vanityLogs, - - # NOTE: commented from original file - # lcDataStore: initLightClientDataStore( - # lcDataConfig, cfg, db.getLightClientDataDB()), - - onBlockAdded: onBlockCb, - onHeadChanged: onHeadCb, - onReorgHappened: onReorgCb, - onFinHappened: onFinCb, - ) - loadTick = Moment.now() - - var - headRef, curRef: BlockRef - - # When starting from a checkpoint with an empty block, we'll store the state - # "ahead" of the head slot - this slot would be considered finalized - slot = max(head.slot, (tail.slot.epoch + 1).start_slot) - # To know the finalized checkpoint of the head, we need to recreate its - # state - the tail is implicitly finalized, and if we have a finalized block - # table, that provides another hint - finalizedSlot = db.finalizedBlocks.high.get(tail.slot) - cache: StateCache - foundHeadState = false - headBlocks: seq[BlockRef] - - # Load head -> finalized, or all summaries in case the finalized block table - # hasn't been written yet - for blck in db.getAncestorSummaries(head.root): - # The execution block root gets filled in as needed. Nonfinalized Bellatrix - # and later blocks are loaded as optimistic, which gets adjusted that first - # `VALID` fcU from an EL plus markBlockVerified. Pre-merge blocks still get - # marked as `VALID`. 
- let newRef = BlockRef.init( - blck.root, Opt.none Eth2Digest, executionValid = false, - blck.summary.slot) - if headRef == nil: - headRef = newRef - - if curRef != nil: - link(newRef, curRef) - - curRef = newRef - - dag.forkBlocks.incl(KeyedBlockRef.init(curRef)) - - if not foundHeadState: - foundHeadState = db.getState( - cfg, blck.root, blck.summary.slot..slot, dag.headState, noRollback) - slot = blck.summary.slot - - if not foundHeadState: - # When the database has been written with a pre-fork version of the - # software, it may happen that blocks produced using an "unforked" - # chain get written to the database - we need to skip such blocks - # when loading the database with a fork-compatible version - if containsBlock(cfg, db, curRef.slot, curRef.root): - headBlocks.add curRef - else: - if headBlocks.len > 0: - fatal "Missing block needed to create head state, database corrupt?", - curRef = shortLog(curRef) - quit 1 - # Without the block data we can't form a state for this root, so - # we'll need to move the head back - headRef = nil - dag.forkBlocks.excl(KeyedBlockRef.init(curRef)) - - if curRef.slot <= finalizedSlot: - # Only non-finalized slots get a `BlockRef` - break - - let summariesTick = Moment.now() - - if not foundHeadState: - if not dag.getStateByParent(curRef.bid, dag.headState): - fatal "Could not load head state, database corrupt?", - head = shortLog(head), tail = shortLog(dag.tail) - quit 1 - - block: - # EpochRef needs an epoch boundary state - assign(dag.epochRefState, dag.headState) - - var info: ForkedEpochInfo - - while headBlocks.len > 0: - dag.applyBlock( - dag.headState, headBlocks.pop().bid, cache, - info).expect("head blocks should apply") - - dag.head = headRef - dag.heads = @[headRef] - - withState(dag.headState): - when consensusFork >= ConsensusFork.Altair: - dag.headSyncCommittees = forkyState.data.get_sync_committee_cache(cache) - - assign(dag.clearanceState, dag.headState) - - if dag.headState.latest_block_root == tail.root: - # In case we started from a checkpoint with an empty slot - finalizedSlot = getStateField(dag.headState, slot) - - finalizedSlot = - max( - finalizedSlot, - getStateField(dag.headState, finalized_checkpoint).epoch.start_slot) - - let - configFork = case dag.headState.kind - of ConsensusFork.Phase0: genesisFork(cfg) - of ConsensusFork.Altair: altairFork(cfg) - of ConsensusFork.Bellatrix: bellatrixFork(cfg) - of ConsensusFork.Capella: capellaFork(cfg) - of ConsensusFork.Deneb: denebFork(cfg) - of ConsensusFork.Electra: electraFork(cfg) - stateFork = getStateField(dag.headState, fork) - - # Here, we check only the `current_version` field because the spec - # mandates that testnets starting directly from a particular fork - # should have `previous_version` set to `current_version` while - # this doesn't happen to be the case in network that go through - # regular hard-fork upgrades. 
See for example: - # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/specs/bellatrix/beacon-chain.md#testing - if stateFork.current_version != configFork.current_version: - error "State from database does not match network, check --network parameter", - tail = dag.tail, headRef, stateFork, configFork - quit 1 - - # Need to load state to find genesis validators root, before loading era db - dag.era = EraDB.new( - cfg, eraPath, getStateField(dag.headState, genesis_validators_root)) - - # We used an interim finalizedHead while loading the head state above - now - # that we have loaded the dag up to the finalized slot, we can also set - # finalizedHead to its real value - dag.finalizedHead = headRef.atSlot(finalizedSlot) - dag.lastPrunePoint = dag.finalizedHead.toBlockSlotId().expect("not nil") - - doAssert dag.finalizedHead.blck != nil, - "The finalized head should exist at the slot" - - block: # Top up finalized blocks - if db.finalizedBlocks.high.isNone or - db.finalizedBlocks.high.get() < dag.finalizedHead.blck.slot: - # Versions prior to 1.7.0 did not store finalized blocks in the - # database, and / or the application might have crashed between the head - # and finalized blocks updates. - info "Loading finalized blocks", - finHigh = db.finalizedBlocks.high, - finalizedHead = shortLog(dag.finalizedHead) - - var - newFinalized: seq[BlockId] - tmp = dag.finalizedHead.blck - while tmp.parent != nil: - newFinalized.add(tmp.bid) - let p = tmp.parent - tmp.parent = nil - tmp = p - - for blck in db.getAncestorSummaries(tmp.root): - if db.finalizedBlocks.high.isSome and - blck.summary.slot <= db.finalizedBlocks.high.get: - break - - newFinalized.add(BlockId(slot: blck.summary.slot, root: blck.root)) - - db.updateFinalizedBlocks(newFinalized) - - doAssert dag.finalizedHead.blck.parent == nil, - "The finalized head is the last BlockRef with a parent" - - block: - let finalized = db.finalizedBlocks.get(db.finalizedBlocks.high.get()).expect( - "tail at least") - if finalized != dag.finalizedHead.blck.root: - error "Head does not lead to finalized block, database corrupt?", - head = shortLog(head), finalizedHead = shortLog(dag.finalizedHead), - tail = shortLog(dag.tail), finalized = shortLog(finalized) - quit 1 - - dag.backfill = block: - let backfillSlot = db.finalizedBlocks.low.expect("tail at least") - if backfillSlot <= dag.horizon: - # Backfill done, no need to load anything - BeaconBlockSummary() - elif backfillSlot < dag.tail.slot: - let backfillRoot = db.finalizedBlocks.get(backfillSlot).expect( - "low to be loadable") - - db.getBeaconBlockSummary(backfillRoot).expect( - "Backfill block must have a summary: " & $backfillRoot) - elif dag.containsBlock(dag.tail): - db.getBeaconBlockSummary(dag.tail.root).expect( - "Tail block must have a summary: " & $dag.tail.root) - else: - # Checkpoint sync, checkpoint block unavailable - BeaconBlockSummary( - slot: dag.tail.slot + 1, - parent_root: dag.tail.root) - - dag.forkDigests = newClone ForkDigests.init( - cfg, getStateField(dag.headState, genesis_validators_root)) - - withState(dag.headState): - dag.validatorMonitor[].registerState(forkyState.data) - - # updateBeaconMetrics(dag.headState, dag.head.bid, cache) - - let finalizedTick = Moment.now() - - if dag.backfill.slot > GENESIS_SLOT: # Try frontfill from era files - let backfillSlot = dag.backfill.slot - 1 - dag.frontfillBlocks = newSeqOfCap[Eth2Digest](backfillSlot.int) - - let - historical_roots = getStateField(dag.headState, historical_roots).asSeq() - historical_summaries = 
dag.headState.historical_summaries.asSeq() - - var - blocks = 0 - - # Here, we'll build up the slot->root mapping in memory for the range of - # blocks from genesis to backfill, if possible. - for bid in dag.era.getBlockIds( - historical_roots, historical_summaries, Slot(0), Eth2Digest()): - # If backfill has not yet started, the backfill slot itself also needs - # to be served from era files. Checkpoint sync starts from state only - if bid.slot > backfillSlot or - (bid.slot == backfillSlot and bid.root != dag.tail.root): - # If we end up in here, we failed the root comparison just below in - # an earlier iteration - fatal "Era summaries don't lead up to backfill, database or era files corrupt?", - bid, backfillSlot - quit 1 - - # In BeaconState.block_roots, empty slots are filled with the root of - # the previous block - in our data structure, we use a zero hash instead - dag.frontfillBlocks.setLen(bid.slot.int + 1) - dag.frontfillBlocks[bid.slot.int] = bid.root - - if bid.root == dag.backfill.parent_root: - # We've reached the backfill point, meaning blocks are available - # in the sqlite database from here onwards - remember this point in - # time so that we can write summaries to the database - it's a lot - # faster to load from database than to iterate over era files with - # the current naive era file reader. - reset(dag.backfill) - - dag.updateFrontfillBlocks() - - break - - blocks += 1 - - if blocks > 0: - info "Front-filled blocks from era files", blocks, backfillSlot - - let frontfillTick = Moment.now() - - # Fill validator key cache in case we're loading an old database that doesn't - # have a cache - dag.updateValidatorKeys(getStateField(dag.headState, validators).asSeq()) - - # Initialize pruning such that when starting with a database that hasn't been - # pruned, we work our way from the tail to the horizon in incremental steps - dag.lastHistoryPruneHorizon = dag.horizon() - dag.lastHistoryPruneBlockHorizon = block: - let boundary = min(dag.tail.slot, dag.horizon()) - if boundary.epoch() >= EPOCHS_PER_STATE_SNAPSHOT: - start_slot(boundary.epoch() - EPOCHS_PER_STATE_SNAPSHOT) - else: - Slot(0) - - info "Block DAG initialized", - head = shortLog(dag.head), - finalizedHead = shortLog(dag.finalizedHead), - tail = shortLog(dag.tail), - backfill = shortLog(dag.backfill), - - loadDur = loadTick - startTick, - summariesDur = summariesTick - loadTick, - finalizedDur = finalizedTick - summariesTick, - frontfillDur = frontfillTick - finalizedTick, - keysDur = Moment.now() - frontfillTick - - dag.initLightClientDataCache() - - dag - -template genesis_validators_root*(dag: ChainDAGRef): Eth2Digest = - getStateField(dag.headState, genesis_validators_root) - -proc genesisBlockRoot*(dag: ChainDAGRef): Eth2Digest = - dag.db.getGenesisBlock().expect("DB must be initialized with genesis block") - -func getEpochRef*( - dag: ChainDAGRef, state: ForkedHashedBeaconState, cache: var StateCache): EpochRef = - ## Get a cached `EpochRef` or construct one based on the given state - always - ## returns an EpochRef instance - let - bid = state.latest_block_id - epoch = state.get_current_epoch() - - dag.findEpochRef(bid, epoch).valueOr: - let res = EpochRef.init(dag, state, cache) - dag.putEpochRef(res) - res - -proc getEpochRef*( - dag: ChainDAGRef, bid: BlockId, epoch: Epoch, - preFinalized: bool): Result[EpochRef, cstring] = - ## Return a cached EpochRef or construct one from the database, if possible - - ## returns `none` on failure. 
- ## - ## When `preFinalized` is true, include epochs from before the finalized - ## checkpoint in the search - this potentially can result in long processing - ## times due to state replays. - ## - ## Requests for epochs >= dag.finalizedHead.slot.epoch always return an - ## instance. One must be careful to avoid race conditions in `async` code - ## where the finalized head might change during an `await`. - ## - ## Requests for epochs < dag.finalizedHead.slot.epoch may fail, either because - ## the search was limited by the `preFinalized` flag or because state history - ## has been pruned - `none` will be returned in this case. - if not preFinalized and epoch < dag.finalizedHead.slot.epoch: - return err("Requesting pre-finalized EpochRef") - - if bid.slot < dag.tail.slot or epoch < dag.tail.slot.epoch: - return err("Requesting EpochRef for pruned state") - - let epochRef = dag.findEpochRef(bid, epoch) - if epochRef.isOk(): - # adapted from nimbus-eth2 - # beacon_state_data_cache_hits.inc - return ok epochRef.get() - - # beacon_state_data_cache_misses.inc - - let - ancestor = dag.epochAncestor(bid, epoch).valueOr: - # If we got in here, the bid must be unknown or we would have gotten - # _some_ ancestor (like the tail) - return err("Requesting EpochRef for non-canonical block") - - var cache: StateCache - if not updateState(dag, dag.epochRefState, ancestor, false, cache): - return err("Could not load requested state") - - ok(dag.getEpochRef(dag.epochRefState, cache)) - -proc getEpochRef*( - dag: ChainDAGRef, blck: BlockRef, epoch: Epoch, - preFinalized: bool): Result[EpochRef, cstring] = - dag.getEpochRef(blck.bid, epoch, preFinalized) - -proc getFinalizedEpochRef*(dag: ChainDAGRef): EpochRef = - dag.getEpochRef( - dag.finalizedHead.blck, dag.finalizedHead.slot.epoch, false).expect( - "getEpochRef for finalized head should always succeed") - -proc ancestorSlot*( - dag: ChainDAGRef, state: ForkyHashedBeaconState, bid: BlockId, - lowSlot: Slot): Opt[Slot] = - ## Return common ancestor slot of `bid` and `state`, if at least `lowSlot`. - ## Return `none` if no common ancestor is found with slot >= `lowSlot`. - if state.data.slot < lowSlot or bid.slot < lowSlot: - return Opt.none(Slot) - - var stateBid = ? dag.getBlockIdAtSlot(state, bid.slot) - if stateBid.slot < lowSlot: - return Opt.none(Slot) - - var blockBid = (? dag.atSlot(bid, stateBid.slot)).bid - if blockBid.slot < lowSlot: - return Opt.none(Slot) - - while stateBid != blockBid: - if stateBid.slot >= blockBid.slot: - stateBid = ? dag.getBlockIdAtSlot( - state, min(blockBid.slot, stateBid.slot - 1)) - if stateBid.slot < lowSlot: - return Opt.none(Slot) - else: - blockBid = ? dag.parent(blockBid) - if blockBid.slot < lowSlot: - return Opt.none(Slot) - - Opt.some stateBid.slot - -proc computeRandaoMix( - bdata: ForkedTrustedSignedBeaconBlock): Opt[Eth2Digest] = - ## Compute the requested RANDAO mix for `bdata` without `state`, if possible. - withBlck(bdata): - when consensusFork >= ConsensusFork.Bellatrix: - if forkyBlck.message.is_execution_block: - var mix = eth2digest(forkyBlck.message.body.randao_reveal.toRaw()) - mix.data.mxor forkyBlck.message.body.execution_payload.prev_randao.data - return ok mix - Opt.none(Eth2Digest) - -proc computeRandaoMix*( - dag: ChainDAGRef, state: ForkyHashedBeaconState, bid: BlockId, - lowSlot: Slot): Opt[Eth2Digest] = - ## Compute the requested RANDAO mix for `bid` based on `state`. - ## Return `none` if `state` and `bid` do not share a common ancestor - ## with slot >= `lowSlot`. - let ancestorSlot = ? 
dag.ancestorSlot(state, bid, lowSlot) - doAssert ancestorSlot <= state.data.slot - doAssert ancestorSlot <= bid.slot - - # If `blck` is post merge, RANDAO information is immediately available - let - bdata = ? dag.getForkedBlock(bid) - fullMix = computeRandaoMix(bdata) - if fullMix.isSome: - return fullMix - - # RANDAO mix has to be recomputed from `bid` and `state` - var mix {.noinit.}: Eth2Digest - proc mixToAncestor(highBid: BlockId): Opt[void] = - ## Mix in/out RANDAO reveals back to `ancestorSlot` - var bid = highBid - while bid.slot > ancestorSlot: - let bdata = ? dag.getForkedBlock(bid) - withBlck(bdata): # See `process_randao` / `process_randao_mixes_reset` - mix.data.mxor eth2digest( - forkyBlck.message.body.randao_reveal.toRaw()).data - bid = ? dag.parent(bid) - ok() - - # Mix in RANDAO from `bid` - if ancestorSlot < bid.slot: - withBlck(bdata): - mix = eth2digest(forkyBlck.message.body.randao_reveal.toRaw()) - ? mixToAncestor(? dag.parent(bid)) - else: - mix.reset() - - # Mix in RANDAO from `state` - let ancestorEpoch = ancestorSlot.epoch - if ancestorEpoch + EPOCHS_PER_HISTORICAL_VECTOR <= state.data.slot.epoch: - return Opt.none(Eth2Digest) - let mixRoot = state.dependent_root(ancestorEpoch + 1) - if mixRoot.isZero: - return Opt.none(Eth2Digest) - ? mixToAncestor(? dag.getBlockId(mixRoot)) - mix.data.mxor state.data.get_randao_mix(ancestorEpoch).data - - ok mix - -proc computeRandaoMixFromMemory*( - dag: ChainDAGRef, bid: BlockId, lowSlot: Slot): Opt[Eth2Digest] = - ## Compute requested RANDAO mix for `bid` from available states (~5 ms). - template tryWithState(state: ForkedHashedBeaconState) = - block: - withState(state): - let mix = dag.computeRandaoMix(forkyState, bid, lowSlot) - if mix.isSome: - return mix - tryWithState dag.headState - tryWithState dag.epochRefState - tryWithState dag.clearanceState - -proc computeRandaoMixFromDatabase*( - dag: ChainDAGRef, bid: BlockId, lowSlot: Slot): Opt[Eth2Digest] = - ## Compute requested RANDAO mix for `bid` using closest DB state (~500 ms). - let state = assignClone(dag.headState) - ? dag.getNearbyState(state[], bid, lowSlot) - withState(state[]): - dag.computeRandaoMix(forkyState, bid, lowSlot) - -proc computeRandaoMix( - dag: ChainDAGRef, bid: BlockId, lowSlot: Slot): Opt[Eth2Digest] = - # Try to compute from states available in memory - let mix = dag.computeRandaoMixFromMemory(bid, lowSlot) - if mix.isSome: - return mix - - # If `blck` is post merge, RANDAO information is immediately available - let - bdata = ? dag.getForkedBlock(bid) - fullMix = computeRandaoMix(bdata) - if fullMix.isSome: - return fullMix - - # Fall back to database - dag.computeRandaoMixFromDatabase(bid, lowSlot) - -proc computeRandaoMix*(dag: ChainDAGRef, bid: BlockId): Opt[Eth2Digest] = - ## Compute requested RANDAO mix for `bid`. - const maxSlotDistance = SLOTS_PER_HISTORICAL_ROOT - let lowSlot = max(bid.slot, maxSlotDistance.Slot) - maxSlotDistance - dag.computeRandaoMix(bid, lowSlot) - -proc lowSlotForAttesterShuffling*(epoch: Epoch): Slot = - ## Return minimum slot that a state must share ancestry with a block history - ## so that RANDAO at `epoch.attester_dependent_slot` can be computed. - - # A state must be somewhat recent so that `get_active_validator_indices` - # for the queried `epoch` cannot be affected by any such skipped processing. 
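- # Worked example (illustrative only, assuming the mainnet preset where
- # MAX_SEED_LOOKAHEAD = 4): compute_activation_exit_epoch(GENESIS_EPOCH) =
- # GENESIS_EPOCH + 1 + MAX_SEED_LOOKAHEAD = 5, so numDelayEpochs = 5 below and
- # the result is start_slot(max(epoch, 4) - 4), i.e. at most 4 epochs back.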
- const numDelayEpochs = compute_activation_exit_epoch(GENESIS_EPOCH).uint64 - let lowEpoch = max(epoch, (numDelayEpochs - 1).Epoch) - (numDelayEpochs - 1) - lowEpoch.start_slot - -proc computeShufflingRef*( - dag: ChainDAGRef, state: ForkyHashedBeaconState, - blck: BlockRef, epoch: Epoch): Opt[ShufflingRef] = - ## Compute `ShufflingRef` for `blck@epoch` based on `state`. - ## If `state` has unviable `get_active_validator_indices`, return `none`. - - let - dependentBid = (? dag.atSlot(blck.bid, epoch.attester_dependent_slot)).bid - lowSlot = epoch.lowSlotForAttesterShuffling - mix = ? dag.computeRandaoMix(state, dependentBid, lowSlot) - - return ok ShufflingRef( - epoch: epoch, - attester_dependent_root: dependentBid.root, - shuffled_active_validator_indices: - state.data.get_shuffled_active_validator_indices(epoch, mix)) - -proc computeShufflingRefFromMemory*( - dag: ChainDAGRef, blck: BlockRef, epoch: Epoch): Opt[ShufflingRef] = - ## Compute `ShufflingRef` from available states (~5 ms). - template tryWithState(state: ForkedHashedBeaconState) = - block: - withState(state): - let shufflingRef = dag.computeShufflingRef(forkyState, blck, epoch) - if shufflingRef.isOk: - return shufflingRef - tryWithState dag.headState - tryWithState dag.epochRefState - tryWithState dag.clearanceState - -proc getShufflingRef*( - dag: ChainDAGRef, blck: BlockRef, epoch: Epoch, - preFinalized: bool): Opt[ShufflingRef] = - ## Return the shuffling in the given history and epoch - this potentially is - ## faster than returning a full EpochRef because the shuffling is determined - ## an epoch in advance and therefore is less sensitive to reorgs - var shufflingRef = dag.findShufflingRef(blck.bid, epoch) - if shufflingRef.isSome: - return shufflingRef - - # Use existing states to quickly compute the shuffling - shufflingRef = dag.computeShufflingRefFromMemory(blck, epoch) - if shufflingRef.isSome: - dag.putShufflingRef(shufflingRef.get) - return shufflingRef - - # Last resort, this can take several seconds as this may replay states - let epochRef = dag.getEpochRef(blck, epoch, preFinalized).valueOr: - return Opt.none ShufflingRef - dag.putShufflingRef(epochRef.shufflingRef) - Opt.some epochRef.shufflingRef - -func stateCheckpoint*(dag: ChainDAGRef, bsi: BlockSlotId): BlockSlotId = - ## The first ancestor BlockSlot that is a state checkpoint - var bsi = bsi - while not dag.isStateCheckpoint(bsi): - if bsi.isProposed: - bsi.bid = dag.parent(bsi.bid).valueOr: - break - else: - bsi.slot = bsi.slot - 1 - bsi - -template forkAtEpoch*(dag: ChainDAGRef, epoch: Epoch): Fork = - forkAtEpoch(dag.cfg, epoch) - -proc getBlockRange*( - dag: ChainDAGRef, startSlot: Slot, skipStep: uint64, - output: var openArray[BlockId]): Natural = - ## This function populates an `output` buffer of blocks - ## with a slots ranging from `startSlot` up to, but not including, - ## `startSlot + skipStep * output.len`, skipping any slots that don't have - ## a block. - ## - ## Blocks will be written to `output` from the end without gaps, even if - ## a block is missing in a particular slot. The return value shows how - ## many slots were missing blocks - to iterate over the result, start - ## at this index. - ## - ## If there were no blocks in the range, `output.len` will be returned. 
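- # Illustrative call (buffer size and slot are assumptions, not part of this
- # change): fill a 32-entry window starting at slot 1000, one slot per entry,
- # then iterate only the populated tail of the buffer:
- #   var buf = newSeq[BlockId](32)
- #   let firstIdx = dag.getBlockRange(Slot(1000), 1, buf)
- #   for bid in buf[firstIdx .. ^1]: discard bid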
- let - requestedCount = output.lenu64 - headSlot = dag.head.slot - - trace "getBlockRange entered", - head = shortLog(dag.head.root), requestedCount, startSlot, skipStep, headSlot - - if startSlot < dag.backfill.slot: - debug "Got request for pre-backfill slot", - startSlot, backfillSlot = dag.backfill.slot, horizonSlot = dag.horizon - return output.len - - if headSlot <= startSlot or requestedCount == 0: - return output.len # Identical to returning an empty set of block as indicated above - - let - runway = uint64(headSlot - startSlot) - - # This is the number of blocks that will follow the start block - extraSlots = min(runway div skipStep, requestedCount - 1) - - # If `skipStep` is very large, `extraSlots` should be 0 from - # the previous line, so `endSlot` will be equal to `startSlot`: - endSlot = startSlot + extraSlots * skipStep - - var - curSlot = endSlot - o = output.len - - # Process all blocks that follow the start block (may be zero blocks) - while curSlot > startSlot: - let bs = dag.getBlockIdAtSlot(curSlot) - if bs.isSome and bs.get().isProposed(): - o -= 1 - output[o] = bs.get().bid - curSlot -= skipStep - - # Handle start slot separately (to avoid underflow when computing curSlot) - let bs = dag.getBlockIdAtSlot(startSlot) - if bs.isSome and bs.get().isProposed(): - o -= 1 - output[o] = bs.get().bid - - o # Return the index of the first non-nil item in the output - -proc updateState*( - dag: ChainDAGRef, state: var ForkedHashedBeaconState, bsi: BlockSlotId, - save: bool, cache: var StateCache): bool = - ## Rewind or advance state such that it matches the given block and slot - - ## this may include replaying from an earlier snapshot if blck is on a - ## different branch or has advanced to a higher slot number than slot - ## If `bs.slot` is higher than `bs.blck.slot`, `updateState` will fill in - ## with empty/non-block slots - - # First, see if we're already at the requested block. 
If we are, also check - # that the state has not been advanced past the desired block - if it has, - # an earlier state must be loaded since there's no way to undo the slot - # transitions - - let - startTick = Moment.now() - current {.used.} = withState(state): - BlockSlotId.init(forkyState.latest_block_id, forkyState.data.slot) - - var - ancestors: seq[BlockId] - found = false - - template exactMatch(state: ForkedHashedBeaconState, bsi: BlockSlotId): bool = - # The block is the same and we're at an early enough slot - the state can - # be used to arrive at the desired blockslot - state.matches_block_slot(bsi.bid.root, bsi.slot) - - template canAdvance(state: ForkedHashedBeaconState, bsi: BlockSlotId): bool = - # The block is the same and we're at an early enough slot - the state can - # be used to arrive at the desired blockslot - state.can_advance_slots(bsi.bid.root, bsi.slot) - - # Fast path: check all caches for an exact match - this is faster than - # advancing a state where there's epoch processing to do, by a wide margin - - # it also avoids `hash_tree_root` for slot processing - if exactMatch(state, bsi): - found = true - elif not save: - # When required to save states, we cannot rely on the caches because that - # would skip the extra processing that save does - not all information that - # goes into the database is cached - if exactMatch(dag.headState, bsi): - assign(state, dag.headState) - found = true - elif exactMatch(dag.clearanceState, bsi): - assign(state, dag.clearanceState) - found = true - elif exactMatch(dag.epochRefState, bsi): - assign(state, dag.epochRefState) - found = true - - const RewindBlockThreshold = 64 - - if not found: - # No exact match found - see if any in-memory state can be used as a base - # onto which we can apply a few blocks - there's a tradeoff here between - # loading the state from disk and performing the block applications - var cur = bsi - while ancestors.len < RewindBlockThreshold: - if isZero(cur.bid.root): # tail reached - break - - if canAdvance(state, cur): # Typical case / fast path when there's no reorg - found = true - break - - if not save: # see above - if canAdvance(dag.headState, cur): - assign(state, dag.headState) - found = true - break - - if canAdvance(dag.clearanceState, cur): - assign(state, dag.clearanceState) - found = true - break - - if canAdvance(dag.epochRefState, cur): - assign(state, dag.epochRefState) - found = true - break - - if cur.isProposed(): - # This is not an empty slot, so the block will need to be applied to - # eventually reach bs - ancestors.add(cur.bid) - - # Move slot by slot to capture epoch boundary states - cur = dag.parentOrSlot(cur).valueOr: - break - - if not found: - debug "UpdateStateData cache miss", - current = shortLog(current), target = shortLog(bsi) - - # Either the state is too new or was created by applying a different block. - # We'll now resort to loading the state from the database then reapplying - # blocks until we reach the desired point in time. - - var cur = bsi - ancestors.setLen(0) - - # Look for a state in the database and load it - as long as it cannot be - # found, keep track of the blocks that are needed to reach it from the - # state that eventually will be found. 
- # If we hit the tail, it means that we've reached a point for which we can - # no longer recreate history - this happens for example when starting from - # a checkpoint block - let startEpoch = bsi.slot.epoch - while not canAdvance(state, cur) and - not dag.db.getState(dag.cfg, cur.bid.root, cur.slot, state, noRollback): - # There's no state saved for this particular BlockSlot combination, and - # the state we have can't trivially be advanced (in case it was older than - # RewindBlockThreshold), keep looking.. - if cur.isProposed(): - # This is not an empty slot, so the block will need to be applied to - # eventually reach bs - ancestors.add(cur.bid) - - if cur.slot == GENESIS_SLOT or (cur.slot < dag.finalizedHead.slot and - cur.slot.epoch + uint64(EPOCHS_PER_STATE_SNAPSHOT) * 2 < startEpoch): - # We've either walked two full state snapshot lengths or hit the tail - # and still can't find a matching state: this can happen when - # starting the node from an arbitrary finalized checkpoint and not - # backfilling the states - notice "Request for pruned historical state", - request = shortLog(bsi), tail = shortLog(dag.tail), - cur = shortLog(cur), finalized = shortLog(dag.finalizedHead) - return false - - # Move slot by slot to capture epoch boundary states - cur = dag.parentOrSlot(cur).valueOr: - if not dag.getStateByParent(cur.bid, state): - notice "Request for pruned historical state", - request = shortLog(bsi), tail = shortLog(dag.tail), - cur = shortLog(cur) - return false - break - - # beacon_state_rewinds.inc() - - # Starting state has been assigned, either from memory or database - let - assignTick = Moment.now() - ancestor {.used.} = withState(state): - BlockSlotId.init(forkyState.latest_block_id, forkyState.data.slot) - ancestorRoot {.used.} = getStateRoot(state) - - var info: ForkedEpochInfo - # Time to replay all the blocks between then and now - for i in countdown(ancestors.len - 1, 0): - # Because the ancestors are in the database, there's no need to persist them - # again. Also, because we're applying blocks that were loaded from the - # database, we can skip certain checks that have already been performed - # before adding the block to the database. 
- if (let res = dag.applyBlock(state, ancestors[i], cache, info); res.isErr):
- warn "Failed to apply block from database",
- blck = shortLog(ancestors[i]),
- state_bid = shortLog(state.latest_block_id),
- error = res.error()
-
- return false
-
- # ...and make sure to process empty slots as requested
- dag.advanceSlots(state, bsi.slot, save, cache, info)
-
- # ...and make sure to load the state cache, if it exists
- loadStateCache(dag, cache, bsi.bid, getStateField(state, slot).epoch)
-
-
- let
-   assignDur = assignTick - startTick
-   replayDur = Moment.now() - assignTick
- # beacon_dag_state_replay_seconds.inc(replayDur.toFloatSeconds)
-
- # TODO https://github.com/status-im/nim-chronicles/issues/108
- if (assignDur + replayDur) >= MinSignificantProcessingDuration:
- # This might indicate there's a cache that's not in order or a disk that is
- # too slow - for now, it's here for investigative purposes and the cutoff
- # time might need tuning
- info "State replayed",
- blocks = ancestors.len,
- slots = getStateField(state, slot) - ancestor.slot,
- current = shortLog(current),
- ancestor = shortLog(ancestor),
- target = shortLog(bsi),
- ancestorStateRoot = shortLog(ancestorRoot),
- targetStateRoot = shortLog(getStateRoot(state)),
- found,
- assignDur,
- replayDur
- elif ancestors.len > 0:
- debug "State replayed",
- blocks = ancestors.len,
- slots = getStateField(state, slot) - ancestor.slot,
- current = shortLog(current),
- ancestor = shortLog(ancestor),
- target = shortLog(bsi),
- ancestorStateRoot = shortLog(ancestorRoot),
- targetStateRoot = shortLog(getStateRoot(state)),
- found,
- assignDur,
- replayDur
- else: # Normal case!
- trace "State advanced",
- blocks = ancestors.len,
- slots = getStateField(state, slot) - ancestor.slot,
- current = shortLog(current),
- ancestor = shortLog(ancestor),
- target = shortLog(bsi),
- ancestorStateRoot = shortLog(ancestorRoot),
- targetStateRoot = shortLog(getStateRoot(state)),
- found,
- assignDur,
- replayDur
-
- true
-
-proc delState(dag: ChainDAGRef, bsi: BlockSlotId) =
- # Delete state and mapping for a particular block+slot
- if not dag.isStateCheckpoint(bsi):
- return # We only ever save epoch states
-
- if (let root = dag.db.getStateRoot(bsi.bid.root, bsi.slot); root.isSome()):
- dag.db.withManyWrites:
- dag.db.delStateRoot(bsi.bid.root, bsi.slot)
- dag.db.delState(
- dag.cfg.consensusForkAtEpoch(bsi.slot.epoch), root.get())
-
-proc pruneBlockSlot(dag: ChainDAGRef, bs: BlockSlot) =
- # TODO: should we move that disk I/O to `onSlotEnd`
- dag.delState(bs.toBlockSlotId().expect("not nil"))
-
- if bs.isProposed():
- # Update light client data
- # dag.deleteLightClientData(bs.blck.bid)
-
- bs.blck.executionValid = true
- dag.forkBlocks.excl(KeyedBlockRef.init(bs.blck))
- discard dag.db.delBlock(
- dag.cfg.consensusForkAtEpoch(bs.blck.slot.epoch), bs.blck.root)
-
-proc pruneBlocksDAG(dag: ChainDAGRef) =
- ## This prunes the block DAG
- ## This does NOT prune the cached state checkpoints and EpochRef
- ## This must be done after a new finalization point is reached
- ## to invalidate pending blocks or attestations referring
- ## to a now invalid fork.
- ##
- ## This does NOT update the `dag.lastPrunePoint` field,
- ## as the caches and fork choice can be pruned at a later time.
- - # Clean up block refs, walking block by block - let startTick = Moment.now() - - # Finalization means that we choose a single chain as the canonical one - - # it also means we're no longer interested in any branches from that chain - # up to the finalization point - let hlen = dag.heads.len - for i in 0..= ConsensusFork.Altair: - let - period = sync_committee_period(slot) - curPeriod = sync_committee_period(forkyState.data.slot) - - if period == curPeriod: - @(dag.headSyncCommittees.current_sync_committee) - elif period == curPeriod + 1: - @(dag.headSyncCommittees.next_sync_committee) - else: @[] - else: - @[] - -func getSubcommitteePositionsAux( - dag: ChainDAGRef, - syncCommittee: openArray[ValidatorIndex], - subcommitteeIdx: SyncSubcommitteeIndex, - validatorIdx: uint64): seq[uint64] = - var pos = 0'u64 - for valIdx in syncCommittee.syncSubcommittee(subcommitteeIdx): - if validatorIdx == uint64(valIdx): - result.add pos - inc pos - -func getSubcommitteePositions*( - dag: ChainDAGRef, - slot: Slot, - subcommitteeIdx: SyncSubcommitteeIndex, - validatorIdx: uint64): seq[uint64] = - withState(dag.headState): - when consensusFork >= ConsensusFork.Altair: - let - period = sync_committee_period(slot) - curPeriod = sync_committee_period(forkyState.data.slot) - - template search(syncCommittee: openArray[ValidatorIndex]): seq[uint64] = - dag.getSubcommitteePositionsAux( - syncCommittee, subcommitteeIdx, validatorIdx) - - if period == curPeriod: - search(dag.headSyncCommittees.current_sync_committee) - elif period == curPeriod + 1: - search(dag.headSyncCommittees.next_sync_committee) - else: @[] - else: - @[] - -template syncCommitteeParticipants*( - dag: ChainDAGRef, - slot: Slot, - subcommitteeIdx: SyncSubcommitteeIndex): seq[ValidatorIndex] = - toSeq(syncSubcommittee(dag.syncCommitteeParticipants(slot), subcommitteeIdx)) - -iterator syncCommitteeParticipants*( - dag: ChainDAGRef, - slot: Slot, - subcommitteeIdx: SyncSubcommitteeIndex, - aggregationBits: SyncCommitteeAggregationBits): ValidatorIndex = - for pos, valIdx in dag.syncCommitteeParticipants(slot, subcommitteeIdx): - if pos < aggregationBits.bits and aggregationBits[pos]: - yield valIdx - -func needStateCachesAndForkChoicePruning*(dag: ChainDAGRef): bool = - dag.lastPrunePoint != dag.finalizedHead.toBlockSlotId().expect("not nil") - -proc pruneStateCachesDAG*(dag: ChainDAGRef) = - ## This prunes the cached state checkpoints and EpochRef - ## This does NOT prune the state associated with invalidated blocks on a fork - ## They are pruned via `pruneBlocksDAG` - ## - ## This updates the `dag.lastPrunePoint` variable - doAssert dag.needStateCachesAndForkChoicePruning() - let startTick = Moment.now() - block: # Remove states, walking slot by slot - # We remove all state checkpoints that come _before_ the current finalized - # head, as we might frequently be asked to replay states from the - # finalized checkpoint and onwards (for example when validating blocks and - # attestations) - var - finPoint = dag.finalizedHead.toBlockSlotId().expect("not nil") - cur = dag.parentOrSlot(dag.stateCheckpoint(finPoint)) - prev = dag.parentOrSlot(dag.stateCheckpoint(dag.lastPrunePoint)) - - while cur.isSome and prev.isSome and cur.get() != prev.get(): - let bs = cur.get() - if not isFinalizedStateSnapshot(bs.slot) and - bs.slot != dag.tail.slot: - dag.delState(bs) - let tmp = cur.get() - cur = dag.parentOrSlot(tmp) - - let statePruneTick = Moment.now() - - block: # Clean up old EpochRef instances - # After finalization, we can clear up the epoch cache 
and save memory - - # it will be recomputed if needed - dag.epochRefs.delIt(it.epoch < dag.finalizedHead.slot.epoch) - dag.shufflingRefs.delIt(it.epoch < dag.finalizedHead.slot.epoch) - - let epochRefPruneTick = Moment.now() - - dag.lastPrunePoint = dag.finalizedHead.toBlockSlotId().expect("not nil") - - debug "Pruned the state checkpoints and DAG caches.", - statePruneDur = statePruneTick - startTick, - epochRefPruneDur = epochRefPruneTick - statePruneTick - -func pruneStep(horizon, lastHorizon, lastBlockHorizon: Slot): - tuple[stateHorizon, blockHorizon: Slot] = - ## Compute a reasonable incremental pruning step considering the current - ## horizon, how far the database has been pruned already and where we want the - ## tail to be - the return value shows the first state and block that we - ## should _keep_ (inclusive). - - const SLOTS_PER_STATE_SNAPSHOT = - uint64(EPOCHS_PER_STATE_SNAPSHOT * SLOTS_PER_EPOCH) - - let - blockHorizon = block: - let - # Keep up with horizon if it's moving fast, ie if we're syncing - maxSlots = max(horizon - lastHorizon, MAX_SLOTS_PER_PRUNE) - - # Move the block horizon cap with a lag so that it moves slot-by-slot - # instead of a big jump every time we prune a state - assuming we - # prune every slot, this makes us prune one slot at a time instead of - # a burst of prunes (as computed by maxSlots) around every snapshot - # change followed by no pruning for the rest of the period - maxBlockHorizon = - if horizon + 1 >= SLOTS_PER_STATE_SNAPSHOT: - horizon + 1 - SLOTS_PER_STATE_SNAPSHOT - else: - Slot(0) - - # `lastBlockHorizon` captures the case where we're incrementally - # pruning a database that hasn't been pruned for a while: it's - # initialized to a pre-tail value on startup and moves to approach - # `maxBlockHorizon`. - min(maxBlockHorizon, lastBlockHorizon + maxSlots) - - # Round up such that we remove state only once blocks have been removed - stateHorizon = - ((blockHorizon + SLOTS_PER_STATE_SNAPSHOT - 1) div - SLOTS_PER_STATE_SNAPSHOT) * SLOTS_PER_STATE_SNAPSHOT - - (Slot(stateHorizon), blockHorizon) - -proc pruneHistory*(dag: ChainDAGRef, startup = false) = - ## Perform an incremental pruning step of the history - if dag.db.db.readOnly: - return - - let - horizon = dag.horizon() - (stateHorizon, blockHorizon) = pruneStep( - horizon, dag.lastHistoryPruneHorizon, dag.lastHistoryPruneBlockHorizon) - - doAssert blockHorizon <= stateHorizon, - "we must never prune blocks while leaving the state" - - debug "Pruning history", - horizon, blockHorizon, stateHorizon, - lastHorizon = dag.lastHistoryPruneHorizon, - lastBlockHorizon = dag.lastHistoryPruneBlockHorizon, - tail = dag.tail, head = dag.head - - dag.lastHistoryPruneHorizon = horizon - dag.lastHistoryPruneBlockHorizon = blockHorizon - - dag.db.withManyWrites: - if stateHorizon > dag.tail.slot: - # First, we want to see if it's possible to prune any states - we store one - # state every EPOCHS_PER_STATE_SNAPSHOT, so this happens infrequently. - - var - cur = dag.getBlockIdAtSlot(stateHorizon) - - var first = true - while cur.isSome(): - let bs = cur.get() - # We don't delete legacy states because the legacy database is openend - # in read-only and slow to delete from due to its sub-optimal structure - if dag.db.containsState( - dag.cfg, bs.bid.root, bs.slot..bs.slot, legacy = first): - if first: - # We leave the state on the prune horizon intact and update the tail - # to point to this state, indicating the new point in time from - # which we can load states in general. 
- debug "Updating tail", bs - dag.db.putTailBlock(bs.bid.root) - dag.tail = bs.bid - first = false - else: - debug "Pruning historical state", bs - dag.delState(bs) - elif not bs.isProposed: - trace "Reached already-pruned slot, done pruning states", bs - break - - if bs.isProposed: - # We store states either at the same slot at the block (checkpoint) or - # by advancing the slot to the nearest epoch start - check both when - # pruning - cur = dag.parentOrSlot(bs) - elif bs.slot.epoch > EPOCHS_PER_STATE_SNAPSHOT: - # Jump one snapshot interval at a time, but don't prune genesis - cur = dag.getBlockIdAtSlot(start_slot(bs.slot.epoch() - EPOCHS_PER_STATE_SNAPSHOT)) - else: - break - - # Prune blocks after sanity-checking that we don't prune post-tail blocks - - # this could happen if a state is missing at the expected state horizon and - # would indicate a partially inconsistent database since the base - # invariant is that there exists a state at the snapshot slot - better not - # further mess things up regardless - if blockHorizon > GENESIS_SLOT and blockHorizon <= dag.tail.slot: - var - # Leave the horizon block itself - cur = dag.getBlockIdAtSlot(blockHorizon - 1).map(proc(x: auto): auto = x.bid) - - while cur.isSome: - let - bid = cur.get() - fork = dag.cfg.consensusForkAtEpoch(bid.slot.epoch) - - if bid.slot == GENESIS_SLOT: - # Leave genesis block for nostalgia and the REST API - break - - if not dag.db.delBlock(fork, bid.root): - # Stop at the first gap - this is typically the pruning point of the - # previous call to pruneHistory. An inconsistent DB might have more - # blocks beyond that point but we have no efficient way of detecting - # that. - break - - cur = dag.parent(bid) - - # TODO There have been varied reports of startup pruning causing long - # startup times - an incremental approach would be needed here also - if false and - startup and - dag.cfg.consensusForkAtEpoch(blockHorizon.epoch) > ConsensusFork.Phase0: - # Once during start, we'll clear all "old fork" data - this ensures we get - # rid of any leftover junk in the tables - we do so after linear pruning - # so as to "mostly" clean up the phase0 tables as well (which cannot be - # pruned easily by fork) - one fork at a time, so as not to take too long - - let stateFork = dag.cfg.consensusForkAtEpoch(dag.tail.slot.epoch) - var clearedStates = false - if stateFork > ConsensusFork.Phase0: - for fork in ConsensusFork.Phase0.. ConsensusFork.Phase0: - for fork in ConsensusFork.Phase0..= ConsensusFork.Bellatrix: - Opt.some forkyBlck.message.body.execution_payload.block_hash - else: - Opt.some ZERO_HASH - -proc loadExecutionBlockHash*( - dag: ChainDAGRef, blck: BlockRef): Opt[Eth2Digest] = - if blck.executionBlockHash.isNone: - blck.executionBlockHash = dag.loadExecutionBlockHash(blck.bid) - blck.executionBlockHash - -from std/packedsets import PackedSet, incl, items - -func getValidatorChangeStatuses( - state: ForkedHashedBeaconState, vis: openArray[ValidatorIndex]): - PackedSet[ValidatorIndex] = - var res: PackedSet[ValidatorIndex] - withState(state): - for vi in vis: - if forkyState.data.validators[vi].withdrawal_credentials.data[0] == - BLS_WITHDRAWAL_PREFIX: - res.incl vi - res - -func checkBlsToExecutionChanges( - state: ForkedHashedBeaconState, vis: PackedSet[ValidatorIndex]): bool = - # Within each fork, BLS_WITHDRAWAL_PREFIX to ETH1_ADDRESS_WITHDRAWAL_PREFIX - # and never ETH1_ADDRESS_WITHDRAWAL_PREFIX to BLS_WITHDRAWAL_PREFIX. Latter - # can still happen via reorgs. 
- # Cases: - # 1) unchanged (BLS_WITHDRAWAL_PREFIX or ETH1_ADDRESS_WITHDRAWAL_PREFIX) from - # old to new head. - # 2) ETH1_ADDRESS_WITHDRAWAL_PREFIX to BLS_WITHDRAWAL_PREFIX - # 3) BLS_WITHDRAWAL_PREFIX to ETH1_ADDRESS_WITHDRAWAL_PREFIX - # - # Only report (3), i.e. whether there were validator indices with withdrawal - # credentials previously using BLS_WITHDRAWAL_PREFIX now using, instead, the - # ETH1_ADDRESS_WITHDRAWAL_PREFIX prefix indicating a BLS to execution change - # went through. - # - # Since it tracks head, it's possible reorgs trigger reporting the same - # validator indices multiple times; this is fine. - withState(state): - anyIt( vis, forkyState.data.validators[it].has_eth1_withdrawal_credential) - -proc updateHead*( - dag: ChainDAGRef, newHead: BlockRef, quarantine: var Quarantine, - knownValidators: openArray[ValidatorIndex]) = - ## Update what we consider to be the current head, as given by the fork - ## choice. - ## - ## The choice of head affects the choice of finalization point - the order - ## of operations naturally becomes important here - after updating the head, - ## blocks that were once considered potential candidates for a tree will - ## now fall from grace, or no longer be considered resolved. - doAssert not newHead.isNil() - - # Could happen if enough blocks get invalidated and would corrupt database - - # When finalized checkpoint is empty, the slot may also be smaller - doAssert newHead.slot >= dag.finalizedHead.slot or - newHead == dag.finalizedHead.blck - - let lastHead = dag.head - - logScope: - newHead = shortLog(newHead) - lastHead = shortLog(lastHead) - - if lastHead == newHead: - trace "No head block update" - return - - if newHead.parent.isNil: - # The new head should always have the finalizedHead as ancestor - thus, - # this should not happen except in a race condition where the selected - # `BlockRef` had its parent set to nil as happens during finalization - - # notably, resetting the head to be the finalizedHead is not allowed - error "Cannot update head to block without parent" - return - - let - lastHeadStateRoot = getStateRoot(dag.headState) - lastHeadMergeComplete = dag.headState.is_merge_transition_complete() - lastHeadKind = dag.headState.kind - lastKnownValidatorsChangeStatuses = getValidatorChangeStatuses( - dag.headState, knownValidators) - - # Start off by making sure we have the right state - updateState will try - # to use existing in-memory states to make this smooth - var cache: StateCache - if not updateState( - dag, dag.headState, newHead.bid.atSlot(), false, cache): - # Advancing the head state should never fail, given that the tail is - # implicitly finalised, the head is an ancestor of the tail and we always - # store the tail state in the database, as well as every epoch slot state in - # between - fatal "Unable to load head state during head update, database corrupt?", - lastHead = shortLog(lastHead) - quit 1 - - dag.head = newHead - - if dag.headState.is_merge_transition_complete() and not - lastHeadMergeComplete and - dag.vanityLogs.onMergeTransitionBlock != nil: - dag.vanityLogs.onMergeTransitionBlock() - - if dag.headState.kind > lastHeadKind: - case dag.headState.kind - of ConsensusFork.Phase0 .. 
ConsensusFork.Bellatrix: - discard - of ConsensusFork.Capella: - if dag.vanityLogs.onUpgradeToCapella != nil: - dag.vanityLogs.onUpgradeToCapella() - of ConsensusFork.Deneb: - if dag.vanityLogs.onUpgradeToDeneb != nil: - dag.vanityLogs.onUpgradeToDeneb() - of ConsensusFork.Electra: - if dag.vanityLogs.onUpgradeToElectra != nil: - dag.vanityLogs.onUpgradeToElectra() - - if dag.vanityLogs.onKnownBlsToExecutionChange != nil and - checkBlsToExecutionChanges( - dag.headState, lastKnownValidatorsChangeStatuses): - dag.vanityLogs.onKnownBlsToExecutionChange() - - dag.db.putHeadBlock(newHead.root) - - # updateBeaconMetrics(dag.headState, dag.head.bid, cache) - - withState(dag.headState): - when consensusFork >= ConsensusFork.Altair: - dag.headSyncCommittees = forkyState.data.get_sync_committee_cache(cache) - - let - finalized_checkpoint = - getStateField(dag.headState, finalized_checkpoint) - finalizedSlot = - # finalized checkpoint may move back in the head state compared to what - # we've seen in other forks - it does not move back in fork choice - # however, so we'll use the last-known-finalized in that case - max(finalized_checkpoint.epoch.start_slot(), dag.finalizedHead.slot) - finalizedHead = newHead.atSlot(finalizedSlot) - - doAssert (not finalizedHead.blck.isNil), - "Block graph should always lead to a finalized block" - - # Update light client data - # dag.processHeadChangeForLightClient() - - let (isAncestor, ancestorDepth) = lastHead.getDepth(newHead) - if not(isAncestor): - notice "Updated head block with chain reorg", - headParent = shortLog(newHead.parent), - stateRoot = shortLog(getStateRoot(dag.headState)), - justified = shortLog(getStateField( - dag.headState, current_justified_checkpoint)), - finalized = shortLog(getStateField(dag.headState, finalized_checkpoint)), - isOptHead = not newHead.executionValid - - if not(isNil(dag.onReorgHappened)): - let - # TODO (cheatfate): Proper implementation required - data = ReorgInfoObject.init(dag.head.slot, uint64(ancestorDepth), - lastHead.root, newHead.root, - lastHeadStateRoot, - getStateRoot(dag.headState)) - dag.onReorgHappened(data) - - # A reasonable criterion for "reorganizations of the chain" - quarantine.clearAfterReorg() - - # beacon_reorgs_total_total.inc() - # beacon_reorgs_total.inc() - else: - debug "Updated head block", - stateRoot = shortLog(getStateRoot(dag.headState)), - justified = shortLog(getStateField( - dag.headState, current_justified_checkpoint)), - finalized = shortLog(getStateField(dag.headState, finalized_checkpoint)), - isOptHead = not newHead.executionValid - - if not(isNil(dag.onHeadChanged)): - let - depRoot = withState(dag.headState): forkyState.proposer_dependent_root - prevDepRoot = withState(dag.headState): - forkyState.attester_dependent_root - epochTransition = (finalizedHead != dag.finalizedHead) - # TODO (cheatfate): Proper implementation required - data = HeadChangeInfoObject.init(dag.head.slot, dag.head.root, - getStateRoot(dag.headState), - epochTransition, prevDepRoot, - depRoot) - dag.onHeadChanged(data) - - withState(dag.headState): - # Every time the head changes, the "canonical" view of balances and other - # state-related metrics change - notify the validator monitor. - # Doing this update during head update ensures there's a reasonable number - # of such updates happening - at most once per valid block. 
- dag.validatorMonitor[].registerState(forkyState.data) - - if finalizedHead != dag.finalizedHead: - debug "Reached new finalization checkpoint", - stateRoot = shortLog(getStateRoot(dag.headState)), - justified = shortLog(getStateField( - dag.headState, current_justified_checkpoint)), - finalized = shortLog(getStateField(dag.headState, finalized_checkpoint)) - let oldFinalizedHead = dag.finalizedHead - - block: - # Update `dag.finalizedBlocks` with all newly finalized blocks (those - # newer than the previous finalized head), then update `dag.finalizedHead` - var newFinalized: seq[BlockId] - var tmp = finalizedHead.blck - while not isNil(tmp) and tmp.slot >= dag.finalizedHead.slot: - newFinalized.add(tmp.bid) - if tmp != finalizedHead.blck: - # The newly finalized block itself should remain in here so that fork - # choice still can find it via root - dag.forkBlocks.excl(KeyedBlockRef.init(tmp)) - - let p = tmp.parent - tmp.parent = nil # Reset all parent links to release memory - tmp = p - - dag.finalizedHead = finalizedHead - - dag.db.updateFinalizedBlocks(newFinalized) - - let oldBlockHash = dag.loadExecutionBlockHash(oldFinalizedHead.blck) - if oldBlockHash.isSome and oldBlockHash.unsafeGet.isZero: - let newBlockHash = dag.loadExecutionBlockHash(dag.finalizedHead.blck) - if newBlockHash.isSome and not newBlockHash.unsafeGet.isZero: - if dag.vanityLogs.onFinalizedMergeTransitionBlock != nil: - dag.vanityLogs.onFinalizedMergeTransitionBlock() - - # Pruning the block dag is required every time the finalized head changes - # in order to clear out blocks that are no longer viable and should - # therefore no longer be considered as part of the chain we're following - dag.pruneBlocksDAG() - - # Update light client data - # dag.processFinalizationForLightClient(oldFinalizedHead) - - # Send notification about new finalization point via callback. - if not(isNil(dag.onFinHappened)): - let stateRoot = - if dag.finalizedHead.slot == dag.head.slot: getStateRoot(dag.headState) - elif dag.finalizedHead.slot + SLOTS_PER_HISTORICAL_ROOT > dag.head.slot: - getStateField(dag.headState, state_roots).data[ - int(dag.finalizedHead.slot mod SLOTS_PER_HISTORICAL_ROOT)] - else: - Eth2Digest() # The thing that finalized was >8192 blocks old? - # TODO (cheatfate): Proper implementation required - let data = FinalizationInfoObject.init( - dag.finalizedHead.blck.root, stateRoot, dag.finalizedHead.slot.epoch) - dag.onFinHappened(dag, data) - -proc isInitialized*(T: type ChainDAGRef, db: BeaconChainDB): Result[void, cstring] = - ## Lightweight check to see if it is likely that the given database has been - ## initialized - let - tailBlockRoot = db.getTailBlock() - if not tailBlockRoot.isSome(): - return err("Tail block root missing") - - let - tailBlock = db.getBlockId(tailBlockRoot.get()) - if not tailBlock.isSome(): - return err("Tail block information missing") - - ok() - -proc preInit*( - T: type ChainDAGRef, db: BeaconChainDB, state: ForkedHashedBeaconState) = - ## Initialize a database using the given state, which potentially may be a - ## non-genesis state. - ## - ## When used with a non-genesis state, the resulting database will not be - ## compatible with pre-22.11 versions. 
- logScope: - stateRoot = $getStateRoot(state) - stateSlot = getStateField(state, slot) - - doAssert getStateField(state, slot).is_epoch, - "Can only initialize database from epoch states" - - withState(state): - db.putState(forkyState) - - if forkyState.data.slot == GENESIS_SLOT: - let blck = get_initial_beacon_block(forkyState) - db.putBlock(blck) - db.putGenesisBlock(blck.root) - db.putHeadBlock(blck.root) - db.putTailBlock(blck.root) - - notice "Database initialized from genesis", - blockRoot = $blck.root - else: - let blockRoot = forkyState.latest_block_root() - # We write a summary but not the block contents - these will have to be - # backfilled from the network - db.putBeaconBlockSummary(blockRoot, BeaconBlockSummary( - slot: forkyState.data.latest_block_header.slot, - parent_root: forkyState.data.latest_block_header.parent_root - )) - db.putHeadBlock(blockRoot) - db.putTailBlock(blockRoot) - - if db.getGenesisBlock().isSome(): - notice "Checkpoint written to database", blockRoot = $blockRoot - else: - notice "Database initialized from checkpoint", blockRoot = $blockRoot - -proc getProposer*( - dag: ChainDAGRef, head: BlockRef, slot: Slot): Opt[ValidatorIndex] = - let - epochRef = dag.getEpochRef(head.bid, slot.epoch(), false).valueOr: - notice "Cannot load EpochRef for given head", head, slot, error - return Opt.none(ValidatorIndex) - - slotInEpoch = slot.since_epoch_start() - - let proposer = epochRef.beacon_proposers[slotInEpoch] - if proposer.isSome(): - if proposer.get().uint64 >= dag.db.immutableValidators.lenu64(): - # Sanity check - it should never happen that the key cache doesn't contain - # a key for the selected proposer - that would mean that we somehow - # created validators in the state without updating the cache! - warn "Proposer key not found", - keys = dag.db.immutableValidators.lenu64(), proposer = proposer.get() - return Opt.none(ValidatorIndex) - - proposer - -proc getProposalState*( - dag: ChainDAGRef, head: BlockRef, slot: Slot, cache: var StateCache): - Result[ref ForkedHashedBeaconState, cstring] = - ## Return a state suitable for making proposals for the given head and slot - - ## in particular, the state can be discarded after use and does not have a - ## state root set - - # Start with the clearance state, since this one typically has been advanced - # and thus has a hot hash tree cache - let state = assignClone(dag.clearanceState) - - var - info = ForkedEpochInfo() - if not state[].can_advance_slots(head.root, slot): - # The last state root will be computed as part of block production, so skip - # it now - if not dag.updateState( - state[], head.atSlot(slot - 1).toBlockSlotId().expect("not nil"), - false, cache): - error "Cannot get proposal state - skipping block production, database corrupt?", - head = shortLog(head), - slot - return err("Cannot create proposal state") - else: - loadStateCache(dag, cache, head.bid, slot.epoch) - - if getStateField(state[], slot) < slot: - process_slots( - dag.cfg, state[], slot, cache, info, - {skipLastStateRootCalculation}).expect("advancing 1 slot should not fail") - - ok state - -func aggregateAll*( - dag: ChainDAGRef, - validator_indices: openArray[ValidatorIndex]): Result[CookedPubKey, cstring] = - if validator_indices.len == 0: - # Aggregation spec requires non-empty collection - # - https://tools.ietf.org/html/draft-irtf-cfrg-bls-signature-04 - # Consensus specs require at least one attesting index in attestation - # - 
https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/specs/phase0/beacon-chain.md#is_valid_indexed_attestation - return err("aggregate: no attesting keys") - - let - firstKey = dag.validatorKey(validator_indices[0]).valueOr: - return err("aggregate: invalid validator index") - - var aggregateKey{.noinit.}: AggregatePublicKey - - aggregateKey.init(firstKey) - - for i in 1 ..< validator_indices.len: - let key = dag.validatorKey(validator_indices[i]).valueOr: - return err("aggregate: invalid validator index") - aggregateKey.aggregate(key) - - ok(finish(aggregateKey)) - -func aggregateAll*( - dag: ChainDAGRef, - validator_indices: openArray[ValidatorIndex|uint64], - bits: BitSeq | BitArray): Result[CookedPubKey, cstring] = - if validator_indices.len() != bits.len(): - return err("aggregateAll: mismatch in bits length") - - var - aggregateKey{.noinit.}: AggregatePublicKey - inited = false - - for i in 0.. dag.horizon - -proc rebuildIndex*(dag: ChainDAGRef) = - ## After a checkpoint sync, we lack intermediate states to replay from - this - ## function rebuilds them so that historical replay can take place again - ## TODO the pruning of junk states could be moved to a separate function that - ## runs either on startup - # First, we check what states we already have in the database - that allows - # resuming the operation at any time - let - roots = dag.db.loadStateRoots() - historicalRoots = getStateField(dag.headState, historical_roots).asSeq() - historicalSummaries = dag.headState.historical_summaries.asSeq() - - var - canonical = newSeq[Eth2Digest]( - (dag.finalizedHead.slot.epoch + EPOCHS_PER_STATE_SNAPSHOT - 1) div - EPOCHS_PER_STATE_SNAPSHOT) - # `junk` puts in place some infrastructure to prune unnecessary states - it - # will be more useful in the future as a base for pruning - junk: seq[((Slot, Eth2Digest), Eth2Digest)] - - for k, v in roots: - if k[0] >= dag.finalizedHead.slot: - continue # skip newer stuff - if k[0] < dag.backfill.slot: - continue # skip stuff for which we have no blocks - - if not isFinalizedStateSnapshot(k[0]): - # `tail` will move at the end of the process, so we won't need any - # intermediate states - junk.add((k, v)) - - continue # skip non-snapshot slots - - if k[0] > 0: - let bs = dag.getBlockIdAtSlot(k[0] - 1) - if bs.isNone or bs.get().bid.root != k[1]: - # remove things that are no longer a canonical part of the chain or - # cannot be reached via a block - junk.add((k, v)) - continue - - if not dag.db.containsState(dag.cfg.consensusForkAtEpoch(k[0].epoch), v): - continue # If it's not in the database.. 
- - canonical[k[0].epoch div EPOCHS_PER_STATE_SNAPSHOT] = v - - let - state = (ref ForkedHashedBeaconState)() - - var - cache: StateCache - info: ForkedEpochInfo - tailBid: Opt[BlockId] - states: int - - # `canonical` holds all slots at which a state is expected to appear, using a - # zero root whenever a particular state is missing - this way, if there's - # partial progress or gaps, they will be dealt with correctly - for i, state_root in canonical.mpairs(): - let - slot = Epoch(i * EPOCHS_PER_STATE_SNAPSHOT).start_slot - - if slot < dag.backfill.slot: - # TODO if we have era files, we could try to load blocks from them at - # this point - # TODO if we don't do the above, we can of course compute the starting `i` - continue - - if tailBid.isNone(): - if state_root.isZero: - # If we can find an era file with this state, use it as an alternative - # starting point - ignore failures for now - if dag.era.getState( - historicalRoots, historicalSummaries, slot, state[]).isOk(): - state_root = getStateRoot(state[]) - - withState(state[]): dag.db.putState(forkyState) - tailBid = Opt.some state[].latest_block_id() - - else: - if not dag.db.getState( - dag.cfg.consensusForkAtEpoch(slot.epoch), state_root, state[], - noRollback): - fatal "Cannot load state, database corrupt or created for a different network?", - state_root, slot - quit 1 - tailBid = Opt.some state[].latest_block_id() - - continue - - if i == 0 or canonical[i - 1].isZero: - reset(tailBid) # No unbroken history! - continue - - if not state_root.isZero: - states += 1 - continue - - let - startSlot = Epoch((i - 1) * EPOCHS_PER_STATE_SNAPSHOT).start_slot - - info "Recreating state snapshot", - slot, startStateRoot = canonical[i - 1], startSlot - - if getStateRoot(state[]) != canonical[i - 1]: - if not dag.db.getState( - dag.cfg.consensusForkAtEpoch(startSlot.epoch), canonical[i - 1], - state[], noRollback): - error "Can't load start state, database corrupt?", - startStateRoot = shortLog(canonical[i - 1]), slot = startSlot - return - - for slot in startSlot.. 
0: - info "Dropping redundant states", states, redundant = junk.len - - for i in junk: - dag.db.delStateRoot(i[0][1], i[0][0]) - dag.db.delState(dag.cfg.consensusForkAtEpoch(i[0][0].epoch), i[1]) From 9ae60baa8fdee008bffc7ccd7febb6864aa93cba Mon Sep 17 00:00:00 2001 From: Pedro Miranda Date: Mon, 9 Dec 2024 15:02:31 +0000 Subject: [PATCH 28/32] Removal adapted beacon node code from eth2 --- .../consensus/consensus_wrapper.nim | 2414 ----------------- 1 file changed, 2414 deletions(-) diff --git a/nimbus_unified/consensus/consensus_wrapper.nim b/nimbus_unified/consensus/consensus_wrapper.nim index 5d8ead41c..89cb16943 100644 --- a/nimbus_unified/consensus/consensus_wrapper.nim +++ b/nimbus_unified/consensus/consensus_wrapper.nim @@ -49,2420 +49,6 @@ from libp2p/protocols/pubsub/gossipsub import TopicParams, validateParameters, i logScope: topics = "Consensus layer" -# adapted from nimbus-eth2 -# # https://github.com/ethereum/eth2.0-metrics/blob/master/metrics.md#interop-metrics -# declareGauge beacon_slot, "Latest slot of the beacon chain state" -# declareGauge beacon_current_epoch, "Current epoch" - -# # Finalization tracking -# declareGauge finalization_delay, -# "Epoch delay between scheduled epoch and finalized epoch" - -# declareGauge ticks_delay, -# "How long does to take to run the onSecond loop" - -# declareGauge next_action_wait, -# "Seconds until the next attestation will be sent" - -# declareGauge next_proposal_wait, -# "Seconds until the next proposal will be sent, or Inf if not known" - -# declareGauge sync_committee_active, -# "1 if there are current sync committee duties, 0 otherwise" - -# declareCounter db_checkpoint_seconds, -# "Time spent checkpointing the database to clear the WAL file" - -const SlashingDbName = "slashing_protection" -# changing this requires physical file rename as well or history is lost - -## NOTE -## following procedures are copies/adaptations from nimbus_beacon_node.nim. 
-## TODO: Extract do adequate structures and files - -# TODO: need to figure out behaviour on threaded patterns -# Using this function here is signaled as non GC SAFE given -# that gPidFile might be accessed concurrently with no guards - -# var gPidFile: string -# proc createPidFile(filename: string) {.raises: [IOError].} = -# writeFile filename, $os.getCurrentProcessId() -# gPidFile = filename -# addExitProc ( -# proc() = -# discard io2.removeFile(filename) -# ) - -proc initFullNode( - node: BeaconNode, - rng: ref HmacDrbgContext, - dag: ChainDAGRef, - taskpool: TaskPoolPtr, - getBeaconTime: GetBeaconTimeFn, -) {.async.} = - template config(): auto = - node.config - - proc onPhase0AttestationReceived(data: phase0.Attestation) = - node.eventBus.attestQueue.emit(data) - - proc onElectraAttestationReceived(data: electra.Attestation) = - debugComment "electra attestation queue" - - proc onSyncContribution(data: SignedContributionAndProof) = - node.eventBus.contribQueue.emit(data) - - proc onVoluntaryExitAdded(data: SignedVoluntaryExit) = - node.eventBus.exitQueue.emit(data) - - proc onBLSToExecutionChangeAdded(data: SignedBLSToExecutionChange) = - node.eventBus.blsToExecQueue.emit(data) - - proc onProposerSlashingAdded(data: ProposerSlashing) = - node.eventBus.propSlashQueue.emit(data) - - proc onPhase0AttesterSlashingAdded(data: phase0.AttesterSlashing) = - node.eventBus.attSlashQueue.emit(data) - - proc onElectraAttesterSlashingAdded(data: electra.AttesterSlashing) = - debugComment "electra att slasher queue" - - proc onBlobSidecarAdded(data: BlobSidecarInfoObject) = - node.eventBus.blobSidecarQueue.emit(data) - - proc onBlockAdded(data: ForkedTrustedSignedBeaconBlock) = - let optimistic = - if node.currentSlot().epoch() >= dag.cfg.BELLATRIX_FORK_EPOCH: - some node.dag.is_optimistic(data.toBlockId()) - else: - none[bool]() - node.eventBus.blocksQueue.emit(EventBeaconBlockObject.init(data, optimistic)) - - proc onHeadChanged(data: HeadChangeInfoObject) = - let eventData = - if node.currentSlot().epoch() >= dag.cfg.BELLATRIX_FORK_EPOCH: - var res = data - res.optimistic = - some node.dag.is_optimistic(BlockId(slot: data.slot, root: data.block_root)) - res - else: - data - node.eventBus.headQueue.emit(eventData) - - proc onChainReorg(data: ReorgInfoObject) = - let eventData = - if node.currentSlot().epoch() >= dag.cfg.BELLATRIX_FORK_EPOCH: - var res = data - res.optimistic = some node.dag.is_optimistic( - BlockId(slot: data.slot, root: data.new_head_block) - ) - res - else: - data - node.eventBus.reorgQueue.emit(eventData) - - proc makeOnFinalizationCb( - # This `nimcall` functions helps for keeping track of what - # needs to be captured by the onFinalization closure. - eventBus: EventBus, - elManager: ELManager, - ): OnFinalizedCallback {.nimcall.} = - static: - doAssert (elManager is ref) - return proc(dag: ChainDAGRef, data: FinalizationInfoObject) = - if elManager != nil: - let finalizedEpochRef = dag.getFinalizedEpochRef() - discard trackFinalizedState( - elManager, finalizedEpochRef.eth1_data, finalizedEpochRef.eth1_deposit_index - ) - # node.updateLightClientFromDag() - let eventData = - if node.currentSlot().epoch() >= dag.cfg.BELLATRIX_FORK_EPOCH: - var res = data - # `slot` in this `BlockId` may be higher than block's actual slot, - # this is alright for the purpose of calling `is_optimistic`. 
- res.optimistic = some node.dag.is_optimistic( - BlockId(slot: data.epoch.start_slot, root: data.block_root) - ) - res - else: - data - eventBus.finalQueue.emit(eventData) - - func getLocalHeadSlot(): Slot = - dag.head.slot - - proc getLocalWallSlot(): Slot = - node.beaconClock.now.slotOrZero - - func getFirstSlotAtFinalizedEpoch(): Slot = - dag.finalizedHead.slot - - func getBackfillSlot(): Slot = - if dag.backfill.parent_root != dag.tail.root: dag.backfill.slot else: dag.tail.slot - - func getFrontfillSlot(): Slot = - max(dag.frontfill.get(BlockId()).slot, dag.horizon) - - proc isWithinWeakSubjectivityPeriod(): bool = - let - currentSlot = node.beaconClock.now().slotOrZero() - checkpoint = Checkpoint( - epoch: epoch(getStateField(node.dag.headState, slot)), - root: getStateField(node.dag.headState, latest_block_header).state_root, - ) - is_within_weak_subjectivity_period( - node.dag.cfg, currentSlot, node.dag.headState, checkpoint - ) - - proc eventWaiter(): Future[void] {.async: (raises: [CancelledError]).} = - await node.shutdownEvent.wait() - bnStatus = BeaconNodeStatus.Stopping - - asyncSpawn eventWaiter() - - let - quarantine = newClone(Quarantine.init()) - attestationPool = newClone( - AttestationPool.init( - dag, quarantine, onPhase0AttestationReceived, onElectraAttestationReceived - ) - ) - syncCommitteeMsgPool = - newClone(SyncCommitteeMsgPool.init(rng, dag.cfg, onSyncContribution)) - # adapted from nimbus-eth2 - # lightClientPool = newClone(LightClientPool()) - validatorChangePool = newClone( - ValidatorChangePool.init( - dag, attestationPool, onVoluntaryExitAdded, onBLSToExecutionChangeAdded, - onProposerSlashingAdded, onPhase0AttesterSlashingAdded, - onElectraAttesterSlashingAdded, - ) - ) - blobQuarantine = newClone(BlobQuarantine.init(onBlobSidecarAdded)) - consensusManager = ConsensusManager.new( - dag, - attestationPool, - quarantine, - node.elManager, - ActionTracker.init(node.network.nodeId, config.subscribeAllSubnets), - node.dynamicFeeRecipientsStore, - config.validatorsDir, - config.defaultFeeRecipient, - config.suggestedGasLimit, - ) - blockProcessor = BlockProcessor.new( - config.dumpEnabled, config.dumpDirInvalid, config.dumpDirIncoming, rng, taskpool, - consensusManager, node.validatorMonitor, blobQuarantine, getBeaconTime, - ) - blockVerifier = proc( - signedBlock: ForkedSignedBeaconBlock, - blobs: Opt[BlobSidecars], - maybeFinalized: bool, - ): Future[Result[void, VerifierError]] {. - async: (raises: [CancelledError], raw: true) - .} = - # The design with a callback for block verification is unusual compared - # to the rest of the application, but fits with the general approach - # taken in the sync/request managers - this is an architectural compromise - # that should probably be reimagined more holistically in the future. - blockProcessor[].addBlock( - MsgSource.gossip, signedBlock, blobs, maybeFinalized = maybeFinalized - ) - rmanBlockVerifier = proc( - signedBlock: ForkedSignedBeaconBlock, maybeFinalized: bool - ): Future[Result[void, VerifierError]] {.async: (raises: [CancelledError]).} = - withBlck(signedBlock): - when consensusFork >= ConsensusFork.Deneb: - if not blobQuarantine[].hasBlobs(forkyBlck): - # We don't have all the blobs for this block, so we have - # to put it in blobless quarantine. 
- if not quarantine[].addBlobless(dag.finalizedHead.slot, forkyBlck): - err(VerifierError.UnviableFork) - else: - err(VerifierError.MissingParent) - else: - let blobs = blobQuarantine[].popBlobs(forkyBlck.root, forkyBlck) - await blockProcessor[].addBlock( - MsgSource.gossip, - signedBlock, - Opt.some(blobs), - maybeFinalized = maybeFinalized, - ) - else: - await blockProcessor[].addBlock( - MsgSource.gossip, - signedBlock, - Opt.none(BlobSidecars), - maybeFinalized = maybeFinalized, - ) - rmanBlockLoader = proc(blockRoot: Eth2Digest): Opt[ForkedTrustedSignedBeaconBlock] = - dag.getForkedBlock(blockRoot) - rmanBlobLoader = proc(blobId: BlobIdentifier): Opt[ref BlobSidecar] = - var blob_sidecar = BlobSidecar.new() - if dag.db.getBlobSidecar(blobId.block_root, blobId.index, blob_sidecar[]): - Opt.some blob_sidecar - else: - Opt.none(ref BlobSidecar) - - #TODO: - # removing this light client var - lightClientPool = newClone(LightClientPool()) - - processor = Eth2Processor.new( - config.doppelgangerDetection, blockProcessor, node.validatorMonitor, dag, - attestationPool, validatorChangePool, node.attachedValidators, - syncCommitteeMsgPool, lightClientPool, quarantine, blobQuarantine, rng, - getBeaconTime, taskpool, - ) - syncManagerFlags = - if node.config.longRangeSync != LongRangeSyncMode.Lenient: - {SyncManagerFlag.NoGenesisSync} - else: - {} - syncManager = newSyncManager[Peer, PeerId]( - node.network.peerPool, - dag.cfg.DENEB_FORK_EPOCH, - dag.cfg.MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS, - SyncQueueKind.Forward, - getLocalHeadSlot, - getLocalWallSlot, - getFirstSlotAtFinalizedEpoch, - getBackfillSlot, - getFrontfillSlot, - isWithinWeakSubjectivityPeriod, - dag.tail.slot, - blockVerifier, - shutdownEvent = node.shutdownEvent, - flags = syncManagerFlags, - ) - backfiller = newSyncManager[Peer, PeerId]( - node.network.peerPool, - dag.cfg.DENEB_FORK_EPOCH, - dag.cfg.MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS, - SyncQueueKind.Backward, - getLocalHeadSlot, - getLocalWallSlot, - getFirstSlotAtFinalizedEpoch, - getBackfillSlot, - getFrontfillSlot, - isWithinWeakSubjectivityPeriod, - dag.backfill.slot, - blockVerifier, - maxHeadAge = 0, - shutdownEvent = node.shutdownEvent, - flags = syncManagerFlags, - ) - router = (ref MessageRouter)(processor: processor, network: node.network) - requestManager = RequestManager.init( - node.network, - dag.cfg.DENEB_FORK_EPOCH, - getBeaconTime, - ( - proc(): bool = - syncManager.inProgress - ), - quarantine, - blobQuarantine, - rmanBlockVerifier, - rmanBlockLoader, - rmanBlobLoader, - ) - # adapted from nimbus-eth2 - # if node.config.lightClientDataServe: - # proc scheduleSendingLightClientUpdates(slot: Slot) = - # if node.lightClientPool[].broadcastGossipFut != nil: - # return - # if slot <= node.lightClientPool[].latestBroadcastedSlot: - # return - # node.lightClientPool[].latestBroadcastedSlot = slot - - # template fut(): auto = - # node.lightClientPool[].broadcastGossipFut - - # fut = node.handleLightClientUpdates(slot) - # fut.addCallback do(p: pointer) {.gcsafe.}: - # fut = nil - - # router.onSyncCommitteeMessage = scheduleSendingLightClientUpdates - - dag.setFinalizationCb makeOnFinalizationCb(node.eventBus, node.elManager) - dag.setBlockCb(onBlockAdded) - dag.setHeadCb(onHeadChanged) - dag.setReorgCb(onChainReorg) - - node.dag = dag - node.blobQuarantine = blobQuarantine - node.quarantine = quarantine - node.attestationPool = attestationPool - node.syncCommitteeMsgPool = syncCommitteeMsgPool - # node.lightClientPool = lightClientPool - node.validatorChangePool = 
validatorChangePool - node.processor = processor - node.blockProcessor = blockProcessor - node.consensusManager = consensusManager - node.requestManager = requestManager - node.syncManager = syncManager - node.backfiller = backfiller - node.router = router - - await node.addValidators() - - block: - # Add in-process validators to the list of "known" validators such that - # we start with a reasonable ENR - let wallSlot = node.beaconClock.now().slotOrZero() - for validator in node.attachedValidators[].validators.values(): - if config.validatorMonitorAuto: - node.validatorMonitor[].addMonitor(validator.pubkey, validator.index) - - if validator.index.isSome(): - withState(dag.headState): - let idx = validator.index.get() - if distinctBase(idx) <= forkyState.data.validators.lenu64: - template v(): auto = - forkyState.data.validators.item(idx) - - if is_active_validator(v, wallSlot.epoch) or - is_active_validator(v, wallSlot.epoch + 1): - node.consensusManager[].actionTracker.knownValidators[idx] = wallSlot - elif is_exited_validator(v, wallSlot.epoch): - notice "Ignoring exited validator", - index = idx, pubkey = shortLog(v.pubkey) - let stabilitySubnets = - node.consensusManager[].actionTracker.stabilitySubnets(wallSlot) - # Here, we also set the correct ENR should we be in all subnets mode! - node.network.updateStabilitySubnetMetadata(stabilitySubnets) - - node.network.registerProtocol( - PeerSync, PeerSync.NetworkState.init(node.dag, node.beaconClock.getBeaconTimeFn()) - ) - - node.network.registerProtocol(BeaconSync, BeaconSync.NetworkState.init(node.dag)) - # adapted from nimbus-eth2 - - # if node.dag.lcDataStore.serve: - # node.network.registerProtocol( - # LightClientSync, LightClientSync.NetworkState.init(node.dag) - # ) - - # node.updateValidatorMetrics() - -func getVanityLogs(stdoutKind: StdoutLogKind): VanityLogs = - case stdoutKind - of StdoutLogKind.Auto: - raiseAssert "inadmissable here" - of StdoutLogKind.Colors: - VanityLogs( - onMergeTransitionBlock: bellatrixColor, - onFinalizedMergeTransitionBlock: bellatrixBlink, - onUpgradeToCapella: capellaColor, - onKnownBlsToExecutionChange: capellaBlink, - onUpgradeToDeneb: denebColor, - onUpgradeToElectra: electraColor, - ) - of StdoutLogKind.NoColors: - VanityLogs( - onMergeTransitionBlock: bellatrixMono, - onFinalizedMergeTransitionBlock: bellatrixMono, - onUpgradeToCapella: capellaMono, - onKnownBlsToExecutionChange: capellaMono, - onUpgradeToDeneb: denebMono, - onUpgradeToElectra: electraMono, - ) - of StdoutLogKind.Json, StdoutLogKind.None: - VanityLogs( - onMergeTransitionBlock: ( - proc() = - notice "🐼 Proof of Stake Activated 🐼" - ), - onFinalizedMergeTransitionBlock: ( - proc() = - notice "🐼 Proof of Stake Finalized 🐼" - ), - onUpgradeToCapella: ( - proc() = - notice "🦉 Withdrowls now available 🦉" - ), - onKnownBlsToExecutionChange: ( - proc() = - notice "🦉 BLS to execution changed 🦉" - ), - onUpgradeToDeneb: ( - proc() = - notice "🐟 Proto-Danksharding is ON 🐟" - ), - onUpgradeToElectra: ( - proc() = - notice "🦒 [PH] Electra 🦒" - ), - ) - -func getVanityMascot(consensusFork: ConsensusFork): string = - case consensusFork - of ConsensusFork.Electra: "🦒" - of ConsensusFork.Deneb: "🐟" - of ConsensusFork.Capella: "🦉" - of ConsensusFork.Bellatrix: "🐼" - of ConsensusFork.Altair: "✨" - of ConsensusFork.Phase0: "🦏" - -# NOTE: light client related code commented -proc loadChainDag( - config: BeaconNodeConf, - cfg: RuntimeConfig, - db: BeaconChainDB, - eventBus: EventBus, - validatorMonitor: ref ValidatorMonitor, - 
networkGenesisValidatorsRoot: Opt[Eth2Digest], -): ChainDAGRef = - info "Loading block DAG from database", path = config.databaseDir - - var dag: ChainDAGRef - proc onLightClientFinalityUpdate(data: ForkedLightClientFinalityUpdate) = - if dag == nil: - return - withForkyFinalityUpdate(data): - when lcDataFork > LightClientDataFork.None: - let contextFork = dag.cfg.consensusForkAtEpoch(forkyFinalityUpdate.contextEpoch) - eventBus.finUpdateQueue.emit( - RestVersioned[ForkedLightClientFinalityUpdate]( - data: data, - jsonVersion: contextFork, - sszContext: dag.forkDigests[].atConsensusFork(contextFork), - ) - ) - - proc onLightClientOptimisticUpdate(data: ForkedLightClientOptimisticUpdate) = - if dag == nil: - return - withForkyOptimisticUpdate(data): - when lcDataFork > LightClientDataFork.None: - let contextFork = - dag.cfg.consensusForkAtEpoch(forkyOptimisticUpdate.contextEpoch) - eventBus.optUpdateQueue.emit( - RestVersioned[ForkedLightClientOptimisticUpdate]( - data: data, - jsonVersion: contextFork, - sszContext: dag.forkDigests[].atConsensusFork(contextFork), - ) - ) - - let - chainDagFlags = - if config.strictVerification: - {strictVerification} - else: - {} - onLightClientFinalityUpdateCb = - if config.lightClientDataServe: onLightClientFinalityUpdate else: nil - onLightClientOptimisticUpdateCb = - if config.lightClientDataServe: onLightClientOptimisticUpdate else: nil - - dag = ChainDAGRef.init( - cfg, - db, - validatorMonitor, - chainDagFlags, - config.eraDir, - vanityLogs = getVanityLogs(detectTTY(config.logStdout)), - lcDataConfig = LightClientDataConfig( - serve: config.lightClientDataServe, - importMode: config.lightClientDataImportMode, - maxPeriods: config.lightClientDataMaxPeriods, - onLightClientFinalityUpdate: onLightClientFinalityUpdateCb, - onLightClientOptimisticUpdate: onLightClientOptimisticUpdateCb, - ), - ) - - if networkGenesisValidatorsRoot.isSome: - let databaseGenesisValidatorsRoot = - getStateField(dag.headState, genesis_validators_root) - if networkGenesisValidatorsRoot.get != databaseGenesisValidatorsRoot: - fatal "The specified --data-dir contains data for a different network", - networkGenesisValidatorsRoot = networkGenesisValidatorsRoot.get, - databaseGenesisValidatorsRoot, - dataDir = config.dataDir - quit 1 - - # The first pruning after restart may take a while.. 
- if config.historyMode == HistoryMode.Prune: - dag.pruneHistory(true) - - dag - -proc doRunTrustedNodeSync( - db: BeaconChainDB, - metadata: Eth2NetworkMetadata, - databaseDir: string, - eraDir: string, - restUrl: string, - stateId: Option[string], - trustedBlockRoot: Option[Eth2Digest], - backfill: bool, - reindex: bool, - downloadDepositSnapshot: bool, - genesisState: ref ForkedHashedBeaconState, -) {.async.} = - let syncTarget = - if stateId.isSome: - if trustedBlockRoot.isSome: - warn "Ignoring `trustedBlockRoot`, `stateId` is set", stateId, trustedBlockRoot - TrustedNodeSyncTarget(kind: TrustedNodeSyncKind.StateId, stateId: stateId.get) - elif trustedBlockRoot.isSome: - TrustedNodeSyncTarget( - kind: TrustedNodeSyncKind.TrustedBlockRoot, - trustedBlockRoot: trustedBlockRoot.get, - ) - else: - TrustedNodeSyncTarget(kind: TrustedNodeSyncKind.StateId, stateId: "finalized") - - await db.doTrustedNodeSync( - metadata.cfg, databaseDir, eraDir, restUrl, syncTarget, backfill, reindex, - downloadDepositSnapshot, genesisState, - ) - -proc initBeaconNode*( - T: type BeaconNode, - rng: ref HmacDrbgContext, - config: BeaconNodeConf, - metadata: Eth2NetworkMetadata, -): Future[BeaconNode] {.async.} = - var - taskpool: TaskPoolPtr - genesisState: ref ForkedHashedBeaconState = nil - - template cfg(): auto = - metadata.cfg - - template eth1Network(): auto = - metadata.eth1Network - - if not (isDir(config.databaseDir)): - # If database directory missing, we going to use genesis state to check - # for weak_subjectivity_period. - genesisState = - await fetchGenesisState(metadata, config.genesisState, config.genesisStateUrl) - let - genesisTime = getStateField(genesisState[], genesis_time) - beaconClock = BeaconClock.init(genesisTime).valueOr: - fatal "Invalid genesis time in genesis state", genesisTime - quit 1 - currentSlot = beaconClock.now().slotOrZero() - checkpoint = Checkpoint( - epoch: epoch(getStateField(genesisState[], slot)), - root: getStateField(genesisState[], latest_block_header).state_root, - ) - # adapted from nimbus-eth2 - # if config.longRangeSync == LongRangeSyncMode.Light: - # if not is_within_weak_subjectivity_period(metadata.cfg, currentSlot, - # genesisState[], checkpoint): - # fatal WeakSubjectivityLogMessage, current_slot = currentSlot - # quit 1 - - try: - if config.numThreads < 0: - fatal "The number of threads --numThreads cannot be negative." 
- quit 1 - elif config.numThreads == 0: - taskpool = TaskPoolPtr.new(numThreads = min(countProcessors(), 16)) - else: - taskpool = TaskPoolPtr.new(numThreads = config.numThreads) - - info "Threadpool started", numThreads = taskpool.numThreads - except Exception: - raise newException(Defect, "Failure in taskpool initialization.") - - if metadata.genesis.kind == BakedIn: - if config.genesisState.isSome: - warn "The --genesis-state option has no effect on networks with built-in genesis state" - - if config.genesisStateUrl.isSome: - warn "The --genesis-state-url option has no effect on networks with built-in genesis state" - - let - eventBus = EventBus( - headQueue: newAsyncEventQueue[HeadChangeInfoObject](), - blocksQueue: newAsyncEventQueue[EventBeaconBlockObject](), - attestQueue: newAsyncEventQueue[phase0.Attestation](), - exitQueue: newAsyncEventQueue[SignedVoluntaryExit](), - blsToExecQueue: newAsyncEventQueue[SignedBLSToExecutionChange](), - propSlashQueue: newAsyncEventQueue[ProposerSlashing](), - attSlashQueue: newAsyncEventQueue[AttesterSlashing](), - blobSidecarQueue: newAsyncEventQueue[BlobSidecarInfoObject](), - finalQueue: newAsyncEventQueue[FinalizationInfoObject](), - reorgQueue: newAsyncEventQueue[ReorgInfoObject](), - contribQueue: newAsyncEventQueue[SignedContributionAndProof](), - finUpdateQueue: - newAsyncEventQueue[RestVersioned[ForkedLightClientFinalityUpdate]](), - optUpdateQueue: - newAsyncEventQueue[RestVersioned[ForkedLightClientOptimisticUpdate]](), - ) - db = BeaconChainDB.new(config.databaseDir, cfg, inMemory = false) - - if config.externalBeaconApiUrl.isSome and ChainDAGRef.isInitialized(db).isErr: - let trustedBlockRoot = - if config.trustedStateRoot.isSome or config.trustedBlockRoot.isSome: - config.trustedBlockRoot - elif cfg.ALTAIR_FORK_EPOCH == GENESIS_EPOCH: - # Sync can be bootstrapped from the genesis block root - if genesisState.isNil: - genesisState = await fetchGenesisState( - metadata, config.genesisState, config.genesisStateUrl - ) - if not genesisState.isNil: - let genesisBlockRoot = get_initial_beacon_block(genesisState[]).root - notice "Neither `--trusted-block-root` nor `--trusted-state-root` " & - "provided with `--external-beacon-api-url`, " & - "falling back to genesis block root", - externalBeaconApiUrl = config.externalBeaconApiUrl.get, - trustedBlockRoot = config.trustedBlockRoot, - trustedStateRoot = config.trustedStateRoot, - genesisBlockRoot = $genesisBlockRoot - some genesisBlockRoot - else: - none[Eth2Digest]() - else: - none[Eth2Digest]() - if config.trustedStateRoot.isNone and trustedBlockRoot.isNone: - warn "Ignoring `--external-beacon-api-url`, neither " & - "`--trusted-block-root` nor `--trusted-state-root` provided", - externalBeaconApiUrl = config.externalBeaconApiUrl.get, - trustedBlockRoot = config.trustedBlockRoot, - trustedStateRoot = config.trustedStateRoot - else: - if genesisState.isNil: - genesisState = - await fetchGenesisState(metadata, config.genesisState, config.genesisStateUrl) - await db.doRunTrustedNodeSync( - metadata, - config.databaseDir, - config.eraDir, - config.externalBeaconApiUrl.get, - config.trustedStateRoot.map do(x: Eth2Digest) -> string: - "0x" & x.data.toHex, - trustedBlockRoot, - backfill = false, - reindex = false, - downloadDepositSnapshot = false, - genesisState, - ) - - if config.finalizedCheckpointBlock.isSome: - warn "--finalized-checkpoint-block has been deprecated, ignoring" - - let checkpointState = - if config.finalizedCheckpointState.isSome: - let checkpointStatePath = 
config.finalizedCheckpointState.get.string - let tmp = - try: - newClone( - readSszForkedHashedBeaconState( - cfg, readAllBytes(checkpointStatePath).tryGet() - ) - ) - except SszError as err: - fatal "Checkpoint state loading failed", - err = formatMsg(err, checkpointStatePath) - quit 1 - except CatchableError as err: - fatal "Failed to read checkpoint state file", err = err.msg - quit 1 - - if not getStateField(tmp[], slot).is_epoch: - fatal "--finalized-checkpoint-state must point to a state for an epoch slot", - slot = getStateField(tmp[], slot) - quit 1 - tmp - else: - nil - - if config.finalizedDepositTreeSnapshot.isSome: - let - depositTreeSnapshotPath = config.finalizedDepositTreeSnapshot.get.string - snapshot = - try: - SSZ.loadFile(depositTreeSnapshotPath, DepositTreeSnapshot) - except SszError as err: - fatal "Deposit tree snapshot loading failed", - err = formatMsg(err, depositTreeSnapshotPath) - quit 1 - except CatchableError as err: - fatal "Failed to read deposit tree snapshot file", err = err.msg - quit 1 - depositContractSnapshot = DepositContractSnapshot.init(snapshot).valueOr: - fatal "Invalid deposit tree snapshot file" - quit 1 - db.putDepositContractSnapshot(depositContractSnapshot) - - let engineApiUrls = config.engineApiUrls - - if engineApiUrls.len == 0: - notice "Running without execution client - validator features disabled (see https://nimbus.guide/eth1.html)" - - var networkGenesisValidatorsRoot = metadata.bakedGenesisValidatorsRoot - - if not ChainDAGRef.isInitialized(db).isOk(): - genesisState = - if not checkpointState.isNil and getStateField(checkpointState[], slot) == 0: - checkpointState - else: - if genesisState.isNil: - await fetchGenesisState(metadata, config.genesisState, config.genesisStateUrl) - else: - genesisState - - if genesisState.isNil and checkpointState.isNil: - fatal "No database and no genesis snapshot found. 
Please supply a genesis.ssz " & - "with the network configuration" - quit 1 - - if not genesisState.isNil and not checkpointState.isNil: - if getStateField(genesisState[], genesis_validators_root) != - getStateField(checkpointState[], genesis_validators_root): - fatal "Checkpoint state does not match genesis - check the --network parameter", - rootFromGenesis = getStateField(genesisState[], genesis_validators_root), - rootFromCheckpoint = getStateField(checkpointState[], genesis_validators_root) - quit 1 - - try: - # Always store genesis state if we have it - this allows reindexing and - # answering genesis queries - if not genesisState.isNil: - ChainDAGRef.preInit(db, genesisState[]) - networkGenesisValidatorsRoot = - Opt.some(getStateField(genesisState[], genesis_validators_root)) - - if not checkpointState.isNil: - if genesisState.isNil or getStateField(checkpointState[], slot) != GENESIS_SLOT: - ChainDAGRef.preInit(db, checkpointState[]) - - doAssert ChainDAGRef.isInitialized(db).isOk(), - "preInit should have initialized db" - except CatchableError as exc: - error "Failed to initialize database", err = exc.msg - quit 1 - else: - if not checkpointState.isNil: - fatal "A database already exists, cannot start from given checkpoint", - dataDir = config.dataDir - quit 1 - - # Doesn't use std/random directly, but dependencies might - randomize(rng[].rand(high(int))) - - # The validatorMonitorTotals flag has been deprecated and should eventually be - # removed - until then, it's given priority if set so as not to needlessly - # break existing setups - let validatorMonitor = newClone( - ValidatorMonitor.init( - config.validatorMonitorAuto, - config.validatorMonitorTotals.get(not config.validatorMonitorDetails), - ) - ) - - for key in config.validatorMonitorPubkeys: - validatorMonitor[].addMonitor(key, Opt.none(ValidatorIndex)) - - let - dag = loadChainDag( - config, cfg, db, eventBus, validatorMonitor, networkGenesisValidatorsRoot - ) - genesisTime = getStateField(dag.headState, genesis_time) - beaconClock = BeaconClock.init(genesisTime).valueOr: - fatal "Invalid genesis time in state", genesisTime - quit 1 - - getBeaconTime = beaconClock.getBeaconTimeFn() - - if config.weakSubjectivityCheckpoint.isSome: - dag.checkWeakSubjectivityCheckpoint( - config.weakSubjectivityCheckpoint.get, beaconClock - ) - - let elManager = ELManager.new( - cfg, metadata.depositContractBlock, metadata.depositContractBlockHash, db, - engineApiUrls, eth1Network, - ) - - if config.rpcEnabled.isSome: - warn "Nimbus's JSON-RPC server has been removed. This includes the --rpc, --rpc-port, and --rpc-address configuration options. https://nimbus.guide/rest-api.html shows how to enable and configure the REST Beacon API server which replaces it." 
- - let restServer = - if config.restEnabled: - RestServerRef.init( - config.restAddress, config.restPort, config.restAllowedOrigin, - validateBeaconApiQueries, nimbusAgentStr, config, - ) - else: - nil - - let - netKeys = getPersistentNetKeys(rng[], config) - nickname = - if config.nodeName == "auto": - shortForm(netKeys) - else: - config.nodeName - network = createEth2Node( - rng, - config, - netKeys, - cfg, - dag.forkDigests, - getBeaconTime, - getStateField(dag.headState, genesis_validators_root), - ) - - case config.slashingDbKind - of SlashingDbKind.v2: - discard - of SlashingDbKind.v1: - error "Slashing DB v1 is no longer supported for writing" - quit 1 - of SlashingDbKind.both: - warn "Slashing DB v1 deprecated, writing only v2" - - info "Loading slashing protection database (v2)", path = config.validatorsDir() - - proc getValidatorAndIdx(pubkey: ValidatorPubKey): Opt[ValidatorAndIndex] = - withState(dag.headState): - getValidator(forkyState().data.validators.asSeq(), pubkey) - - func getCapellaForkVersion(): Opt[Version] = - Opt.some(cfg.CAPELLA_FORK_VERSION) - - func getDenebForkEpoch(): Opt[Epoch] = - Opt.some(cfg.DENEB_FORK_EPOCH) - - proc getForkForEpoch(epoch: Epoch): Opt[Fork] = - Opt.some(dag.forkAtEpoch(epoch)) - - proc getGenesisRoot(): Eth2Digest = - getStateField(dag.headState, genesis_validators_root) - - let - keystoreCache = KeystoreCacheRef.init() - slashingProtectionDB = SlashingProtectionDB.init( - getStateField(dag.headState, genesis_validators_root), - config.validatorsDir(), - SlashingDbName, - ) - validatorPool = - newClone(ValidatorPool.init(slashingProtectionDB, config.doppelgangerDetection)) - - keymanagerInitResult = initKeymanagerServer(config, restServer) - keymanagerHost = - if keymanagerInitResult.server != nil: - newClone KeymanagerHost.init( - validatorPool, keystoreCache, rng, keymanagerInitResult.token, - config.validatorsDir, config.secretsDir, config.defaultFeeRecipient, - config.suggestedGasLimit, config.defaultGraffitiBytes, - config.getPayloadBuilderAddress, getValidatorAndIdx, getBeaconTime, - getCapellaForkVersion, getDenebForkEpoch, getForkForEpoch, getGenesisRoot, - ) - else: - nil - - stateTtlCache = - if config.restCacheSize > 0: - StateTtlCache.init( - cacheSize = config.restCacheSize, - cacheTtl = chronos.seconds(config.restCacheTtl), - ) - else: - nil - - if config.payloadBuilderEnable: - info "Using external payload builder", payloadBuilderUrl = config.payloadBuilderUrl - - let node = BeaconNode( - nickname: nickname, - graffitiBytes: - if config.graffiti.isSome: - config.graffiti.get - else: - defaultGraffitiBytes(), - network: network, - netKeys: netKeys, - db: db, - config: config, - attachedValidators: validatorPool, - elManager: elManager, - restServer: restServer, - keymanagerHost: keymanagerHost, - keymanagerServer: keymanagerInitResult.server, - keystoreCache: keystoreCache, - eventBus: eventBus, - gossipState: {}, - blocksGossipState: {}, - beaconClock: beaconClock, - validatorMonitor: validatorMonitor, - stateTtlCache: stateTtlCache, - shutdownEvent: newAsyncEvent(), - dynamicFeeRecipientsStore: newClone(DynamicFeeRecipientsStore.init()), - ) - - # TODO: we are initializing the light client given that it has a function - # to validate if the sync should be done optimistically or not, and it used - # along beacon node - node.initLightClient( - rng, cfg, dag.forkDigests, getBeaconTime, dag.genesis_validators_root - ) - - await node.initFullNode(rng, dag, taskpool, getBeaconTime) - - node.updateLightClientFromDag() - - node - 
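Aside: the node initialization removed above repeatedly converts wall-clock time into slots and epochs via the `BeaconClock` built from the genesis time in the state (`beaconClock.now().slotOrZero()`, `.epoch()`). A minimal, self-contained sketch of that conversion, assuming the mainnet preset values SECONDS_PER_SLOT = 12 and SLOTS_PER_EPOCH = 32 and a hypothetical `wallSlot` helper (not the nimbus-eth2 `BeaconClock` API), is:

import std/times

const
  SECONDS_PER_SLOT = 12'u64  # mainnet preset; the real client takes this from its runtime config
  SLOTS_PER_EPOCH = 32'u64   # mainnet preset

proc wallSlot(genesisTime: uint64): uint64 =
  ## Slots elapsed since genesis, clamped to 0 before genesis (cf. slotOrZero).
  let now = getTime().toUnix().uint64
  if now < genesisTime: 0'u64
  else: (now - genesisTime) div SECONDS_PER_SLOT

proc epochOf(slot: uint64): uint64 =
  ## Epoch containing the given slot.
  slot div SLOTS_PER_EPOCH

when isMainModule:
  # 1606824023 is the mainnet beacon chain genesis time, used here purely as an example value.
  let genesisTime = 1606824023'u64
  echo "wall slot ", wallSlot(genesisTime), ", epoch ", epochOf(wallSlot(genesisTime))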
-proc installMessageValidators(node: BeaconNode) = - # These validators stay around the whole time, regardless of which specific - # subnets are subscribed to during any given epoch. - let forkDigests = node.dag.forkDigests - - for fork in ConsensusFork: - withConsensusFork(fork): - let digest = forkDigests[].atConsensusFork(consensusFork) - - # beacon_block - # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/specs/phase0/p2p-interface.md#beacon_block - node.network.addValidator( - getBeaconBlocksTopic(digest), - proc(signedBlock: consensusFork.SignedBeaconBlock): ValidationResult = - if node.shouldSyncOptimistically(node.currentSlot): - toValidationResult( - node.optimisticProcessor.processSignedBeaconBlock(signedBlock) - ) - else: - toValidationResult( - node.processor[].processSignedBeaconBlock(MsgSource.gossip, signedBlock) - ), - ) - - # beacon_attestation_{subnet_id} - # https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.5/specs/phase0/p2p-interface.md#beacon_attestation_subnet_id - when consensusFork >= ConsensusFork.Electra: - for it in SubnetId: - closureScope: - let subnet_id = it - node.network.addAsyncValidator( - getAttestationTopic(digest, subnet_id), - proc( - attestation: electra.Attestation - ): Future[ValidationResult] {.async: (raises: [CancelledError]).} = - return toValidationResult( - await node.processor.processAttestation( - MsgSource.gossip, - attestation, - subnet_id, - checkSignature = true, - checkValidator = false, - ) - ), - ) - else: - for it in SubnetId: - closureScope: - let subnet_id = it - node.network.addAsyncValidator( - getAttestationTopic(digest, subnet_id), - proc( - attestation: phase0.Attestation - ): Future[ValidationResult] {.async: (raises: [CancelledError]).} = - return toValidationResult( - await node.processor.processAttestation( - MsgSource.gossip, - attestation, - subnet_id, - checkSignature = true, - checkValidator = false, - ) - ), - ) - - # beacon_aggregate_and_proof - # https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/phase0/p2p-interface.md#beacon_aggregate_and_proof - when consensusFork >= ConsensusFork.Electra: - node.network.addAsyncValidator( - getAggregateAndProofsTopic(digest), - proc( - signedAggregateAndProof: electra.SignedAggregateAndProof - ): Future[ValidationResult] {.async: (raises: [CancelledError]).} = - return toValidationResult( - await node.processor.processSignedAggregateAndProof( - MsgSource.gossip, signedAggregateAndProof - ) - ), - ) - else: - node.network.addAsyncValidator( - getAggregateAndProofsTopic(digest), - proc( - signedAggregateAndProof: phase0.SignedAggregateAndProof - ): Future[ValidationResult] {.async: (raises: [CancelledError]).} = - return toValidationResult( - await node.processor.processSignedAggregateAndProof( - MsgSource.gossip, signedAggregateAndProof - ) - ), - ) - - # attester_slashing - # https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.5/specs/phase0/p2p-interface.md#attester_slashing - # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.6/specs/electra/p2p-interface.md#modifications-in-electra - when consensusFork >= ConsensusFork.Electra: - node.network.addValidator( - getAttesterSlashingsTopic(digest), - proc(attesterSlashing: electra.AttesterSlashing): ValidationResult = - toValidationResult( - node.processor[].processAttesterSlashing( - MsgSource.gossip, attesterSlashing - ) - ), - ) - else: - node.network.addValidator( - getAttesterSlashingsTopic(digest), - proc(attesterSlashing: phase0.AttesterSlashing): 
ValidationResult = - toValidationResult( - node.processor[].processAttesterSlashing( - MsgSource.gossip, attesterSlashing - ) - ), - ) - - # proposer_slashing - # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/specs/phase0/p2p-interface.md#proposer_slashing - node.network.addValidator( - getProposerSlashingsTopic(digest), - proc(proposerSlashing: ProposerSlashing): ValidationResult = - toValidationResult( - node.processor[].processProposerSlashing(MsgSource.gossip, proposerSlashing) - ), - ) - - # voluntary_exit - # https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.5/specs/phase0/p2p-interface.md#voluntary_exit - node.network.addValidator( - getVoluntaryExitsTopic(digest), - proc(signedVoluntaryExit: SignedVoluntaryExit): ValidationResult = - toValidationResult( - node.processor[].processSignedVoluntaryExit( - MsgSource.gossip, signedVoluntaryExit - ) - ), - ) - - when consensusFork >= ConsensusFork.Altair: - # sync_committee_{subnet_id} - # https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/altair/p2p-interface.md#sync_committee_subnet_id - for subcommitteeIdx in SyncSubcommitteeIndex: - closureScope: - let idx = subcommitteeIdx - node.network.addAsyncValidator( - getSyncCommitteeTopic(digest, idx), - proc( - msg: SyncCommitteeMessage - ): Future[ValidationResult] {.async: (raises: [CancelledError]).} = - return toValidationResult( - await node.processor.processSyncCommitteeMessage( - MsgSource.gossip, msg, idx - ) - ), - ) - - # sync_committee_contribution_and_proof - # https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/altair/p2p-interface.md#sync_committee_contribution_and_proof - node.network.addAsyncValidator( - getSyncCommitteeContributionAndProofTopic(digest), - proc( - msg: SignedContributionAndProof - ): Future[ValidationResult] {.async: (raises: [CancelledError]).} = - return toValidationResult( - await node.processor.processSignedContributionAndProof( - MsgSource.gossip, msg - ) - ), - ) - - when consensusFork >= ConsensusFork.Capella: - # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/specs/capella/p2p-interface.md#bls_to_execution_change - node.network.addAsyncValidator( - getBlsToExecutionChangeTopic(digest), - proc( - msg: SignedBLSToExecutionChange - ): Future[ValidationResult] {.async: (raises: [CancelledError]).} = - return toValidationResult( - await node.processor.processBlsToExecutionChange(MsgSource.gossip, msg) - ), - ) - - when consensusFork >= ConsensusFork.Deneb: - # blob_sidecar_{subnet_id} - # https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.5/specs/deneb/p2p-interface.md#blob_sidecar_subnet_id - for it in BlobId: - closureScope: - let subnet_id = it - node.network.addValidator( - getBlobSidecarTopic(digest, subnet_id), - proc(blobSidecar: deneb.BlobSidecar): ValidationResult = - toValidationResult( - node.processor[].processBlobSidecar( - MsgSource.gossip, blobSidecar, subnet_id - ) - ), - ) - - # node.installLightClientMessageValidators() - -proc checkWeakSubjectivityCheckpoint( - dag: ChainDAGRef, wsCheckpoint: Checkpoint, beaconClock: BeaconClock -) = - let - currentSlot = beaconClock.now.slotOrZero - isCheckpointStale = - not is_within_weak_subjectivity_period( - dag.cfg, currentSlot, dag.headState, wsCheckpoint - ) - - if isCheckpointStale: - error "Weak subjectivity checkpoint is stale", - currentSlot, - checkpoint = wsCheckpoint, - headStateSlot = getStateField(dag.headState, slot) - quit 1 - -proc fetchGenesisState( - metadata: Eth2NetworkMetadata, - genesisState = 
none(InputFile), - genesisStateUrl = none(Uri), -): Future[ref ForkedHashedBeaconState] {.async: (raises: []).} = - let genesisBytes = - if metadata.genesis.kind != BakedIn and genesisState.isSome: - let res = io2.readAllBytes(genesisState.get.string) - res.valueOr: - error "Failed to read genesis state file", err = res.error.ioErrorMsg - quit 1 - elif metadata.hasGenesis: - try: - if metadata.genesis.kind == BakedInUrl: - info "Obtaining genesis state", - sourceUrl = $genesisStateUrl.get(parseUri metadata.genesis.url) - await metadata.fetchGenesisBytes(genesisStateUrl) - except CatchableError as err: - error "Failed to obtain genesis state", - source = metadata.genesis.sourceDesc, err = err.msg - quit 1 - else: - @[] - - if genesisBytes.len > 0: - try: - newClone readSszForkedHashedBeaconState(metadata.cfg, genesisBytes) - except CatchableError as err: - error "Invalid genesis state", - size = genesisBytes.len, digest = eth2digest(genesisBytes), err = err.msg - quit 1 - else: - nil - -proc pruneBlobs(node: BeaconNode, slot: Slot) = - let blobPruneEpoch = - (slot.epoch - node.dag.cfg.MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS - 1) - if slot.is_epoch() and blobPruneEpoch >= node.dag.cfg.DENEB_FORK_EPOCH: - var blocks: array[SLOTS_PER_EPOCH.int, BlockId] - var count = 0 - let startIndex = node.dag.getBlockRange( - blobPruneEpoch.start_slot, 1, blocks.toOpenArray(0, SLOTS_PER_EPOCH - 1) - ) - for i in startIndex ..< SLOTS_PER_EPOCH: - let blck = node.dag.getForkedBlock(blocks[int(i)]).valueOr: - continue - withBlck(blck): - when typeof(forkyBlck).kind < ConsensusFork.Deneb: - continue - else: - for j in 0 .. len(forkyBlck.message.body.blob_kzg_commitments) - 1: - if node.db.delBlobSidecar(blocks[int(i)].root, BlobIndex(j)): - count = count + 1 - debug "pruned blobs", count, blobPruneEpoch - -proc maybeUpdateActionTrackerNextEpoch( - node: BeaconNode, forkyState: ForkyHashedBeaconState, nextEpoch: Epoch -) = - if node.consensusManager[].actionTracker.needsUpdate(forkyState, nextEpoch): - template epochRefFallback() = - let epochRef = node.dag.getEpochRef(node.dag.head, nextEpoch, false).expect( - "Getting head EpochRef should never fail" - ) - node.consensusManager[].actionTracker.updateActions( - epochRef.shufflingRef, epochRef.beacon_proposers - ) - - when forkyState is phase0.HashedBeaconState: - # The previous_epoch_participation-based logic requires Altair or newer - epochRefFallback() - else: - let - shufflingRef = node.dag.getShufflingRef(node.dag.head, nextEpoch, false).valueOr: - # epochRefFallback() won't work in this case either - return - nextEpochProposers = get_beacon_proposer_indices( - forkyState.data, shufflingRef.shuffled_active_validator_indices, nextEpoch - ) - nextEpochFirstProposer = nextEpochProposers[0].valueOr: - # All proposers except the first can be more straightforwardly and - # efficiently (re)computed correctly once in that epoch. - epochRefFallback() - return - - # Has to account for potential epoch transition TIMELY_SOURCE_FLAG_INDEX, - # TIMELY_TARGET_FLAG_INDEX, and inactivity penalties, resulting from spec - # functions get_flag_index_deltas() and get_inactivity_penalty_deltas(). - # - # There are no penalties associated with TIMELY_HEAD_FLAG_INDEX, but a - # reward exists. effective_balance == MAX_EFFECTIVE_BALANCE.Gwei ensures - # if even so, then the effective balance cannot change as a result. 
- # - # It's not truly necessary to avoid all rewards and penalties, but only - # to bound them to ensure they won't unexpected alter effective balance - # during the upcoming epoch transition. - # - # During genesis epoch, the check for epoch participation is against - # current, not previous, epoch, and therefore there's a possibility of - # checking for if a validator has participated in an epoch before it will - # happen. - # - # Because process_rewards_and_penalties() in epoch processing happens - # before the current/previous participation swap, previous is correct - # even here, and consistent with what the epoch transition uses. - # - # Whilst slashing, proposal, and sync committee rewards and penalties do - # update the balances as they occur, they don't update effective_balance - # until the end of epoch, so detect via effective_balance_might_update. - # - # On EF mainnet epoch 233906, this matches 99.5% of active validators; - # with Holesky epoch 2041, 83% of active validators. - let - participation_flags = - forkyState.data.previous_epoch_participation.item(nextEpochFirstProposer) - effective_balance = - forkyState.data.validators.item(nextEpochFirstProposer).effective_balance - - if participation_flags.has_flag(TIMELY_SOURCE_FLAG_INDEX) and - participation_flags.has_flag(TIMELY_TARGET_FLAG_INDEX) and - effective_balance == MAX_EFFECTIVE_BALANCE.Gwei and - forkyState.data.slot.epoch != GENESIS_EPOCH and - forkyState.data.inactivity_scores.item(nextEpochFirstProposer) == 0 and - not effective_balance_might_update( - forkyState.data.balances.item(nextEpochFirstProposer), effective_balance - ): - node.consensusManager[].actionTracker.updateActions( - shufflingRef, nextEpochProposers - ) - else: - epochRefFallback() - -func hasSyncPubKey(node: BeaconNode, epoch: Epoch): auto = - # Only used to determine which gossip topics to which to subscribe - if node.config.subscribeAllSubnets: - ( - func (pubkey: ValidatorPubKey): bool {.closure.} = - true - ) - else: - ( - func (pubkey: ValidatorPubKey): bool = - node.consensusManager[].actionTracker.hasSyncDuty(pubkey, epoch) or - pubkey in node.attachedValidators[].validators - ) - -func getCurrentSyncCommiteeSubnets(node: BeaconNode, epoch: Epoch): SyncnetBits = - let syncCommittee = withState(node.dag.headState): - when consensusFork >= ConsensusFork.Altair: - forkyState.data.current_sync_committee - else: - return static(default(SyncnetBits)) - - getSyncSubnets(node.hasSyncPubKey(epoch), syncCommittee) - -func getNextSyncCommitteeSubnets(node: BeaconNode, epoch: Epoch): SyncnetBits = - let syncCommittee = withState(node.dag.headState): - when consensusFork >= ConsensusFork.Altair: - forkyState.data.next_sync_committee - else: - return static(default(SyncnetBits)) - - getSyncSubnets( - node.hasSyncPubKey((epoch.sync_committee_period + 1).start_slot().epoch), - syncCommittee, - ) - -func getSyncCommitteeSubnets(node: BeaconNode, epoch: Epoch): SyncnetBits = - let - subnets = node.getCurrentSyncCommiteeSubnets(epoch) - epochsToSyncPeriod = nearSyncCommitteePeriod(epoch) - - # The end-slot tracker might call this when it's theoretically applicable, - # but more than SYNC_COMMITTEE_SUBNET_COUNT epochs from when the next sync - # committee period begins, in which case `epochsToNextSyncPeriod` is none. 
- if epochsToSyncPeriod.isNone or - node.dag.cfg.consensusForkAtEpoch(epoch + epochsToSyncPeriod.get) < - ConsensusFork.Altair: - return subnets - - subnets + node.getNextSyncCommitteeSubnets(epoch) - -func forkDigests(node: BeaconNode): auto = - let forkDigestsArray: array[ConsensusFork, auto] = [ - node.dag.forkDigests.phase0, node.dag.forkDigests.altair, - node.dag.forkDigests.bellatrix, node.dag.forkDigests.capella, - node.dag.forkDigests.deneb, node.dag.forkDigests.electra, - ] - forkDigestsArray - -proc updateSyncCommitteeTopics(node: BeaconNode, slot: Slot) = - template lastSyncUpdate(): untyped = - node.consensusManager[].actionTracker.lastSyncUpdate - - if lastSyncUpdate == Opt.some(slot.sync_committee_period()) and - nearSyncCommitteePeriod(slot.epoch).isNone(): - # No need to update unless we're close to the next sync committee period or - # new validators were registered with the action tracker - # TODO we _could_ skip running this in some of the "near" slots, but.. - return - - lastSyncUpdate = Opt.some(slot.sync_committee_period()) - - let syncnets = node.getSyncCommitteeSubnets(slot.epoch) - - debug "Updating sync committee subnets", - syncnets, - metadata_syncnets = node.network.metadata.syncnets, - gossipState = node.gossipState - - # Assume that different gossip fork sync committee setups are in sync; this - # only remains relevant, currently, for one gossip transition epoch, so the - # consequences of this not being true aren't exceptionally dire, while this - # allows for bookkeeping simplication. - if syncnets == node.network.metadata.syncnets: - return - - let - newSyncnets = syncnets - node.network.metadata.syncnets - oldSyncnets = node.network.metadata.syncnets - syncnets - forkDigests = node.forkDigests() - - for subcommitteeIdx in SyncSubcommitteeIndex: - doAssert not (newSyncnets[subcommitteeIdx] and oldSyncnets[subcommitteeIdx]) - for gossipFork in node.gossipState: - template topic(): auto = - getSyncCommitteeTopic(forkDigests[gossipFork], subcommitteeIdx) - - if oldSyncnets[subcommitteeIdx]: - node.network.unsubscribe(topic) - elif newSyncnets[subcommitteeIdx]: - node.network.subscribe(topic, basicParams) - - node.network.updateSyncnetsMetadata(syncnets) - -proc removePhase0MessageHandlers(node: BeaconNode, forkDigest: ForkDigest) = - node.network.unsubscribe(getVoluntaryExitsTopic(forkDigest)) - node.network.unsubscribe(getProposerSlashingsTopic(forkDigest)) - node.network.unsubscribe(getAttesterSlashingsTopic(forkDigest)) - node.network.unsubscribe(getAggregateAndProofsTopic(forkDigest)) - - for subnet_id in SubnetId: - node.network.unsubscribe(getAttestationTopic(forkDigest, subnet_id)) - - node.consensusManager[].actionTracker.subscribedSubnets = default(AttnetBits) - -# updateAttestationSubnetHandlers subscribes attestation subnets -proc addPhase0MessageHandlers(node: BeaconNode, forkDigest: ForkDigest, slot: Slot) = - node.network.subscribe(getAttesterSlashingsTopic(forkDigest), basicParams) - node.network.subscribe(getProposerSlashingsTopic(forkDigest), basicParams) - node.network.subscribe(getVoluntaryExitsTopic(forkDigest), basicParams) - node.network.subscribe( - getAggregateAndProofsTopic(forkDigest), - aggregateTopicParams, - enableTopicMetrics = true, - ) - -proc addAltairMessageHandlers(node: BeaconNode, forkDigest: ForkDigest, slot: Slot) = - node.addPhase0MessageHandlers(forkDigest, slot) - - # If this comes online near sync committee period, it'll immediately get - # replaced as usual by trackSyncCommitteeTopics, which runs at slot end. 
- let syncnets = node.getSyncCommitteeSubnets(slot.epoch) - - for subcommitteeIdx in SyncSubcommitteeIndex: - if syncnets[subcommitteeIdx]: - node.network.subscribe( - getSyncCommitteeTopic(forkDigest, subcommitteeIdx), basicParams - ) - - node.network.subscribe( - getSyncCommitteeContributionAndProofTopic(forkDigest), basicParams - ) - - node.network.updateSyncnetsMetadata(syncnets) - -proc addCapellaMessageHandlers(node: BeaconNode, forkDigest: ForkDigest, slot: Slot) = - node.addAltairMessageHandlers(forkDigest, slot) - node.network.subscribe(getBlsToExecutionChangeTopic(forkDigest), basicParams) - -proc addDenebMessageHandlers(node: BeaconNode, forkDigest: ForkDigest, slot: Slot) = - node.addCapellaMessageHandlers(forkDigest, slot) - for topic in blobSidecarTopics(forkDigest): - node.network.subscribe(topic, basicParams) - -proc addElectraMessageHandlers(node: BeaconNode, forkDigest: ForkDigest, slot: Slot) = - node.addDenebMessageHandlers(forkDigest, slot) - -proc removeAltairMessageHandlers(node: BeaconNode, forkDigest: ForkDigest) = - node.removePhase0MessageHandlers(forkDigest) - - for subcommitteeIdx in SyncSubcommitteeIndex: - closureScope: - let idx = subcommitteeIdx - node.network.unsubscribe(getSyncCommitteeTopic(forkDigest, idx)) - - node.network.unsubscribe(getSyncCommitteeContributionAndProofTopic(forkDigest)) - -proc removeCapellaMessageHandlers(node: BeaconNode, forkDigest: ForkDigest) = - node.removeAltairMessageHandlers(forkDigest) - node.network.unsubscribe(getBlsToExecutionChangeTopic(forkDigest)) - -proc removeDenebMessageHandlers(node: BeaconNode, forkDigest: ForkDigest) = - node.removeCapellaMessageHandlers(forkDigest) - for topic in blobSidecarTopics(forkDigest): - node.network.unsubscribe(topic) - -proc removeElectraMessageHandlers(node: BeaconNode, forkDigest: ForkDigest) = - node.removeDenebMessageHandlers(forkDigest) - -proc doppelgangerChecked(node: BeaconNode, epoch: Epoch) = - if not node.processor[].doppelgangerDetectionEnabled: - return - - # broadcastStartEpoch is set to FAR_FUTURE_EPOCH when we're not monitoring - # gossip - it is only viable to assert liveness in epochs where gossip is - # active - if epoch > node.processor[].doppelgangerDetection.broadcastStartEpoch: - for validator in node.attachedValidators[]: - validator.doppelgangerChecked(epoch - 1) - -proc updateBlocksGossipStatus*(node: BeaconNode, slot: Slot, dagIsBehind: bool) = - template cfg(): auto = - node.dag.cfg - - let - isBehind = - if node.shouldSyncOptimistically(slot): - # If optimistic sync is active, always subscribe to blocks gossip - false - else: - # Use DAG status to determine whether to subscribe for blocks gossip - dagIsBehind - - targetGossipState = getTargetGossipState( - slot.epoch, cfg.ALTAIR_FORK_EPOCH, cfg.BELLATRIX_FORK_EPOCH, - cfg.CAPELLA_FORK_EPOCH, cfg.DENEB_FORK_EPOCH, cfg.ELECTRA_FORK_EPOCH, isBehind, - ) - - template currentGossipState(): auto = - node.blocksGossipState - - if currentGossipState == targetGossipState: - return - - if currentGossipState.card == 0 and targetGossipState.card > 0: - debug "Enabling blocks topic subscriptions", wallSlot = slot, targetGossipState - elif currentGossipState.card > 0 and targetGossipState.card == 0: - debug "Disabling blocks topic subscriptions", wallSlot = slot - else: - # Individual forks added / removed - discard - - let - newGossipForks = targetGossipState - currentGossipState - oldGossipForks = currentGossipState - targetGossipState - - for gossipFork in oldGossipForks: - let forkDigest = 
node.dag.forkDigests[].atConsensusFork(gossipFork) - node.network.unsubscribe(getBeaconBlocksTopic(forkDigest)) - - for gossipFork in newGossipForks: - let forkDigest = node.dag.forkDigests[].atConsensusFork(gossipFork) - node.network.subscribe( - getBeaconBlocksTopic(forkDigest), blocksTopicParams, enableTopicMetrics = true - ) - - node.blocksGossipState = targetGossipState - -func subnetLog(v: BitArray): string = - $toSeq(v.oneIndices()) - -# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.5/specs/phase0/p2p-interface.md#attestation-subnet-subscription -proc updateAttestationSubnetHandlers(node: BeaconNode, slot: Slot) = - if node.gossipState.card == 0: - # When disconnected, updateBlocksGossipStatus is responsible for all things - # subnets - in particular, it will remove subscriptions on the edge where - # we enter the disconnected state. - return - - let - aggregateSubnets = node.consensusManager[].actionTracker.aggregateSubnets(slot) - stabilitySubnets = node.consensusManager[].actionTracker.stabilitySubnets(slot) - subnets = aggregateSubnets + stabilitySubnets - - node.network.updateStabilitySubnetMetadata(stabilitySubnets) - - # Now we know what we should be subscribed to - make it so - let - prevSubnets = node.consensusManager[].actionTracker.subscribedSubnets - unsubscribeSubnets = prevSubnets - subnets - subscribeSubnets = subnets - prevSubnets - - # Remember what we subscribed to, so we can unsubscribe later - node.consensusManager[].actionTracker.subscribedSubnets = subnets - - let forkDigests = node.forkDigests() - - for gossipFork in node.gossipState: - let forkDigest = forkDigests[gossipFork] - node.network.unsubscribeAttestationSubnets(unsubscribeSubnets, forkDigest) - node.network.subscribeAttestationSubnets(subscribeSubnets, forkDigest) - - debug "Attestation subnets", - slot, - epoch = slot.epoch, - gossipState = node.gossipState, - stabilitySubnets = subnetLog(stabilitySubnets), - aggregateSubnets = subnetLog(aggregateSubnets), - prevSubnets = subnetLog(prevSubnets), - subscribeSubnets = subnetLog(subscribeSubnets), - unsubscribeSubnets = subnetLog(unsubscribeSubnets), - gossipState = node.gossipState - -#TODO: overriden due to shadowing from -proc localUpdateGossipStatus(node: BeaconNode, slot: Slot) {.async.} = - ## Subscribe to subnets that we are providing stability for or aggregating - ## and unsubscribe from the ones that are no longer relevant. 
- - # Let the tracker know what duties are approaching - this will tell us how - # many stability subnets we need to be subscribed to and what subnets we'll - # soon be aggregating - in addition to the in-beacon-node duties, there may - # also be duties coming from the validator client, but we don't control when - # these arrive - await node.registerDuties(slot) - - # We start subscribing to gossip before we're fully synced - this allows time - # to subscribe before the sync end game - const - TOPIC_SUBSCRIBE_THRESHOLD_SLOTS = 64 - HYSTERESIS_BUFFER = 16 - - static: - doAssert high(ConsensusFork) == ConsensusFork.Electra - - let - head = node.dag.head - headDistance = - if slot > head.slot: - (slot - head.slot).uint64 - else: - 0'u64 - isBehind = headDistance > TOPIC_SUBSCRIBE_THRESHOLD_SLOTS + HYSTERESIS_BUFFER - targetGossipState = getTargetGossipState( - slot.epoch, node.dag.cfg.ALTAIR_FORK_EPOCH, node.dag.cfg.BELLATRIX_FORK_EPOCH, - node.dag.cfg.CAPELLA_FORK_EPOCH, node.dag.cfg.DENEB_FORK_EPOCH, - node.dag.cfg.ELECTRA_FORK_EPOCH, isBehind, - ) - - doAssert targetGossipState.card <= 2 - - let - newGossipForks = targetGossipState - node.gossipState - oldGossipForks = node.gossipState - targetGossipState - - doAssert newGossipForks.card <= 2 - doAssert oldGossipForks.card <= 2 - - func maxGossipFork(gossipState: GossipState): int = - var res = -1 - for gossipFork in gossipState: - res = max(res, gossipFork.int) - res - - if maxGossipFork(targetGossipState) < maxGossipFork(node.gossipState) and - targetGossipState != {}: - warn "Unexpected clock regression during transition", - targetGossipState, gossipState = node.gossipState - - if node.gossipState.card == 0 and targetGossipState.card > 0: - # We are synced, so we will connect - debug "Enabling topic subscriptions", - wallSlot = slot, headSlot = head.slot, headDistance, targetGossipState - - node.processor[].setupDoppelgangerDetection(slot) - - # Specially when waiting for genesis, we'll already be synced on startup - - # it might also happen on a sufficiently fast restart - - # We "know" the actions for the current and the next epoch - withState(node.dag.headState): - if node.consensusManager[].actionTracker.needsUpdate(forkyState, slot.epoch): - let epochRef = node.dag.getEpochRef(head, slot.epoch, false).expect( - "Getting head EpochRef should never fail" - ) - node.consensusManager[].actionTracker.updateActions( - epochRef.shufflingRef, epochRef.beacon_proposers - ) - - node.maybeUpdateActionTrackerNextEpoch(forkyState, slot.epoch + 1) - - if node.gossipState.card > 0 and targetGossipState.card == 0: - debug "Disabling topic subscriptions", - wallSlot = slot, headSlot = head.slot, headDistance - - node.processor[].clearDoppelgangerProtection() - - let forkDigests = node.forkDigests() - - const removeMessageHandlers: array[ConsensusFork, auto] = [ - removePhase0MessageHandlers, - removeAltairMessageHandlers, - removeAltairMessageHandlers, # bellatrix (altair handlers, different forkDigest) - removeCapellaMessageHandlers, - removeDenebMessageHandlers, - removeElectraMessageHandlers, - ] - - for gossipFork in oldGossipForks: - removeMessageHandlers[gossipFork](node, forkDigests[gossipFork]) - - const addMessageHandlers: array[ConsensusFork, auto] = [ - addPhase0MessageHandlers, - addAltairMessageHandlers, - addAltairMessageHandlers, # bellatrix (altair handlers, different forkDigest) - addCapellaMessageHandlers, - addDenebMessageHandlers, - addElectraMessageHandlers, - ] - - for gossipFork in newGossipForks: - 
addMessageHandlers[gossipFork](node, forkDigests[gossipFork], slot) - - node.gossipState = targetGossipState - node.doppelgangerChecked(slot.epoch) - node.updateAttestationSubnetHandlers(slot) - node.updateBlocksGossipStatus(slot, isBehind) - # node.updateLightClientGossipStatus(slot, isBehind) - -proc onSlotEnd(node: BeaconNode, slot: Slot) {.async.} = - # Things we do when slot processing has ended and we're about to wait for the - # next slot - - # By waiting until close before slot end, ensure that preparation for next - # slot does not interfere with propagation of messages and with VC duties. - const endOffset = - aggregateSlotOffset + - nanos((NANOSECONDS_PER_SLOT - aggregateSlotOffset.nanoseconds.uint64).int64 div 2) - let endCutoff = node.beaconClock.fromNow(slot.start_beacon_time + endOffset) - if endCutoff.inFuture: - debug "Waiting for slot end", slot, endCutoff = shortLog(endCutoff.offset) - await sleepAsync(endCutoff.offset) - - if node.dag.needStateCachesAndForkChoicePruning(): - if node.attachedValidators[].validators.len > 0: - node.attachedValidators[].slashingProtection - # pruning is only done if the DB is set to pruning mode. - .pruneAfterFinalization(node.dag.finalizedHead.slot.epoch()) - - # Delay part of pruning until latency critical duties are done. - # The other part of pruning, `pruneBlocksDAG`, is done eagerly. - # ---- - # This is the last pruning to do as it clears the "needPruning" condition. - node.consensusManager[].pruneStateCachesAndForkChoice() - - if node.config.historyMode == HistoryMode.Prune: - if not (slot + 1).is_epoch(): - # The epoch slot already is "heavy" due to the epoch processing, leave - # the pruning for later - node.dag.pruneHistory() - node.pruneBlobs(slot) - - when declared(GC_fullCollect): - # The slots in the beacon node work as frames in a game: we want to make - # sure that we're ready for the next one and don't get stuck in lengthy - # garbage collection tasks when time is of essence in the middle of a slot - - # while this does not guarantee that we'll never collect during a slot, it - # makes sure that all the scratch space we used during slot tasks (logging, - # temporary buffers etc) gets recycled for the next slot that is likely to - # need similar amounts of memory. - try: - GC_fullCollect() - except Defect as exc: - raise exc # Reraise to maintain call stack - except Exception: - # TODO upstream - raiseAssert "Unexpected exception during GC collection" - let gcCollectionTick = Moment.now() - - # Checkpoint the database to clear the WAL file and make sure changes in - # the database are synced with the filesystem. - node.db.checkpoint() - let - dbCheckpointTick = Moment.now() - dbCheckpointDur = dbCheckpointTick - gcCollectionTick - # db_checkpoint_seconds.inc(dbCheckpointDur.toFloatSeconds) - if dbCheckpointDur >= MinSignificantProcessingDuration: - info "Database checkpointed", dur = dbCheckpointDur - else: - debug "Database checkpointed", dur = dbCheckpointDur - - node.syncCommitteeMsgPool[].pruneData(slot) - if slot.is_epoch: - node.dynamicFeeRecipientsStore[].pruneOldMappings(slot.epoch) - - # Update upcoming actions - we do this every slot in case a reorg happens - let head = node.dag.head - if node.isSynced(head) and head.executionValid: - withState(node.dag.headState): - # maybeUpdateActionTrackerNextEpoch might not account for balance changes - # from the process_rewards_and_penalties() epoch transition but only from - # process_block() and other per-slot sources. 
This mainly matters insofar - # as it might trigger process_effective_balance_updates() changes in that - # same epoch transition, which function is therefore potentially blind to - # but which might then affect beacon proposers. - # - # Because this runs every slot, it can account naturally for slashings, - # which affect balances via slash_validator() when they happen, and any - # missed sync committee participation via process_sync_aggregate(), but - # attestation penalties for example, need, specific handling. - # checked by maybeUpdateActionTrackerNextEpoch. - node.maybeUpdateActionTrackerNextEpoch(forkyState, slot.epoch + 1) - - let - nextAttestationSlot = - node.consensusManager[].actionTracker.getNextAttestationSlot(slot) - nextProposalSlot = node.consensusManager[].actionTracker.getNextProposalSlot(slot) - nextActionSlot = min(nextAttestationSlot, nextProposalSlot) - nextActionWaitTime = saturate(fromNow(node.beaconClock, nextActionSlot)) - - # -1 is a more useful output than 18446744073709551615 as an indicator of - # no future attestation/proposal known. - template formatInt64(x: Slot): int64 = - if x == high(uint64).Slot: - -1'i64 - else: - toGaugeValue(x) - - let - syncCommitteeSlot = slot + 1 - syncCommitteeEpoch = syncCommitteeSlot.epoch - inCurrentSyncCommittee = - not node.getCurrentSyncCommiteeSubnets(syncCommitteeEpoch).isZeros() - - template formatSyncCommitteeStatus(): string = - if inCurrentSyncCommittee: - "current" - elif not node.getNextSyncCommitteeSubnets(syncCommitteeEpoch).isZeros(): - let slotsToNextSyncCommitteePeriod = - SLOTS_PER_SYNC_COMMITTEE_PERIOD - - since_sync_committee_period_start(syncCommitteeSlot) - # int64 conversion is safe - doAssert slotsToNextSyncCommitteePeriod <= SLOTS_PER_SYNC_COMMITTEE_PERIOD - "in " & - toTimeLeftString( - SECONDS_PER_SLOT.int64.seconds * slotsToNextSyncCommitteePeriod.int64 - ) - else: - "none" - - info "Slot end", - slot = shortLog(slot), - nextActionWait = - if nextActionSlot == FAR_FUTURE_SLOT: - "n/a" - else: - shortLog(nextActionWaitTime), - nextAttestationSlot = formatInt64(nextAttestationSlot), - nextProposalSlot = formatInt64(nextProposalSlot), - syncCommitteeDuties = formatSyncCommitteeStatus(), - head = shortLog(head) - - # if nextActionSlot != FAR_FUTURE_SLOT: - # next_action_wait.set(nextActionWaitTime.toFloatSeconds) - - # next_proposal_wait.set( - # if nextProposalSlot != FAR_FUTURE_SLOT: - # saturate(fromNow(node.beaconClock, nextProposalSlot)).toFloatSeconds() - # else: - # Inf) - - # sync_committee_active.set(if inCurrentSyncCommittee: 1 else: 0) - - let epoch = slot.epoch - if epoch + 1 >= node.network.forkId.next_fork_epoch: - # Update 1 epoch early to block non-fork-ready peers - node.network.updateForkId(epoch, node.dag.genesis_validators_root) - - # When we're not behind schedule, we'll speculatively update the clearance - # state in anticipation of receiving the next block - we do it after - # logging slot end since the nextActionWaitTime can be short - let advanceCutoff = node.beaconClock.fromNow( - slot.start_beacon_time() + chronos.seconds(int(SECONDS_PER_SLOT - 1)) - ) - if advanceCutoff.inFuture: - # We wait until there's only a second left before the next slot begins, then - # we advance the clearance state to the next slot - this gives us a high - # probability of being prepared for the block that will arrive and the - # epoch processing that follows - await sleepAsync(advanceCutoff.offset) - node.dag.advanceClearanceState() - - # Prepare action tracker for the next slot - 
node.consensusManager[].actionTracker.updateSlot(slot + 1) - - # The last thing we do is to perform the subscriptions and unsubscriptions for - # the next slot, just before that slot starts - because of the advance cuttoff - # above, this will be done just before the next slot starts - node.updateSyncCommitteeTopics(slot + 1) - - await node.localUpdateGossipStatus(slot + 1) - -func formatNextConsensusFork(node: BeaconNode, withVanityArt = false): Opt[string] = - let consensusFork = node.dag.cfg.consensusForkAtEpoch(node.dag.head.slot.epoch) - if consensusFork == ConsensusFork.high: - return Opt.none(string) - let - nextConsensusFork = consensusFork.succ() - nextForkEpoch = node.dag.cfg.consensusForkEpoch(nextConsensusFork) - if nextForkEpoch == FAR_FUTURE_EPOCH: - return Opt.none(string) - Opt.some( - (if withVanityArt: nextConsensusFork.getVanityMascot & " " else: "") & - $nextConsensusFork & ":" & $nextForkEpoch - ) - -func syncStatus(node: BeaconNode, wallSlot: Slot): string = - let optimisticHead = not node.dag.head.executionValid - if node.syncManager.inProgress: - let - optimisticSuffix = if optimisticHead: "/opt" else: "" - # lightClientSuffix = - # if node.consensusManager[].shouldSyncOptimistically(wallSlot): - # " - lc: " & $shortLog(node.consensusManager[].optimisticHead) - # else: - # "" - node.syncManager.syncStatus & optimisticSuffix #& lightClientSuffix - elif node.backfiller.inProgress: - "backfill: " & node.backfiller.syncStatus - elif optimisticHead: - "synced/opt" - else: - "synced" - -func connectedPeersCount(node: BeaconNode): int = - len(node.network.peerPool) - -func formatGwei(amount: Gwei): string = - # TODO This is implemented in a quite a silly way. - # Better routines for formatting decimal numbers - # should exists somewhere else. - let - eth = distinctBase(amount) div 1000000000 - remainder = distinctBase(amount) mod 1000000000 - - result = $eth - if remainder != 0: - result.add '.' - let remainderStr = $remainder - for i in remainderStr.len ..< 9: - result.add '0' - result.add remainderStr - while result[^1] == '0': - result.setLen(result.len - 1) - -when not defined(windows): - proc initStatusBar(node: BeaconNode) {.raises: [ValueError].} = - if not isatty(stdout): - return - if not node.config.statusBarEnabled: - return - - try: - enableTrueColors() - except Exception as exc: # TODO Exception - error "Couldn't enable colors", err = exc.msg - - proc dataResolver(expr: string): string {.raises: [].} = - template justified(): untyped = - node.dag.head.atEpochStart( - getStateField(node.dag.headState, current_justified_checkpoint).epoch - ) - - # TODO: - # We should introduce a general API for resolving dot expressions - # such as `db.latest_block.slot` or `metrics.connected_peers`. - # Such an API can be shared between the RPC back-end, CLI tools - # such as ncli, a potential GraphQL back-end and so on. - # The status bar feature would allow the user to specify an - # arbitrary expression that is resolvable through this API. 
- case expr.toLowerAscii - of "version": - versionAsStr - of "full_version": - fullVersionStr - of "connected_peers": - $(node.connectedPeersCount) - of "head_root": - shortLog(node.dag.head.root) - of "head_epoch": - $(node.dag.head.slot.epoch) - of "head_epoch_slot": - $(node.dag.head.slot.since_epoch_start) - of "head_slot": - $(node.dag.head.slot) - of "justifed_root": - shortLog(justified.blck.root) - of "justifed_epoch": - $(justified.slot.epoch) - of "justifed_epoch_slot": - $(justified.slot.since_epoch_start) - of "justifed_slot": - $(justified.slot) - of "finalized_root": - shortLog(node.dag.finalizedHead.blck.root) - of "finalized_epoch": - $(node.dag.finalizedHead.slot.epoch) - of "finalized_epoch_slot": - $(node.dag.finalizedHead.slot.since_epoch_start) - of "finalized_slot": - $(node.dag.finalizedHead.slot) - of "epoch": - $node.currentSlot.epoch - of "epoch_slot": - $(node.currentSlot.since_epoch_start) - of "slot": - $node.currentSlot - of "slots_per_epoch": - $SLOTS_PER_EPOCH - of "slot_trailing_digits": - var slotStr = $node.currentSlot - if slotStr.len > 3: - slotStr = slotStr[^3 ..^ 1] - slotStr - of "attached_validators_balance": - formatGwei(node.attachedValidatorBalanceTotal) - of "next_consensus_fork": - let nextConsensusForkDescription = - node.formatNextConsensusFork(withVanityArt = true) - if nextConsensusForkDescription.isNone: - "" - else: - " (scheduled " & nextConsensusForkDescription.get & ")" - of "sync_status": - node.syncStatus(node.currentSlot) - else: - # We ignore typos for now and just render the expression - # as it was written. TODO: come up with a good way to show - # an error message to the user. - "$" & expr - - var statusBar = StatusBarView.init(node.config.statusBarContents, dataResolver) - - when compiles(defaultChroniclesStream.outputs[0].writer): - let tmp = defaultChroniclesStream.outputs[0].writer - - defaultChroniclesStream.outputs[0].writer = proc( - logLevel: LogLevel, msg: LogOutputStr - ) {.raises: [].} = - try: - # p.hidePrompt - erase statusBar - # p.writeLine msg - tmp(logLevel, msg) - render statusBar - # p.showPrompt - except Exception as e: # render raises Exception - logLoggingFailure(cstring(msg), e) - - proc statusBarUpdatesPollingLoop() {.async.} = - try: - while true: - update statusBar - erase statusBar - render statusBar - await sleepAsync(chronos.seconds(1)) - except CatchableError as exc: - warn "Failed to update status bar, no further updates", err = exc.msg - - asyncSpawn statusBarUpdatesPollingLoop() - -proc initializeNetworking(node: BeaconNode) {.async.} = - node.installMessageValidators() - - info "Listening to incoming network requests" - await node.network.startListening() - - let addressFile = node.config.dataDir / "beacon_node.enr" - writeFile(addressFile, node.network.announcedENR.toURI) - - await node.network.start() - -proc installRestHandlers(restServer: RestServerRef, node: BeaconNode) = - restServer.router.installBeaconApiHandlers(node) - restServer.router.installBuilderApiHandlers(node) - restServer.router.installConfigApiHandlers(node) - restServer.router.installDebugApiHandlers(node) - restServer.router.installEventApiHandlers(node) - restServer.router.installNimbusApiHandlers(node) - restServer.router.installNodeApiHandlers(node) - restServer.router.installValidatorApiHandlers(node) - restServer.router.installRewardsApiHandlers(node) - if node.dag.lcDataStore.serve: - restServer.router.installLightClientApiHandlers(node) - -from beacon_chain/spec/datatypes/capella import SignedBeaconBlock - -proc 
stop(node: BeaconNode) = - bnStatus = BeaconNodeStatus.Stopping - notice "Graceful shutdown" - if not node.config.inProcessValidators: - try: - node.vcProcess.close() - except Exception as exc: - warn "Couldn't close vc process", msg = exc.msg - try: - waitFor node.network.stop() - except CatchableError as exc: - warn "Couldn't stop network", msg = exc.msg - - node.attachedValidators[].slashingProtection.close() - node.attachedValidators[].close() - node.db.close() - notice "Databases closed" - -func verifyFinalization(node: BeaconNode, slot: Slot) = - # Epoch must be >= 4 to check finalization - const SETTLING_TIME_OFFSET = 1'u64 - let epoch = slot.epoch() - - # Don't static-assert this -- if this isn't called, don't require it - doAssert SLOTS_PER_EPOCH > SETTLING_TIME_OFFSET - - # Intentionally, loudly assert. Point is to fail visibly and unignorably - # during testing. - if epoch >= 4 and slot mod SLOTS_PER_EPOCH > SETTLING_TIME_OFFSET: - let finalizedEpoch = node.dag.finalizedHead.slot.epoch() - # Finalization rule 234, that has the most lag slots among the cases, sets - # state.finalized_checkpoint = old_previous_justified_checkpoint.epoch + 3 - # and then state.slot gets incremented, to increase the maximum offset, if - # finalization occurs every slot, to 4 slots vs scheduledSlot. - doAssert finalizedEpoch + 4 >= epoch - -proc onSlotStart( - node: BeaconNode, wallTime: BeaconTime, lastSlot: Slot -): Future[bool] {.async.} = - ## Called at the beginning of a slot - usually every slot, but sometimes might - ## skip a few in case we're running late. - ## wallTime: current system time - we will strive to perform all duties up - ## to this point in time - ## lastSlot: the last slot that we successfully processed, so we know where to - ## start work from - there might be jumps if processing is delayed - let - # The slot we should be at, according to the clock - wallSlot = wallTime.slotOrZero - # If everything was working perfectly, the slot that we should be processing - expectedSlot = lastSlot + 1 - finalizedEpoch = node.dag.finalizedHead.blck.slot.epoch() - delay = wallTime - expectedSlot.start_beacon_time() - - node.processingDelay = Opt.some(nanoseconds(delay.nanoseconds)) - - block: - logScope: - slot = shortLog(wallSlot) - epoch = shortLog(wallSlot.epoch) - sync = node.syncStatus(wallSlot) - peers = len(node.network.peerPool) - head = shortLog(node.dag.head) - finalized = shortLog(getStateField(node.dag.headState, finalized_checkpoint)) - delay = shortLog(delay) - let nextConsensusForkDescription = node.formatNextConsensusFork() - if nextConsensusForkDescription.isNone: - info "Slot start" - else: - info "Slot start", nextFork = nextConsensusForkDescription.get - - # Check before any re-scheduling of onSlotStart() - if checkIfShouldStopAtEpoch(wallSlot, node.config.stopAtEpoch): - quit(0) - - when defined(windows): - if node.config.runAsService: - reportServiceStatusSuccess() - - # TODO: metrics - # beacon_slot.set wallSlot.toGaugeValue - # beacon_current_epoch.set wallSlot.epoch.toGaugeValue - - # both non-negative, so difference can't overflow or underflow int64 - # finalization_delay.set( - # wallSlot.epoch.toGaugeValue - finalizedEpoch.toGaugeValue) - - if node.config.strictVerification: - verifyFinalization(node, wallSlot) - - node.consensusManager[].updateHead(wallSlot) - - await node.handleValidatorDuties(lastSlot, wallSlot) - - await onSlotEnd(node, wallSlot) - - # https://github.com/ethereum/builder-specs/blob/v0.4.0/specs/bellatrix/validator.md#registration-dissemination 
- # This specification suggests validators re-submit to builder software every - # `EPOCHS_PER_VALIDATOR_REGISTRATION_SUBMISSION` epochs. - if wallSlot.is_epoch and - wallSlot.epoch mod EPOCHS_PER_VALIDATOR_REGISTRATION_SUBMISSION == 0: - asyncSpawn node.registerValidators(wallSlot.epoch) - - return false - -proc startBackfillTask(node: BeaconNode) {.async.} = - while node.dag.needsBackfill: - if not node.syncManager.inProgress: - # Only start the backfiller if it's needed _and_ head sync has completed - - # if we lose sync after having synced head, we could stop the backfilller, - # but this should be a fringe case - might as well keep the logic simple for - # now - node.backfiller.start() - return - - await sleepAsync(chronos.seconds(2)) - -proc onSecond(node: BeaconNode, time: Moment) = - # Nim GC metrics (for the main thread) - - # TODO: Collect metrics - # updateThreadMetrics() - - if node.config.stopAtSyncedEpoch != 0 and - node.dag.head.slot.epoch >= node.config.stopAtSyncedEpoch: - notice "Shutting down after having reached the target synced epoch" - bnStatus = BeaconNodeStatus.Stopping - -proc runOnSecondLoop(node: BeaconNode) {.async.} = - const - sleepTime = chronos.seconds(1) - nanosecondsIn1s = float(sleepTime.nanoseconds) - while true: - let start = chronos.now(chronos.Moment) - await chronos.sleepAsync(sleepTime) - let afterSleep = chronos.now(chronos.Moment) - let sleepTime = afterSleep - start - node.onSecond(start) - let finished = chronos.now(chronos.Moment) - let processingTime = finished - afterSleep - - # TODO: metrics - # ticks_delay.set(sleepTime.nanoseconds.float / nanosecondsIn1s) - trace "onSecond task completed", sleepTime, processingTime - -proc run(node: BeaconNode) {.raises: [CatchableError].} = - bnStatus = BeaconNodeStatus.Running - - if not isNil(node.restServer): - node.restServer.installRestHandlers(node) - node.restServer.start() - - if not isNil(node.keymanagerServer): - doAssert not isNil(node.keymanagerHost) - node.keymanagerServer.router.installKeymanagerHandlers(node.keymanagerHost[]) - if node.keymanagerServer != node.restServer: - node.keymanagerServer.start() - - let - wallTime = node.beaconClock.now() - wallSlot = wallTime.slotOrZero() - - # node.startLightClient() - node.requestManager.start() - node.syncManager.start() - - if node.dag.needsBackfill(): - asyncSpawn node.startBackfillTask() - - waitFor node.localUpdateGossipStatus(wallSlot) - - for web3signerUrl in node.config.web3SignerUrls: - # TODO - # The current strategy polls all remote signers independently - # from each other which may lead to some race conditions of - # validators are migrated from one signer to another - # (because the updates to our validator pool are not atomic). - # Consider using different strategies that would detect such - # race conditions. 
- asyncSpawn node.pollForDynamicValidators( - web3signerUrl, node.config.web3signerUpdateInterval - ) - - asyncSpawn runSlotLoop(node, wallTime, onSlotStart) - asyncSpawn runOnSecondLoop(node) - asyncSpawn runQueueProcessingLoop(node.blockProcessor) - asyncSpawn runKeystoreCachePruningLoop(node.keystoreCache) - - # main event loop - while bnStatus == BeaconNodeStatus.Running: - poll() # if poll fails, the network is broken - - # time to say goodbye - node.stop() - -proc start*(node: BeaconNode) {.raises: [CatchableError].} = - let - head = node.dag.head - finalizedHead = node.dag.finalizedHead - genesisTime = node.beaconClock.fromNow(start_beacon_time(Slot 0)) - - notice "Starting beacon node", - version = fullVersionStr, - nimVersion = NimVersion, - enr = node.network.announcedENR.toURI, - peerId = $node.network.switch.peerInfo.peerId, - timeSinceFinalization = - node.beaconClock.now() - finalizedHead.slot.start_beacon_time(), - head = shortLog(head), - justified = - shortLog(getStateField(node.dag.headState, current_justified_checkpoint)), - finalized = shortLog(getStateField(node.dag.headState, finalized_checkpoint)), - finalizedHead = shortLog(finalizedHead), - SLOTS_PER_EPOCH, - SECONDS_PER_SLOT, - SPEC_VERSION, - dataDir = node.config.dataDir.string, - validators = node.attachedValidators[].count - - if genesisTime.inFuture: - notice "Waiting for genesis", genesisIn = genesisTime.offset - - waitFor node.initializeNetworking() - - node.elManager.start() - node.run() - -## runs beacon node -## adapted from nimbus-eth2 -proc doRunBeaconNodeLocal( - config: var BeaconNodeConf, rng: ref HmacDrbgContext -) {.raises: [CatchableError].} = - # TODO: Define this varaibles somewhere - info "Launching beacon node", - version = fullVersionStr, - bls_backend = $BLS_BACKEND, - const_preset, - cmdParams = commandLineParams(), - config - - template ignoreDeprecatedOption(option: untyped): untyped = - if config.option.isSome: - warn "Config option is deprecated", option = config.option.get - - ignoreDeprecatedOption requireEngineAPI - ignoreDeprecatedOption safeSlotsToImportOptimistically - ignoreDeprecatedOption terminalTotalDifficultyOverride - ignoreDeprecatedOption optimistic - ignoreDeprecatedOption validatorMonitorTotals - ignoreDeprecatedOption web3ForcePolling - - #TODO: figure out the comment on createPidFile - # createPidFile(config.dataDir.string / "beacon_node.pid") - - config.createDumpDirs() - - # if config.metricsEnabled: - # let metricsAddress = config.metricsAddress - # notice "Starting metrics HTTP server", - # url = "http://" & $metricsAddress & ":" & $config.metricsPort & "/metrics" - # try: - # startMetricsHttpServer($metricsAddress, config.metricsPort) - # except CatchableError as exc: - # raise exc - # except Exception as exc: - # raiseAssert exc.msg # TODO fix metrics - - # Nim GC metrics (for the main thread) will be collected in onSecond(), but - # we disable piggy-backing on other metrics here. - - #TODO: reactivate once we have metrics defined - # setSystemMetricsAutomaticUpdate(false) - - # There are no managed event loops in here, to do a graceful shutdown, but - # letting the default Ctrl+C handler exit is safe, since we only read from - # the db. 
- let metadata = config.loadEth2Network() - - # Updating the config based on the metadata certainly is not beautiful but it - # works - for node in metadata.bootstrapNodes: - config.bootstrapNodes.add node - - ## Ctrl+C handling - proc controlCHandler() {.noconv.} = - when defined(windows): - # workaround for https://github.com/nim-lang/Nim/issues/4057 - try: - setupForeignThreadGc() - except Exception as exc: - raiseAssert exc.msg - # shouldn't happen - notice "Shutting down after having received SIGINT" - bnStatus = BeaconNodeStatus.Stopping - - try: - setControlCHook(controlCHandler) - except Exception as exc: # TODO Exception - warn "Cannot set ctrl-c handler", msg = exc.msg - - # equivalent SIGTERM handler - when defined(posix): - proc SIGTERMHandler(signal: cint) {.noconv.} = - notice "Shutting down after having received SIGTERM" - bnStatus = BeaconNodeStatus.Stopping - - c_signal(ansi_c.SIGTERM, SIGTERMHandler) - - block: - let res = - if config.trustedSetupFile.isNone: - conf.loadKzgTrustedSetup() - else: - conf.loadKzgTrustedSetup(config.trustedSetupFile.get) - if res.isErr(): - raiseAssert res.error() - - let node = waitFor BeaconNode.initBeaconNode(rng, config, metadata) - - if bnStatus == BeaconNodeStatus.Stopping: - return - - when not defined(windows): - # This status bar can lock a Windows terminal emulator, blocking the whole - # event loop (seen on Windows 10, with a default MSYS2 terminal). - initStatusBar(node) - - if node.nickname != "": - dynamicLogScope(node = node.nickname): - node.start() - else: - node.start() - -## --end copy paste file from nimbus-eth2/nimbus_beacon_node.nim - proc handleStartingOption(config: var BeaconNodeConf) {.raises: [CatchableError].} = let rng = HmacDrbgContext.new() From 43d8042d1ce3f399fc98beaf57dea5d35f045d81 Mon Sep 17 00:00:00 2001 From: Pedro Miranda Date: Tue, 10 Dec 2024 09:04:04 +0000 Subject: [PATCH 29/32] Removed unused imports --- .../consensus/consensus_wrapper.nim | 37 +++---------------- 1 file changed, 6 insertions(+), 31 deletions(-) diff --git a/nimbus_unified/consensus/consensus_wrapper.nim b/nimbus_unified/consensus/consensus_wrapper.nim index 89cb16943..44dbb3e3f 100644 --- a/nimbus_unified/consensus/consensus_wrapper.nim +++ b/nimbus_unified/consensus/consensus_wrapper.nim @@ -7,48 +7,23 @@ #TODO: Clean these imports import - std/[os, atomics, random, terminal, times, exitprocs, sequtils], + std/atomics, metrics, - beacon_chain/[nimbus_beacon_node, nimbus_binary_common], - beacon_chain/spec/forks, - beacon_chain/[beacon_chain_db, trusted_node_sync], - beacon_chain/networking/network_metadata_downloads, chronos, chronicles, - stew/io2, - eth/p2p/discoveryv5/[enr, random2], ../configs/nimbus_configs, - beacon_chain/consensus_object_pools/vanity_logs/vanity_logs, - beacon_chain/statusbar, - beacon_chain/nimbus_binary_common, - beacon_chain/spec/[forks, digest, helpers], - beacon_chain/spec/datatypes/base, - beacon_chain/[beacon_chain_db, trusted_node_sync, beacon_node], - beacon_chain/spec/weak_subjectivity, - beacon_chain/rpc/[rest_beacon_api, rest_api, state_ttl_cache], - beacon_chain/consensus_object_pools/blob_quarantine, - beacon_chain/networking/[topic_params, network_metadata, network_metadata_downloads], - beacon_chain/spec/datatypes/[bellatrix], - beacon_chain/sync/[sync_protocol], - beacon_chain/validators/[keystore_management, beacon_validators], - beacon_chain/consensus_object_pools/[blockchain_dag], - beacon_chain/spec/ - [beaconstate, state_transition, state_transition_epoch, validator, ssz_codec] + 
  beacon_chain/[beacon_chain_db, beacon_node, nimbus_beacon_node, nimbus_binary_common],
+  beacon_chain/rpc/[rest_beacon_api, rest_api],
+  beacon_chain/networking/[network_metadata, network_metadata_downloads],
+  beacon_chain/validators/[keystore_management]
 
 export nimbus_configs
 
-when defined(posix):
-  import system/ansi_c
-
-from beacon_chain/spec/datatypes/deneb import SignedBeaconBlock
-from beacon_chain/beacon_node_light_client import
-  shouldSyncOptimistically, initLightClient, updateLightClientFromDag
-from libp2p/protocols/pubsub/gossipsub import TopicParams, validateParameters, init
-
 ## log
 logScope:
   topics = "Consensus layer"
 
+# handles option of eth2 beacon node
 proc handleStartingOption(config: var BeaconNodeConf) {.raises: [CatchableError].} =
   let rng = HmacDrbgContext.new()

From bd0dbdaa447bacd5df2219b8c8d8948d58cc0700 Mon Sep 17 00:00:00 2001
From: Pedro Miranda
Date: Wed, 11 Dec 2024 09:06:12 +0000
Subject: [PATCH 30/32] nimbus_unified unit tests

---
 nimbus_unified/nimbus_unified.nim | 2 +-
 nimbus_unified/tests/nim.cfg | 14 +++
 nimbus_unified/tests/test_nimbus_unified.nim | 120 +++++++++++++++++++
 3 files changed, 135 insertions(+), 1 deletion(-)
 create mode 100644 nimbus_unified/tests/nim.cfg
 create mode 100644 nimbus_unified/tests/test_nimbus_unified.nim

diff --git a/nimbus_unified/nimbus_unified.nim b/nimbus_unified/nimbus_unified.nim
index 5daba33b3..82e3efd84 100644
--- a/nimbus_unified/nimbus_unified.nim
+++ b/nimbus_unified/nimbus_unified.nim
@@ -52,7 +52,7 @@ proc consensusLayerHandler(parameters: TaskParameters) {.thread.} =
   info "\tExiting task:", task = parameters.name
 
 ## Waits for tasks to finish (joinThreads)
-proc joinTasks(tasks: var NimbusTasks) =
+proc joinTasks*(tasks: var NimbusTasks) =
   warn "Waiting all tasks to finish ... "
   for i in 0 .. cNimbusMaxTasks - 1:
     if not tasks.taskList[i].isNil:
diff --git a/nimbus_unified/tests/nim.cfg b/nimbus_unified/tests/nim.cfg
new file mode 100644
index 000000000..0a0504a96
--- /dev/null
+++ b/nimbus_unified/tests/nim.cfg
@@ -0,0 +1,14 @@
+# Nimbus
+# Copyright (c) 2024 Status Research & Development GmbH
+# Licensed and distributed under either of
+# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
+# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
+# at your option. This file may not be copied, modified, or distributed except according to those terms.
+
+# Use only `secp256k1` public key cryptography as an identity in LibP2P.
+-d:"libp2p_pki_schemes=secp256k1"
+-d:"chronicles_runtime_filtering=on"
+
+--styleCheck:usages
+--styleCheck:hint
+--hint[Processing]:off
\ No newline at end of file
diff --git a/nimbus_unified/tests/test_nimbus_unified.nim b/nimbus_unified/tests/test_nimbus_unified.nim
new file mode 100644
index 000000000..189b2a6a1
--- /dev/null
+++ b/nimbus_unified/tests/test_nimbus_unified.nim
@@ -0,0 +1,120 @@
+# nimbus_unified
+# Copyright (c) 2024 Status Research & Development GmbH
+# Licensed and distributed under either of
+# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
+# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
+# at your option. This file may not be copied, modified, or distributed except according to those terms.
+
+import
+  std/[os, atomics],
+  unittest2,
+  chronicles,
+  ../nimbus_unified,
+  ../configs/nimbus_configs,
+  beacon_chain/conf
+
+# ----------------------------------------------------------------------------
+# Helper Functions
+# ----------------------------------------------------------------------------
+
+template fileExists(filename: string): bool =
+  try:
+    discard readFile(filename)
+    true
+  except IOError:
+    false
+
+template removeFile(filename: string) =
+  try:
+    discard io2.removeFile(filename)
+  except IOError:
+    discard # Ignore if the file does not exist
+
+proc handlerMock(parameters: TaskParameters) {.thread.} =
+  echo "handler mock"
+
+# ----------------------------------------------------------------------------
+# Unit Tests
+# ----------------------------------------------------------------------------
+
+suite "Nimbus Task Management Tests":
+  # Test: Creating a new task successfully
+  test "addNewTask successfully adds a task":
+    var tasks: NimbusTasks = NimbusTasks.new()
+    var params: TaskParameters = TaskParameters(name: "TestTask")
+
+    tasks.addNewTask(cNimbusTaskTimeoutMs, handlerMock, params)
+
+    check not tasks.taskList[0].isNil
+    check tasks.taskList[0].name == "TestTask"
+
+  # Test: Adding more tasks than the maximum allowed
+  test "addNewTask fails when NimbusTasks is full":
+    var tasks: NimbusTasks = NimbusTasks.new()
+
+    for i in 0 ..< cNimbusMaxTasks:
+      var params: TaskParameters = TaskParameters(name: "Task" & $i)
+      tasks.addNewTask(cNimbusTaskTimeoutMs, handlerMock, params)
+
+    # Attempt to add one more task than allowed
+    var extraParams: TaskParameters = TaskParameters(name: "ExtraTask")
+    check:
+      try:
+        tasks.addNewTask(cNimbusTaskTimeoutMs, handlerMock, extraParams)
+        false # If no exception, test fails
+      except NimbusTasksError:
+        true # Exception was correctly raised
+
+  # Test: Tasks finish properly and joinTasks correctly joins all threads
+  test "joinTasks waits for all tasks to finish":
+    var tasks: NimbusTasks = NimbusTasks.new()
+
+    for i in 0 ..< cNimbusMaxTasks:
+      var params: TaskParameters = TaskParameters(name: "Task" & $i)
+      tasks.addNewTask(cNimbusTaskTimeoutMs, handlerMock, params)
+
+    tasks.joinTasks()
+
+    # Check that all task slots are still non-nil but threads have finished
+    for i in 0 ..< cNimbusMaxTasks:
+      check not tasks.taskList[i].isNil
+
+  # Test: startTasks initializes both the execution and consensus layer tasks
+  test "startTasks initializes execution and consensus tasks":
+    var tasks: NimbusTasks = NimbusTasks.new()
+    let nimbusConfigs = NimbusConfig()
+    var beaconNodeConfig: BeaconNodeConf = BeaconNodeConf()
+
+    tasks.startTasks(nimbusConfigs, beaconNodeConfig)
+
+    # Check that at least two tasks were created
+    check not tasks.taskList[0].isNil
+    check not tasks.taskList[1].isNil
+
+  # Test: Monitor detects shutdown and calls joinTasks
+  test "monitor stops on shutdown signal and calls joinTasks":
+    var tasks: NimbusTasks = NimbusTasks.new()
+    let config: NimbusConfig = NimbusConfig()
+
+    # Simulate a shutdown signal
+    isShutDownRequired.store(true)
+    tasks.monitor(config)
+
+    # Check that the monitor loop exits correctly (this is difficult to test directly, but we can infer it)
+    check isShutDownRequired.load() == true
+
+  # Test: Control-C handler properly initiates shutdown
+  test "controlCHandler triggers shutdown sequence":
+    var tasks: NimbusTasks = NimbusTasks.new()
+    let config: NimbusConfig = NimbusConfig()
+
+    proc localControlCHandler() {.noconv.} =
+      isShutDownRequired.store(true)
+
+    # Set up a simulated control-C hook
+    setControlCHook(localControlCHandler)
+
+    # Trigger the hook manually
+    localControlCHandler()
+
+    check isShutDownRequired.load() == true

From 5a637d18c2c799fdd7efd5ae1f7769895dad7155 Mon Sep 17 00:00:00 2001
From: Pedro Miranda
Date: Wed, 11 Dec 2024 16:18:06 +0000
Subject: [PATCH 31/32] consensus wrapper unit tests

---
 .../consensus/consensus_wrapper.nim | 7 +-
 nimbus_unified/tests/all_tests.nim | 12 ++++
 .../consensus/test_consensus_wrapper.nim | 72 +++++++++++++++++++
 3 files changed, 88 insertions(+), 3 deletions(-)
 create mode 100644 nimbus_unified/tests/all_tests.nim
 create mode 100644 nimbus_unified/tests/consensus/test_consensus_wrapper.nim

diff --git a/nimbus_unified/consensus/consensus_wrapper.nim b/nimbus_unified/consensus/consensus_wrapper.nim
index 44dbb3e3f..c96e19025 100644
--- a/nimbus_unified/consensus/consensus_wrapper.nim
+++ b/nimbus_unified/consensus/consensus_wrapper.nim
@@ -24,7 +24,7 @@ logScope:
   topics = "Consensus layer"
 
 # handles option of eth2 beacon node
-proc handleStartingOption(config: var BeaconNodeConf) {.raises: [CatchableError].} =
+proc handleStartingOption*(config: var BeaconNodeConf) {.raises: [CatchableError].} =
   let rng = HmacDrbgContext.new()
 
   # More options can be added, might be out of scope given that they exist in eth2
@@ -33,8 +33,9 @@ proc handleStartingOption(config: var BeaconNodeConf) {.raises: [CatchableError]
     doRunBeaconNode(config, rng)
   of BNStartUpCmd.trustedNodeSync:
     if config.blockId.isSome():
-      error "--blockId option has been removed - use --state-id instead!"
-      quit 1
+      raise newException(
+        ValueError, "--blockId option has been removed - use --state-id instead!"
+      )
 
     let metadata = loadEth2Network(config)
 
diff --git a/nimbus_unified/tests/all_tests.nim b/nimbus_unified/tests/all_tests.nim
new file mode 100644
index 000000000..6f6311d3e
--- /dev/null
+++ b/nimbus_unified/tests/all_tests.nim
@@ -0,0 +1,12 @@
+# nimbus_unified
+# Copyright (c) 2024 Status Research & Development GmbH
+# Licensed and distributed under either of
+# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
+# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
+# at your option. This file may not be copied, modified, or distributed except according to those terms.
+
+import
+  ./test_unified,
+  ./consensus/test_consensus_wrapper
+
+summarizeLongTests("AllTests")
diff --git a/nimbus_unified/tests/consensus/test_consensus_wrapper.nim b/nimbus_unified/tests/consensus/test_consensus_wrapper.nim
new file mode 100644
index 000000000..bd508242f
--- /dev/null
+++ b/nimbus_unified/tests/consensus/test_consensus_wrapper.nim
@@ -0,0 +1,72 @@
+# nimbus_unified
+# Copyright (c) 2024 Status Research & Development GmbH
+# Licensed and distributed under either of
+# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
+# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
+# at your option. This file may not be copied, modified, or distributed except according to those terms.
+
+import
+  std/atomics,
+  unittest2,
+  beacon_chain/[conf, conf_common],
+  ../../consensus/consensus_wrapper,
+  ../../version
+
+# ----------------------------------------------------------------------------
+# Helper Functions
+# ----------------------------------------------------------------------------
+proc createBeaconNodeConf(): BeaconNodeConf =
+  var conf = makeBannerAndConfig(
+    clientName, versionAsStr, nimBanner, "", [], BeaconNodeConf
+  ).valueOr:
+    stderr.write error
+    quit QuitFailure
+
+  conf
+
+# ----------------------------------------------------------------------------
+# Unit Tests
+# ----------------------------------------------------------------------------
+
+suite "Nimbus Consensus Wrapper Tests":
+  # Test: handleStartingOption with BNStartUpCmd.trustedNodeSync and missing blockId
+  test "handleStartingOption errors on missing blockId with trustedNodeSync command":
+    var config = createBeaconNodeConf()
+    config.cmd = BNStartUpCmd.trustedNodeSync
+    config.blockId = some("blockId") # Simulate missing blockId
+
+    check:
+      try:
+        config.handleStartingOption()
+        false # If no exception, test fails
+      except CatchableError:
+        true # Correctly raised an error
+
+  # Test: handleStartingOption with an unknown command
+  test "handleStartingOption handles unknown command gracefully":
+    var config = createBeaconNodeConf()
+    config.cmd = BNStartUpCmd(cast[BNStartUpCmd](999)) # Invalid command enum
+
+    check:
+      try:
+        config.handleStartingOption()
+        true # No exception should be raised
+      except CatchableError:
+        false # If an exception is raised, the test fails
+
+  # Test: consensusWrapper handles CatchableError gracefully
+  test "consensusWrapper handles CatchableError and sets shutdown flag":
+    var params: TaskParameters = TaskParameters(
+      name: "ErrorTest",
+      beaconNodeConfigs: BeaconNodeConf(cmd: BNStartUpCmd(cast[BNStartUpCmd](999))),
+        # Invalid command enum), # Invalid command
+    )
+
+    check:
+      try:
+        consensusWrapper(params)
+        true # No uncaught exceptions
+      except CatchableError:
+        false # If an exception is raised, the test fails
+
+    check isShutDownRequired.load() == true # Verify shutdown flag is set

From 21ac186b7e8afa188caee1423a2f5339ee423419 Mon Sep 17 00:00:00 2001
From: Pedro Miranda
Date: Fri, 13 Dec 2024 02:06:28 +0000
Subject: [PATCH 32/32] Use nimbus-eth2 draft branch

---
 .gitmodules | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.gitmodules b/.gitmodules
index 3ec525e08..13bfa3b8a 100644
--- a/.gitmodules
+++ b/.gitmodules
@@ -171,7 +171,7 @@
 [submodule "vendor/nimbus-eth2"]
 	path = vendor/nimbus-eth2
 	url = https://github.com/status-im/nimbus-eth2.git
-	branch = unstable
+	branch = dev/pedro/unified_changes
 [submodule "vendor/nim-taskpools"]
 	path = vendor/nim-taskpools
 	url = https://github.com/status-im/nim-taskpools.git