diff --git a/.bazelversion b/.bazelversion index 19b860c1872d..66ce77b7ead5 100644 --- a/.bazelversion +++ b/.bazelversion @@ -1 +1 @@ -6.4.0 +7.0.0 diff --git a/.buildkite-bazelrc b/.buildkite-bazelrc index 6aacfaa4d4e3..6c26c3de74e6 100644 --- a/.buildkite-bazelrc +++ b/.buildkite-bazelrc @@ -10,7 +10,7 @@ # Prysm specific remote-cache properties. build:remote-cache --remote_download_minimal -build:remote-cache --experimental_remote_build_event_upload=minimal +build:remote-cache --remote_build_event_upload=minimal build:remote-cache --remote_cache=grpc://bazel-remote-cache:9092 # Does not work with rules_oci. See https://github.com/bazel-contrib/rules_oci/issues/292 #build:remote-cache --experimental_remote_downloader=grpc://bazel-remote-cache:9092 diff --git a/BUILD.bazel b/BUILD.bazel index 9bcaf6fed17b..030bffe39bed 100644 --- a/BUILD.bazel +++ b/BUILD.bazel @@ -194,33 +194,6 @@ nogo( config = ":nogo_config_with_excludes", visibility = ["//visibility:public"], deps = [ - "@org_golang_x_tools//go/analysis/passes/unsafeptr:go_default_library", - "@org_golang_x_tools//go/analysis/passes/unreachable:go_default_library", - "@org_golang_x_tools//go/analysis/passes/unmarshal:go_default_library", - "@org_golang_x_tools//go/analysis/passes/tests:go_default_library", - "@org_golang_x_tools//go/analysis/passes/structtag:go_default_library", - "@org_golang_x_tools//go/analysis/passes/stdmethods:go_default_library", - "@org_golang_x_tools//go/analysis/passes/shift:go_default_library", - # "@org_golang_x_tools//go/analysis/passes/shadow:go_default_library", - "@org_golang_x_tools//go/analysis/passes/printf:go_default_library", - "@org_golang_x_tools//go/analysis/passes/pkgfact:go_default_library", - "@org_golang_x_tools//go/analysis/passes/nilness:go_default_library", - "@org_golang_x_tools//go/analysis/passes/nilfunc:go_default_library", - "@org_golang_x_tools//go/analysis/passes/loopclosure:go_default_library", - 
"@org_golang_x_tools//go/analysis/passes/httpresponse:go_default_library", - "@org_golang_x_tools//go/analysis/passes/findcall:go_default_library", - "@org_golang_x_tools//go/analysis/passes/deepequalerrors:go_default_library", - "@org_golang_x_tools//go/analysis/passes/ctrlflow:go_default_library", - "@org_golang_x_tools//go/analysis/passes/copylock:go_default_library", - # "@org_golang_x_tools//go/analysis/passes/cgocall:go_default_library", - "@org_golang_x_tools//go/analysis/passes/buildtag:go_default_library", - "@org_golang_x_tools//go/analysis/passes/buildssa:go_default_library", - "@org_golang_x_tools//go/analysis/passes/bools:go_default_library", - "@org_golang_x_tools//go/analysis/passes/atomicalign:go_default_library", - "@org_golang_x_tools//go/analysis/passes/atomic:go_default_library", - "@org_golang_x_tools//go/analysis/passes/assign:go_default_library", - "@org_golang_x_tools//go/analysis/passes/inspect:go_default_library", - "@org_golang_x_tools//go/analysis/passes/asmdecl:go_default_library", "//tools/analyzers/comparesame:go_default_library", "//tools/analyzers/cryptorand:go_default_library", "//tools/analyzers/errcheck:go_default_library", @@ -236,6 +209,53 @@ nogo( "//tools/analyzers/shadowpredecl:go_default_library", "//tools/analyzers/slicedirect:go_default_library", "//tools/analyzers/uintcast:go_default_library", + "@org_golang_x_tools//go/analysis/passes/appends:go_default_library", + "@org_golang_x_tools//go/analysis/passes/asmdecl:go_default_library", + "@org_golang_x_tools//go/analysis/passes/assign:go_default_library", + "@org_golang_x_tools//go/analysis/passes/atomic:go_default_library", + "@org_golang_x_tools//go/analysis/passes/atomicalign:go_default_library", + "@org_golang_x_tools//go/analysis/passes/bools:go_default_library", + "@org_golang_x_tools//go/analysis/passes/buildssa:go_default_library", + "@org_golang_x_tools//go/analysis/passes/buildtag:go_default_library", + # cgocall disabled + 
#"@org_golang_x_tools//go/analysis/passes/cgocall:go_default_library", + "@org_golang_x_tools//go/analysis/passes/copylock:go_default_library", + "@org_golang_x_tools//go/analysis/passes/ctrlflow:go_default_library", + "@org_golang_x_tools//go/analysis/passes/deepequalerrors:go_default_library", + "@org_golang_x_tools//go/analysis/passes/defers:go_default_library", + "@org_golang_x_tools//go/analysis/passes/directive:go_default_library", + # fieldalignment disabled + #"@org_golang_x_tools//go/analysis/passes/fieldalignment:go_default_library", + "@org_golang_x_tools//go/analysis/passes/findcall:go_default_library", + "@org_golang_x_tools//go/analysis/passes/framepointer:go_default_library", + "@org_golang_x_tools//go/analysis/passes/httpmux:go_default_library", + "@org_golang_x_tools//go/analysis/passes/httpresponse:go_default_library", + "@org_golang_x_tools//go/analysis/passes/ifaceassert:go_default_library", + "@org_golang_x_tools//go/analysis/passes/inspect:go_default_library", + "@org_golang_x_tools//go/analysis/passes/loopclosure:go_default_library", + "@org_golang_x_tools//go/analysis/passes/nilfunc:go_default_library", + "@org_golang_x_tools//go/analysis/passes/nilness:go_default_library", + "@org_golang_x_tools//go/analysis/passes/pkgfact:go_default_library", + "@org_golang_x_tools//go/analysis/passes/printf:go_default_library", + "@org_golang_x_tools//go/analysis/passes/reflectvaluecompare:go_default_library", + # shadow disabled + #"@org_golang_x_tools//go/analysis/passes/shadow:go_default_library", + "@org_golang_x_tools//go/analysis/passes/shift:go_default_library", + "@org_golang_x_tools//go/analysis/passes/sigchanyzer:go_default_library", + "@org_golang_x_tools//go/analysis/passes/slog:go_default_library", + "@org_golang_x_tools//go/analysis/passes/sortslice:go_default_library", + "@org_golang_x_tools//go/analysis/passes/stdmethods:go_default_library", + "@org_golang_x_tools//go/analysis/passes/stringintconv:go_default_library", + 
"@org_golang_x_tools//go/analysis/passes/structtag:go_default_library", + "@org_golang_x_tools//go/analysis/passes/testinggoroutine:go_default_library", + "@org_golang_x_tools//go/analysis/passes/tests:go_default_library", + "@org_golang_x_tools//go/analysis/passes/timeformat:go_default_library", + "@org_golang_x_tools//go/analysis/passes/unmarshal:go_default_library", + "@org_golang_x_tools//go/analysis/passes/unreachable:go_default_library", + "@org_golang_x_tools//go/analysis/passes/unsafeptr:go_default_library", + "@org_golang_x_tools//go/analysis/passes/unusedresult:go_default_library", + "@org_golang_x_tools//go/analysis/passes/unusedwrite:go_default_library", + "@org_golang_x_tools//go/analysis/passes/usesgenerics:go_default_library", ] + select({ # nogo checks that fail with coverage enabled. ":coverage_enabled": [], diff --git a/MODULE.bazel b/MODULE.bazel new file mode 100644 index 000000000000..00bb18361f7f --- /dev/null +++ b/MODULE.bazel @@ -0,0 +1,6 @@ +############################################################################### +# Bazel now uses Bzlmod by default to manage external dependencies. +# Please consider migrating your external dependencies from WORKSPACE to MODULE.bazel. 
+# +# For more details, please check https://github.com/bazelbuild/bazel/issues/18958 +############################################################################### diff --git a/MODULE.bazel.lock b/MODULE.bazel.lock new file mode 100644 index 000000000000..964faf91b3b1 --- /dev/null +++ b/MODULE.bazel.lock @@ -0,0 +1,1245 @@ +{ + "lockFileVersion": 3, + "moduleFileHash": "0e3e315145ac7ee7a4e0ac825e1c5e03c068ec1254dd42c3caaecb27e921dc4d", + "flags": { + "cmdRegistries": [ + "https://bcr.bazel.build/" + ], + "cmdModuleOverrides": {}, + "allowedYankedVersions": [], + "envVarAllowedYankedVersions": "", + "ignoreDevDependency": false, + "directDependenciesMode": "WARNING", + "compatibilityMode": "ERROR" + }, + "localOverrideHashes": { + "bazel_tools": "922ea6752dc9105de5af957f7a99a6933c0a6a712d23df6aad16a9c399f7e787" + }, + "moduleDepGraph": { + "": { + "name": "", + "version": "", + "key": "", + "repoName": "", + "executionPlatformsToRegister": [], + "toolchainsToRegister": [], + "extensionUsages": [], + "deps": { + "bazel_tools": "bazel_tools@_", + "local_config_platform": "local_config_platform@_" + } + }, + "bazel_tools@_": { + "name": "bazel_tools", + "version": "", + "key": "bazel_tools@_", + "repoName": "bazel_tools", + "executionPlatformsToRegister": [], + "toolchainsToRegister": [ + "@local_config_cc_toolchains//:all", + "@local_config_sh//:local_sh_toolchain" + ], + "extensionUsages": [ + { + "extensionBzlFile": "@bazel_tools//tools/cpp:cc_configure.bzl", + "extensionName": "cc_configure_extension", + "usingModule": "bazel_tools@_", + "location": { + "file": "@@bazel_tools//:MODULE.bazel", + "line": 17, + "column": 29 + }, + "imports": { + "local_config_cc": "local_config_cc", + "local_config_cc_toolchains": "local_config_cc_toolchains" + }, + "devImports": [], + "tags": [], + "hasDevUseExtension": false, + "hasNonDevUseExtension": true + }, + { + "extensionBzlFile": "@bazel_tools//tools/osx:xcode_configure.bzl", + "extensionName": 
"xcode_configure_extension", + "usingModule": "bazel_tools@_", + "location": { + "file": "@@bazel_tools//:MODULE.bazel", + "line": 21, + "column": 32 + }, + "imports": { + "local_config_xcode": "local_config_xcode" + }, + "devImports": [], + "tags": [], + "hasDevUseExtension": false, + "hasNonDevUseExtension": true + }, + { + "extensionBzlFile": "@rules_java//java:extensions.bzl", + "extensionName": "toolchains", + "usingModule": "bazel_tools@_", + "location": { + "file": "@@bazel_tools//:MODULE.bazel", + "line": 24, + "column": 32 + }, + "imports": { + "local_jdk": "local_jdk", + "remote_java_tools": "remote_java_tools", + "remote_java_tools_linux": "remote_java_tools_linux", + "remote_java_tools_windows": "remote_java_tools_windows", + "remote_java_tools_darwin_x86_64": "remote_java_tools_darwin_x86_64", + "remote_java_tools_darwin_arm64": "remote_java_tools_darwin_arm64" + }, + "devImports": [], + "tags": [], + "hasDevUseExtension": false, + "hasNonDevUseExtension": true + }, + { + "extensionBzlFile": "@bazel_tools//tools/sh:sh_configure.bzl", + "extensionName": "sh_configure_extension", + "usingModule": "bazel_tools@_", + "location": { + "file": "@@bazel_tools//:MODULE.bazel", + "line": 35, + "column": 39 + }, + "imports": { + "local_config_sh": "local_config_sh" + }, + "devImports": [], + "tags": [], + "hasDevUseExtension": false, + "hasNonDevUseExtension": true + }, + { + "extensionBzlFile": "@bazel_tools//tools/test:extensions.bzl", + "extensionName": "remote_coverage_tools_extension", + "usingModule": "bazel_tools@_", + "location": { + "file": "@@bazel_tools//:MODULE.bazel", + "line": 39, + "column": 48 + }, + "imports": { + "remote_coverage_tools": "remote_coverage_tools" + }, + "devImports": [], + "tags": [], + "hasDevUseExtension": false, + "hasNonDevUseExtension": true + }, + { + "extensionBzlFile": "@bazel_tools//tools/android:android_extensions.bzl", + "extensionName": "remote_android_tools_extensions", + "usingModule": "bazel_tools@_", + "location": 
{ + "file": "@@bazel_tools//:MODULE.bazel", + "line": 42, + "column": 42 + }, + "imports": { + "android_gmaven_r8": "android_gmaven_r8", + "android_tools": "android_tools" + }, + "devImports": [], + "tags": [], + "hasDevUseExtension": false, + "hasNonDevUseExtension": true + } + ], + "deps": { + "rules_cc": "rules_cc@0.0.9", + "rules_java": "rules_java@7.1.0", + "rules_license": "rules_license@0.0.7", + "rules_proto": "rules_proto@4.0.0", + "rules_python": "rules_python@0.4.0", + "platforms": "platforms@0.0.7", + "com_google_protobuf": "protobuf@3.19.6", + "zlib": "zlib@1.3", + "build_bazel_apple_support": "apple_support@1.5.0", + "local_config_platform": "local_config_platform@_" + } + }, + "local_config_platform@_": { + "name": "local_config_platform", + "version": "", + "key": "local_config_platform@_", + "repoName": "local_config_platform", + "executionPlatformsToRegister": [], + "toolchainsToRegister": [], + "extensionUsages": [], + "deps": { + "platforms": "platforms@0.0.7", + "bazel_tools": "bazel_tools@_" + } + }, + "rules_cc@0.0.9": { + "name": "rules_cc", + "version": "0.0.9", + "key": "rules_cc@0.0.9", + "repoName": "rules_cc", + "executionPlatformsToRegister": [], + "toolchainsToRegister": [ + "@local_config_cc_toolchains//:all" + ], + "extensionUsages": [ + { + "extensionBzlFile": "@bazel_tools//tools/cpp:cc_configure.bzl", + "extensionName": "cc_configure_extension", + "usingModule": "rules_cc@0.0.9", + "location": { + "file": "https://bcr.bazel.build/modules/rules_cc/0.0.9/MODULE.bazel", + "line": 9, + "column": 29 + }, + "imports": { + "local_config_cc_toolchains": "local_config_cc_toolchains" + }, + "devImports": [], + "tags": [], + "hasDevUseExtension": false, + "hasNonDevUseExtension": true + } + ], + "deps": { + "platforms": "platforms@0.0.7", + "bazel_tools": "bazel_tools@_", + "local_config_platform": "local_config_platform@_" + }, + "repoSpec": { + "bzlFile": "@bazel_tools//tools/build_defs/repo:http.bzl", + "ruleClassName": "http_archive", + 
"attributes": { + "name": "rules_cc~0.0.9", + "urls": [ + "https://github.com/bazelbuild/rules_cc/releases/download/0.0.9/rules_cc-0.0.9.tar.gz" + ], + "integrity": "sha256-IDeHW5pEVtzkp50RKorohbvEqtlo5lh9ym5k86CQDN8=", + "strip_prefix": "rules_cc-0.0.9", + "remote_patches": { + "https://bcr.bazel.build/modules/rules_cc/0.0.9/patches/module_dot_bazel_version.patch": "sha256-mM+qzOI0SgAdaJBlWOSMwMPKpaA9b7R37Hj/tp5bb4g=" + }, + "remote_patch_strip": 0 + } + } + }, + "rules_java@7.1.0": { + "name": "rules_java", + "version": "7.1.0", + "key": "rules_java@7.1.0", + "repoName": "rules_java", + "executionPlatformsToRegister": [], + "toolchainsToRegister": [ + "//toolchains:all", + "@local_jdk//:runtime_toolchain_definition", + "@local_jdk//:bootstrap_runtime_toolchain_definition", + "@remotejdk11_linux_toolchain_config_repo//:all", + "@remotejdk11_linux_aarch64_toolchain_config_repo//:all", + "@remotejdk11_linux_ppc64le_toolchain_config_repo//:all", + "@remotejdk11_linux_s390x_toolchain_config_repo//:all", + "@remotejdk11_macos_toolchain_config_repo//:all", + "@remotejdk11_macos_aarch64_toolchain_config_repo//:all", + "@remotejdk11_win_toolchain_config_repo//:all", + "@remotejdk11_win_arm64_toolchain_config_repo//:all", + "@remotejdk17_linux_toolchain_config_repo//:all", + "@remotejdk17_linux_aarch64_toolchain_config_repo//:all", + "@remotejdk17_linux_ppc64le_toolchain_config_repo//:all", + "@remotejdk17_linux_s390x_toolchain_config_repo//:all", + "@remotejdk17_macos_toolchain_config_repo//:all", + "@remotejdk17_macos_aarch64_toolchain_config_repo//:all", + "@remotejdk17_win_toolchain_config_repo//:all", + "@remotejdk17_win_arm64_toolchain_config_repo//:all", + "@remotejdk21_linux_toolchain_config_repo//:all", + "@remotejdk21_linux_aarch64_toolchain_config_repo//:all", + "@remotejdk21_macos_toolchain_config_repo//:all", + "@remotejdk21_macos_aarch64_toolchain_config_repo//:all", + "@remotejdk21_win_toolchain_config_repo//:all" + ], + "extensionUsages": [ + { + 
"extensionBzlFile": "@rules_java//java:extensions.bzl", + "extensionName": "toolchains", + "usingModule": "rules_java@7.1.0", + "location": { + "file": "https://bcr.bazel.build/modules/rules_java/7.1.0/MODULE.bazel", + "line": 19, + "column": 27 + }, + "imports": { + "remote_java_tools": "remote_java_tools", + "remote_java_tools_linux": "remote_java_tools_linux", + "remote_java_tools_windows": "remote_java_tools_windows", + "remote_java_tools_darwin_x86_64": "remote_java_tools_darwin_x86_64", + "remote_java_tools_darwin_arm64": "remote_java_tools_darwin_arm64", + "local_jdk": "local_jdk", + "remotejdk11_linux_toolchain_config_repo": "remotejdk11_linux_toolchain_config_repo", + "remotejdk11_linux_aarch64_toolchain_config_repo": "remotejdk11_linux_aarch64_toolchain_config_repo", + "remotejdk11_linux_ppc64le_toolchain_config_repo": "remotejdk11_linux_ppc64le_toolchain_config_repo", + "remotejdk11_linux_s390x_toolchain_config_repo": "remotejdk11_linux_s390x_toolchain_config_repo", + "remotejdk11_macos_toolchain_config_repo": "remotejdk11_macos_toolchain_config_repo", + "remotejdk11_macos_aarch64_toolchain_config_repo": "remotejdk11_macos_aarch64_toolchain_config_repo", + "remotejdk11_win_toolchain_config_repo": "remotejdk11_win_toolchain_config_repo", + "remotejdk11_win_arm64_toolchain_config_repo": "remotejdk11_win_arm64_toolchain_config_repo", + "remotejdk17_linux_toolchain_config_repo": "remotejdk17_linux_toolchain_config_repo", + "remotejdk17_linux_aarch64_toolchain_config_repo": "remotejdk17_linux_aarch64_toolchain_config_repo", + "remotejdk17_linux_ppc64le_toolchain_config_repo": "remotejdk17_linux_ppc64le_toolchain_config_repo", + "remotejdk17_linux_s390x_toolchain_config_repo": "remotejdk17_linux_s390x_toolchain_config_repo", + "remotejdk17_macos_toolchain_config_repo": "remotejdk17_macos_toolchain_config_repo", + "remotejdk17_macos_aarch64_toolchain_config_repo": "remotejdk17_macos_aarch64_toolchain_config_repo", + "remotejdk17_win_toolchain_config_repo": 
"remotejdk17_win_toolchain_config_repo", + "remotejdk17_win_arm64_toolchain_config_repo": "remotejdk17_win_arm64_toolchain_config_repo", + "remotejdk21_linux_toolchain_config_repo": "remotejdk21_linux_toolchain_config_repo", + "remotejdk21_linux_aarch64_toolchain_config_repo": "remotejdk21_linux_aarch64_toolchain_config_repo", + "remotejdk21_macos_toolchain_config_repo": "remotejdk21_macos_toolchain_config_repo", + "remotejdk21_macos_aarch64_toolchain_config_repo": "remotejdk21_macos_aarch64_toolchain_config_repo", + "remotejdk21_win_toolchain_config_repo": "remotejdk21_win_toolchain_config_repo" + }, + "devImports": [], + "tags": [], + "hasDevUseExtension": false, + "hasNonDevUseExtension": true + } + ], + "deps": { + "platforms": "platforms@0.0.7", + "rules_cc": "rules_cc@0.0.9", + "bazel_skylib": "bazel_skylib@1.3.0", + "rules_proto": "rules_proto@4.0.0", + "rules_license": "rules_license@0.0.7", + "bazel_tools": "bazel_tools@_", + "local_config_platform": "local_config_platform@_" + }, + "repoSpec": { + "bzlFile": "@bazel_tools//tools/build_defs/repo:http.bzl", + "ruleClassName": "http_archive", + "attributes": { + "name": "rules_java~7.1.0", + "urls": [ + "https://github.com/bazelbuild/rules_java/releases/download/7.1.0/rules_java-7.1.0.tar.gz" + ], + "integrity": "sha256-o3pOX2OrgnFuXdau75iO2EYcegC46TYnImKJn1h81OE=", + "strip_prefix": "", + "remote_patches": {}, + "remote_patch_strip": 0 + } + } + }, + "rules_license@0.0.7": { + "name": "rules_license", + "version": "0.0.7", + "key": "rules_license@0.0.7", + "repoName": "rules_license", + "executionPlatformsToRegister": [], + "toolchainsToRegister": [], + "extensionUsages": [], + "deps": { + "bazel_tools": "bazel_tools@_", + "local_config_platform": "local_config_platform@_" + }, + "repoSpec": { + "bzlFile": "@bazel_tools//tools/build_defs/repo:http.bzl", + "ruleClassName": "http_archive", + "attributes": { + "name": "rules_license~0.0.7", + "urls": [ + 
"https://github.com/bazelbuild/rules_license/releases/download/0.0.7/rules_license-0.0.7.tar.gz" + ], + "integrity": "sha256-RTHezLkTY5ww5cdRKgVNXYdWmNrrddjPkPKEN1/nw2A=", + "strip_prefix": "", + "remote_patches": {}, + "remote_patch_strip": 0 + } + } + }, + "rules_proto@4.0.0": { + "name": "rules_proto", + "version": "4.0.0", + "key": "rules_proto@4.0.0", + "repoName": "rules_proto", + "executionPlatformsToRegister": [], + "toolchainsToRegister": [], + "extensionUsages": [], + "deps": { + "bazel_skylib": "bazel_skylib@1.3.0", + "rules_cc": "rules_cc@0.0.9", + "bazel_tools": "bazel_tools@_", + "local_config_platform": "local_config_platform@_" + }, + "repoSpec": { + "bzlFile": "@bazel_tools//tools/build_defs/repo:http.bzl", + "ruleClassName": "http_archive", + "attributes": { + "name": "rules_proto~4.0.0", + "urls": [ + "https://github.com/bazelbuild/rules_proto/archive/refs/tags/4.0.0.zip" + ], + "integrity": "sha256-Lr5z6xyuRA19pNtRYMGjKaynwQpck4H/lwYyVjyhoq4=", + "strip_prefix": "rules_proto-4.0.0", + "remote_patches": { + "https://bcr.bazel.build/modules/rules_proto/4.0.0/patches/module_dot_bazel.patch": "sha256-MclJO7tIAM2ElDAmscNId9pKTpOuDGHgVlW/9VBOIp0=" + }, + "remote_patch_strip": 0 + } + } + }, + "rules_python@0.4.0": { + "name": "rules_python", + "version": "0.4.0", + "key": "rules_python@0.4.0", + "repoName": "rules_python", + "executionPlatformsToRegister": [], + "toolchainsToRegister": [ + "@bazel_tools//tools/python:autodetecting_toolchain" + ], + "extensionUsages": [ + { + "extensionBzlFile": "@rules_python//bzlmod:extensions.bzl", + "extensionName": "pip_install", + "usingModule": "rules_python@0.4.0", + "location": { + "file": "https://bcr.bazel.build/modules/rules_python/0.4.0/MODULE.bazel", + "line": 7, + "column": 28 + }, + "imports": { + "pypi__click": "pypi__click", + "pypi__pip": "pypi__pip", + "pypi__pip_tools": "pypi__pip_tools", + "pypi__pkginfo": "pypi__pkginfo", + "pypi__setuptools": "pypi__setuptools", + "pypi__wheel": "pypi__wheel" + 
}, + "devImports": [], + "tags": [], + "hasDevUseExtension": false, + "hasNonDevUseExtension": true + } + ], + "deps": { + "bazel_tools": "bazel_tools@_", + "local_config_platform": "local_config_platform@_" + }, + "repoSpec": { + "bzlFile": "@bazel_tools//tools/build_defs/repo:http.bzl", + "ruleClassName": "http_archive", + "attributes": { + "name": "rules_python~0.4.0", + "urls": [ + "https://github.com/bazelbuild/rules_python/releases/download/0.4.0/rules_python-0.4.0.tar.gz" + ], + "integrity": "sha256-lUqom0kb5KCDMEosuDgBnIuMNyCnq7nEy4GseiQjDOo=", + "strip_prefix": "", + "remote_patches": { + "https://bcr.bazel.build/modules/rules_python/0.4.0/patches/propagate_pip_install_dependencies.patch": "sha256-v7S/dem/mixg63MF4KoRGDA4KEol9ab/tIVp+6Xq0D0=", + "https://bcr.bazel.build/modules/rules_python/0.4.0/patches/module_dot_bazel.patch": "sha256-kG4VIfWxQazzTuh50mvsx6pmyoRVA4lfH5rkto/Oq+Y=" + }, + "remote_patch_strip": 1 + } + } + }, + "platforms@0.0.7": { + "name": "platforms", + "version": "0.0.7", + "key": "platforms@0.0.7", + "repoName": "platforms", + "executionPlatformsToRegister": [], + "toolchainsToRegister": [], + "extensionUsages": [], + "deps": { + "rules_license": "rules_license@0.0.7", + "bazel_tools": "bazel_tools@_", + "local_config_platform": "local_config_platform@_" + }, + "repoSpec": { + "bzlFile": "@bazel_tools//tools/build_defs/repo:http.bzl", + "ruleClassName": "http_archive", + "attributes": { + "name": "platforms", + "urls": [ + "https://github.com/bazelbuild/platforms/releases/download/0.0.7/platforms-0.0.7.tar.gz" + ], + "integrity": "sha256-OlYcmee9vpFzqmU/1Xn+hJ8djWc5V4CrR3Cx84FDHVE=", + "strip_prefix": "", + "remote_patches": {}, + "remote_patch_strip": 0 + } + } + }, + "protobuf@3.19.6": { + "name": "protobuf", + "version": "3.19.6", + "key": "protobuf@3.19.6", + "repoName": "protobuf", + "executionPlatformsToRegister": [], + "toolchainsToRegister": [], + "extensionUsages": [], + "deps": { + "bazel_skylib": "bazel_skylib@1.3.0", + 
"zlib": "zlib@1.3", + "rules_python": "rules_python@0.4.0", + "rules_cc": "rules_cc@0.0.9", + "rules_proto": "rules_proto@4.0.0", + "rules_java": "rules_java@7.1.0", + "bazel_tools": "bazel_tools@_", + "local_config_platform": "local_config_platform@_" + }, + "repoSpec": { + "bzlFile": "@bazel_tools//tools/build_defs/repo:http.bzl", + "ruleClassName": "http_archive", + "attributes": { + "name": "protobuf~3.19.6", + "urls": [ + "https://github.com/protocolbuffers/protobuf/archive/refs/tags/v3.19.6.zip" + ], + "integrity": "sha256-OH4sVZuyx8G8N5jE5s/wFTgaebJ1hpavy/johzC0c4k=", + "strip_prefix": "protobuf-3.19.6", + "remote_patches": { + "https://bcr.bazel.build/modules/protobuf/3.19.6/patches/relative_repo_names.patch": "sha256-w/5gw/zGv8NFId+669hcdw1Uus2lxgYpulATHIwIByI=", + "https://bcr.bazel.build/modules/protobuf/3.19.6/patches/remove_dependency_on_rules_jvm_external.patch": "sha256-THUTnVgEBmjA0W7fKzIyZOVG58DnW9HQTkr4D2zKUUc=", + "https://bcr.bazel.build/modules/protobuf/3.19.6/patches/add_module_dot_bazel_for_examples.patch": "sha256-s/b1gi3baK3LsXefI2rQilhmkb2R5jVJdnT6zEcdfHY=", + "https://bcr.bazel.build/modules/protobuf/3.19.6/patches/module_dot_bazel.patch": "sha256-S0DEni8zgx7rHscW3z/rCEubQnYec0XhNet640cw0h4=" + }, + "remote_patch_strip": 1 + } + } + }, + "zlib@1.3": { + "name": "zlib", + "version": "1.3", + "key": "zlib@1.3", + "repoName": "zlib", + "executionPlatformsToRegister": [], + "toolchainsToRegister": [], + "extensionUsages": [], + "deps": { + "platforms": "platforms@0.0.7", + "rules_cc": "rules_cc@0.0.9", + "bazel_tools": "bazel_tools@_", + "local_config_platform": "local_config_platform@_" + }, + "repoSpec": { + "bzlFile": "@bazel_tools//tools/build_defs/repo:http.bzl", + "ruleClassName": "http_archive", + "attributes": { + "name": "zlib~1.3", + "urls": [ + "https://github.com/madler/zlib/releases/download/v1.3/zlib-1.3.tar.gz" + ], + "integrity": "sha256-/wukwpIBPbwnUws6geH5qBPNOd4Byl4Pi/NVcC76WT4=", + "strip_prefix": "zlib-1.3", + 
"remote_patches": { + "https://bcr.bazel.build/modules/zlib/1.3/patches/add_build_file.patch": "sha256-Ei+FYaaOo7A3jTKunMEodTI0Uw5NXQyZEcboMC8JskY=", + "https://bcr.bazel.build/modules/zlib/1.3/patches/module_dot_bazel.patch": "sha256-fPWLM+2xaF/kuy+kZc1YTfW6hNjrkG400Ho7gckuyJk=" + }, + "remote_patch_strip": 0 + } + } + }, + "apple_support@1.5.0": { + "name": "apple_support", + "version": "1.5.0", + "key": "apple_support@1.5.0", + "repoName": "build_bazel_apple_support", + "executionPlatformsToRegister": [], + "toolchainsToRegister": [ + "@local_config_apple_cc_toolchains//:all" + ], + "extensionUsages": [ + { + "extensionBzlFile": "@build_bazel_apple_support//crosstool:setup.bzl", + "extensionName": "apple_cc_configure_extension", + "usingModule": "apple_support@1.5.0", + "location": { + "file": "https://bcr.bazel.build/modules/apple_support/1.5.0/MODULE.bazel", + "line": 17, + "column": 35 + }, + "imports": { + "local_config_apple_cc": "local_config_apple_cc", + "local_config_apple_cc_toolchains": "local_config_apple_cc_toolchains" + }, + "devImports": [], + "tags": [], + "hasDevUseExtension": false, + "hasNonDevUseExtension": true + } + ], + "deps": { + "bazel_skylib": "bazel_skylib@1.3.0", + "platforms": "platforms@0.0.7", + "bazel_tools": "bazel_tools@_", + "local_config_platform": "local_config_platform@_" + }, + "repoSpec": { + "bzlFile": "@bazel_tools//tools/build_defs/repo:http.bzl", + "ruleClassName": "http_archive", + "attributes": { + "name": "apple_support~1.5.0", + "urls": [ + "https://github.com/bazelbuild/apple_support/releases/download/1.5.0/apple_support.1.5.0.tar.gz" + ], + "integrity": "sha256-miM41vja0yRPgj8txghKA+TQ+7J8qJLclw5okNW0gYQ=", + "strip_prefix": "", + "remote_patches": {}, + "remote_patch_strip": 0 + } + } + }, + "bazel_skylib@1.3.0": { + "name": "bazel_skylib", + "version": "1.3.0", + "key": "bazel_skylib@1.3.0", + "repoName": "bazel_skylib", + "executionPlatformsToRegister": [], + "toolchainsToRegister": [ + 
"//toolchains/unittest:cmd_toolchain", + "//toolchains/unittest:bash_toolchain" + ], + "extensionUsages": [], + "deps": { + "platforms": "platforms@0.0.7", + "bazel_tools": "bazel_tools@_", + "local_config_platform": "local_config_platform@_" + }, + "repoSpec": { + "bzlFile": "@bazel_tools//tools/build_defs/repo:http.bzl", + "ruleClassName": "http_archive", + "attributes": { + "name": "bazel_skylib~1.3.0", + "urls": [ + "https://github.com/bazelbuild/bazel-skylib/releases/download/1.3.0/bazel-skylib-1.3.0.tar.gz" + ], + "integrity": "sha256-dNVE2W9KW7Yw1GXKi7z+Ix41lOWq5X4e2/F6brPKJQY=", + "strip_prefix": "", + "remote_patches": {}, + "remote_patch_strip": 0 + } + } + } + }, + "moduleExtensions": { + "@@apple_support~1.5.0//crosstool:setup.bzl%apple_cc_configure_extension": { + "general": { + "bzlTransitiveDigest": "pMLFCYaRPkgXPQ8vtuNkMfiHfPmRBy6QJfnid4sWfv0=", + "accumulatedFileDigests": {}, + "envVariables": {}, + "generatedRepoSpecs": { + "local_config_apple_cc": { + "bzlFile": "@@apple_support~1.5.0//crosstool:setup.bzl", + "ruleClassName": "_apple_cc_autoconf", + "attributes": { + "name": "apple_support~1.5.0~apple_cc_configure_extension~local_config_apple_cc" + } + }, + "local_config_apple_cc_toolchains": { + "bzlFile": "@@apple_support~1.5.0//crosstool:setup.bzl", + "ruleClassName": "_apple_cc_autoconf_toolchains", + "attributes": { + "name": "apple_support~1.5.0~apple_cc_configure_extension~local_config_apple_cc_toolchains" + } + } + } + } + }, + "@@bazel_tools//tools/cpp:cc_configure.bzl%cc_configure_extension": { + "general": { + "bzlTransitiveDigest": "O9sf6ilKWU9Veed02jG9o2HM/xgV/UAyciuFBuxrFRY=", + "accumulatedFileDigests": {}, + "envVariables": {}, + "generatedRepoSpecs": { + "local_config_cc": { + "bzlFile": "@@bazel_tools//tools/cpp:cc_configure.bzl", + "ruleClassName": "cc_autoconf", + "attributes": { + "name": "bazel_tools~cc_configure_extension~local_config_cc" + } + }, + "local_config_cc_toolchains": { + "bzlFile": 
"@@bazel_tools//tools/cpp:cc_configure.bzl", + "ruleClassName": "cc_autoconf_toolchains", + "attributes": { + "name": "bazel_tools~cc_configure_extension~local_config_cc_toolchains" + } + } + } + } + }, + "@@bazel_tools//tools/osx:xcode_configure.bzl%xcode_configure_extension": { + "general": { + "bzlTransitiveDigest": "Qh2bWTU6QW6wkrd87qrU4YeY+SG37Nvw3A0PR4Y0L2Y=", + "accumulatedFileDigests": {}, + "envVariables": {}, + "generatedRepoSpecs": { + "local_config_xcode": { + "bzlFile": "@@bazel_tools//tools/osx:xcode_configure.bzl", + "ruleClassName": "xcode_autoconf", + "attributes": { + "name": "bazel_tools~xcode_configure_extension~local_config_xcode", + "xcode_locator": "@bazel_tools//tools/osx:xcode_locator.m", + "remote_xcode": "" + } + } + } + } + }, + "@@bazel_tools//tools/sh:sh_configure.bzl%sh_configure_extension": { + "general": { + "bzlTransitiveDigest": "hp4NgmNjEg5+xgvzfh6L83bt9/aiiWETuNpwNuF1MSU=", + "accumulatedFileDigests": {}, + "envVariables": {}, + "generatedRepoSpecs": { + "local_config_sh": { + "bzlFile": "@@bazel_tools//tools/sh:sh_configure.bzl", + "ruleClassName": "sh_config", + "attributes": { + "name": "bazel_tools~sh_configure_extension~local_config_sh" + } + } + } + } + }, + "@@rules_java~7.1.0//java:extensions.bzl%toolchains": { + "general": { + "bzlTransitiveDigest": "iUIRqCK7tkhvcDJCAfPPqSd06IHG0a8HQD0xeQyVAqw=", + "accumulatedFileDigests": {}, + "envVariables": {}, + "generatedRepoSpecs": { + "remotejdk21_linux_toolchain_config_repo": { + "bzlFile": "@@rules_java~7.1.0//toolchains:remote_java_repository.bzl", + "ruleClassName": "_toolchain_config", + "attributes": { + "name": "rules_java~7.1.0~toolchains~remotejdk21_linux_toolchain_config_repo", + "build_file": "\nconfig_setting(\n name = \"prefix_version_setting\",\n values = {\"java_runtime_version\": \"remotejdk_21\"},\n visibility = [\"//visibility:private\"],\n)\nconfig_setting(\n name = \"version_setting\",\n values = {\"java_runtime_version\": \"21\"},\n visibility = 
[\"//visibility:private\"],\n)\nalias(\n name = \"version_or_prefix_version_setting\",\n actual = select({\n \":version_setting\": \":version_setting\",\n \"//conditions:default\": \":prefix_version_setting\",\n }),\n visibility = [\"//visibility:private\"],\n)\ntoolchain(\n name = \"toolchain\",\n target_compatible_with = [\"@platforms//os:linux\", \"@platforms//cpu:x86_64\"],\n target_settings = [\":version_or_prefix_version_setting\"],\n toolchain_type = \"@bazel_tools//tools/jdk:runtime_toolchain_type\",\n toolchain = \"@remotejdk21_linux//:jdk\",\n)\ntoolchain(\n name = \"bootstrap_runtime_toolchain\",\n # These constraints are not required for correctness, but prevent fetches of remote JDK for\n # different architectures. As every Java compilation toolchain depends on a bootstrap runtime in\n # the same configuration, this constraint will not result in toolchain resolution failures.\n exec_compatible_with = [\"@platforms//os:linux\", \"@platforms//cpu:x86_64\"],\n target_settings = [\":version_or_prefix_version_setting\"],\n toolchain_type = \"@bazel_tools//tools/jdk:bootstrap_runtime_toolchain_type\",\n toolchain = \"@remotejdk21_linux//:jdk\",\n)\n" + } + }, + "remotejdk17_linux_s390x_toolchain_config_repo": { + "bzlFile": "@@rules_java~7.1.0//toolchains:remote_java_repository.bzl", + "ruleClassName": "_toolchain_config", + "attributes": { + "name": "rules_java~7.1.0~toolchains~remotejdk17_linux_s390x_toolchain_config_repo", + "build_file": "\nconfig_setting(\n name = \"prefix_version_setting\",\n values = {\"java_runtime_version\": \"remotejdk_17\"},\n visibility = [\"//visibility:private\"],\n)\nconfig_setting(\n name = \"version_setting\",\n values = {\"java_runtime_version\": \"17\"},\n visibility = [\"//visibility:private\"],\n)\nalias(\n name = \"version_or_prefix_version_setting\",\n actual = select({\n \":version_setting\": \":version_setting\",\n \"//conditions:default\": \":prefix_version_setting\",\n }),\n visibility = 
[\"//visibility:private\"],\n)\ntoolchain(\n name = \"toolchain\",\n target_compatible_with = [\"@platforms//os:linux\", \"@platforms//cpu:s390x\"],\n target_settings = [\":version_or_prefix_version_setting\"],\n toolchain_type = \"@bazel_tools//tools/jdk:runtime_toolchain_type\",\n toolchain = \"@remotejdk17_linux_s390x//:jdk\",\n)\ntoolchain(\n name = \"bootstrap_runtime_toolchain\",\n # These constraints are not required for correctness, but prevent fetches of remote JDK for\n # different architectures. As every Java compilation toolchain depends on a bootstrap runtime in\n # the same configuration, this constraint will not result in toolchain resolution failures.\n exec_compatible_with = [\"@platforms//os:linux\", \"@platforms//cpu:s390x\"],\n target_settings = [\":version_or_prefix_version_setting\"],\n toolchain_type = \"@bazel_tools//tools/jdk:bootstrap_runtime_toolchain_type\",\n toolchain = \"@remotejdk17_linux_s390x//:jdk\",\n)\n" + } + }, + "remotejdk17_macos_toolchain_config_repo": { + "bzlFile": "@@rules_java~7.1.0//toolchains:remote_java_repository.bzl", + "ruleClassName": "_toolchain_config", + "attributes": { + "name": "rules_java~7.1.0~toolchains~remotejdk17_macos_toolchain_config_repo", + "build_file": "\nconfig_setting(\n name = \"prefix_version_setting\",\n values = {\"java_runtime_version\": \"remotejdk_17\"},\n visibility = [\"//visibility:private\"],\n)\nconfig_setting(\n name = \"version_setting\",\n values = {\"java_runtime_version\": \"17\"},\n visibility = [\"//visibility:private\"],\n)\nalias(\n name = \"version_or_prefix_version_setting\",\n actual = select({\n \":version_setting\": \":version_setting\",\n \"//conditions:default\": \":prefix_version_setting\",\n }),\n visibility = [\"//visibility:private\"],\n)\ntoolchain(\n name = \"toolchain\",\n target_compatible_with = [\"@platforms//os:macos\", \"@platforms//cpu:x86_64\"],\n target_settings = [\":version_or_prefix_version_setting\"],\n toolchain_type = 
\"@bazel_tools//tools/jdk:runtime_toolchain_type\",\n toolchain = \"@remotejdk17_macos//:jdk\",\n)\ntoolchain(\n name = \"bootstrap_runtime_toolchain\",\n # These constraints are not required for correctness, but prevent fetches of remote JDK for\n # different architectures. As every Java compilation toolchain depends on a bootstrap runtime in\n # the same configuration, this constraint will not result in toolchain resolution failures.\n exec_compatible_with = [\"@platforms//os:macos\", \"@platforms//cpu:x86_64\"],\n target_settings = [\":version_or_prefix_version_setting\"],\n toolchain_type = \"@bazel_tools//tools/jdk:bootstrap_runtime_toolchain_type\",\n toolchain = \"@remotejdk17_macos//:jdk\",\n)\n" + } + }, + "remotejdk21_macos_aarch64_toolchain_config_repo": { + "bzlFile": "@@rules_java~7.1.0//toolchains:remote_java_repository.bzl", + "ruleClassName": "_toolchain_config", + "attributes": { + "name": "rules_java~7.1.0~toolchains~remotejdk21_macos_aarch64_toolchain_config_repo", + "build_file": "\nconfig_setting(\n name = \"prefix_version_setting\",\n values = {\"java_runtime_version\": \"remotejdk_21\"},\n visibility = [\"//visibility:private\"],\n)\nconfig_setting(\n name = \"version_setting\",\n values = {\"java_runtime_version\": \"21\"},\n visibility = [\"//visibility:private\"],\n)\nalias(\n name = \"version_or_prefix_version_setting\",\n actual = select({\n \":version_setting\": \":version_setting\",\n \"//conditions:default\": \":prefix_version_setting\",\n }),\n visibility = [\"//visibility:private\"],\n)\ntoolchain(\n name = \"toolchain\",\n target_compatible_with = [\"@platforms//os:macos\", \"@platforms//cpu:aarch64\"],\n target_settings = [\":version_or_prefix_version_setting\"],\n toolchain_type = \"@bazel_tools//tools/jdk:runtime_toolchain_type\",\n toolchain = \"@remotejdk21_macos_aarch64//:jdk\",\n)\ntoolchain(\n name = \"bootstrap_runtime_toolchain\",\n # These constraints are not required for correctness, but prevent fetches of remote JDK 
for\n # different architectures. As every Java compilation toolchain depends on a bootstrap runtime in\n # the same configuration, this constraint will not result in toolchain resolution failures.\n exec_compatible_with = [\"@platforms//os:macos\", \"@platforms//cpu:aarch64\"],\n target_settings = [\":version_or_prefix_version_setting\"],\n toolchain_type = \"@bazel_tools//tools/jdk:bootstrap_runtime_toolchain_type\",\n toolchain = \"@remotejdk21_macos_aarch64//:jdk\",\n)\n" + } + }, + "remotejdk17_linux_aarch64_toolchain_config_repo": { + "bzlFile": "@@rules_java~7.1.0//toolchains:remote_java_repository.bzl", + "ruleClassName": "_toolchain_config", + "attributes": { + "name": "rules_java~7.1.0~toolchains~remotejdk17_linux_aarch64_toolchain_config_repo", + "build_file": "\nconfig_setting(\n name = \"prefix_version_setting\",\n values = {\"java_runtime_version\": \"remotejdk_17\"},\n visibility = [\"//visibility:private\"],\n)\nconfig_setting(\n name = \"version_setting\",\n values = {\"java_runtime_version\": \"17\"},\n visibility = [\"//visibility:private\"],\n)\nalias(\n name = \"version_or_prefix_version_setting\",\n actual = select({\n \":version_setting\": \":version_setting\",\n \"//conditions:default\": \":prefix_version_setting\",\n }),\n visibility = [\"//visibility:private\"],\n)\ntoolchain(\n name = \"toolchain\",\n target_compatible_with = [\"@platforms//os:linux\", \"@platforms//cpu:aarch64\"],\n target_settings = [\":version_or_prefix_version_setting\"],\n toolchain_type = \"@bazel_tools//tools/jdk:runtime_toolchain_type\",\n toolchain = \"@remotejdk17_linux_aarch64//:jdk\",\n)\ntoolchain(\n name = \"bootstrap_runtime_toolchain\",\n # These constraints are not required for correctness, but prevent fetches of remote JDK for\n # different architectures. 
As every Java compilation toolchain depends on a bootstrap runtime in\n # the same configuration, this constraint will not result in toolchain resolution failures.\n exec_compatible_with = [\"@platforms//os:linux\", \"@platforms//cpu:aarch64\"],\n target_settings = [\":version_or_prefix_version_setting\"],\n toolchain_type = \"@bazel_tools//tools/jdk:bootstrap_runtime_toolchain_type\",\n toolchain = \"@remotejdk17_linux_aarch64//:jdk\",\n)\n" + } + }, + "remotejdk21_macos_aarch64": { + "bzlFile": "@@bazel_tools//tools/build_defs/repo:http.bzl", + "ruleClassName": "http_archive", + "attributes": { + "name": "rules_java~7.1.0~toolchains~remotejdk21_macos_aarch64", + "build_file_content": "load(\"@rules_java//java:defs.bzl\", \"java_runtime\")\n\npackage(default_visibility = [\"//visibility:public\"])\n\nexports_files([\"WORKSPACE\", \"BUILD.bazel\"])\n\nfilegroup(\n name = \"jre\",\n srcs = glob(\n [\n \"jre/bin/**\",\n \"jre/lib/**\",\n ],\n allow_empty = True,\n # In some configurations, Java browser plugin is considered harmful and\n # common antivirus software blocks access to npjp2.dll interfering with Bazel,\n # so do not include it in JRE on Windows.\n exclude = [\"jre/bin/plugin2/**\"],\n ),\n)\n\nfilegroup(\n name = \"jdk-bin\",\n srcs = glob(\n [\"bin/**\"],\n # The JDK on Windows sometimes contains a directory called\n # \"%systemroot%\", which is not a valid label.\n exclude = [\"**/*%*/**\"],\n ),\n)\n\n# This folder holds security policies.\nfilegroup(\n name = \"jdk-conf\",\n srcs = glob(\n [\"conf/**\"],\n allow_empty = True,\n ),\n)\n\nfilegroup(\n name = \"jdk-include\",\n srcs = glob(\n [\"include/**\"],\n allow_empty = True,\n ),\n)\n\nfilegroup(\n name = \"jdk-lib\",\n srcs = glob(\n [\"lib/**\", \"release\"],\n allow_empty = True,\n exclude = [\n \"lib/missioncontrol/**\",\n \"lib/visualvm/**\",\n ],\n ),\n)\n\njava_runtime(\n name = \"jdk\",\n srcs = [\n \":jdk-bin\",\n \":jdk-conf\",\n \":jdk-include\",\n \":jdk-lib\",\n \":jre\",\n ],\n # 
Provide the 'java` binary explicitly so that the correct path is used by\n # Bazel even when the host platform differs from the execution platform.\n # Exactly one of the two globs will be empty depending on the host platform.\n # When --incompatible_disallow_empty_glob is enabled, each individual empty\n # glob will fail without allow_empty = True, even if the overall result is\n # non-empty.\n java = glob([\"bin/java.exe\", \"bin/java\"], allow_empty = True)[0],\n version = 21,\n)\n", + "sha256": "2a7a99a3ea263dbd8d32a67d1e6e363ba8b25c645c826f5e167a02bbafaff1fa", + "strip_prefix": "zulu21.28.85-ca-jdk21.0.0-macosx_aarch64", + "urls": [ + "https://mirror.bazel.build/cdn.azul.com/zulu/bin/zulu21.28.85-ca-jdk21.0.0-macosx_aarch64.tar.gz", + "https://cdn.azul.com/zulu/bin/zulu21.28.85-ca-jdk21.0.0-macosx_aarch64.tar.gz" + ] + } + }, + "remotejdk17_linux_toolchain_config_repo": { + "bzlFile": "@@rules_java~7.1.0//toolchains:remote_java_repository.bzl", + "ruleClassName": "_toolchain_config", + "attributes": { + "name": "rules_java~7.1.0~toolchains~remotejdk17_linux_toolchain_config_repo", + "build_file": "\nconfig_setting(\n name = \"prefix_version_setting\",\n values = {\"java_runtime_version\": \"remotejdk_17\"},\n visibility = [\"//visibility:private\"],\n)\nconfig_setting(\n name = \"version_setting\",\n values = {\"java_runtime_version\": \"17\"},\n visibility = [\"//visibility:private\"],\n)\nalias(\n name = \"version_or_prefix_version_setting\",\n actual = select({\n \":version_setting\": \":version_setting\",\n \"//conditions:default\": \":prefix_version_setting\",\n }),\n visibility = [\"//visibility:private\"],\n)\ntoolchain(\n name = \"toolchain\",\n target_compatible_with = [\"@platforms//os:linux\", \"@platforms//cpu:x86_64\"],\n target_settings = [\":version_or_prefix_version_setting\"],\n toolchain_type = \"@bazel_tools//tools/jdk:runtime_toolchain_type\",\n toolchain = \"@remotejdk17_linux//:jdk\",\n)\ntoolchain(\n name = 
\"bootstrap_runtime_toolchain\",\n # These constraints are not required for correctness, but prevent fetches of remote JDK for\n # different architectures. As every Java compilation toolchain depends on a bootstrap runtime in\n # the same configuration, this constraint will not result in toolchain resolution failures.\n exec_compatible_with = [\"@platforms//os:linux\", \"@platforms//cpu:x86_64\"],\n target_settings = [\":version_or_prefix_version_setting\"],\n toolchain_type = \"@bazel_tools//tools/jdk:bootstrap_runtime_toolchain_type\",\n toolchain = \"@remotejdk17_linux//:jdk\",\n)\n" + } + }, + "remotejdk17_macos_aarch64": { + "bzlFile": "@@bazel_tools//tools/build_defs/repo:http.bzl", + "ruleClassName": "http_archive", + "attributes": { + "name": "rules_java~7.1.0~toolchains~remotejdk17_macos_aarch64", + "build_file_content": "load(\"@rules_java//java:defs.bzl\", \"java_runtime\")\n\npackage(default_visibility = [\"//visibility:public\"])\n\nexports_files([\"WORKSPACE\", \"BUILD.bazel\"])\n\nfilegroup(\n name = \"jre\",\n srcs = glob(\n [\n \"jre/bin/**\",\n \"jre/lib/**\",\n ],\n allow_empty = True,\n # In some configurations, Java browser plugin is considered harmful and\n # common antivirus software blocks access to npjp2.dll interfering with Bazel,\n # so do not include it in JRE on Windows.\n exclude = [\"jre/bin/plugin2/**\"],\n ),\n)\n\nfilegroup(\n name = \"jdk-bin\",\n srcs = glob(\n [\"bin/**\"],\n # The JDK on Windows sometimes contains a directory called\n # \"%systemroot%\", which is not a valid label.\n exclude = [\"**/*%*/**\"],\n ),\n)\n\n# This folder holds security policies.\nfilegroup(\n name = \"jdk-conf\",\n srcs = glob(\n [\"conf/**\"],\n allow_empty = True,\n ),\n)\n\nfilegroup(\n name = \"jdk-include\",\n srcs = glob(\n [\"include/**\"],\n allow_empty = True,\n ),\n)\n\nfilegroup(\n name = \"jdk-lib\",\n srcs = glob(\n [\"lib/**\", \"release\"],\n allow_empty = True,\n exclude = [\n \"lib/missioncontrol/**\",\n \"lib/visualvm/**\",\n 
],\n ),\n)\n\njava_runtime(\n name = \"jdk\",\n srcs = [\n \":jdk-bin\",\n \":jdk-conf\",\n \":jdk-include\",\n \":jdk-lib\",\n \":jre\",\n ],\n # Provide the 'java` binary explicitly so that the correct path is used by\n # Bazel even when the host platform differs from the execution platform.\n # Exactly one of the two globs will be empty depending on the host platform.\n # When --incompatible_disallow_empty_glob is enabled, each individual empty\n # glob will fail without allow_empty = True, even if the overall result is\n # non-empty.\n java = glob([\"bin/java.exe\", \"bin/java\"], allow_empty = True)[0],\n version = 17,\n)\n", + "sha256": "314b04568ec0ae9b36ba03c9cbd42adc9e1265f74678923b19297d66eb84dcca", + "strip_prefix": "zulu17.44.53-ca-jdk17.0.8.1-macosx_aarch64", + "urls": [ + "https://mirror.bazel.build/cdn.azul.com/zulu/bin/zulu17.44.53-ca-jdk17.0.8.1-macosx_aarch64.tar.gz", + "https://cdn.azul.com/zulu/bin/zulu17.44.53-ca-jdk17.0.8.1-macosx_aarch64.tar.gz" + ] + } + }, + "remote_java_tools_windows": { + "bzlFile": "@@bazel_tools//tools/build_defs/repo:http.bzl", + "ruleClassName": "http_archive", + "attributes": { + "name": "rules_java~7.1.0~toolchains~remote_java_tools_windows", + "sha256": "c5c70c214a350f12cbf52da8270fa43ba629b795f3dd328028a38f8f0d39c2a1", + "urls": [ + "https://mirror.bazel.build/bazel_java_tools/releases/java/v13.1/java_tools_windows-v13.1.zip", + "https://github.com/bazelbuild/java_tools/releases/download/java_v13.1/java_tools_windows-v13.1.zip" + ] + } + }, + "remotejdk11_win": { + "bzlFile": "@@bazel_tools//tools/build_defs/repo:http.bzl", + "ruleClassName": "http_archive", + "attributes": { + "name": "rules_java~7.1.0~toolchains~remotejdk11_win", + "build_file_content": "load(\"@rules_java//java:defs.bzl\", \"java_runtime\")\n\npackage(default_visibility = [\"//visibility:public\"])\n\nexports_files([\"WORKSPACE\", \"BUILD.bazel\"])\n\nfilegroup(\n name = \"jre\",\n srcs = glob(\n [\n \"jre/bin/**\",\n \"jre/lib/**\",\n ],\n 
allow_empty = True,\n # In some configurations, Java browser plugin is considered harmful and\n # common antivirus software blocks access to npjp2.dll interfering with Bazel,\n # so do not include it in JRE on Windows.\n exclude = [\"jre/bin/plugin2/**\"],\n ),\n)\n\nfilegroup(\n name = \"jdk-bin\",\n srcs = glob(\n [\"bin/**\"],\n # The JDK on Windows sometimes contains a directory called\n # \"%systemroot%\", which is not a valid label.\n exclude = [\"**/*%*/**\"],\n ),\n)\n\n# This folder holds security policies.\nfilegroup(\n name = \"jdk-conf\",\n srcs = glob(\n [\"conf/**\"],\n allow_empty = True,\n ),\n)\n\nfilegroup(\n name = \"jdk-include\",\n srcs = glob(\n [\"include/**\"],\n allow_empty = True,\n ),\n)\n\nfilegroup(\n name = \"jdk-lib\",\n srcs = glob(\n [\"lib/**\", \"release\"],\n allow_empty = True,\n exclude = [\n \"lib/missioncontrol/**\",\n \"lib/visualvm/**\",\n ],\n ),\n)\n\njava_runtime(\n name = \"jdk\",\n srcs = [\n \":jdk-bin\",\n \":jdk-conf\",\n \":jdk-include\",\n \":jdk-lib\",\n \":jre\",\n ],\n # Provide the 'java` binary explicitly so that the correct path is used by\n # Bazel even when the host platform differs from the execution platform.\n # Exactly one of the two globs will be empty depending on the host platform.\n # When --incompatible_disallow_empty_glob is enabled, each individual empty\n # glob will fail without allow_empty = True, even if the overall result is\n # non-empty.\n java = glob([\"bin/java.exe\", \"bin/java\"], allow_empty = True)[0],\n version = 11,\n)\n", + "sha256": "43408193ce2fa0862819495b5ae8541085b95660153f2adcf91a52d3a1710e83", + "strip_prefix": "zulu11.66.15-ca-jdk11.0.20-win_x64", + "urls": [ + "https://mirror.bazel.build/cdn.azul.com/zulu/bin/zulu11.66.15-ca-jdk11.0.20-win_x64.zip", + "https://cdn.azul.com/zulu/bin/zulu11.66.15-ca-jdk11.0.20-win_x64.zip" + ] + } + }, + "remotejdk11_win_toolchain_config_repo": { + "bzlFile": "@@rules_java~7.1.0//toolchains:remote_java_repository.bzl", + "ruleClassName": 
"_toolchain_config", + "attributes": { + "name": "rules_java~7.1.0~toolchains~remotejdk11_win_toolchain_config_repo", + "build_file": "\nconfig_setting(\n name = \"prefix_version_setting\",\n values = {\"java_runtime_version\": \"remotejdk_11\"},\n visibility = [\"//visibility:private\"],\n)\nconfig_setting(\n name = \"version_setting\",\n values = {\"java_runtime_version\": \"11\"},\n visibility = [\"//visibility:private\"],\n)\nalias(\n name = \"version_or_prefix_version_setting\",\n actual = select({\n \":version_setting\": \":version_setting\",\n \"//conditions:default\": \":prefix_version_setting\",\n }),\n visibility = [\"//visibility:private\"],\n)\ntoolchain(\n name = \"toolchain\",\n target_compatible_with = [\"@platforms//os:windows\", \"@platforms//cpu:x86_64\"],\n target_settings = [\":version_or_prefix_version_setting\"],\n toolchain_type = \"@bazel_tools//tools/jdk:runtime_toolchain_type\",\n toolchain = \"@remotejdk11_win//:jdk\",\n)\ntoolchain(\n name = \"bootstrap_runtime_toolchain\",\n # These constraints are not required for correctness, but prevent fetches of remote JDK for\n # different architectures. 
As every Java compilation toolchain depends on a bootstrap runtime in\n # the same configuration, this constraint will not result in toolchain resolution failures.\n exec_compatible_with = [\"@platforms//os:windows\", \"@platforms//cpu:x86_64\"],\n target_settings = [\":version_or_prefix_version_setting\"],\n toolchain_type = \"@bazel_tools//tools/jdk:bootstrap_runtime_toolchain_type\",\n toolchain = \"@remotejdk11_win//:jdk\",\n)\n" + } + }, + "remotejdk11_linux_aarch64": { + "bzlFile": "@@bazel_tools//tools/build_defs/repo:http.bzl", + "ruleClassName": "http_archive", + "attributes": { + "name": "rules_java~7.1.0~toolchains~remotejdk11_linux_aarch64", + "build_file_content": "load(\"@rules_java//java:defs.bzl\", \"java_runtime\")\n\npackage(default_visibility = [\"//visibility:public\"])\n\nexports_files([\"WORKSPACE\", \"BUILD.bazel\"])\n\nfilegroup(\n name = \"jre\",\n srcs = glob(\n [\n \"jre/bin/**\",\n \"jre/lib/**\",\n ],\n allow_empty = True,\n # In some configurations, Java browser plugin is considered harmful and\n # common antivirus software blocks access to npjp2.dll interfering with Bazel,\n # so do not include it in JRE on Windows.\n exclude = [\"jre/bin/plugin2/**\"],\n ),\n)\n\nfilegroup(\n name = \"jdk-bin\",\n srcs = glob(\n [\"bin/**\"],\n # The JDK on Windows sometimes contains a directory called\n # \"%systemroot%\", which is not a valid label.\n exclude = [\"**/*%*/**\"],\n ),\n)\n\n# This folder holds security policies.\nfilegroup(\n name = \"jdk-conf\",\n srcs = glob(\n [\"conf/**\"],\n allow_empty = True,\n ),\n)\n\nfilegroup(\n name = \"jdk-include\",\n srcs = glob(\n [\"include/**\"],\n allow_empty = True,\n ),\n)\n\nfilegroup(\n name = \"jdk-lib\",\n srcs = glob(\n [\"lib/**\", \"release\"],\n allow_empty = True,\n exclude = [\n \"lib/missioncontrol/**\",\n \"lib/visualvm/**\",\n ],\n ),\n)\n\njava_runtime(\n name = \"jdk\",\n srcs = [\n \":jdk-bin\",\n \":jdk-conf\",\n \":jdk-include\",\n \":jdk-lib\",\n \":jre\",\n ],\n # Provide the 
'java` binary explicitly so that the correct path is used by\n # Bazel even when the host platform differs from the execution platform.\n # Exactly one of the two globs will be empty depending on the host platform.\n # When --incompatible_disallow_empty_glob is enabled, each individual empty\n # glob will fail without allow_empty = True, even if the overall result is\n # non-empty.\n java = glob([\"bin/java.exe\", \"bin/java\"], allow_empty = True)[0],\n version = 11,\n)\n", + "sha256": "54174439f2b3fddd11f1048c397fe7bb45d4c9d66d452d6889b013d04d21c4de", + "strip_prefix": "zulu11.66.15-ca-jdk11.0.20-linux_aarch64", + "urls": [ + "https://mirror.bazel.build/cdn.azul.com/zulu/bin/zulu11.66.15-ca-jdk11.0.20-linux_aarch64.tar.gz", + "https://cdn.azul.com/zulu/bin/zulu11.66.15-ca-jdk11.0.20-linux_aarch64.tar.gz" + ] + } + }, + "remotejdk17_linux": { + "bzlFile": "@@bazel_tools//tools/build_defs/repo:http.bzl", + "ruleClassName": "http_archive", + "attributes": { + "name": "rules_java~7.1.0~toolchains~remotejdk17_linux", + "build_file_content": "load(\"@rules_java//java:defs.bzl\", \"java_runtime\")\n\npackage(default_visibility = [\"//visibility:public\"])\n\nexports_files([\"WORKSPACE\", \"BUILD.bazel\"])\n\nfilegroup(\n name = \"jre\",\n srcs = glob(\n [\n \"jre/bin/**\",\n \"jre/lib/**\",\n ],\n allow_empty = True,\n # In some configurations, Java browser plugin is considered harmful and\n # common antivirus software blocks access to npjp2.dll interfering with Bazel,\n # so do not include it in JRE on Windows.\n exclude = [\"jre/bin/plugin2/**\"],\n ),\n)\n\nfilegroup(\n name = \"jdk-bin\",\n srcs = glob(\n [\"bin/**\"],\n # The JDK on Windows sometimes contains a directory called\n # \"%systemroot%\", which is not a valid label.\n exclude = [\"**/*%*/**\"],\n ),\n)\n\n# This folder holds security policies.\nfilegroup(\n name = \"jdk-conf\",\n srcs = glob(\n [\"conf/**\"],\n allow_empty = True,\n ),\n)\n\nfilegroup(\n name = \"jdk-include\",\n srcs = glob(\n 
[\"include/**\"],\n allow_empty = True,\n ),\n)\n\nfilegroup(\n name = \"jdk-lib\",\n srcs = glob(\n [\"lib/**\", \"release\"],\n allow_empty = True,\n exclude = [\n \"lib/missioncontrol/**\",\n \"lib/visualvm/**\",\n ],\n ),\n)\n\njava_runtime(\n name = \"jdk\",\n srcs = [\n \":jdk-bin\",\n \":jdk-conf\",\n \":jdk-include\",\n \":jdk-lib\",\n \":jre\",\n ],\n # Provide the 'java` binary explicitly so that the correct path is used by\n # Bazel even when the host platform differs from the execution platform.\n # Exactly one of the two globs will be empty depending on the host platform.\n # When --incompatible_disallow_empty_glob is enabled, each individual empty\n # glob will fail without allow_empty = True, even if the overall result is\n # non-empty.\n java = glob([\"bin/java.exe\", \"bin/java\"], allow_empty = True)[0],\n version = 17,\n)\n", + "sha256": "b9482f2304a1a68a614dfacddcf29569a72f0fac32e6c74f83dc1b9a157b8340", + "strip_prefix": "zulu17.44.53-ca-jdk17.0.8.1-linux_x64", + "urls": [ + "https://mirror.bazel.build/cdn.azul.com/zulu/bin/zulu17.44.53-ca-jdk17.0.8.1-linux_x64.tar.gz", + "https://cdn.azul.com/zulu/bin/zulu17.44.53-ca-jdk17.0.8.1-linux_x64.tar.gz" + ] + } + }, + "remotejdk11_linux_s390x_toolchain_config_repo": { + "bzlFile": "@@rules_java~7.1.0//toolchains:remote_java_repository.bzl", + "ruleClassName": "_toolchain_config", + "attributes": { + "name": "rules_java~7.1.0~toolchains~remotejdk11_linux_s390x_toolchain_config_repo", + "build_file": "\nconfig_setting(\n name = \"prefix_version_setting\",\n values = {\"java_runtime_version\": \"remotejdk_11\"},\n visibility = [\"//visibility:private\"],\n)\nconfig_setting(\n name = \"version_setting\",\n values = {\"java_runtime_version\": \"11\"},\n visibility = [\"//visibility:private\"],\n)\nalias(\n name = \"version_or_prefix_version_setting\",\n actual = select({\n \":version_setting\": \":version_setting\",\n \"//conditions:default\": \":prefix_version_setting\",\n }),\n visibility = 
[\"//visibility:private\"],\n)\ntoolchain(\n name = \"toolchain\",\n target_compatible_with = [\"@platforms//os:linux\", \"@platforms//cpu:s390x\"],\n target_settings = [\":version_or_prefix_version_setting\"],\n toolchain_type = \"@bazel_tools//tools/jdk:runtime_toolchain_type\",\n toolchain = \"@remotejdk11_linux_s390x//:jdk\",\n)\ntoolchain(\n name = \"bootstrap_runtime_toolchain\",\n # These constraints are not required for correctness, but prevent fetches of remote JDK for\n # different architectures. As every Java compilation toolchain depends on a bootstrap runtime in\n # the same configuration, this constraint will not result in toolchain resolution failures.\n exec_compatible_with = [\"@platforms//os:linux\", \"@platforms//cpu:s390x\"],\n target_settings = [\":version_or_prefix_version_setting\"],\n toolchain_type = \"@bazel_tools//tools/jdk:bootstrap_runtime_toolchain_type\",\n toolchain = \"@remotejdk11_linux_s390x//:jdk\",\n)\n" + } + }, + "remotejdk11_linux_toolchain_config_repo": { + "bzlFile": "@@rules_java~7.1.0//toolchains:remote_java_repository.bzl", + "ruleClassName": "_toolchain_config", + "attributes": { + "name": "rules_java~7.1.0~toolchains~remotejdk11_linux_toolchain_config_repo", + "build_file": "\nconfig_setting(\n name = \"prefix_version_setting\",\n values = {\"java_runtime_version\": \"remotejdk_11\"},\n visibility = [\"//visibility:private\"],\n)\nconfig_setting(\n name = \"version_setting\",\n values = {\"java_runtime_version\": \"11\"},\n visibility = [\"//visibility:private\"],\n)\nalias(\n name = \"version_or_prefix_version_setting\",\n actual = select({\n \":version_setting\": \":version_setting\",\n \"//conditions:default\": \":prefix_version_setting\",\n }),\n visibility = [\"//visibility:private\"],\n)\ntoolchain(\n name = \"toolchain\",\n target_compatible_with = [\"@platforms//os:linux\", \"@platforms//cpu:x86_64\"],\n target_settings = [\":version_or_prefix_version_setting\"],\n toolchain_type = 
\"@bazel_tools//tools/jdk:runtime_toolchain_type\",\n toolchain = \"@remotejdk11_linux//:jdk\",\n)\ntoolchain(\n name = \"bootstrap_runtime_toolchain\",\n # These constraints are not required for correctness, but prevent fetches of remote JDK for\n # different architectures. As every Java compilation toolchain depends on a bootstrap runtime in\n # the same configuration, this constraint will not result in toolchain resolution failures.\n exec_compatible_with = [\"@platforms//os:linux\", \"@platforms//cpu:x86_64\"],\n target_settings = [\":version_or_prefix_version_setting\"],\n toolchain_type = \"@bazel_tools//tools/jdk:bootstrap_runtime_toolchain_type\",\n toolchain = \"@remotejdk11_linux//:jdk\",\n)\n" + } + }, + "remotejdk11_macos": { + "bzlFile": "@@bazel_tools//tools/build_defs/repo:http.bzl", + "ruleClassName": "http_archive", + "attributes": { + "name": "rules_java~7.1.0~toolchains~remotejdk11_macos", + "build_file_content": "load(\"@rules_java//java:defs.bzl\", \"java_runtime\")\n\npackage(default_visibility = [\"//visibility:public\"])\n\nexports_files([\"WORKSPACE\", \"BUILD.bazel\"])\n\nfilegroup(\n name = \"jre\",\n srcs = glob(\n [\n \"jre/bin/**\",\n \"jre/lib/**\",\n ],\n allow_empty = True,\n # In some configurations, Java browser plugin is considered harmful and\n # common antivirus software blocks access to npjp2.dll interfering with Bazel,\n # so do not include it in JRE on Windows.\n exclude = [\"jre/bin/plugin2/**\"],\n ),\n)\n\nfilegroup(\n name = \"jdk-bin\",\n srcs = glob(\n [\"bin/**\"],\n # The JDK on Windows sometimes contains a directory called\n # \"%systemroot%\", which is not a valid label.\n exclude = [\"**/*%*/**\"],\n ),\n)\n\n# This folder holds security policies.\nfilegroup(\n name = \"jdk-conf\",\n srcs = glob(\n [\"conf/**\"],\n allow_empty = True,\n ),\n)\n\nfilegroup(\n name = \"jdk-include\",\n srcs = glob(\n [\"include/**\"],\n allow_empty = True,\n ),\n)\n\nfilegroup(\n name = \"jdk-lib\",\n srcs = glob(\n [\"lib/**\", 
\"release\"],\n allow_empty = True,\n exclude = [\n \"lib/missioncontrol/**\",\n \"lib/visualvm/**\",\n ],\n ),\n)\n\njava_runtime(\n name = \"jdk\",\n srcs = [\n \":jdk-bin\",\n \":jdk-conf\",\n \":jdk-include\",\n \":jdk-lib\",\n \":jre\",\n ],\n # Provide the 'java` binary explicitly so that the correct path is used by\n # Bazel even when the host platform differs from the execution platform.\n # Exactly one of the two globs will be empty depending on the host platform.\n # When --incompatible_disallow_empty_glob is enabled, each individual empty\n # glob will fail without allow_empty = True, even if the overall result is\n # non-empty.\n java = glob([\"bin/java.exe\", \"bin/java\"], allow_empty = True)[0],\n version = 11,\n)\n", + "sha256": "bcaab11cfe586fae7583c6d9d311c64384354fb2638eb9a012eca4c3f1a1d9fd", + "strip_prefix": "zulu11.66.15-ca-jdk11.0.20-macosx_x64", + "urls": [ + "https://mirror.bazel.build/cdn.azul.com/zulu/bin/zulu11.66.15-ca-jdk11.0.20-macosx_x64.tar.gz", + "https://cdn.azul.com/zulu/bin/zulu11.66.15-ca-jdk11.0.20-macosx_x64.tar.gz" + ] + } + }, + "remotejdk11_win_arm64": { + "bzlFile": "@@bazel_tools//tools/build_defs/repo:http.bzl", + "ruleClassName": "http_archive", + "attributes": { + "name": "rules_java~7.1.0~toolchains~remotejdk11_win_arm64", + "build_file_content": "load(\"@rules_java//java:defs.bzl\", \"java_runtime\")\n\npackage(default_visibility = [\"//visibility:public\"])\n\nexports_files([\"WORKSPACE\", \"BUILD.bazel\"])\n\nfilegroup(\n name = \"jre\",\n srcs = glob(\n [\n \"jre/bin/**\",\n \"jre/lib/**\",\n ],\n allow_empty = True,\n # In some configurations, Java browser plugin is considered harmful and\n # common antivirus software blocks access to npjp2.dll interfering with Bazel,\n # so do not include it in JRE on Windows.\n exclude = [\"jre/bin/plugin2/**\"],\n ),\n)\n\nfilegroup(\n name = \"jdk-bin\",\n srcs = glob(\n [\"bin/**\"],\n # The JDK on Windows sometimes contains a directory called\n # \"%systemroot%\", which is 
not a valid label.\n exclude = [\"**/*%*/**\"],\n ),\n)\n\n# This folder holds security policies.\nfilegroup(\n name = \"jdk-conf\",\n srcs = glob(\n [\"conf/**\"],\n allow_empty = True,\n ),\n)\n\nfilegroup(\n name = \"jdk-include\",\n srcs = glob(\n [\"include/**\"],\n allow_empty = True,\n ),\n)\n\nfilegroup(\n name = \"jdk-lib\",\n srcs = glob(\n [\"lib/**\", \"release\"],\n allow_empty = True,\n exclude = [\n \"lib/missioncontrol/**\",\n \"lib/visualvm/**\",\n ],\n ),\n)\n\njava_runtime(\n name = \"jdk\",\n srcs = [\n \":jdk-bin\",\n \":jdk-conf\",\n \":jdk-include\",\n \":jdk-lib\",\n \":jre\",\n ],\n # Provide the 'java` binary explicitly so that the correct path is used by\n # Bazel even when the host platform differs from the execution platform.\n # Exactly one of the two globs will be empty depending on the host platform.\n # When --incompatible_disallow_empty_glob is enabled, each individual empty\n # glob will fail without allow_empty = True, even if the overall result is\n # non-empty.\n java = glob([\"bin/java.exe\", \"bin/java\"], allow_empty = True)[0],\n version = 11,\n)\n", + "sha256": "b8a28e6e767d90acf793ea6f5bed0bb595ba0ba5ebdf8b99f395266161e53ec2", + "strip_prefix": "jdk-11.0.13+8", + "urls": [ + "https://mirror.bazel.build/aka.ms/download-jdk/microsoft-jdk-11.0.13.8.1-windows-aarch64.zip" + ] + } + }, + "remotejdk17_macos": { + "bzlFile": "@@bazel_tools//tools/build_defs/repo:http.bzl", + "ruleClassName": "http_archive", + "attributes": { + "name": "rules_java~7.1.0~toolchains~remotejdk17_macos", + "build_file_content": "load(\"@rules_java//java:defs.bzl\", \"java_runtime\")\n\npackage(default_visibility = [\"//visibility:public\"])\n\nexports_files([\"WORKSPACE\", \"BUILD.bazel\"])\n\nfilegroup(\n name = \"jre\",\n srcs = glob(\n [\n \"jre/bin/**\",\n \"jre/lib/**\",\n ],\n allow_empty = True,\n # In some configurations, Java browser plugin is considered harmful and\n # common antivirus software blocks access to npjp2.dll interfering with 
Bazel,\n # so do not include it in JRE on Windows.\n exclude = [\"jre/bin/plugin2/**\"],\n ),\n)\n\nfilegroup(\n name = \"jdk-bin\",\n srcs = glob(\n [\"bin/**\"],\n # The JDK on Windows sometimes contains a directory called\n # \"%systemroot%\", which is not a valid label.\n exclude = [\"**/*%*/**\"],\n ),\n)\n\n# This folder holds security policies.\nfilegroup(\n name = \"jdk-conf\",\n srcs = glob(\n [\"conf/**\"],\n allow_empty = True,\n ),\n)\n\nfilegroup(\n name = \"jdk-include\",\n srcs = glob(\n [\"include/**\"],\n allow_empty = True,\n ),\n)\n\nfilegroup(\n name = \"jdk-lib\",\n srcs = glob(\n [\"lib/**\", \"release\"],\n allow_empty = True,\n exclude = [\n \"lib/missioncontrol/**\",\n \"lib/visualvm/**\",\n ],\n ),\n)\n\njava_runtime(\n name = \"jdk\",\n srcs = [\n \":jdk-bin\",\n \":jdk-conf\",\n \":jdk-include\",\n \":jdk-lib\",\n \":jre\",\n ],\n # Provide the 'java` binary explicitly so that the correct path is used by\n # Bazel even when the host platform differs from the execution platform.\n # Exactly one of the two globs will be empty depending on the host platform.\n # When --incompatible_disallow_empty_glob is enabled, each individual empty\n # glob will fail without allow_empty = True, even if the overall result is\n # non-empty.\n java = glob([\"bin/java.exe\", \"bin/java\"], allow_empty = True)[0],\n version = 17,\n)\n", + "sha256": "640453e8afe8ffe0fb4dceb4535fb50db9c283c64665eebb0ba68b19e65f4b1f", + "strip_prefix": "zulu17.44.53-ca-jdk17.0.8.1-macosx_x64", + "urls": [ + "https://mirror.bazel.build/cdn.azul.com/zulu/bin/zulu17.44.53-ca-jdk17.0.8.1-macosx_x64.tar.gz", + "https://cdn.azul.com/zulu/bin/zulu17.44.53-ca-jdk17.0.8.1-macosx_x64.tar.gz" + ] + } + }, + "remotejdk21_macos": { + "bzlFile": "@@bazel_tools//tools/build_defs/repo:http.bzl", + "ruleClassName": "http_archive", + "attributes": { + "name": "rules_java~7.1.0~toolchains~remotejdk21_macos", + "build_file_content": "load(\"@rules_java//java:defs.bzl\", 
\"java_runtime\")\n\npackage(default_visibility = [\"//visibility:public\"])\n\nexports_files([\"WORKSPACE\", \"BUILD.bazel\"])\n\nfilegroup(\n name = \"jre\",\n srcs = glob(\n [\n \"jre/bin/**\",\n \"jre/lib/**\",\n ],\n allow_empty = True,\n # In some configurations, Java browser plugin is considered harmful and\n # common antivirus software blocks access to npjp2.dll interfering with Bazel,\n # so do not include it in JRE on Windows.\n exclude = [\"jre/bin/plugin2/**\"],\n ),\n)\n\nfilegroup(\n name = \"jdk-bin\",\n srcs = glob(\n [\"bin/**\"],\n # The JDK on Windows sometimes contains a directory called\n # \"%systemroot%\", which is not a valid label.\n exclude = [\"**/*%*/**\"],\n ),\n)\n\n# This folder holds security policies.\nfilegroup(\n name = \"jdk-conf\",\n srcs = glob(\n [\"conf/**\"],\n allow_empty = True,\n ),\n)\n\nfilegroup(\n name = \"jdk-include\",\n srcs = glob(\n [\"include/**\"],\n allow_empty = True,\n ),\n)\n\nfilegroup(\n name = \"jdk-lib\",\n srcs = glob(\n [\"lib/**\", \"release\"],\n allow_empty = True,\n exclude = [\n \"lib/missioncontrol/**\",\n \"lib/visualvm/**\",\n ],\n ),\n)\n\njava_runtime(\n name = \"jdk\",\n srcs = [\n \":jdk-bin\",\n \":jdk-conf\",\n \":jdk-include\",\n \":jdk-lib\",\n \":jre\",\n ],\n # Provide the 'java` binary explicitly so that the correct path is used by\n # Bazel even when the host platform differs from the execution platform.\n # Exactly one of the two globs will be empty depending on the host platform.\n # When --incompatible_disallow_empty_glob is enabled, each individual empty\n # glob will fail without allow_empty = True, even if the overall result is\n # non-empty.\n java = glob([\"bin/java.exe\", \"bin/java\"], allow_empty = True)[0],\n version = 21,\n)\n", + "sha256": "9639b87db586d0c89f7a9892ae47f421e442c64b97baebdff31788fbe23265bd", + "strip_prefix": "zulu21.28.85-ca-jdk21.0.0-macosx_x64", + "urls": [ + 
"https://mirror.bazel.build/cdn.azul.com/zulu/bin/zulu21.28.85-ca-jdk21.0.0-macosx_x64.tar.gz", + "https://cdn.azul.com/zulu/bin/zulu21.28.85-ca-jdk21.0.0-macosx_x64.tar.gz" + ] + } + }, + "remotejdk21_macos_toolchain_config_repo": { + "bzlFile": "@@rules_java~7.1.0//toolchains:remote_java_repository.bzl", + "ruleClassName": "_toolchain_config", + "attributes": { + "name": "rules_java~7.1.0~toolchains~remotejdk21_macos_toolchain_config_repo", + "build_file": "\nconfig_setting(\n name = \"prefix_version_setting\",\n values = {\"java_runtime_version\": \"remotejdk_21\"},\n visibility = [\"//visibility:private\"],\n)\nconfig_setting(\n name = \"version_setting\",\n values = {\"java_runtime_version\": \"21\"},\n visibility = [\"//visibility:private\"],\n)\nalias(\n name = \"version_or_prefix_version_setting\",\n actual = select({\n \":version_setting\": \":version_setting\",\n \"//conditions:default\": \":prefix_version_setting\",\n }),\n visibility = [\"//visibility:private\"],\n)\ntoolchain(\n name = \"toolchain\",\n target_compatible_with = [\"@platforms//os:macos\", \"@platforms//cpu:x86_64\"],\n target_settings = [\":version_or_prefix_version_setting\"],\n toolchain_type = \"@bazel_tools//tools/jdk:runtime_toolchain_type\",\n toolchain = \"@remotejdk21_macos//:jdk\",\n)\ntoolchain(\n name = \"bootstrap_runtime_toolchain\",\n # These constraints are not required for correctness, but prevent fetches of remote JDK for\n # different architectures. 
As every Java compilation toolchain depends on a bootstrap runtime in\n # the same configuration, this constraint will not result in toolchain resolution failures.\n exec_compatible_with = [\"@platforms//os:macos\", \"@platforms//cpu:x86_64\"],\n target_settings = [\":version_or_prefix_version_setting\"],\n toolchain_type = \"@bazel_tools//tools/jdk:bootstrap_runtime_toolchain_type\",\n toolchain = \"@remotejdk21_macos//:jdk\",\n)\n" + } + }, + "remotejdk17_macos_aarch64_toolchain_config_repo": { + "bzlFile": "@@rules_java~7.1.0//toolchains:remote_java_repository.bzl", + "ruleClassName": "_toolchain_config", + "attributes": { + "name": "rules_java~7.1.0~toolchains~remotejdk17_macos_aarch64_toolchain_config_repo", + "build_file": "\nconfig_setting(\n name = \"prefix_version_setting\",\n values = {\"java_runtime_version\": \"remotejdk_17\"},\n visibility = [\"//visibility:private\"],\n)\nconfig_setting(\n name = \"version_setting\",\n values = {\"java_runtime_version\": \"17\"},\n visibility = [\"//visibility:private\"],\n)\nalias(\n name = \"version_or_prefix_version_setting\",\n actual = select({\n \":version_setting\": \":version_setting\",\n \"//conditions:default\": \":prefix_version_setting\",\n }),\n visibility = [\"//visibility:private\"],\n)\ntoolchain(\n name = \"toolchain\",\n target_compatible_with = [\"@platforms//os:macos\", \"@platforms//cpu:aarch64\"],\n target_settings = [\":version_or_prefix_version_setting\"],\n toolchain_type = \"@bazel_tools//tools/jdk:runtime_toolchain_type\",\n toolchain = \"@remotejdk17_macos_aarch64//:jdk\",\n)\ntoolchain(\n name = \"bootstrap_runtime_toolchain\",\n # These constraints are not required for correctness, but prevent fetches of remote JDK for\n # different architectures. 
As every Java compilation toolchain depends on a bootstrap runtime in\n # the same configuration, this constraint will not result in toolchain resolution failures.\n exec_compatible_with = [\"@platforms//os:macos\", \"@platforms//cpu:aarch64\"],\n target_settings = [\":version_or_prefix_version_setting\"],\n toolchain_type = \"@bazel_tools//tools/jdk:bootstrap_runtime_toolchain_type\",\n toolchain = \"@remotejdk17_macos_aarch64//:jdk\",\n)\n" + } + }, + "remotejdk17_win": { + "bzlFile": "@@bazel_tools//tools/build_defs/repo:http.bzl", + "ruleClassName": "http_archive", + "attributes": { + "name": "rules_java~7.1.0~toolchains~remotejdk17_win", + "build_file_content": "load(\"@rules_java//java:defs.bzl\", \"java_runtime\")\n\npackage(default_visibility = [\"//visibility:public\"])\n\nexports_files([\"WORKSPACE\", \"BUILD.bazel\"])\n\nfilegroup(\n name = \"jre\",\n srcs = glob(\n [\n \"jre/bin/**\",\n \"jre/lib/**\",\n ],\n allow_empty = True,\n # In some configurations, Java browser plugin is considered harmful and\n # common antivirus software blocks access to npjp2.dll interfering with Bazel,\n # so do not include it in JRE on Windows.\n exclude = [\"jre/bin/plugin2/**\"],\n ),\n)\n\nfilegroup(\n name = \"jdk-bin\",\n srcs = glob(\n [\"bin/**\"],\n # The JDK on Windows sometimes contains a directory called\n # \"%systemroot%\", which is not a valid label.\n exclude = [\"**/*%*/**\"],\n ),\n)\n\n# This folder holds security policies.\nfilegroup(\n name = \"jdk-conf\",\n srcs = glob(\n [\"conf/**\"],\n allow_empty = True,\n ),\n)\n\nfilegroup(\n name = \"jdk-include\",\n srcs = glob(\n [\"include/**\"],\n allow_empty = True,\n ),\n)\n\nfilegroup(\n name = \"jdk-lib\",\n srcs = glob(\n [\"lib/**\", \"release\"],\n allow_empty = True,\n exclude = [\n \"lib/missioncontrol/**\",\n \"lib/visualvm/**\",\n ],\n ),\n)\n\njava_runtime(\n name = \"jdk\",\n srcs = [\n \":jdk-bin\",\n \":jdk-conf\",\n \":jdk-include\",\n \":jdk-lib\",\n \":jre\",\n ],\n # Provide the 'java` 
binary explicitly so that the correct path is used by\n # Bazel even when the host platform differs from the execution platform.\n # Exactly one of the two globs will be empty depending on the host platform.\n # When --incompatible_disallow_empty_glob is enabled, each individual empty\n # glob will fail without allow_empty = True, even if the overall result is\n # non-empty.\n java = glob([\"bin/java.exe\", \"bin/java\"], allow_empty = True)[0],\n version = 17,\n)\n", + "sha256": "192f2afca57701de6ec496234f7e45d971bf623ff66b8ee4a5c81582054e5637", + "strip_prefix": "zulu17.44.53-ca-jdk17.0.8.1-win_x64", + "urls": [ + "https://mirror.bazel.build/cdn.azul.com/zulu/bin/zulu17.44.53-ca-jdk17.0.8.1-win_x64.zip", + "https://cdn.azul.com/zulu/bin/zulu17.44.53-ca-jdk17.0.8.1-win_x64.zip" + ] + } + }, + "remotejdk11_macos_aarch64_toolchain_config_repo": { + "bzlFile": "@@rules_java~7.1.0//toolchains:remote_java_repository.bzl", + "ruleClassName": "_toolchain_config", + "attributes": { + "name": "rules_java~7.1.0~toolchains~remotejdk11_macos_aarch64_toolchain_config_repo", + "build_file": "\nconfig_setting(\n name = \"prefix_version_setting\",\n values = {\"java_runtime_version\": \"remotejdk_11\"},\n visibility = [\"//visibility:private\"],\n)\nconfig_setting(\n name = \"version_setting\",\n values = {\"java_runtime_version\": \"11\"},\n visibility = [\"//visibility:private\"],\n)\nalias(\n name = \"version_or_prefix_version_setting\",\n actual = select({\n \":version_setting\": \":version_setting\",\n \"//conditions:default\": \":prefix_version_setting\",\n }),\n visibility = [\"//visibility:private\"],\n)\ntoolchain(\n name = \"toolchain\",\n target_compatible_with = [\"@platforms//os:macos\", \"@platforms//cpu:aarch64\"],\n target_settings = [\":version_or_prefix_version_setting\"],\n toolchain_type = \"@bazel_tools//tools/jdk:runtime_toolchain_type\",\n toolchain = \"@remotejdk11_macos_aarch64//:jdk\",\n)\ntoolchain(\n name = \"bootstrap_runtime_toolchain\",\n # These 
constraints are not required for correctness, but prevent fetches of remote JDK for\n # different architectures. As every Java compilation toolchain depends on a bootstrap runtime in\n # the same configuration, this constraint will not result in toolchain resolution failures.\n exec_compatible_with = [\"@platforms//os:macos\", \"@platforms//cpu:aarch64\"],\n target_settings = [\":version_or_prefix_version_setting\"],\n toolchain_type = \"@bazel_tools//tools/jdk:bootstrap_runtime_toolchain_type\",\n toolchain = \"@remotejdk11_macos_aarch64//:jdk\",\n)\n" + } + }, + "remotejdk11_linux_ppc64le_toolchain_config_repo": { + "bzlFile": "@@rules_java~7.1.0//toolchains:remote_java_repository.bzl", + "ruleClassName": "_toolchain_config", + "attributes": { + "name": "rules_java~7.1.0~toolchains~remotejdk11_linux_ppc64le_toolchain_config_repo", + "build_file": "\nconfig_setting(\n name = \"prefix_version_setting\",\n values = {\"java_runtime_version\": \"remotejdk_11\"},\n visibility = [\"//visibility:private\"],\n)\nconfig_setting(\n name = \"version_setting\",\n values = {\"java_runtime_version\": \"11\"},\n visibility = [\"//visibility:private\"],\n)\nalias(\n name = \"version_or_prefix_version_setting\",\n actual = select({\n \":version_setting\": \":version_setting\",\n \"//conditions:default\": \":prefix_version_setting\",\n }),\n visibility = [\"//visibility:private\"],\n)\ntoolchain(\n name = \"toolchain\",\n target_compatible_with = [\"@platforms//os:linux\", \"@platforms//cpu:ppc\"],\n target_settings = [\":version_or_prefix_version_setting\"],\n toolchain_type = \"@bazel_tools//tools/jdk:runtime_toolchain_type\",\n toolchain = \"@remotejdk11_linux_ppc64le//:jdk\",\n)\ntoolchain(\n name = \"bootstrap_runtime_toolchain\",\n # These constraints are not required for correctness, but prevent fetches of remote JDK for\n # different architectures. 
As every Java compilation toolchain depends on a bootstrap runtime in\n # the same configuration, this constraint will not result in toolchain resolution failures.\n exec_compatible_with = [\"@platforms//os:linux\", \"@platforms//cpu:ppc\"],\n target_settings = [\":version_or_prefix_version_setting\"],\n toolchain_type = \"@bazel_tools//tools/jdk:bootstrap_runtime_toolchain_type\",\n toolchain = \"@remotejdk11_linux_ppc64le//:jdk\",\n)\n" + } + }, + "remotejdk21_linux": { + "bzlFile": "@@bazel_tools//tools/build_defs/repo:http.bzl", + "ruleClassName": "http_archive", + "attributes": { + "name": "rules_java~7.1.0~toolchains~remotejdk21_linux", + "build_file_content": "load(\"@rules_java//java:defs.bzl\", \"java_runtime\")\n\npackage(default_visibility = [\"//visibility:public\"])\n\nexports_files([\"WORKSPACE\", \"BUILD.bazel\"])\n\nfilegroup(\n name = \"jre\",\n srcs = glob(\n [\n \"jre/bin/**\",\n \"jre/lib/**\",\n ],\n allow_empty = True,\n # In some configurations, Java browser plugin is considered harmful and\n # common antivirus software blocks access to npjp2.dll interfering with Bazel,\n # so do not include it in JRE on Windows.\n exclude = [\"jre/bin/plugin2/**\"],\n ),\n)\n\nfilegroup(\n name = \"jdk-bin\",\n srcs = glob(\n [\"bin/**\"],\n # The JDK on Windows sometimes contains a directory called\n # \"%systemroot%\", which is not a valid label.\n exclude = [\"**/*%*/**\"],\n ),\n)\n\n# This folder holds security policies.\nfilegroup(\n name = \"jdk-conf\",\n srcs = glob(\n [\"conf/**\"],\n allow_empty = True,\n ),\n)\n\nfilegroup(\n name = \"jdk-include\",\n srcs = glob(\n [\"include/**\"],\n allow_empty = True,\n ),\n)\n\nfilegroup(\n name = \"jdk-lib\",\n srcs = glob(\n [\"lib/**\", \"release\"],\n allow_empty = True,\n exclude = [\n \"lib/missioncontrol/**\",\n \"lib/visualvm/**\",\n ],\n ),\n)\n\njava_runtime(\n name = \"jdk\",\n srcs = [\n \":jdk-bin\",\n \":jdk-conf\",\n \":jdk-include\",\n \":jdk-lib\",\n \":jre\",\n ],\n # Provide the 'java` 
binary explicitly so that the correct path is used by\n # Bazel even when the host platform differs from the execution platform.\n # Exactly one of the two globs will be empty depending on the host platform.\n # When --incompatible_disallow_empty_glob is enabled, each individual empty\n # glob will fail without allow_empty = True, even if the overall result is\n # non-empty.\n java = glob([\"bin/java.exe\", \"bin/java\"], allow_empty = True)[0],\n version = 21,\n)\n", + "sha256": "0c0eadfbdc47a7ca64aeab51b9c061f71b6e4d25d2d87674512e9b6387e9e3a6", + "strip_prefix": "zulu21.28.85-ca-jdk21.0.0-linux_x64", + "urls": [ + "https://mirror.bazel.build/cdn.azul.com/zulu/bin/zulu21.28.85-ca-jdk21.0.0-linux_x64.tar.gz", + "https://cdn.azul.com/zulu/bin/zulu21.28.85-ca-jdk21.0.0-linux_x64.tar.gz" + ] + } + }, + "remote_java_tools_linux": { + "bzlFile": "@@bazel_tools//tools/build_defs/repo:http.bzl", + "ruleClassName": "http_archive", + "attributes": { + "name": "rules_java~7.1.0~toolchains~remote_java_tools_linux", + "sha256": "d134da9b04c9023fb6e56a5d4bffccee73f7bc9572ddc4e747778dacccd7a5a7", + "urls": [ + "https://mirror.bazel.build/bazel_java_tools/releases/java/v13.1/java_tools_linux-v13.1.zip", + "https://github.com/bazelbuild/java_tools/releases/download/java_v13.1/java_tools_linux-v13.1.zip" + ] + } + }, + "remotejdk21_win": { + "bzlFile": "@@bazel_tools//tools/build_defs/repo:http.bzl", + "ruleClassName": "http_archive", + "attributes": { + "name": "rules_java~7.1.0~toolchains~remotejdk21_win", + "build_file_content": "load(\"@rules_java//java:defs.bzl\", \"java_runtime\")\n\npackage(default_visibility = [\"//visibility:public\"])\n\nexports_files([\"WORKSPACE\", \"BUILD.bazel\"])\n\nfilegroup(\n name = \"jre\",\n srcs = glob(\n [\n \"jre/bin/**\",\n \"jre/lib/**\",\n ],\n allow_empty = True,\n # In some configurations, Java browser plugin is considered harmful and\n # common antivirus software blocks access to npjp2.dll interfering with Bazel,\n # so do not include 
it in JRE on Windows.\n exclude = [\"jre/bin/plugin2/**\"],\n ),\n)\n\nfilegroup(\n name = \"jdk-bin\",\n srcs = glob(\n [\"bin/**\"],\n # The JDK on Windows sometimes contains a directory called\n # \"%systemroot%\", which is not a valid label.\n exclude = [\"**/*%*/**\"],\n ),\n)\n\n# This folder holds security policies.\nfilegroup(\n name = \"jdk-conf\",\n srcs = glob(\n [\"conf/**\"],\n allow_empty = True,\n ),\n)\n\nfilegroup(\n name = \"jdk-include\",\n srcs = glob(\n [\"include/**\"],\n allow_empty = True,\n ),\n)\n\nfilegroup(\n name = \"jdk-lib\",\n srcs = glob(\n [\"lib/**\", \"release\"],\n allow_empty = True,\n exclude = [\n \"lib/missioncontrol/**\",\n \"lib/visualvm/**\",\n ],\n ),\n)\n\njava_runtime(\n name = \"jdk\",\n srcs = [\n \":jdk-bin\",\n \":jdk-conf\",\n \":jdk-include\",\n \":jdk-lib\",\n \":jre\",\n ],\n # Provide the 'java` binary explicitly so that the correct path is used by\n # Bazel even when the host platform differs from the execution platform.\n # Exactly one of the two globs will be empty depending on the host platform.\n # When --incompatible_disallow_empty_glob is enabled, each individual empty\n # glob will fail without allow_empty = True, even if the overall result is\n # non-empty.\n java = glob([\"bin/java.exe\", \"bin/java\"], allow_empty = True)[0],\n version = 21,\n)\n", + "sha256": "e9959d500a0d9a7694ac243baf657761479da132f0f94720cbffd092150bd802", + "strip_prefix": "zulu21.28.85-ca-jdk21.0.0-win_x64", + "urls": [ + "https://mirror.bazel.build/cdn.azul.com/zulu/bin/zulu21.28.85-ca-jdk21.0.0-win_x64.zip", + "https://cdn.azul.com/zulu/bin/zulu21.28.85-ca-jdk21.0.0-win_x64.zip" + ] + } + }, + "remotejdk21_linux_aarch64": { + "bzlFile": "@@bazel_tools//tools/build_defs/repo:http.bzl", + "ruleClassName": "http_archive", + "attributes": { + "name": "rules_java~7.1.0~toolchains~remotejdk21_linux_aarch64", + "build_file_content": "load(\"@rules_java//java:defs.bzl\", \"java_runtime\")\n\npackage(default_visibility = 
[\"//visibility:public\"])\n\nexports_files([\"WORKSPACE\", \"BUILD.bazel\"])\n\nfilegroup(\n name = \"jre\",\n srcs = glob(\n [\n \"jre/bin/**\",\n \"jre/lib/**\",\n ],\n allow_empty = True,\n # In some configurations, Java browser plugin is considered harmful and\n # common antivirus software blocks access to npjp2.dll interfering with Bazel,\n # so do not include it in JRE on Windows.\n exclude = [\"jre/bin/plugin2/**\"],\n ),\n)\n\nfilegroup(\n name = \"jdk-bin\",\n srcs = glob(\n [\"bin/**\"],\n # The JDK on Windows sometimes contains a directory called\n # \"%systemroot%\", which is not a valid label.\n exclude = [\"**/*%*/**\"],\n ),\n)\n\n# This folder holds security policies.\nfilegroup(\n name = \"jdk-conf\",\n srcs = glob(\n [\"conf/**\"],\n allow_empty = True,\n ),\n)\n\nfilegroup(\n name = \"jdk-include\",\n srcs = glob(\n [\"include/**\"],\n allow_empty = True,\n ),\n)\n\nfilegroup(\n name = \"jdk-lib\",\n srcs = glob(\n [\"lib/**\", \"release\"],\n allow_empty = True,\n exclude = [\n \"lib/missioncontrol/**\",\n \"lib/visualvm/**\",\n ],\n ),\n)\n\njava_runtime(\n name = \"jdk\",\n srcs = [\n \":jdk-bin\",\n \":jdk-conf\",\n \":jdk-include\",\n \":jdk-lib\",\n \":jre\",\n ],\n # Provide the 'java` binary explicitly so that the correct path is used by\n # Bazel even when the host platform differs from the execution platform.\n # Exactly one of the two globs will be empty depending on the host platform.\n # When --incompatible_disallow_empty_glob is enabled, each individual empty\n # glob will fail without allow_empty = True, even if the overall result is\n # non-empty.\n java = glob([\"bin/java.exe\", \"bin/java\"], allow_empty = True)[0],\n version = 21,\n)\n", + "sha256": "1fb64b8036c5d463d8ab59af06bf5b6b006811e6012e3b0eb6bccf57f1c55835", + "strip_prefix": "zulu21.28.85-ca-jdk21.0.0-linux_aarch64", + "urls": [ + "https://mirror.bazel.build/cdn.azul.com/zulu/bin/zulu21.28.85-ca-jdk21.0.0-linux_aarch64.tar.gz", + 
"https://cdn.azul.com/zulu/bin/zulu21.28.85-ca-jdk21.0.0-linux_aarch64.tar.gz" + ] + } + }, + "remotejdk11_linux_aarch64_toolchain_config_repo": { + "bzlFile": "@@rules_java~7.1.0//toolchains:remote_java_repository.bzl", + "ruleClassName": "_toolchain_config", + "attributes": { + "name": "rules_java~7.1.0~toolchains~remotejdk11_linux_aarch64_toolchain_config_repo", + "build_file": "\nconfig_setting(\n name = \"prefix_version_setting\",\n values = {\"java_runtime_version\": \"remotejdk_11\"},\n visibility = [\"//visibility:private\"],\n)\nconfig_setting(\n name = \"version_setting\",\n values = {\"java_runtime_version\": \"11\"},\n visibility = [\"//visibility:private\"],\n)\nalias(\n name = \"version_or_prefix_version_setting\",\n actual = select({\n \":version_setting\": \":version_setting\",\n \"//conditions:default\": \":prefix_version_setting\",\n }),\n visibility = [\"//visibility:private\"],\n)\ntoolchain(\n name = \"toolchain\",\n target_compatible_with = [\"@platforms//os:linux\", \"@platforms//cpu:aarch64\"],\n target_settings = [\":version_or_prefix_version_setting\"],\n toolchain_type = \"@bazel_tools//tools/jdk:runtime_toolchain_type\",\n toolchain = \"@remotejdk11_linux_aarch64//:jdk\",\n)\ntoolchain(\n name = \"bootstrap_runtime_toolchain\",\n # These constraints are not required for correctness, but prevent fetches of remote JDK for\n # different architectures. 
As every Java compilation toolchain depends on a bootstrap runtime in\n # the same configuration, this constraint will not result in toolchain resolution failures.\n exec_compatible_with = [\"@platforms//os:linux\", \"@platforms//cpu:aarch64\"],\n target_settings = [\":version_or_prefix_version_setting\"],\n toolchain_type = \"@bazel_tools//tools/jdk:bootstrap_runtime_toolchain_type\",\n toolchain = \"@remotejdk11_linux_aarch64//:jdk\",\n)\n" + } + }, + "remotejdk11_linux_s390x": { + "bzlFile": "@@bazel_tools//tools/build_defs/repo:http.bzl", + "ruleClassName": "http_archive", + "attributes": { + "name": "rules_java~7.1.0~toolchains~remotejdk11_linux_s390x", + "build_file_content": "load(\"@rules_java//java:defs.bzl\", \"java_runtime\")\n\npackage(default_visibility = [\"//visibility:public\"])\n\nexports_files([\"WORKSPACE\", \"BUILD.bazel\"])\n\nfilegroup(\n name = \"jre\",\n srcs = glob(\n [\n \"jre/bin/**\",\n \"jre/lib/**\",\n ],\n allow_empty = True,\n # In some configurations, Java browser plugin is considered harmful and\n # common antivirus software blocks access to npjp2.dll interfering with Bazel,\n # so do not include it in JRE on Windows.\n exclude = [\"jre/bin/plugin2/**\"],\n ),\n)\n\nfilegroup(\n name = \"jdk-bin\",\n srcs = glob(\n [\"bin/**\"],\n # The JDK on Windows sometimes contains a directory called\n # \"%systemroot%\", which is not a valid label.\n exclude = [\"**/*%*/**\"],\n ),\n)\n\n# This folder holds security policies.\nfilegroup(\n name = \"jdk-conf\",\n srcs = glob(\n [\"conf/**\"],\n allow_empty = True,\n ),\n)\n\nfilegroup(\n name = \"jdk-include\",\n srcs = glob(\n [\"include/**\"],\n allow_empty = True,\n ),\n)\n\nfilegroup(\n name = \"jdk-lib\",\n srcs = glob(\n [\"lib/**\", \"release\"],\n allow_empty = True,\n exclude = [\n \"lib/missioncontrol/**\",\n \"lib/visualvm/**\",\n ],\n ),\n)\n\njava_runtime(\n name = \"jdk\",\n srcs = [\n \":jdk-bin\",\n \":jdk-conf\",\n \":jdk-include\",\n \":jdk-lib\",\n \":jre\",\n ],\n # Provide 
the 'java` binary explicitly so that the correct path is used by\n # Bazel even when the host platform differs from the execution platform.\n # Exactly one of the two globs will be empty depending on the host platform.\n # When --incompatible_disallow_empty_glob is enabled, each individual empty\n # glob will fail without allow_empty = True, even if the overall result is\n # non-empty.\n java = glob([\"bin/java.exe\", \"bin/java\"], allow_empty = True)[0],\n version = 11,\n)\n", + "sha256": "a58fc0361966af0a5d5a31a2d8a208e3c9bb0f54f345596fd80b99ea9a39788b", + "strip_prefix": "jdk-11.0.15+10", + "urls": [ + "https://mirror.bazel.build/github.com/adoptium/temurin11-binaries/releases/download/jdk-11.0.15+10/OpenJDK11U-jdk_s390x_linux_hotspot_11.0.15_10.tar.gz", + "https://github.com/adoptium/temurin11-binaries/releases/download/jdk-11.0.15+10/OpenJDK11U-jdk_s390x_linux_hotspot_11.0.15_10.tar.gz" + ] + } + }, + "remotejdk17_linux_aarch64": { + "bzlFile": "@@bazel_tools//tools/build_defs/repo:http.bzl", + "ruleClassName": "http_archive", + "attributes": { + "name": "rules_java~7.1.0~toolchains~remotejdk17_linux_aarch64", + "build_file_content": "load(\"@rules_java//java:defs.bzl\", \"java_runtime\")\n\npackage(default_visibility = [\"//visibility:public\"])\n\nexports_files([\"WORKSPACE\", \"BUILD.bazel\"])\n\nfilegroup(\n name = \"jre\",\n srcs = glob(\n [\n \"jre/bin/**\",\n \"jre/lib/**\",\n ],\n allow_empty = True,\n # In some configurations, Java browser plugin is considered harmful and\n # common antivirus software blocks access to npjp2.dll interfering with Bazel,\n # so do not include it in JRE on Windows.\n exclude = [\"jre/bin/plugin2/**\"],\n ),\n)\n\nfilegroup(\n name = \"jdk-bin\",\n srcs = glob(\n [\"bin/**\"],\n # The JDK on Windows sometimes contains a directory called\n # \"%systemroot%\", which is not a valid label.\n exclude = [\"**/*%*/**\"],\n ),\n)\n\n# This folder holds security policies.\nfilegroup(\n name = \"jdk-conf\",\n srcs = glob(\n 
[\"conf/**\"],\n allow_empty = True,\n ),\n)\n\nfilegroup(\n name = \"jdk-include\",\n srcs = glob(\n [\"include/**\"],\n allow_empty = True,\n ),\n)\n\nfilegroup(\n name = \"jdk-lib\",\n srcs = glob(\n [\"lib/**\", \"release\"],\n allow_empty = True,\n exclude = [\n \"lib/missioncontrol/**\",\n \"lib/visualvm/**\",\n ],\n ),\n)\n\njava_runtime(\n name = \"jdk\",\n srcs = [\n \":jdk-bin\",\n \":jdk-conf\",\n \":jdk-include\",\n \":jdk-lib\",\n \":jre\",\n ],\n # Provide the 'java` binary explicitly so that the correct path is used by\n # Bazel even when the host platform differs from the execution platform.\n # Exactly one of the two globs will be empty depending on the host platform.\n # When --incompatible_disallow_empty_glob is enabled, each individual empty\n # glob will fail without allow_empty = True, even if the overall result is\n # non-empty.\n java = glob([\"bin/java.exe\", \"bin/java\"], allow_empty = True)[0],\n version = 17,\n)\n", + "sha256": "6531cef61e416d5a7b691555c8cf2bdff689201b8a001ff45ab6740062b44313", + "strip_prefix": "zulu17.44.53-ca-jdk17.0.8.1-linux_aarch64", + "urls": [ + "https://mirror.bazel.build/cdn.azul.com/zulu/bin/zulu17.44.53-ca-jdk17.0.8.1-linux_aarch64.tar.gz", + "https://cdn.azul.com/zulu/bin/zulu17.44.53-ca-jdk17.0.8.1-linux_aarch64.tar.gz" + ] + } + }, + "remotejdk17_win_arm64_toolchain_config_repo": { + "bzlFile": "@@rules_java~7.1.0//toolchains:remote_java_repository.bzl", + "ruleClassName": "_toolchain_config", + "attributes": { + "name": "rules_java~7.1.0~toolchains~remotejdk17_win_arm64_toolchain_config_repo", + "build_file": "\nconfig_setting(\n name = \"prefix_version_setting\",\n values = {\"java_runtime_version\": \"remotejdk_17\"},\n visibility = [\"//visibility:private\"],\n)\nconfig_setting(\n name = \"version_setting\",\n values = {\"java_runtime_version\": \"17\"},\n visibility = [\"//visibility:private\"],\n)\nalias(\n name = \"version_or_prefix_version_setting\",\n actual = select({\n \":version_setting\": 
\":version_setting\",\n \"//conditions:default\": \":prefix_version_setting\",\n }),\n visibility = [\"//visibility:private\"],\n)\ntoolchain(\n name = \"toolchain\",\n target_compatible_with = [\"@platforms//os:windows\", \"@platforms//cpu:arm64\"],\n target_settings = [\":version_or_prefix_version_setting\"],\n toolchain_type = \"@bazel_tools//tools/jdk:runtime_toolchain_type\",\n toolchain = \"@remotejdk17_win_arm64//:jdk\",\n)\ntoolchain(\n name = \"bootstrap_runtime_toolchain\",\n # These constraints are not required for correctness, but prevent fetches of remote JDK for\n # different architectures. As every Java compilation toolchain depends on a bootstrap runtime in\n # the same configuration, this constraint will not result in toolchain resolution failures.\n exec_compatible_with = [\"@platforms//os:windows\", \"@platforms//cpu:arm64\"],\n target_settings = [\":version_or_prefix_version_setting\"],\n toolchain_type = \"@bazel_tools//tools/jdk:bootstrap_runtime_toolchain_type\",\n toolchain = \"@remotejdk17_win_arm64//:jdk\",\n)\n" + } + }, + "remotejdk11_linux": { + "bzlFile": "@@bazel_tools//tools/build_defs/repo:http.bzl", + "ruleClassName": "http_archive", + "attributes": { + "name": "rules_java~7.1.0~toolchains~remotejdk11_linux", + "build_file_content": "load(\"@rules_java//java:defs.bzl\", \"java_runtime\")\n\npackage(default_visibility = [\"//visibility:public\"])\n\nexports_files([\"WORKSPACE\", \"BUILD.bazel\"])\n\nfilegroup(\n name = \"jre\",\n srcs = glob(\n [\n \"jre/bin/**\",\n \"jre/lib/**\",\n ],\n allow_empty = True,\n # In some configurations, Java browser plugin is considered harmful and\n # common antivirus software blocks access to npjp2.dll interfering with Bazel,\n # so do not include it in JRE on Windows.\n exclude = [\"jre/bin/plugin2/**\"],\n ),\n)\n\nfilegroup(\n name = \"jdk-bin\",\n srcs = glob(\n [\"bin/**\"],\n # The JDK on Windows sometimes contains a directory called\n # \"%systemroot%\", which is not a valid label.\n exclude 
= [\"**/*%*/**\"],\n ),\n)\n\n# This folder holds security policies.\nfilegroup(\n name = \"jdk-conf\",\n srcs = glob(\n [\"conf/**\"],\n allow_empty = True,\n ),\n)\n\nfilegroup(\n name = \"jdk-include\",\n srcs = glob(\n [\"include/**\"],\n allow_empty = True,\n ),\n)\n\nfilegroup(\n name = \"jdk-lib\",\n srcs = glob(\n [\"lib/**\", \"release\"],\n allow_empty = True,\n exclude = [\n \"lib/missioncontrol/**\",\n \"lib/visualvm/**\",\n ],\n ),\n)\n\njava_runtime(\n name = \"jdk\",\n srcs = [\n \":jdk-bin\",\n \":jdk-conf\",\n \":jdk-include\",\n \":jdk-lib\",\n \":jre\",\n ],\n # Provide the 'java` binary explicitly so that the correct path is used by\n # Bazel even when the host platform differs from the execution platform.\n # Exactly one of the two globs will be empty depending on the host platform.\n # When --incompatible_disallow_empty_glob is enabled, each individual empty\n # glob will fail without allow_empty = True, even if the overall result is\n # non-empty.\n java = glob([\"bin/java.exe\", \"bin/java\"], allow_empty = True)[0],\n version = 11,\n)\n", + "sha256": "a34b404f87a08a61148b38e1416d837189e1df7a040d949e743633daf4695a3c", + "strip_prefix": "zulu11.66.15-ca-jdk11.0.20-linux_x64", + "urls": [ + "https://mirror.bazel.build/cdn.azul.com/zulu/bin/zulu11.66.15-ca-jdk11.0.20-linux_x64.tar.gz", + "https://cdn.azul.com/zulu/bin/zulu11.66.15-ca-jdk11.0.20-linux_x64.tar.gz" + ] + } + }, + "remotejdk11_macos_toolchain_config_repo": { + "bzlFile": "@@rules_java~7.1.0//toolchains:remote_java_repository.bzl", + "ruleClassName": "_toolchain_config", + "attributes": { + "name": "rules_java~7.1.0~toolchains~remotejdk11_macos_toolchain_config_repo", + "build_file": "\nconfig_setting(\n name = \"prefix_version_setting\",\n values = {\"java_runtime_version\": \"remotejdk_11\"},\n visibility = [\"//visibility:private\"],\n)\nconfig_setting(\n name = \"version_setting\",\n values = {\"java_runtime_version\": \"11\"},\n visibility = 
[\"//visibility:private\"],\n)\nalias(\n name = \"version_or_prefix_version_setting\",\n actual = select({\n \":version_setting\": \":version_setting\",\n \"//conditions:default\": \":prefix_version_setting\",\n }),\n visibility = [\"//visibility:private\"],\n)\ntoolchain(\n name = \"toolchain\",\n target_compatible_with = [\"@platforms//os:macos\", \"@platforms//cpu:x86_64\"],\n target_settings = [\":version_or_prefix_version_setting\"],\n toolchain_type = \"@bazel_tools//tools/jdk:runtime_toolchain_type\",\n toolchain = \"@remotejdk11_macos//:jdk\",\n)\ntoolchain(\n name = \"bootstrap_runtime_toolchain\",\n # These constraints are not required for correctness, but prevent fetches of remote JDK for\n # different architectures. As every Java compilation toolchain depends on a bootstrap runtime in\n # the same configuration, this constraint will not result in toolchain resolution failures.\n exec_compatible_with = [\"@platforms//os:macos\", \"@platforms//cpu:x86_64\"],\n target_settings = [\":version_or_prefix_version_setting\"],\n toolchain_type = \"@bazel_tools//tools/jdk:bootstrap_runtime_toolchain_type\",\n toolchain = \"@remotejdk11_macos//:jdk\",\n)\n" + } + }, + "remotejdk17_linux_ppc64le_toolchain_config_repo": { + "bzlFile": "@@rules_java~7.1.0//toolchains:remote_java_repository.bzl", + "ruleClassName": "_toolchain_config", + "attributes": { + "name": "rules_java~7.1.0~toolchains~remotejdk17_linux_ppc64le_toolchain_config_repo", + "build_file": "\nconfig_setting(\n name = \"prefix_version_setting\",\n values = {\"java_runtime_version\": \"remotejdk_17\"},\n visibility = [\"//visibility:private\"],\n)\nconfig_setting(\n name = \"version_setting\",\n values = {\"java_runtime_version\": \"17\"},\n visibility = [\"//visibility:private\"],\n)\nalias(\n name = \"version_or_prefix_version_setting\",\n actual = select({\n \":version_setting\": \":version_setting\",\n \"//conditions:default\": \":prefix_version_setting\",\n }),\n visibility = 
[\"//visibility:private\"],\n)\ntoolchain(\n name = \"toolchain\",\n target_compatible_with = [\"@platforms//os:linux\", \"@platforms//cpu:ppc\"],\n target_settings = [\":version_or_prefix_version_setting\"],\n toolchain_type = \"@bazel_tools//tools/jdk:runtime_toolchain_type\",\n toolchain = \"@remotejdk17_linux_ppc64le//:jdk\",\n)\ntoolchain(\n name = \"bootstrap_runtime_toolchain\",\n # These constraints are not required for correctness, but prevent fetches of remote JDK for\n # different architectures. As every Java compilation toolchain depends on a bootstrap runtime in\n # the same configuration, this constraint will not result in toolchain resolution failures.\n exec_compatible_with = [\"@platforms//os:linux\", \"@platforms//cpu:ppc\"],\n target_settings = [\":version_or_prefix_version_setting\"],\n toolchain_type = \"@bazel_tools//tools/jdk:bootstrap_runtime_toolchain_type\",\n toolchain = \"@remotejdk17_linux_ppc64le//:jdk\",\n)\n" + } + }, + "remotejdk17_win_arm64": { + "bzlFile": "@@bazel_tools//tools/build_defs/repo:http.bzl", + "ruleClassName": "http_archive", + "attributes": { + "name": "rules_java~7.1.0~toolchains~remotejdk17_win_arm64", + "build_file_content": "load(\"@rules_java//java:defs.bzl\", \"java_runtime\")\n\npackage(default_visibility = [\"//visibility:public\"])\n\nexports_files([\"WORKSPACE\", \"BUILD.bazel\"])\n\nfilegroup(\n name = \"jre\",\n srcs = glob(\n [\n \"jre/bin/**\",\n \"jre/lib/**\",\n ],\n allow_empty = True,\n # In some configurations, Java browser plugin is considered harmful and\n # common antivirus software blocks access to npjp2.dll interfering with Bazel,\n # so do not include it in JRE on Windows.\n exclude = [\"jre/bin/plugin2/**\"],\n ),\n)\n\nfilegroup(\n name = \"jdk-bin\",\n srcs = glob(\n [\"bin/**\"],\n # The JDK on Windows sometimes contains a directory called\n # \"%systemroot%\", which is not a valid label.\n exclude = [\"**/*%*/**\"],\n ),\n)\n\n# This folder holds security policies.\nfilegroup(\n name = 
\"jdk-conf\",\n srcs = glob(\n [\"conf/**\"],\n allow_empty = True,\n ),\n)\n\nfilegroup(\n name = \"jdk-include\",\n srcs = glob(\n [\"include/**\"],\n allow_empty = True,\n ),\n)\n\nfilegroup(\n name = \"jdk-lib\",\n srcs = glob(\n [\"lib/**\", \"release\"],\n allow_empty = True,\n exclude = [\n \"lib/missioncontrol/**\",\n \"lib/visualvm/**\",\n ],\n ),\n)\n\njava_runtime(\n name = \"jdk\",\n srcs = [\n \":jdk-bin\",\n \":jdk-conf\",\n \":jdk-include\",\n \":jdk-lib\",\n \":jre\",\n ],\n # Provide the 'java` binary explicitly so that the correct path is used by\n # Bazel even when the host platform differs from the execution platform.\n # Exactly one of the two globs will be empty depending on the host platform.\n # When --incompatible_disallow_empty_glob is enabled, each individual empty\n # glob will fail without allow_empty = True, even if the overall result is\n # non-empty.\n java = glob([\"bin/java.exe\", \"bin/java\"], allow_empty = True)[0],\n version = 17,\n)\n", + "sha256": "6802c99eae0d788e21f52d03cab2e2b3bf42bc334ca03cbf19f71eb70ee19f85", + "strip_prefix": "zulu17.44.53-ca-jdk17.0.8.1-win_aarch64", + "urls": [ + "https://mirror.bazel.build/cdn.azul.com/zulu/bin/zulu17.44.53-ca-jdk17.0.8.1-win_aarch64.zip", + "https://cdn.azul.com/zulu/bin/zulu17.44.53-ca-jdk17.0.8.1-win_aarch64.zip" + ] + } + }, + "remote_java_tools_darwin_arm64": { + "bzlFile": "@@bazel_tools//tools/build_defs/repo:http.bzl", + "ruleClassName": "http_archive", + "attributes": { + "name": "rules_java~7.1.0~toolchains~remote_java_tools_darwin_arm64", + "sha256": "dab5bb87ec43e980faea6e1cec14bafb217b8e2f5346f53aa784fd715929a930", + "urls": [ + "https://mirror.bazel.build/bazel_java_tools/releases/java/v13.1/java_tools_darwin_arm64-v13.1.zip", + "https://github.com/bazelbuild/java_tools/releases/download/java_v13.1/java_tools_darwin_arm64-v13.1.zip" + ] + } + }, + "remotejdk17_linux_ppc64le": { + "bzlFile": "@@bazel_tools//tools/build_defs/repo:http.bzl", + "ruleClassName": 
"http_archive", + "attributes": { + "name": "rules_java~7.1.0~toolchains~remotejdk17_linux_ppc64le", + "build_file_content": "load(\"@rules_java//java:defs.bzl\", \"java_runtime\")\n\npackage(default_visibility = [\"//visibility:public\"])\n\nexports_files([\"WORKSPACE\", \"BUILD.bazel\"])\n\nfilegroup(\n name = \"jre\",\n srcs = glob(\n [\n \"jre/bin/**\",\n \"jre/lib/**\",\n ],\n allow_empty = True,\n # In some configurations, Java browser plugin is considered harmful and\n # common antivirus software blocks access to npjp2.dll interfering with Bazel,\n # so do not include it in JRE on Windows.\n exclude = [\"jre/bin/plugin2/**\"],\n ),\n)\n\nfilegroup(\n name = \"jdk-bin\",\n srcs = glob(\n [\"bin/**\"],\n # The JDK on Windows sometimes contains a directory called\n # \"%systemroot%\", which is not a valid label.\n exclude = [\"**/*%*/**\"],\n ),\n)\n\n# This folder holds security policies.\nfilegroup(\n name = \"jdk-conf\",\n srcs = glob(\n [\"conf/**\"],\n allow_empty = True,\n ),\n)\n\nfilegroup(\n name = \"jdk-include\",\n srcs = glob(\n [\"include/**\"],\n allow_empty = True,\n ),\n)\n\nfilegroup(\n name = \"jdk-lib\",\n srcs = glob(\n [\"lib/**\", \"release\"],\n allow_empty = True,\n exclude = [\n \"lib/missioncontrol/**\",\n \"lib/visualvm/**\",\n ],\n ),\n)\n\njava_runtime(\n name = \"jdk\",\n srcs = [\n \":jdk-bin\",\n \":jdk-conf\",\n \":jdk-include\",\n \":jdk-lib\",\n \":jre\",\n ],\n # Provide the 'java` binary explicitly so that the correct path is used by\n # Bazel even when the host platform differs from the execution platform.\n # Exactly one of the two globs will be empty depending on the host platform.\n # When --incompatible_disallow_empty_glob is enabled, each individual empty\n # glob will fail without allow_empty = True, even if the overall result is\n # non-empty.\n java = glob([\"bin/java.exe\", \"bin/java\"], allow_empty = True)[0],\n version = 17,\n)\n", + "sha256": "00a4c07603d0218cd678461b5b3b7e25b3253102da4022d31fc35907f21a2efd", + 
"strip_prefix": "jdk-17.0.8.1+1", + "urls": [ + "https://mirror.bazel.build/github.com/adoptium/temurin17-binaries/releases/download/jdk-17.0.8.1%2B1/OpenJDK17U-jdk_ppc64le_linux_hotspot_17.0.8.1_1.tar.gz", + "https://github.com/adoptium/temurin17-binaries/releases/download/jdk-17.0.8.1%2B1/OpenJDK17U-jdk_ppc64le_linux_hotspot_17.0.8.1_1.tar.gz" + ] + } + }, + "remotejdk21_linux_aarch64_toolchain_config_repo": { + "bzlFile": "@@rules_java~7.1.0//toolchains:remote_java_repository.bzl", + "ruleClassName": "_toolchain_config", + "attributes": { + "name": "rules_java~7.1.0~toolchains~remotejdk21_linux_aarch64_toolchain_config_repo", + "build_file": "\nconfig_setting(\n name = \"prefix_version_setting\",\n values = {\"java_runtime_version\": \"remotejdk_21\"},\n visibility = [\"//visibility:private\"],\n)\nconfig_setting(\n name = \"version_setting\",\n values = {\"java_runtime_version\": \"21\"},\n visibility = [\"//visibility:private\"],\n)\nalias(\n name = \"version_or_prefix_version_setting\",\n actual = select({\n \":version_setting\": \":version_setting\",\n \"//conditions:default\": \":prefix_version_setting\",\n }),\n visibility = [\"//visibility:private\"],\n)\ntoolchain(\n name = \"toolchain\",\n target_compatible_with = [\"@platforms//os:linux\", \"@platforms//cpu:aarch64\"],\n target_settings = [\":version_or_prefix_version_setting\"],\n toolchain_type = \"@bazel_tools//tools/jdk:runtime_toolchain_type\",\n toolchain = \"@remotejdk21_linux_aarch64//:jdk\",\n)\ntoolchain(\n name = \"bootstrap_runtime_toolchain\",\n # These constraints are not required for correctness, but prevent fetches of remote JDK for\n # different architectures. 
As every Java compilation toolchain depends on a bootstrap runtime in\n # the same configuration, this constraint will not result in toolchain resolution failures.\n exec_compatible_with = [\"@platforms//os:linux\", \"@platforms//cpu:aarch64\"],\n target_settings = [\":version_or_prefix_version_setting\"],\n toolchain_type = \"@bazel_tools//tools/jdk:bootstrap_runtime_toolchain_type\",\n toolchain = \"@remotejdk21_linux_aarch64//:jdk\",\n)\n" + } + }, + "remotejdk11_win_arm64_toolchain_config_repo": { + "bzlFile": "@@rules_java~7.1.0//toolchains:remote_java_repository.bzl", + "ruleClassName": "_toolchain_config", + "attributes": { + "name": "rules_java~7.1.0~toolchains~remotejdk11_win_arm64_toolchain_config_repo", + "build_file": "\nconfig_setting(\n name = \"prefix_version_setting\",\n values = {\"java_runtime_version\": \"remotejdk_11\"},\n visibility = [\"//visibility:private\"],\n)\nconfig_setting(\n name = \"version_setting\",\n values = {\"java_runtime_version\": \"11\"},\n visibility = [\"//visibility:private\"],\n)\nalias(\n name = \"version_or_prefix_version_setting\",\n actual = select({\n \":version_setting\": \":version_setting\",\n \"//conditions:default\": \":prefix_version_setting\",\n }),\n visibility = [\"//visibility:private\"],\n)\ntoolchain(\n name = \"toolchain\",\n target_compatible_with = [\"@platforms//os:windows\", \"@platforms//cpu:arm64\"],\n target_settings = [\":version_or_prefix_version_setting\"],\n toolchain_type = \"@bazel_tools//tools/jdk:runtime_toolchain_type\",\n toolchain = \"@remotejdk11_win_arm64//:jdk\",\n)\ntoolchain(\n name = \"bootstrap_runtime_toolchain\",\n # These constraints are not required for correctness, but prevent fetches of remote JDK for\n # different architectures. 
As every Java compilation toolchain depends on a bootstrap runtime in\n # the same configuration, this constraint will not result in toolchain resolution failures.\n exec_compatible_with = [\"@platforms//os:windows\", \"@platforms//cpu:arm64\"],\n target_settings = [\":version_or_prefix_version_setting\"],\n toolchain_type = \"@bazel_tools//tools/jdk:bootstrap_runtime_toolchain_type\",\n toolchain = \"@remotejdk11_win_arm64//:jdk\",\n)\n" + } + }, + "local_jdk": { + "bzlFile": "@@rules_java~7.1.0//toolchains:local_java_repository.bzl", + "ruleClassName": "_local_java_repository_rule", + "attributes": { + "name": "rules_java~7.1.0~toolchains~local_jdk", + "java_home": "", + "version": "", + "build_file_content": "load(\"@rules_java//java:defs.bzl\", \"java_runtime\")\n\npackage(default_visibility = [\"//visibility:public\"])\n\nexports_files([\"WORKSPACE\", \"BUILD.bazel\"])\n\nfilegroup(\n name = \"jre\",\n srcs = glob(\n [\n \"jre/bin/**\",\n \"jre/lib/**\",\n ],\n allow_empty = True,\n # In some configurations, Java browser plugin is considered harmful and\n # common antivirus software blocks access to npjp2.dll interfering with Bazel,\n # so do not include it in JRE on Windows.\n exclude = [\"jre/bin/plugin2/**\"],\n ),\n)\n\nfilegroup(\n name = \"jdk-bin\",\n srcs = glob(\n [\"bin/**\"],\n # The JDK on Windows sometimes contains a directory called\n # \"%systemroot%\", which is not a valid label.\n exclude = [\"**/*%*/**\"],\n ),\n)\n\n# This folder holds security policies.\nfilegroup(\n name = \"jdk-conf\",\n srcs = glob(\n [\"conf/**\"],\n allow_empty = True,\n ),\n)\n\nfilegroup(\n name = \"jdk-include\",\n srcs = glob(\n [\"include/**\"],\n allow_empty = True,\n ),\n)\n\nfilegroup(\n name = \"jdk-lib\",\n srcs = glob(\n [\"lib/**\", \"release\"],\n allow_empty = True,\n exclude = [\n \"lib/missioncontrol/**\",\n \"lib/visualvm/**\",\n ],\n ),\n)\n\njava_runtime(\n name = \"jdk\",\n srcs = [\n \":jdk-bin\",\n \":jdk-conf\",\n \":jdk-include\",\n 
\":jdk-lib\",\n \":jre\",\n ],\n # Provide the 'java` binary explicitly so that the correct path is used by\n # Bazel even when the host platform differs from the execution platform.\n # Exactly one of the two globs will be empty depending on the host platform.\n # When --incompatible_disallow_empty_glob is enabled, each individual empty\n # glob will fail without allow_empty = True, even if the overall result is\n # non-empty.\n java = glob([\"bin/java.exe\", \"bin/java\"], allow_empty = True)[0],\n version = {RUNTIME_VERSION},\n)\n" + } + }, + "remote_java_tools_darwin_x86_64": { + "bzlFile": "@@bazel_tools//tools/build_defs/repo:http.bzl", + "ruleClassName": "http_archive", + "attributes": { + "name": "rules_java~7.1.0~toolchains~remote_java_tools_darwin_x86_64", + "sha256": "0db40d8505a2b65ef0ed46e4256757807db8162f7acff16225be57c1d5726dbc", + "urls": [ + "https://mirror.bazel.build/bazel_java_tools/releases/java/v13.1/java_tools_darwin_x86_64-v13.1.zip", + "https://github.com/bazelbuild/java_tools/releases/download/java_v13.1/java_tools_darwin_x86_64-v13.1.zip" + ] + } + }, + "remote_java_tools": { + "bzlFile": "@@bazel_tools//tools/build_defs/repo:http.bzl", + "ruleClassName": "http_archive", + "attributes": { + "name": "rules_java~7.1.0~toolchains~remote_java_tools", + "sha256": "286bdbbd66e616fc4ed3f90101418729a73baa7e8c23a98ffbef558f74c0ad14", + "urls": [ + "https://mirror.bazel.build/bazel_java_tools/releases/java/v13.1/java_tools-v13.1.zip", + "https://github.com/bazelbuild/java_tools/releases/download/java_v13.1/java_tools-v13.1.zip" + ] + } + }, + "remotejdk17_linux_s390x": { + "bzlFile": "@@bazel_tools//tools/build_defs/repo:http.bzl", + "ruleClassName": "http_archive", + "attributes": { + "name": "rules_java~7.1.0~toolchains~remotejdk17_linux_s390x", + "build_file_content": "load(\"@rules_java//java:defs.bzl\", \"java_runtime\")\n\npackage(default_visibility = [\"//visibility:public\"])\n\nexports_files([\"WORKSPACE\", 
\"BUILD.bazel\"])\n\nfilegroup(\n name = \"jre\",\n srcs = glob(\n [\n \"jre/bin/**\",\n \"jre/lib/**\",\n ],\n allow_empty = True,\n # In some configurations, Java browser plugin is considered harmful and\n # common antivirus software blocks access to npjp2.dll interfering with Bazel,\n # so do not include it in JRE on Windows.\n exclude = [\"jre/bin/plugin2/**\"],\n ),\n)\n\nfilegroup(\n name = \"jdk-bin\",\n srcs = glob(\n [\"bin/**\"],\n # The JDK on Windows sometimes contains a directory called\n # \"%systemroot%\", which is not a valid label.\n exclude = [\"**/*%*/**\"],\n ),\n)\n\n# This folder holds security policies.\nfilegroup(\n name = \"jdk-conf\",\n srcs = glob(\n [\"conf/**\"],\n allow_empty = True,\n ),\n)\n\nfilegroup(\n name = \"jdk-include\",\n srcs = glob(\n [\"include/**\"],\n allow_empty = True,\n ),\n)\n\nfilegroup(\n name = \"jdk-lib\",\n srcs = glob(\n [\"lib/**\", \"release\"],\n allow_empty = True,\n exclude = [\n \"lib/missioncontrol/**\",\n \"lib/visualvm/**\",\n ],\n ),\n)\n\njava_runtime(\n name = \"jdk\",\n srcs = [\n \":jdk-bin\",\n \":jdk-conf\",\n \":jdk-include\",\n \":jdk-lib\",\n \":jre\",\n ],\n # Provide the 'java` binary explicitly so that the correct path is used by\n # Bazel even when the host platform differs from the execution platform.\n # Exactly one of the two globs will be empty depending on the host platform.\n # When --incompatible_disallow_empty_glob is enabled, each individual empty\n # glob will fail without allow_empty = True, even if the overall result is\n # non-empty.\n java = glob([\"bin/java.exe\", \"bin/java\"], allow_empty = True)[0],\n version = 17,\n)\n", + "sha256": "ffacba69c6843d7ca70d572489d6cc7ab7ae52c60f0852cedf4cf0d248b6fc37", + "strip_prefix": "jdk-17.0.8.1+1", + "urls": [ + "https://mirror.bazel.build/github.com/adoptium/temurin17-binaries/releases/download/jdk-17.0.8.1%2B1/OpenJDK17U-jdk_s390x_linux_hotspot_17.0.8.1_1.tar.gz", + 
"https://github.com/adoptium/temurin17-binaries/releases/download/jdk-17.0.8.1%2B1/OpenJDK17U-jdk_s390x_linux_hotspot_17.0.8.1_1.tar.gz" + ] + } + }, + "remotejdk17_win_toolchain_config_repo": { + "bzlFile": "@@rules_java~7.1.0//toolchains:remote_java_repository.bzl", + "ruleClassName": "_toolchain_config", + "attributes": { + "name": "rules_java~7.1.0~toolchains~remotejdk17_win_toolchain_config_repo", + "build_file": "\nconfig_setting(\n name = \"prefix_version_setting\",\n values = {\"java_runtime_version\": \"remotejdk_17\"},\n visibility = [\"//visibility:private\"],\n)\nconfig_setting(\n name = \"version_setting\",\n values = {\"java_runtime_version\": \"17\"},\n visibility = [\"//visibility:private\"],\n)\nalias(\n name = \"version_or_prefix_version_setting\",\n actual = select({\n \":version_setting\": \":version_setting\",\n \"//conditions:default\": \":prefix_version_setting\",\n }),\n visibility = [\"//visibility:private\"],\n)\ntoolchain(\n name = \"toolchain\",\n target_compatible_with = [\"@platforms//os:windows\", \"@platforms//cpu:x86_64\"],\n target_settings = [\":version_or_prefix_version_setting\"],\n toolchain_type = \"@bazel_tools//tools/jdk:runtime_toolchain_type\",\n toolchain = \"@remotejdk17_win//:jdk\",\n)\ntoolchain(\n name = \"bootstrap_runtime_toolchain\",\n # These constraints are not required for correctness, but prevent fetches of remote JDK for\n # different architectures. 
As every Java compilation toolchain depends on a bootstrap runtime in\n # the same configuration, this constraint will not result in toolchain resolution failures.\n exec_compatible_with = [\"@platforms//os:windows\", \"@platforms//cpu:x86_64\"],\n target_settings = [\":version_or_prefix_version_setting\"],\n toolchain_type = \"@bazel_tools//tools/jdk:bootstrap_runtime_toolchain_type\",\n toolchain = \"@remotejdk17_win//:jdk\",\n)\n" + } + }, + "remotejdk11_linux_ppc64le": { + "bzlFile": "@@bazel_tools//tools/build_defs/repo:http.bzl", + "ruleClassName": "http_archive", + "attributes": { + "name": "rules_java~7.1.0~toolchains~remotejdk11_linux_ppc64le", + "build_file_content": "load(\"@rules_java//java:defs.bzl\", \"java_runtime\")\n\npackage(default_visibility = [\"//visibility:public\"])\n\nexports_files([\"WORKSPACE\", \"BUILD.bazel\"])\n\nfilegroup(\n name = \"jre\",\n srcs = glob(\n [\n \"jre/bin/**\",\n \"jre/lib/**\",\n ],\n allow_empty = True,\n # In some configurations, Java browser plugin is considered harmful and\n # common antivirus software blocks access to npjp2.dll interfering with Bazel,\n # so do not include it in JRE on Windows.\n exclude = [\"jre/bin/plugin2/**\"],\n ),\n)\n\nfilegroup(\n name = \"jdk-bin\",\n srcs = glob(\n [\"bin/**\"],\n # The JDK on Windows sometimes contains a directory called\n # \"%systemroot%\", which is not a valid label.\n exclude = [\"**/*%*/**\"],\n ),\n)\n\n# This folder holds security policies.\nfilegroup(\n name = \"jdk-conf\",\n srcs = glob(\n [\"conf/**\"],\n allow_empty = True,\n ),\n)\n\nfilegroup(\n name = \"jdk-include\",\n srcs = glob(\n [\"include/**\"],\n allow_empty = True,\n ),\n)\n\nfilegroup(\n name = \"jdk-lib\",\n srcs = glob(\n [\"lib/**\", \"release\"],\n allow_empty = True,\n exclude = [\n \"lib/missioncontrol/**\",\n \"lib/visualvm/**\",\n ],\n ),\n)\n\njava_runtime(\n name = \"jdk\",\n srcs = [\n \":jdk-bin\",\n \":jdk-conf\",\n \":jdk-include\",\n \":jdk-lib\",\n \":jre\",\n ],\n # Provide the 
'java` binary explicitly so that the correct path is used by\n # Bazel even when the host platform differs from the execution platform.\n # Exactly one of the two globs will be empty depending on the host platform.\n # When --incompatible_disallow_empty_glob is enabled, each individual empty\n # glob will fail without allow_empty = True, even if the overall result is\n # non-empty.\n java = glob([\"bin/java.exe\", \"bin/java\"], allow_empty = True)[0],\n version = 11,\n)\n", + "sha256": "a8fba686f6eb8ae1d1a9566821dbd5a85a1108b96ad857fdbac5c1e4649fc56f", + "strip_prefix": "jdk-11.0.15+10", + "urls": [ + "https://mirror.bazel.build/github.com/adoptium/temurin11-binaries/releases/download/jdk-11.0.15+10/OpenJDK11U-jdk_ppc64le_linux_hotspot_11.0.15_10.tar.gz", + "https://github.com/adoptium/temurin11-binaries/releases/download/jdk-11.0.15+10/OpenJDK11U-jdk_ppc64le_linux_hotspot_11.0.15_10.tar.gz" + ] + } + }, + "remotejdk11_macos_aarch64": { + "bzlFile": "@@bazel_tools//tools/build_defs/repo:http.bzl", + "ruleClassName": "http_archive", + "attributes": { + "name": "rules_java~7.1.0~toolchains~remotejdk11_macos_aarch64", + "build_file_content": "load(\"@rules_java//java:defs.bzl\", \"java_runtime\")\n\npackage(default_visibility = [\"//visibility:public\"])\n\nexports_files([\"WORKSPACE\", \"BUILD.bazel\"])\n\nfilegroup(\n name = \"jre\",\n srcs = glob(\n [\n \"jre/bin/**\",\n \"jre/lib/**\",\n ],\n allow_empty = True,\n # In some configurations, Java browser plugin is considered harmful and\n # common antivirus software blocks access to npjp2.dll interfering with Bazel,\n # so do not include it in JRE on Windows.\n exclude = [\"jre/bin/plugin2/**\"],\n ),\n)\n\nfilegroup(\n name = \"jdk-bin\",\n srcs = glob(\n [\"bin/**\"],\n # The JDK on Windows sometimes contains a directory called\n # \"%systemroot%\", which is not a valid label.\n exclude = [\"**/*%*/**\"],\n ),\n)\n\n# This folder holds security policies.\nfilegroup(\n name = \"jdk-conf\",\n srcs = glob(\n 
[\"conf/**\"],\n allow_empty = True,\n ),\n)\n\nfilegroup(\n name = \"jdk-include\",\n srcs = glob(\n [\"include/**\"],\n allow_empty = True,\n ),\n)\n\nfilegroup(\n name = \"jdk-lib\",\n srcs = glob(\n [\"lib/**\", \"release\"],\n allow_empty = True,\n exclude = [\n \"lib/missioncontrol/**\",\n \"lib/visualvm/**\",\n ],\n ),\n)\n\njava_runtime(\n name = \"jdk\",\n srcs = [\n \":jdk-bin\",\n \":jdk-conf\",\n \":jdk-include\",\n \":jdk-lib\",\n \":jre\",\n ],\n # Provide the 'java` binary explicitly so that the correct path is used by\n # Bazel even when the host platform differs from the execution platform.\n # Exactly one of the two globs will be empty depending on the host platform.\n # When --incompatible_disallow_empty_glob is enabled, each individual empty\n # glob will fail without allow_empty = True, even if the overall result is\n # non-empty.\n java = glob([\"bin/java.exe\", \"bin/java\"], allow_empty = True)[0],\n version = 11,\n)\n", + "sha256": "7632bc29f8a4b7d492b93f3bc75a7b61630894db85d136456035ab2a24d38885", + "strip_prefix": "zulu11.66.15-ca-jdk11.0.20-macosx_aarch64", + "urls": [ + "https://mirror.bazel.build/cdn.azul.com/zulu/bin/zulu11.66.15-ca-jdk11.0.20-macosx_aarch64.tar.gz", + "https://cdn.azul.com/zulu/bin/zulu11.66.15-ca-jdk11.0.20-macosx_aarch64.tar.gz" + ] + } + }, + "remotejdk21_win_toolchain_config_repo": { + "bzlFile": "@@rules_java~7.1.0//toolchains:remote_java_repository.bzl", + "ruleClassName": "_toolchain_config", + "attributes": { + "name": "rules_java~7.1.0~toolchains~remotejdk21_win_toolchain_config_repo", + "build_file": "\nconfig_setting(\n name = \"prefix_version_setting\",\n values = {\"java_runtime_version\": \"remotejdk_21\"},\n visibility = [\"//visibility:private\"],\n)\nconfig_setting(\n name = \"version_setting\",\n values = {\"java_runtime_version\": \"21\"},\n visibility = [\"//visibility:private\"],\n)\nalias(\n name = \"version_or_prefix_version_setting\",\n actual = select({\n \":version_setting\": 
\":version_setting\",\n \"//conditions:default\": \":prefix_version_setting\",\n }),\n visibility = [\"//visibility:private\"],\n)\ntoolchain(\n name = \"toolchain\",\n target_compatible_with = [\"@platforms//os:windows\", \"@platforms//cpu:x86_64\"],\n target_settings = [\":version_or_prefix_version_setting\"],\n toolchain_type = \"@bazel_tools//tools/jdk:runtime_toolchain_type\",\n toolchain = \"@remotejdk21_win//:jdk\",\n)\ntoolchain(\n name = \"bootstrap_runtime_toolchain\",\n # These constraints are not required for correctness, but prevent fetches of remote JDK for\n # different architectures. As every Java compilation toolchain depends on a bootstrap runtime in\n # the same configuration, this constraint will not result in toolchain resolution failures.\n exec_compatible_with = [\"@platforms//os:windows\", \"@platforms//cpu:x86_64\"],\n target_settings = [\":version_or_prefix_version_setting\"],\n toolchain_type = \"@bazel_tools//tools/jdk:bootstrap_runtime_toolchain_type\",\n toolchain = \"@remotejdk21_win//:jdk\",\n)\n" + } + } + } + } + } + } +} diff --git a/WORKSPACE b/WORKSPACE index 31379b236b17..59679656d3b4 100644 --- a/WORKSPACE +++ b/WORKSPACE @@ -182,7 +182,7 @@ load("@io_bazel_rules_go//go:deps.bzl", "go_register_toolchains", "go_rules_depe go_rules_dependencies() go_register_toolchains( - go_version = "1.21.5", + go_version = "1.21.6", nogo = "@//:nogo", ) @@ -356,10 +356,10 @@ filegroup( http_archive( name = "com_google_protobuf", - sha256 = "4e176116949be52b0408dfd24f8925d1eb674a781ae242a75296b17a1c721395", - strip_prefix = "protobuf-23.3", + sha256 = "9bd87b8280ef720d3240514f884e56a712f2218f0d693b48050c836028940a42", + strip_prefix = "protobuf-25.1", urls = [ - "https://github.com/protocolbuffers/protobuf/archive/v23.3.tar.gz", + "https://github.com/protocolbuffers/protobuf/archive/v25.1.tar.gz", ], ) diff --git a/api/headers.go b/api/headers.go index 245cdbf59a06..5973b814087c 100644 --- a/api/headers.go +++ b/api/headers.go @@ -7,5 +7,4 @@ 
const ( ConsensusBlockValueHeader = "Eth-Consensus-Block-Value" JsonMediaType = "application/json" OctetStreamMediaType = "application/octet-stream" - EventStreamMediaType = "text/event-stream" ) diff --git a/beacon-chain/blockchain/BUILD.bazel b/beacon-chain/blockchain/BUILD.bazel index 561d4fd699e6..ef7d9f9f6a09 100644 --- a/beacon-chain/blockchain/BUILD.bazel +++ b/beacon-chain/blockchain/BUILD.bazel @@ -6,6 +6,7 @@ go_library( "chain_info.go", "chain_info_forkchoice.go", "currently_syncing_block.go", + "defragment.go", "error.go", "execution_engine.go", "forkchoice_update_execution.go", @@ -50,6 +51,7 @@ go_library( "//beacon-chain/core/signing:go_default_library", "//beacon-chain/core/time:go_default_library", "//beacon-chain/core/transition:go_default_library", + "//beacon-chain/das:go_default_library", "//beacon-chain/db:go_default_library", "//beacon-chain/db/filesystem:go_default_library", "//beacon-chain/db/filters:go_default_library", @@ -141,6 +143,7 @@ go_test( "//beacon-chain/core/helpers:go_default_library", "//beacon-chain/core/signing:go_default_library", "//beacon-chain/core/transition:go_default_library", + "//beacon-chain/das:go_default_library", "//beacon-chain/db:go_default_library", "//beacon-chain/db/filesystem:go_default_library", "//beacon-chain/db/testing:go_default_library", diff --git a/beacon-chain/blockchain/chain_info.go b/beacon-chain/blockchain/chain_info.go index 052e4503b818..c7dbf96c2490 100644 --- a/beacon-chain/blockchain/chain_info.go +++ b/beacon-chain/blockchain/chain_info.go @@ -6,7 +6,10 @@ import ( "time" "github.com/pkg/errors" + "go.opencensus.io/trace" + "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/helpers" + f "github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice" doublylinkedtree "github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice/doubly-linked-tree" forkchoicetypes "github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice/types" "github.com/prysmaticlabs/prysm/v4/beacon-chain/state" @@ -18,7 
+21,6 @@ import ( "github.com/prysmaticlabs/prysm/v4/encoding/bytesutil" ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1" "github.com/prysmaticlabs/prysm/v4/time/slots" - "go.opencensus.io/trace" ) // ChainInfoFetcher defines a common interface for methods in blockchain service which @@ -334,12 +336,21 @@ func (s *Service) HeadValidatorIndexToPublicKey(_ context.Context, index primiti return v.PublicKey(), nil } +// ForkChoicer returns the forkchoice interface. +func (s *Service) ForkChoicer() f.ForkChoicer { + return s.cfg.ForkChoiceStore +} + // IsOptimistic returns true if the current head is optimistic. func (s *Service) IsOptimistic(_ context.Context) (bool, error) { if slots.ToEpoch(s.CurrentSlot()) < params.BeaconConfig().BellatrixForkEpoch { return false, nil } s.headLock.RLock() + if s.head == nil { + s.headLock.RUnlock() + return false, ErrNilHead + } headRoot := s.head.root headSlot := s.head.slot headOptimistic := s.head.optimistic @@ -545,3 +556,18 @@ func (s *Service) RecentBlockSlot(root [32]byte) (primitives.Slot, error) { defer s.cfg.ForkChoiceStore.RUnlock() return s.cfg.ForkChoiceStore.Slot(root) } + +// inRegularSync applies the following heuristics to decide if the node is in +// regular sync mode vs init sync mode using only forkchoice. +// It checks that the highest received block is behind the current time by at least 2 epochs +// and that it was imported at least one epoch late if both of these +// tests pass then the node is in init sync. 
The caller of this function MUST +// have a lock on forkchoice +func (s *Service) inRegularSync() bool { + currentSlot := s.CurrentSlot() + fc := s.cfg.ForkChoiceStore + if currentSlot-fc.HighestReceivedBlockSlot() < 2*params.BeaconConfig().SlotsPerEpoch { + return true + } + return fc.HighestReceivedBlockDelay() < params.BeaconConfig().SlotsPerEpoch +} diff --git a/beacon-chain/blockchain/chain_info_test.go b/beacon-chain/blockchain/chain_info_test.go index ed22b29395f8..5d1187e73d2d 100644 --- a/beacon-chain/blockchain/chain_info_test.go +++ b/beacon-chain/blockchain/chain_info_test.go @@ -429,6 +429,11 @@ func TestService_IsOptimistic(t *testing.T) { opt, err = c.IsOptimistic(ctx) require.NoError(t, err) require.Equal(t, true, opt) + + // If head is nil, for some reason, an error should be returned rather than panic. + c = &Service{} + _, err = c.IsOptimistic(ctx) + require.ErrorIs(t, err, ErrNilHead) } func TestService_IsOptimisticBeforeBellatrix(t *testing.T) { @@ -588,3 +593,26 @@ func TestService_IsFinalized(t *testing.T) { require.Equal(t, true, c.IsFinalized(ctx, br)) require.Equal(t, false, c.IsFinalized(ctx, [32]byte{'c'})) } + +func TestService_inRegularSync(t *testing.T) { + ctx := context.Background() + c := &Service{cfg: &config{ForkChoiceStore: doublylinkedtree.New()}, head: &head{root: [32]byte{'b'}}} + ojc := ðpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]} + ofc := ðpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]} + st, blkRoot, err := prepareForkchoiceState(ctx, 100, [32]byte{'a'}, [32]byte{}, params.BeaconConfig().ZeroHash, ojc, ofc) + require.NoError(t, err) + require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, st, blkRoot)) + require.Equal(t, false, c.inRegularSync()) + c.SetGenesisTime(time.Now().Add(time.Second * time.Duration(-1*int64(params.BeaconConfig().SlotsPerEpoch)*int64(params.BeaconConfig().SecondsPerSlot)))) + st, blkRoot, err = prepareForkchoiceState(ctx, 128, [32]byte{'b'}, [32]byte{'a'}, 
params.BeaconConfig().ZeroHash, ojc, ofc) + require.NoError(t, err) + require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, st, blkRoot)) + require.Equal(t, false, c.inRegularSync()) + + c.SetGenesisTime(time.Now().Add(time.Second * time.Duration(-5*int64(params.BeaconConfig().SlotsPerEpoch)*int64(params.BeaconConfig().SecondsPerSlot)))) + require.Equal(t, true, c.inRegularSync()) + + c.SetGenesisTime(time.Now().Add(time.Second * time.Duration(-1*int64(params.BeaconConfig().SlotsPerEpoch)*int64(params.BeaconConfig().SecondsPerSlot)))) + c.cfg.ForkChoiceStore.SetGenesisTime(uint64(time.Now().Unix())) + require.Equal(t, true, c.inRegularSync()) +} diff --git a/beacon-chain/blockchain/defragment.go b/beacon-chain/blockchain/defragment.go new file mode 100644 index 000000000000..2f3c888e5880 --- /dev/null +++ b/beacon-chain/blockchain/defragment.go @@ -0,0 +1,27 @@ +package blockchain + +import ( + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promauto" + "github.com/prysmaticlabs/prysm/v4/beacon-chain/state" + "github.com/prysmaticlabs/prysm/v4/config/features" + "github.com/prysmaticlabs/prysm/v4/time" +) + +var stateDefragmentationTime = promauto.NewSummary(prometheus.SummaryOpts{ + Name: "head_state_defragmentation_milliseconds", + Help: "Milliseconds it takes to defragment the head state", +}) + +// This method defragments our state, so that any specific fields which have +// a higher number of fragmented indexes are reallocated to a new separate slice for +// that field. 
+func (s *Service) defragmentState(st state.BeaconState) { + if !features.Get().EnableExperimentalState { + return + } + startTime := time.Now() + st.Defragment() + elapsedTime := time.Since(startTime) + stateDefragmentationTime.Observe(float64(elapsedTime.Milliseconds())) +} diff --git a/beacon-chain/blockchain/error.go b/beacon-chain/blockchain/error.go index 2058a97e31c4..87ed0d2416db 100644 --- a/beacon-chain/blockchain/error.go +++ b/beacon-chain/blockchain/error.go @@ -28,6 +28,8 @@ var ( // ErrNotCheckpoint is returned when a given checkpoint is not a // checkpoint in any chain known to forkchoice ErrNotCheckpoint = errors.New("not a checkpoint in forkchoice") + // ErrNilHead is returned when no head is present in the blockchain service. + ErrNilHead = errors.New("nil head") ) var errMaxBlobsExceeded = errors.New("Expected commitments in block exceeds MAX_BLOBS_PER_BLOCK") diff --git a/beacon-chain/blockchain/execution_engine.go b/beacon-chain/blockchain/execution_engine.go index 564648a694cf..f449481f0abc 100644 --- a/beacon-chain/blockchain/execution_engine.go +++ b/beacon-chain/blockchain/execution_engine.go @@ -387,9 +387,9 @@ func (s *Service) removeInvalidBlockAndState(ctx context.Context, blkRoots [][32 // This is an irreparable condition, it would me a justified or finalized block has become invalid. return err } - // No op if the sidecar does not exist. - if err := s.cfg.BeaconDB.DeleteBlobSidecars(ctx, root); err != nil { - return err + if err := s.blobStorage.Remove(root); err != nil { + // Blobs may not exist for some blocks, leading to deletion failures. Log such errors at debug level. 
+ log.WithError(err).Debug("Could not remove blob from blob storage") } } return nil diff --git a/beacon-chain/blockchain/execution_engine_test.go b/beacon-chain/blockchain/execution_engine_test.go index 3c76db73b411..8b3912ba7adc 100644 --- a/beacon-chain/blockchain/execution_engine_test.go +++ b/beacon-chain/blockchain/execution_engine_test.go @@ -151,8 +151,10 @@ func Test_NotifyForkchoiceUpdate(t *testing.T) { ReceiptsRoot: make([]byte, fieldparams.RootLength), LogsBloom: make([]byte, fieldparams.LogsBloomLength), PrevRandao: make([]byte, fieldparams.RootLength), + ExtraData: make([]byte, 0), BaseFeePerGas: make([]byte, fieldparams.RootLength), BlockHash: make([]byte, fieldparams.RootLength), + Transactions: make([][]byte, 0), }, }, }}) @@ -494,8 +496,10 @@ func Test_NotifyNewPayload(t *testing.T) { ReceiptsRoot: make([]byte, fieldparams.RootLength), LogsBloom: make([]byte, fieldparams.LogsBloomLength), PrevRandao: make([]byte, fieldparams.RootLength), + ExtraData: make([]byte, 0), BaseFeePerGas: make([]byte, fieldparams.RootLength), BlockHash: make([]byte, fieldparams.RootLength), + Transactions: make([][]byte, 0), }, }, }, @@ -597,8 +601,10 @@ func Test_NotifyNewPayload(t *testing.T) { ReceiptsRoot: make([]byte, fieldparams.RootLength), LogsBloom: make([]byte, fieldparams.LogsBloomLength), PrevRandao: make([]byte, fieldparams.RootLength), + ExtraData: make([]byte, 0), BaseFeePerGas: make([]byte, fieldparams.RootLength), BlockHash: make([]byte, fieldparams.RootLength), + Transactions: make([][]byte, 0), }, }, }, @@ -1096,3 +1102,35 @@ func TestKZGCommitmentToVersionedHashes(t *testing.T) { require.Equal(t, vhs[0].String(), vh0) require.Equal(t, vhs[1].String(), vh1) } + +func TestComputePayloadAttribute(t *testing.T) { + service, tr := minimalTestService(t, WithPayloadIDCache(cache.NewPayloadIDCache())) + ctx := tr.ctx + + st, _ := util.DeterministicGenesisStateBellatrix(t, 1) + + service.cfg.TrackedValidatorsCache.Set(cache.TrackedValidator{Active: true, 
Index: 0}) + // Cache hit, advance state, no fee recipient + slot := primitives.Slot(1) + service.cfg.PayloadIDCache.Set(slot, [32]byte{}, [8]byte{}) + cfg := &postBlockProcessConfig{ + ctx: ctx, + blockRoot: [32]byte{'a'}, + } + fcu := &fcuConfig{ + headState: st, + proposingSlot: slot, + headRoot: [32]byte{}, + } + require.NoError(t, service.computePayloadAttributes(cfg, fcu)) + require.Equal(t, false, fcu.attributes.IsEmpty()) + require.Equal(t, params.BeaconConfig().EthBurnAddressHex, common.BytesToAddress(fcu.attributes.SuggestedFeeRecipient()).String()) + + // Cache hit, advance state, has fee recipient + suggestedAddr := common.HexToAddress("123") + service.cfg.TrackedValidatorsCache.Set(cache.TrackedValidator{Active: true, FeeRecipient: primitives.ExecutionAddress(suggestedAddr), Index: 0}) + service.cfg.PayloadIDCache.Set(slot, [32]byte{}, [8]byte{}) + require.NoError(t, service.computePayloadAttributes(cfg, fcu)) + require.Equal(t, false, fcu.attributes.IsEmpty()) + require.Equal(t, suggestedAddr, common.BytesToAddress(fcu.attributes.SuggestedFeeRecipient())) +} diff --git a/beacon-chain/blockchain/forkchoice_update_execution.go b/beacon-chain/blockchain/forkchoice_update_execution.go index 4f51175b11ac..bb334dae2bdc 100644 --- a/beacon-chain/blockchain/forkchoice_update_execution.go +++ b/beacon-chain/blockchain/forkchoice_update_execution.go @@ -52,6 +52,41 @@ type fcuConfig struct { attributes payloadattribute.Attributer } +// sendFCU handles the logic to notify the engine of a forkchoice update +// for the first time when processing an incoming block during regular sync. It +// always updates the shuffling caches and handles epoch transitions when the +// incoming block is late, preparing payload attributes in this case while it +// only sends a message with empty attributes for early blocks. 
+func (s *Service) sendFCU(cfg *postBlockProcessConfig, fcuArgs *fcuConfig) error { + if !s.isNewHead(cfg.headRoot) { + return nil + } + if fcuArgs.attributes != nil && !fcuArgs.attributes.IsEmpty() && s.shouldOverrideFCU(cfg.headRoot, s.CurrentSlot()+1) { + return nil + } + return s.forkchoiceUpdateWithExecution(cfg.ctx, fcuArgs) +} + +// sendFCUWithAttributes computes the payload attributes and sends an FCU message +// to the engine if needed +func (s *Service) sendFCUWithAttributes(cfg *postBlockProcessConfig, fcuArgs *fcuConfig) { + slotCtx, cancel := context.WithTimeout(context.Background(), slotDeadline) + defer cancel() + cfg.ctx = slotCtx + if err := s.computePayloadAttributes(cfg, fcuArgs); err != nil { + log.WithError(err).Error("could not compute payload attributes") + return + } + if fcuArgs.attributes.IsEmpty() { + return + } + s.cfg.ForkChoiceStore.RLock() + defer s.cfg.ForkChoiceStore.RUnlock() + if _, err := s.notifyForkchoiceUpdate(cfg.ctx, fcuArgs); err != nil { + log.WithError(err).Error("could not update forkchoice with payload attributes for proposal") + } +} + // fockchoiceUpdateWithExecution is a wrapper around notifyForkchoiceUpdate. It decides whether a new call to FCU should be made. 
func (s *Service) forkchoiceUpdateWithExecution(ctx context.Context, args *fcuConfig) error { _, span := trace.StartSpan(ctx, "beacon-chain.blockchain.forkchoiceUpdateWithExecution") diff --git a/beacon-chain/blockchain/kzg/BUILD.bazel b/beacon-chain/blockchain/kzg/BUILD.bazel index 4f30dc2dcdcd..21c8fe9d71bd 100644 --- a/beacon-chain/blockchain/kzg/BUILD.bazel +++ b/beacon-chain/blockchain/kzg/BUILD.bazel @@ -11,7 +11,6 @@ go_library( visibility = ["//visibility:public"], deps = [ "//consensus-types/blocks:go_default_library", - "//proto/prysm/v1alpha1:go_default_library", "@com_github_crate_crypto_go_kzg_4844//:go_default_library", "@com_github_pkg_errors//:go_default_library", ], @@ -25,7 +24,7 @@ go_test( ], embed = [":go_default_library"], deps = [ - "//proto/prysm/v1alpha1:go_default_library", + "//consensus-types/blocks:go_default_library", "//testing/require:go_default_library", "@com_github_consensys_gnark_crypto//ecc/bls12-381/fr:go_default_library", "@com_github_crate_crypto_go_kzg_4844//:go_default_library", diff --git a/beacon-chain/blockchain/kzg/validation.go b/beacon-chain/blockchain/kzg/validation.go index f4a46a02ce89..1549a3f45914 100644 --- a/beacon-chain/blockchain/kzg/validation.go +++ b/beacon-chain/blockchain/kzg/validation.go @@ -1,42 +1,32 @@ package kzg import ( - "fmt" - GoKZG "github.com/crate-crypto/go-kzg-4844" "github.com/prysmaticlabs/prysm/v4/consensus-types/blocks" - ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1" ) -// IsDataAvailable checks that -// - all blobs in the block are available -// - Expected KZG commitments match the number of blobs in the block -// - That the number of proofs match the number of blobs -// - That the proofs are verified against the KZG commitments -func IsDataAvailable(commitments [][]byte, sidecars []*ethpb.DeprecatedBlobSidecar) error { - if len(commitments) != len(sidecars) { - return fmt.Errorf("could not check data availability, expected %d commitments, obtained %d", - 
len(commitments), len(sidecars)) - } - if len(commitments) == 0 { +// Verify performs single or batch verification of commitments depending on the number of given BlobSidecars. +func Verify(sidecars ...blocks.ROBlob) error { + if len(sidecars) == 0 { return nil } - blobs := make([]GoKZG.Blob, len(commitments)) - proofs := make([]GoKZG.KZGProof, len(commitments)) - cmts := make([]GoKZG.KZGCommitment, len(commitments)) + if len(sidecars) == 1 { + return kzgContext.VerifyBlobKZGProof( + bytesToBlob(sidecars[0].Blob), + bytesToCommitment(sidecars[0].KzgCommitment), + bytesToKZGProof(sidecars[0].KzgProof)) + } + blobs := make([]GoKZG.Blob, len(sidecars)) + cmts := make([]GoKZG.KZGCommitment, len(sidecars)) + proofs := make([]GoKZG.KZGProof, len(sidecars)) for i, sidecar := range sidecars { blobs[i] = bytesToBlob(sidecar.Blob) + cmts[i] = bytesToCommitment(sidecar.KzgCommitment) proofs[i] = bytesToKZGProof(sidecar.KzgProof) - cmts[i] = bytesToCommitment(commitments[i]) } return kzgContext.VerifyBlobKZGProofBatch(blobs, cmts, proofs) } -// VerifyROBlobCommitment is a helper that massages the fields of an ROBlob into the types needed to call VerifyBlobKZGProof. 
-func VerifyROBlobCommitment(sc blocks.ROBlob) error { - return kzgContext.VerifyBlobKZGProof(bytesToBlob(sc.Blob), bytesToCommitment(sc.KzgCommitment), bytesToKZGProof(sc.KzgProof)) -} - func bytesToBlob(blob []byte) (ret GoKZG.Blob) { copy(ret[:], blob) return diff --git a/beacon-chain/blockchain/kzg/validation_test.go b/beacon-chain/blockchain/kzg/validation_test.go index b6f905ef0957..032fbadf02e6 100644 --- a/beacon-chain/blockchain/kzg/validation_test.go +++ b/beacon-chain/blockchain/kzg/validation_test.go @@ -8,7 +8,7 @@ import ( "github.com/consensys/gnark-crypto/ecc/bls12-381/fr" GoKZG "github.com/crate-crypto/go-kzg-4844" - ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1" + "github.com/prysmaticlabs/prysm/v4/consensus-types/blocks" "github.com/prysmaticlabs/prysm/v4/testing/require" "github.com/sirupsen/logrus" ) @@ -58,10 +58,9 @@ func GenerateCommitmentAndProof(blob GoKZG.Blob) (GoKZG.KZGCommitment, GoKZG.KZG return commitment, proof, err } -func TestIsDataAvailable(t *testing.T) { - sidecars := make([]*ethpb.DeprecatedBlobSidecar, 0) - commitments := make([][]byte, 0) - require.NoError(t, IsDataAvailable(commitments, sidecars)) +func TestVerify(t *testing.T) { + sidecars := make([]blocks.ROBlob, 0) + require.NoError(t, Verify(sidecars...)) } func TestBytesToAny(t *testing.T) { diff --git a/beacon-chain/blockchain/metrics.go b/beacon-chain/blockchain/metrics.go index 7d6242bfbe13..7cbb177991f5 100644 --- a/beacon-chain/blockchain/metrics.go +++ b/beacon-chain/blockchain/metrics.go @@ -358,6 +358,7 @@ func reportEpochMetrics(ctx context.Context, postState, headState state.BeaconSt for name, val := range refMap { stateTrieReferences.WithLabelValues(name).Set(float64(val)) } + postState.RecordStateMetrics() return nil } diff --git a/beacon-chain/blockchain/process_block.go b/beacon-chain/blockchain/process_block.go index 5a4238f30921..152cd19adf80 100644 --- a/beacon-chain/blockchain/process_block.go +++ 
b/beacon-chain/blockchain/process_block.go @@ -6,12 +6,15 @@ import ( "time" "github.com/pkg/errors" + "go.opencensus.io/trace" + "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/blocks" "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed" statefeed "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed/state" "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/helpers" coreTime "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/time" "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/transition" + "github.com/prysmaticlabs/prysm/v4/beacon-chain/das" "github.com/prysmaticlabs/prysm/v4/beacon-chain/db/filesystem" forkchoicetypes "github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice/types" "github.com/prysmaticlabs/prysm/v4/beacon-chain/state" @@ -28,8 +31,6 @@ import ( "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1/attestation" "github.com/prysmaticlabs/prysm/v4/runtime/version" "github.com/prysmaticlabs/prysm/v4/time/slots" - "github.com/sirupsen/logrus" - "go.opencensus.io/trace" ) // A custom slot deadline for processing state slots in our cache. @@ -41,149 +42,71 @@ const depositDeadline = 20 * time.Second // This defines size of the upper bound for initial sync block cache. var initialSyncBlockCacheSize = uint64(2 * params.BeaconConfig().SlotsPerEpoch) +// postBlockProcessConfig is a structure that contains the data needed to +// process the beacon block after validating the state transition function +type postBlockProcessConfig struct { + ctx context.Context + signed interfaces.ReadOnlySignedBeaconBlock + blockRoot [32]byte + headRoot [32]byte + postState state.BeaconState + isValidPayload bool +} + // postBlockProcess is called when a gossip block is received. This function performs // several duties most importantly informing the engine if head was updated, // saving the new head information to the blockchain package and // handling attestations, slashings and similar included in the block. 
-func (s *Service) postBlockProcess(ctx context.Context, signed interfaces.ReadOnlySignedBeaconBlock, blockRoot [32]byte, postState state.BeaconState, isValidPayload bool) error { - ctx, span := trace.StartSpan(ctx, "blockChain.onBlock") +func (s *Service) postBlockProcess(cfg *postBlockProcessConfig) error { + ctx, span := trace.StartSpan(cfg.ctx, "blockChain.onBlock") defer span.End() - if err := consensusblocks.BeaconBlockIsNil(signed); err != nil { + cfg.ctx = ctx + if err := consensusblocks.BeaconBlockIsNil(cfg.signed); err != nil { return invalidBlock{error: err} } startTime := time.Now() - b := signed.Block() + fcuArgs := &fcuConfig{} - if err := s.cfg.ForkChoiceStore.InsertNode(ctx, postState, blockRoot); err != nil { - return errors.Wrapf(err, "could not insert block %d to fork choice store", signed.Block().Slot()) + if s.inRegularSync() { + defer s.handleSecondFCUCall(cfg, fcuArgs) } - if err := s.handleBlockAttestations(ctx, signed.Block(), postState); err != nil { + defer s.sendLightClientFeeds(cfg) + defer s.sendStateFeedOnBlock(cfg) + defer reportProcessingTime(startTime) + defer reportAttestationInclusion(cfg.signed.Block()) + + err := s.cfg.ForkChoiceStore.InsertNode(ctx, cfg.postState, cfg.blockRoot) + if err != nil { + return errors.Wrapf(err, "could not insert block %d to fork choice store", cfg.signed.Block().Slot()) + } + if err := s.handleBlockAttestations(ctx, cfg.signed.Block(), cfg.postState); err != nil { return errors.Wrap(err, "could not handle block's attestations") } - s.InsertSlashingsToForkChoiceStore(ctx, signed.Block().Body().AttesterSlashings()) - if isValidPayload { - if err := s.cfg.ForkChoiceStore.SetOptimisticToValid(ctx, blockRoot); err != nil { + s.InsertSlashingsToForkChoiceStore(ctx, cfg.signed.Block().Body().AttesterSlashings()) + if cfg.isValidPayload { + if err := s.cfg.ForkChoiceStore.SetOptimisticToValid(ctx, cfg.blockRoot); err != nil { return errors.Wrap(err, "could not set optimistic block to valid") } } - start := 
time.Now() - headRoot, err := s.cfg.ForkChoiceStore.Head(ctx) + cfg.headRoot, err = s.cfg.ForkChoiceStore.Head(ctx) if err != nil { log.WithError(err).Warn("Could not update head") } newBlockHeadElapsedTime.Observe(float64(time.Since(start).Milliseconds())) - proposingSlot := s.CurrentSlot() + 1 - var fcuArgs *fcuConfig - if blockRoot != headRoot { - receivedWeight, err := s.cfg.ForkChoiceStore.Weight(blockRoot) - if err != nil { - log.WithField("root", fmt.Sprintf("%#x", blockRoot)).Warn("could not determine node weight") - } - headWeight, err := s.cfg.ForkChoiceStore.Weight(headRoot) - if err != nil { - log.WithField("root", fmt.Sprintf("%#x", headRoot)).Warn("could not determine node weight") - } - log.WithFields(logrus.Fields{ - "receivedRoot": fmt.Sprintf("%#x", blockRoot), - "receivedWeight": receivedWeight, - "headRoot": fmt.Sprintf("%#x", headRoot), - "headWeight": headWeight, - }).Debug("Head block is not the received block") - headState, headBlock, err := s.getStateAndBlock(ctx, headRoot) - if err != nil { - log.WithError(err).Error("Could not get forkchoice update argument") - return nil - } - fcuArgs = &fcuConfig{ - headState: headState, - headBlock: headBlock, - headRoot: headRoot, - proposingSlot: proposingSlot, - } - } else { - fcuArgs = &fcuConfig{ - headState: postState, - headBlock: signed, - headRoot: headRoot, - proposingSlot: proposingSlot, - } + if cfg.headRoot != cfg.blockRoot { + s.logNonCanonicalBlockReceived(cfg.blockRoot, cfg.headRoot) + return nil } - isEarly := slots.WithinVotingWindow(uint64(s.genesisTime.Unix())) - shouldOverrideFCU := false - slot := postState.Slot() - if s.isNewHead(headRoot) { - // if the block is early send FCU without any payload attributes - if isEarly { - if err := s.forkchoiceUpdateWithExecution(ctx, fcuArgs); err != nil { - return err - } - } else { - // if the block is late lock and update the caches - if blockRoot == headRoot { - if err := transition.UpdateNextSlotCache(ctx, blockRoot[:], postState); err != 
nil { - return errors.Wrap(err, "could not update next slot state cache") - } - if slots.IsEpochEnd(slot) { - if err := s.handleEpochBoundary(ctx, slot, postState, blockRoot[:]); err != nil { - return errors.Wrap(err, "could not handle epoch boundary") - } - } - } - _, tracked := s.trackedProposer(fcuArgs.headState, proposingSlot) - if tracked { - shouldOverrideFCU = s.shouldOverrideFCU(headRoot, proposingSlot) - fcuArgs.attributes = s.getPayloadAttribute(ctx, fcuArgs.headState, proposingSlot, headRoot[:]) - } - if !shouldOverrideFCU { - if err := s.forkchoiceUpdateWithExecution(ctx, fcuArgs); err != nil { - return err - } - } - } + if err := s.getFCUArgs(cfg, fcuArgs); err != nil { + log.WithError(err).Error("Could not get forkchoice update argument") + return nil } - optimistic, err := s.cfg.ForkChoiceStore.IsOptimistic(blockRoot) - if err != nil { - log.WithError(err).Debug("Could not check if block is optimistic") - optimistic = true + if err := s.sendFCU(cfg, fcuArgs); err != nil { + return errors.Wrap(err, "could not send FCU to engine") } - // Send notification of the processed block to the state feed. 
- s.cfg.StateNotifier.StateFeed().Send(&feed.Event{ - Type: statefeed.BlockProcessed, - Data: &statefeed.BlockProcessedData{ - Slot: signed.Block().Slot(), - BlockRoot: blockRoot, - SignedBlock: signed, - Verified: true, - Optimistic: optimistic, - }, - }) - if blockRoot == headRoot && isEarly { - go func() { - slotCtx, cancel := context.WithTimeout(context.Background(), slotDeadline) - defer cancel() - if err := transition.UpdateNextSlotCache(slotCtx, blockRoot[:], postState); err != nil { - log.WithError(err).Error("could not update next slot state cache") - } - if slots.IsEpochEnd(slot) { - if err := s.handleEpochBoundary(ctx, slot, postState, blockRoot[:]); err != nil { - log.WithError(err).Error("could not handle epoch boundary") - } - } - if _, tracked := s.trackedProposer(fcuArgs.headState, proposingSlot); !tracked { - return - } - fcuArgs.attributes = s.getPayloadAttribute(ctx, fcuArgs.headState, proposingSlot, headRoot[:]) - s.cfg.ForkChoiceStore.RLock() - defer s.cfg.ForkChoiceStore.RUnlock() - if _, err := s.notifyForkchoiceUpdate(ctx, fcuArgs); err != nil { - log.WithError(err).Error("could not update forkchoice with payload attributes for proposal") - } - }() - } - defer reportAttestationInclusion(b) - onBlockProcessingTime.Observe(float64(time.Since(startTime).Milliseconds())) + return nil } @@ -205,7 +128,7 @@ func getStateVersionAndPayload(st state.BeaconState) (int, interfaces.ExecutionD return preStateVersion, preStateHeader, nil } -func (s *Service) onBlockBatch(ctx context.Context, blks []consensusblocks.ROBlock) error { +func (s *Service) onBlockBatch(ctx context.Context, blks []consensusblocks.ROBlock, avs das.AvailabilityStore) error { ctx, span := trace.StartSpan(ctx, "blockChain.onBlockBatch") defer span.End() @@ -308,8 +231,8 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []consensusblocks.ROBlo return err } } - if err := s.databaseDACheck(ctx, b); err != nil { - return errors.Wrap(err, "could not validate blob data 
availability") + if err := avs.IsDataAvailable(ctx, s.CurrentSlot(), b); err != nil { + return errors.Wrapf(err, "could not validate blob data availability at slot %d", b.Block().Slot()) } args := &forkchoicetypes.BlockAndCheckpoints{Block: b.Block(), JustifiedCheckpoint: jCheckpoints[i], @@ -376,37 +299,6 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []consensusblocks.ROBlo return s.saveHeadNoDB(ctx, lastB, lastBR, preState, !isValidPayload) } -func commitmentsToCheck(b consensusblocks.ROBlock, current primitives.Slot) [][]byte { - if b.Version() < version.Deneb { - return nil - } - // We are only required to check within MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS - if !params.WithinDAPeriod(slots.ToEpoch(b.Block().Slot()), slots.ToEpoch(current)) { - return nil - } - kzgCommitments, err := b.Block().Body().BlobKzgCommitments() - if err != nil { - return nil - } - return kzgCommitments -} - -func (s *Service) databaseDACheck(ctx context.Context, b consensusblocks.ROBlock) error { - commitments := commitmentsToCheck(b, s.CurrentSlot()) - if len(commitments) == 0 { - return nil - } - missing, err := missingIndices(s.blobStorage, b.Root(), commitments) - if err != nil { - return err - } - if len(missing) == 0 { - return nil - } - // TODO: don't worry that this error isn't informative, it will be superceded by a detailed sidecar cache error. 
- return errors.New("not all kzg commitments are available") -} - func (s *Service) updateEpochBoundaryCaches(ctx context.Context, st state.BeaconState) error { e := coreTime.CurrentEpoch(st) if err := helpers.UpdateCommitteeCache(ctx, st, e); err != nil { @@ -690,10 +582,15 @@ func (s *Service) lateBlockTasks(ctx context.Context) { if s.CurrentSlot() == s.HeadSlot() { return } + s.cfg.ForkChoiceStore.RLock() + defer s.cfg.ForkChoiceStore.RUnlock() + // return early if we are in init sync + if !s.inRegularSync() { + return + } s.cfg.StateNotifier.StateFeed().Send(&feed.Event{ Type: statefeed.MissedSlot, }) - s.headLock.RLock() headRoot := s.headRoot() headState := s.headState(ctx) @@ -708,23 +605,22 @@ func (s *Service) lateBlockTasks(ctx context.Context) { if err := transition.UpdateNextSlotCache(ctx, lastRoot, lastState); err != nil { log.WithError(err).Debug("could not update next slot state cache") } - // handleEpochBoundary requires a forkchoice lock to obtain the target root. - s.cfg.ForkChoiceStore.RLock() if err := s.handleEpochBoundary(ctx, currentSlot, headState, headRoot[:]); err != nil { log.WithError(err).Error("lateBlockTasks: could not update epoch boundary caches") } - s.cfg.ForkChoiceStore.RUnlock() - _, tracked := s.trackedProposer(headState, s.CurrentSlot()+1) - // return early if we are not proposing next slot. 
- if !tracked { - return - } // return early if we already started building a block for the current // head root _, has := s.cfg.PayloadIDCache.PayloadID(s.CurrentSlot()+1, headRoot) if has { return } + + attribute := s.getPayloadAttribute(ctx, headState, s.CurrentSlot()+1, headRoot[:]) + // return early if we are not proposing next slot + if attribute.IsEmpty() { + return + } + s.headLock.RLock() headBlock, err := s.headBlock() if err != nil { @@ -733,15 +629,14 @@ func (s *Service) lateBlockTasks(ctx context.Context) { return } s.headLock.RUnlock() - s.cfg.ForkChoiceStore.RLock() + fcuArgs := &fcuConfig{ - headState: headState, - headRoot: headRoot, - headBlock: headBlock, + headState: headState, + headRoot: headRoot, + headBlock: headBlock, + attributes: attribute, } - fcuArgs.attributes = s.getPayloadAttribute(ctx, headState, s.CurrentSlot()+1, headRoot[:]) _, err = s.notifyForkchoiceUpdate(ctx, fcuArgs) - s.cfg.ForkChoiceStore.RUnlock() if err != nil { log.WithError(err).Debug("could not perform late block tasks: failed to update forkchoice with engine") } diff --git a/beacon-chain/blockchain/process_block_helpers.go b/beacon-chain/blockchain/process_block_helpers.go index 54e7735438c5..db84896b65f7 100644 --- a/beacon-chain/blockchain/process_block_helpers.go +++ b/beacon-chain/blockchain/process_block_helpers.go @@ -3,21 +3,28 @@ package blockchain import ( "context" "fmt" + "time" "github.com/ethereum/go-ethereum/common" "github.com/pkg/errors" + "github.com/sirupsen/logrus" + "go.opencensus.io/trace" + + "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed" + statefeed "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed/state" + "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/transition" doublylinkedtree "github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice/doubly-linked-tree" forkchoicetypes "github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice/types" "github.com/prysmaticlabs/prysm/v4/beacon-chain/state" + 
"github.com/prysmaticlabs/prysm/v4/config/features" "github.com/prysmaticlabs/prysm/v4/config/params" "github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces" "github.com/prysmaticlabs/prysm/v4/consensus-types/primitives" "github.com/prysmaticlabs/prysm/v4/encoding/bytesutil" mathutil "github.com/prysmaticlabs/prysm/v4/math" + ethpbv2 "github.com/prysmaticlabs/prysm/v4/proto/eth/v2" ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1" - "github.com/prysmaticlabs/prysm/v4/time" "github.com/prysmaticlabs/prysm/v4/time/slots" - "go.opencensus.io/trace" ) // CurrentSlot returns the current slot based on time. @@ -25,6 +32,252 @@ func (s *Service) CurrentSlot() primitives.Slot { return slots.CurrentSlot(uint64(s.genesisTime.Unix())) } +// getFCUArgs returns the arguments to call forkchoice update +func (s *Service) getFCUArgs(cfg *postBlockProcessConfig, fcuArgs *fcuConfig) error { + if err := s.getFCUArgsEarlyBlock(cfg, fcuArgs); err != nil { + return err + } + if !s.inRegularSync() { + return nil + } + slot := cfg.signed.Block().Slot() + if slots.WithinVotingWindow(uint64(s.genesisTime.Unix()), slot) { + return nil + } + return s.computePayloadAttributes(cfg, fcuArgs) +} + +func (s *Service) getFCUArgsEarlyBlock(cfg *postBlockProcessConfig, fcuArgs *fcuConfig) error { + if cfg.blockRoot == cfg.headRoot { + fcuArgs.headState = cfg.postState + fcuArgs.headBlock = cfg.signed + fcuArgs.headRoot = cfg.headRoot + fcuArgs.proposingSlot = s.CurrentSlot() + 1 + return nil + } + return s.fcuArgsNonCanonicalBlock(cfg, fcuArgs) +} + +// logNonCanonicalBlockReceived prints a message informing that the received +// block is not the head of the chain. It requires the caller holds a lock on +// Foprkchoice. 
+func (s *Service) logNonCanonicalBlockReceived(blockRoot [32]byte, headRoot [32]byte) { + receivedWeight, err := s.cfg.ForkChoiceStore.Weight(blockRoot) + if err != nil { + log.WithField("root", fmt.Sprintf("%#x", blockRoot)).Warn("could not determine node weight") + } + headWeight, err := s.cfg.ForkChoiceStore.Weight(headRoot) + if err != nil { + log.WithField("root", fmt.Sprintf("%#x", headRoot)).Warn("could not determine node weight") + } + log.WithFields(logrus.Fields{ + "receivedRoot": fmt.Sprintf("%#x", blockRoot), + "receivedWeight": receivedWeight, + "headRoot": fmt.Sprintf("%#x", headRoot), + "headWeight": headWeight, + }).Debug("Head block is not the received block") +} + +// fcuArgsNonCanonicalBlock returns the arguments to the FCU call when the +// incoming block is non-canonical, that is, based on the head root. +func (s *Service) fcuArgsNonCanonicalBlock(cfg *postBlockProcessConfig, fcuArgs *fcuConfig) error { + headState, headBlock, err := s.getStateAndBlock(cfg.ctx, cfg.headRoot) + if err != nil { + return err + } + fcuArgs.headState = headState + fcuArgs.headBlock = headBlock + fcuArgs.headRoot = cfg.headRoot + fcuArgs.proposingSlot = s.CurrentSlot() + 1 + return nil +} + +// sendStateFeedOnBlock sends an event that a new block has been synced +func (s *Service) sendStateFeedOnBlock(cfg *postBlockProcessConfig) { + optimistic, err := s.cfg.ForkChoiceStore.IsOptimistic(cfg.blockRoot) + if err != nil { + log.WithError(err).Debug("Could not check if block is optimistic") + optimistic = true + } + // Send notification of the processed block to the state feed. + s.cfg.StateNotifier.StateFeed().Send(&feed.Event{ + Type: statefeed.BlockProcessed, + Data: &statefeed.BlockProcessedData{ + Slot: cfg.signed.Block().Slot(), + BlockRoot: cfg.blockRoot, + SignedBlock: cfg.signed, + Verified: true, + Optimistic: optimistic, + }, + }) +} + +// sendLightClientFeeds sends the light client feeds when feature flag is enabled. 
+func (s *Service) sendLightClientFeeds(cfg *postBlockProcessConfig) { + if features.Get().EnableLightClient { + if _, err := s.sendLightClientOptimisticUpdate(cfg.ctx, cfg.signed, cfg.postState); err != nil { + log.WithError(err).Error("Failed to send light client optimistic update") + } + + // Get the finalized checkpoint + finalized := s.ForkChoicer().FinalizedCheckpoint() + + // LightClientFinalityUpdate needs super majority + s.tryPublishLightClientFinalityUpdate(cfg.ctx, cfg.signed, finalized, cfg.postState) + } +} + +func (s *Service) tryPublishLightClientFinalityUpdate(ctx context.Context, signed interfaces.ReadOnlySignedBeaconBlock, finalized *forkchoicetypes.Checkpoint, postState state.BeaconState) { + if finalized.Epoch <= s.lastPublishedLightClientEpoch { + return + } + + config := params.BeaconConfig() + if finalized.Epoch < config.AltairForkEpoch { + return + } + + syncAggregate, err := signed.Block().Body().SyncAggregate() + if err != nil || syncAggregate == nil { + return + } + + // LightClientFinalityUpdate needs super majority + if syncAggregate.SyncCommitteeBits.Count()*3 < config.SyncCommitteeSize*2 { + return + } + + _, err = s.sendLightClientFinalityUpdate(ctx, signed, postState) + if err != nil { + log.WithError(err).Error("Failed to send light client finality update") + } else { + s.lastPublishedLightClientEpoch = finalized.Epoch + } +} + +// sendLightClientFinalityUpdate sends a light client finality update notification to the state feed. 
+func (s *Service) sendLightClientFinalityUpdate(ctx context.Context, signed interfaces.ReadOnlySignedBeaconBlock, + postState state.BeaconState) (int, error) { + // Get attested state + attestedRoot := signed.Block().ParentRoot() + attestedState, err := s.cfg.StateGen.StateByRoot(ctx, attestedRoot) + if err != nil { + return 0, errors.Wrap(err, "could not get attested state") + } + + // Get finalized block + var finalizedBlock interfaces.ReadOnlySignedBeaconBlock + finalizedCheckPoint := attestedState.FinalizedCheckpoint() + if finalizedCheckPoint != nil { + finalizedRoot := bytesutil.ToBytes32(finalizedCheckPoint.Root) + finalizedBlock, err = s.cfg.BeaconDB.Block(ctx, finalizedRoot) + if err != nil { + finalizedBlock = nil + } + } + + update, err := NewLightClientFinalityUpdateFromBeaconState( + ctx, + postState, + signed, + attestedState, + finalizedBlock, + ) + + if err != nil { + return 0, errors.Wrap(err, "could not create light client update") + } + + // Return the result + result := ðpbv2.LightClientFinalityUpdateWithVersion{ + Version: ethpbv2.Version(signed.Version()), + Data: CreateLightClientFinalityUpdate(update), + } + + // Send event + return s.cfg.StateNotifier.StateFeed().Send(&feed.Event{ + Type: statefeed.LightClientFinalityUpdate, + Data: result, + }), nil +} + +// sendLightClientOptimisticUpdate sends a light client optimistic update notification to the state feed. 
+func (s *Service) sendLightClientOptimisticUpdate(ctx context.Context, signed interfaces.ReadOnlySignedBeaconBlock, + postState state.BeaconState) (int, error) { + // Get attested state + attestedRoot := signed.Block().ParentRoot() + attestedState, err := s.cfg.StateGen.StateByRoot(ctx, attestedRoot) + if err != nil { + return 0, errors.Wrap(err, "could not get attested state") + } + + update, err := NewLightClientOptimisticUpdateFromBeaconState( + ctx, + postState, + signed, + attestedState, + ) + + if err != nil { + return 0, errors.Wrap(err, "could not create light client update") + } + + // Return the result + result := ðpbv2.LightClientOptimisticUpdateWithVersion{ + Version: ethpbv2.Version(signed.Version()), + Data: CreateLightClientOptimisticUpdate(update), + } + + return s.cfg.StateNotifier.StateFeed().Send(&feed.Event{ + Type: statefeed.LightClientOptimisticUpdate, + Data: result, + }), nil +} + +// updateCachesPostBlockProcessing updates the next slot cache and handles the epoch +// boundary in order to compute the right proposer indices after processing +// state transition. This function is called on late blocks while still locked, +// before sending FCU to the engine. +func (s *Service) updateCachesPostBlockProcessing(cfg *postBlockProcessConfig) error { + slot := cfg.postState.Slot() + if err := transition.UpdateNextSlotCache(cfg.ctx, cfg.blockRoot[:], cfg.postState); err != nil { + return errors.Wrap(err, "could not update next slot state cache") + } + if !slots.IsEpochEnd(slot) { + return nil + } + return s.handleEpochBoundary(cfg.ctx, slot, cfg.postState, cfg.blockRoot[:]) +} + +// handleSecondFCUCall handles a second call to FCU when syncing a new block. +// This is useful when proposing in the next block and we want to defer the +// computation of the next slot shuffling. 
+func (s *Service) handleSecondFCUCall(cfg *postBlockProcessConfig, fcuArgs *fcuConfig) { + if (fcuArgs.attributes == nil || fcuArgs.attributes.IsEmpty()) && cfg.headRoot == cfg.blockRoot { + go s.sendFCUWithAttributes(cfg, fcuArgs) + } +} + +// reportProcessingTime reports the metric of how long it took to process the +// current block +func reportProcessingTime(startTime time.Time) { + onBlockProcessingTime.Observe(float64(time.Since(startTime).Milliseconds())) +} + +// computePayloadAttributes modifies the passed FCU arguments to +// contain the right payload attributes with the tracked proposer. It gets +// called on blocks that arrive after the attestation voting window, or in a +// background routine after syncing early blocks. +func (s *Service) computePayloadAttributes(cfg *postBlockProcessConfig, fcuArgs *fcuConfig) error { + if cfg.blockRoot == cfg.headRoot { + if err := s.updateCachesPostBlockProcessing(cfg); err != nil { + return err + } + } + fcuArgs.attributes = s.getPayloadAttribute(cfg.ctx, fcuArgs.headState, fcuArgs.proposingSlot, cfg.headRoot[:]) + return nil +} + // getBlockPreState returns the pre state of an incoming block. It uses the parent root of the block // to retrieve the state in DB. It verifies the pre state's validity and the incoming block // is in the correct time window. 
diff --git a/beacon-chain/blockchain/process_block_test.go b/beacon-chain/blockchain/process_block_test.go index fc53ce923959..f4bc1a29a80f 100644 --- a/beacon-chain/blockchain/process_block_test.go +++ b/beacon-chain/blockchain/process_block_test.go @@ -1,7 +1,6 @@ package blockchain import ( - "bytes" "context" "fmt" "math/big" @@ -17,6 +16,7 @@ import ( "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/blocks" "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/signing" "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/transition" + "github.com/prysmaticlabs/prysm/v4/beacon-chain/das" "github.com/prysmaticlabs/prysm/v4/beacon-chain/db" "github.com/prysmaticlabs/prysm/v4/beacon-chain/db/filesystem" testDB "github.com/prysmaticlabs/prysm/v4/beacon-chain/db/testing" @@ -40,7 +40,6 @@ import ( "github.com/prysmaticlabs/prysm/v4/testing/require" "github.com/prysmaticlabs/prysm/v4/testing/util" prysmTime "github.com/prysmaticlabs/prysm/v4/time" - "github.com/prysmaticlabs/prysm/v4/time/slots" logTest "github.com/sirupsen/logrus/hooks/test" ) @@ -69,7 +68,7 @@ func TestStore_OnBlockBatch(t *testing.T) { require.NoError(t, err) blks = append(blks, rwsb) } - err := service.onBlockBatch(ctx, blks) + err := service.onBlockBatch(ctx, blks, &das.MockAvailabilityStore{}) require.NoError(t, err) jcp := service.CurrentJustifiedCheckpt() jroot := bytesutil.ToBytes32(jcp.Root) @@ -99,7 +98,7 @@ func TestStore_OnBlockBatch_NotifyNewPayload(t *testing.T) { require.NoError(t, service.saveInitSyncBlock(ctx, rwsb.Root(), wsb)) blks = append(blks, rwsb) } - require.NoError(t, service.onBlockBatch(ctx, blks)) + require.NoError(t, service.onBlockBatch(ctx, blks, &das.MockAvailabilityStore{})) } func TestCachedPreState_CanGetFromStateSummary(t *testing.T) { @@ -567,7 +566,7 @@ func TestOnBlock_CanFinalize_WithOnTick(t *testing.T) { postState, err := service.validateStateTransition(ctx, preState, wsb) require.NoError(t, err) require.NoError(t, service.savePostStateInfo(ctx, r, wsb, 
postState)) - require.NoError(t, service.postBlockProcess(ctx, wsb, r, postState, true)) + require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, wsb, r, [32]byte{}, postState, true})) require.NoError(t, service.updateJustificationOnBlock(ctx, preState, postState, currStoreJustifiedEpoch)) _, err = service.updateFinalizationOnBlock(ctx, preState, postState, currStoreFinalizedEpoch) require.NoError(t, err) @@ -615,7 +614,7 @@ func TestOnBlock_CanFinalize(t *testing.T) { postState, err := service.validateStateTransition(ctx, preState, wsb) require.NoError(t, err) require.NoError(t, service.savePostStateInfo(ctx, r, wsb, postState)) - require.NoError(t, service.postBlockProcess(ctx, wsb, r, postState, true)) + require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, wsb, r, [32]byte{}, postState, true})) require.NoError(t, service.updateJustificationOnBlock(ctx, preState, postState, currStoreJustifiedEpoch)) _, err = service.updateFinalizationOnBlock(ctx, preState, postState, currStoreFinalizedEpoch) require.NoError(t, err) @@ -641,7 +640,7 @@ func TestOnBlock_CanFinalize(t *testing.T) { func TestOnBlock_NilBlock(t *testing.T) { service, tr := minimalTestService(t) - err := service.postBlockProcess(tr.ctx, nil, [32]byte{}, nil, true) + err := service.postBlockProcess(&postBlockProcessConfig{tr.ctx, nil, [32]byte{}, [32]byte{}, nil, true}) require.Equal(t, true, IsInvalidBlock(err)) } @@ -689,7 +688,7 @@ func TestOnBlock_CallNewPayloadAndForkchoiceUpdated(t *testing.T) { postState, err := service.validateStateTransition(ctx, preState, wsb) require.NoError(t, err) require.NoError(t, service.savePostStateInfo(ctx, r, wsb, postState)) - require.NoError(t, service.postBlockProcess(ctx, wsb, r, postState, false)) + require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, wsb, r, [32]byte{}, postState, false})) testState, err = service.cfg.StateGen.StateByRoot(ctx, r) require.NoError(t, err) } @@ -924,8 +923,10 @@ func 
Test_validateMergeTransitionBlock(t *testing.T) { ReceiptsRoot: make([]byte, fieldparams.RootLength), LogsBloom: make([]byte, fieldparams.LogsBloomLength), PrevRandao: make([]byte, fieldparams.RootLength), + ExtraData: make([]byte, 0), BaseFeePerGas: make([]byte, fieldparams.RootLength), BlockHash: make([]byte, fieldparams.RootLength), + Transactions: make([][]byte, 0), }, }, { @@ -969,6 +970,7 @@ func Test_validateMergeTransitionBlock(t *testing.T) { ReceiptsRoot: make([]byte, fieldparams.RootLength), LogsBloom: make([]byte, fieldparams.LogsBloomLength), PrevRandao: make([]byte, fieldparams.RootLength), + ExtraData: make([]byte, 0), BaseFeePerGas: make([]byte, fieldparams.RootLength), BlockHash: make([]byte, fieldparams.RootLength), TransactionsRoot: make([]byte, fieldparams.RootLength), @@ -1111,7 +1113,7 @@ func TestOnBlock_ProcessBlocksParallel(t *testing.T) { postState, err := service.validateStateTransition(ctx, preState, wsb1) require.NoError(t, err) lock.Lock() - require.NoError(t, service.postBlockProcess(ctx, wsb1, r1, postState, true)) + require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, wsb1, r1, [32]byte{}, postState, true})) lock.Unlock() wg.Done() }() @@ -1121,7 +1123,7 @@ func TestOnBlock_ProcessBlocksParallel(t *testing.T) { postState, err := service.validateStateTransition(ctx, preState, wsb2) require.NoError(t, err) lock.Lock() - require.NoError(t, service.postBlockProcess(ctx, wsb2, r2, postState, true)) + require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, wsb2, r2, [32]byte{}, postState, true})) lock.Unlock() wg.Done() }() @@ -1131,7 +1133,7 @@ func TestOnBlock_ProcessBlocksParallel(t *testing.T) { postState, err := service.validateStateTransition(ctx, preState, wsb3) require.NoError(t, err) lock.Lock() - require.NoError(t, service.postBlockProcess(ctx, wsb3, r3, postState, true)) + require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, wsb3, r3, [32]byte{}, postState, true})) 
lock.Unlock() wg.Done() }() @@ -1141,7 +1143,7 @@ func TestOnBlock_ProcessBlocksParallel(t *testing.T) { postState, err := service.validateStateTransition(ctx, preState, wsb4) require.NoError(t, err) lock.Lock() - require.NoError(t, service.postBlockProcess(ctx, wsb4, r4, postState, true)) + require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, wsb4, r4, [32]byte{}, postState, true})) lock.Unlock() wg.Done() }() @@ -1216,7 +1218,7 @@ func TestStore_NoViableHead_FCU(t *testing.T) { postState, err := service.validateStateTransition(ctx, preState, wsb) require.NoError(t, err) require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState)) - require.NoError(t, service.postBlockProcess(ctx, wsb, root, postState, false)) + require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, wsb, root, [32]byte{}, postState, false})) } for i := 6; i < 12; i++ { @@ -1234,7 +1236,7 @@ func TestStore_NoViableHead_FCU(t *testing.T) { postState, err := service.validateStateTransition(ctx, preState, wsb) require.NoError(t, err) require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState)) - err = service.postBlockProcess(ctx, wsb, root, postState, false) + err = service.postBlockProcess(&postBlockProcessConfig{ctx, wsb, root, [32]byte{}, postState, false}) require.NoError(t, err) } @@ -1253,7 +1255,7 @@ func TestStore_NoViableHead_FCU(t *testing.T) { postState, err := service.validateStateTransition(ctx, preState, wsb) require.NoError(t, err) require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState)) - err = service.postBlockProcess(ctx, wsb, root, postState, false) + err = service.postBlockProcess(&postBlockProcessConfig{ctx, wsb, root, [32]byte{}, postState, false}) require.NoError(t, err) } // Check that we haven't justified the second epoch yet @@ -1275,7 +1277,7 @@ func TestStore_NoViableHead_FCU(t *testing.T) { postState, err := service.validateStateTransition(ctx, preState, wsb) require.NoError(t, err) 
require.NoError(t, service.savePostStateInfo(ctx, firstInvalidRoot, wsb, postState)) - err = service.postBlockProcess(ctx, wsb, firstInvalidRoot, postState, false) + err = service.postBlockProcess(&postBlockProcessConfig{ctx, wsb, firstInvalidRoot, [32]byte{}, postState, false}) require.NoError(t, err) jc = service.cfg.ForkChoiceStore.JustifiedCheckpoint() require.Equal(t, primitives.Epoch(2), jc.Epoch) @@ -1303,7 +1305,7 @@ func TestStore_NoViableHead_FCU(t *testing.T) { postState, err = service.validateStateTransition(ctx, preState, wsb) require.NoError(t, err) require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState)) - err = service.postBlockProcess(ctx, wsb, root, postState, false) + err = service.postBlockProcess(&postBlockProcessConfig{ctx, wsb, root, [32]byte{}, postState, false}) require.ErrorContains(t, "received an INVALID payload from execution engine", err) // Check that forkchoice's head is the last invalid block imported. The // store's headroot is the previous head (since the invalid block did @@ -1332,7 +1334,7 @@ func TestStore_NoViableHead_FCU(t *testing.T) { postState, err = service.validateStateTransition(ctx, preState, wsb) require.NoError(t, err) require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState)) - err = service.postBlockProcess(ctx, wsb, root, postState, true) + err = service.postBlockProcess(&postBlockProcessConfig{ctx, wsb, root, [32]byte{}, postState, true}) require.NoError(t, err) // Check the newly imported block is head, it justified the right // checkpoint and the node is no longer optimistic @@ -1394,7 +1396,7 @@ func TestStore_NoViableHead_NewPayload(t *testing.T) { postState, err := service.validateStateTransition(ctx, preState, wsb) require.NoError(t, err) require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState)) - require.NoError(t, service.postBlockProcess(ctx, wsb, root, postState, false)) + require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, wsb, root, 
[32]byte{}, postState, false})) } for i := 6; i < 12; i++ { @@ -1412,7 +1414,7 @@ func TestStore_NoViableHead_NewPayload(t *testing.T) { postState, err := service.validateStateTransition(ctx, preState, wsb) require.NoError(t, err) require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState)) - err = service.postBlockProcess(ctx, wsb, root, postState, false) + err = service.postBlockProcess(&postBlockProcessConfig{ctx, wsb, root, [32]byte{}, postState, false}) require.NoError(t, err) } @@ -1432,7 +1434,7 @@ func TestStore_NoViableHead_NewPayload(t *testing.T) { postState, err := service.validateStateTransition(ctx, preState, wsb) require.NoError(t, err) require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState)) - err = service.postBlockProcess(ctx, wsb, root, postState, false) + err = service.postBlockProcess(&postBlockProcessConfig{ctx, wsb, root, [32]byte{}, postState, false}) require.NoError(t, err) } // Check that we haven't justified the second epoch yet @@ -1454,7 +1456,7 @@ func TestStore_NoViableHead_NewPayload(t *testing.T) { postState, err := service.validateStateTransition(ctx, preState, wsb) require.NoError(t, err) require.NoError(t, service.savePostStateInfo(ctx, firstInvalidRoot, wsb, postState)) - err = service.postBlockProcess(ctx, wsb, firstInvalidRoot, postState, false) + err = service.postBlockProcess(&postBlockProcessConfig{ctx, wsb, firstInvalidRoot, [32]byte{}, postState, false}) require.NoError(t, err) jc = service.cfg.ForkChoiceStore.JustifiedCheckpoint() require.Equal(t, primitives.Epoch(2), jc.Epoch) @@ -1510,7 +1512,7 @@ func TestStore_NoViableHead_NewPayload(t *testing.T) { postState, err = service.validateStateTransition(ctx, preState, wsb) require.NoError(t, err) require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState)) - err = service.postBlockProcess(ctx, wsb, root, postState, true) + err = service.postBlockProcess(&postBlockProcessConfig{ctx, wsb, root, [32]byte{}, postState, true}) 
require.NoError(t, err) // Check the newly imported block is head, it justified the right // checkpoint and the node is no longer optimistic @@ -1574,7 +1576,7 @@ func TestStore_NoViableHead_Liveness(t *testing.T) { postState, err := service.validateStateTransition(ctx, preState, wsb) require.NoError(t, err) require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState)) - require.NoError(t, service.postBlockProcess(ctx, wsb, root, postState, false)) + require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, wsb, root, [32]byte{}, postState, false})) } for i := 6; i < 12; i++ { @@ -1593,7 +1595,7 @@ func TestStore_NoViableHead_Liveness(t *testing.T) { postState, err := service.validateStateTransition(ctx, preState, wsb) require.NoError(t, err) require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState)) - err = service.postBlockProcess(ctx, wsb, root, postState, false) + err = service.postBlockProcess(&postBlockProcessConfig{ctx, wsb, root, [32]byte{}, postState, false}) require.NoError(t, err) } @@ -1612,7 +1614,7 @@ func TestStore_NoViableHead_Liveness(t *testing.T) { postState, err := service.validateStateTransition(ctx, preState, wsb) require.NoError(t, err) require.NoError(t, service.savePostStateInfo(ctx, lastValidRoot, wsb, postState)) - err = service.postBlockProcess(ctx, wsb, lastValidRoot, postState, false) + err = service.postBlockProcess(&postBlockProcessConfig{ctx, wsb, lastValidRoot, [32]byte{}, postState, false}) require.NoError(t, err) // save the post state and the payload Hash of this block since it will // be the LVH @@ -1639,7 +1641,7 @@ func TestStore_NoViableHead_Liveness(t *testing.T) { postState, err := service.validateStateTransition(ctx, preState, wsb) require.NoError(t, err) require.NoError(t, service.savePostStateInfo(ctx, invalidRoots[i-13], wsb, postState)) - err = service.postBlockProcess(ctx, wsb, invalidRoots[i-13], postState, false) + err = service.postBlockProcess(&postBlockProcessConfig{ctx, 
wsb, invalidRoots[i-13], [32]byte{}, postState, false}) require.NoError(t, err) } // Check that we have justified the second epoch @@ -1704,7 +1706,7 @@ func TestStore_NoViableHead_Liveness(t *testing.T) { postState, err = service.validateStateTransition(ctx, preState, wsb) require.NoError(t, err) require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState)) - require.NoError(t, service.postBlockProcess(ctx, wsb, root, postState, true)) + require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, wsb, root, [32]byte{}, postState, true})) // Check that the head is still INVALID and the node is still optimistic require.Equal(t, invalidHeadRoot, service.cfg.ForkChoiceStore.CachedHeadRoot()) optimistic, err = service.IsOptimistic(ctx) @@ -1727,7 +1729,7 @@ func TestStore_NoViableHead_Liveness(t *testing.T) { postState, err := service.validateStateTransition(ctx, preState, wsb) require.NoError(t, err) require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState)) - err = service.postBlockProcess(ctx, wsb, root, postState, true) + err = service.postBlockProcess(&postBlockProcessConfig{ctx, wsb, root, [32]byte{}, postState, true}) require.NoError(t, err) st, err = service.cfg.StateGen.StateByRoot(ctx, root) require.NoError(t, err) @@ -1753,7 +1755,7 @@ func TestStore_NoViableHead_Liveness(t *testing.T) { postState, err = service.validateStateTransition(ctx, preState, wsb) require.NoError(t, err) require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState)) - err = service.postBlockProcess(ctx, wsb, root, postState, true) + err = service.postBlockProcess(&postBlockProcessConfig{ctx, wsb, root, [32]byte{}, postState, true}) require.NoError(t, err) require.Equal(t, root, service.cfg.ForkChoiceStore.CachedHeadRoot()) sjc = service.CurrentJustifiedCheckpt() @@ -1809,7 +1811,7 @@ func TestNoViableHead_Reboot(t *testing.T) { postState, err := service.validateStateTransition(ctx, preState, wsb) require.NoError(t, err) 
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState)) - require.NoError(t, service.postBlockProcess(ctx, wsb, root, postState, false)) + require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, wsb, root, [32]byte{}, postState, false})) } for i := 6; i < 12; i++ { @@ -1827,7 +1829,7 @@ func TestNoViableHead_Reboot(t *testing.T) { postState, err := service.validateStateTransition(ctx, preState, wsb) require.NoError(t, err) require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState)) - err = service.postBlockProcess(ctx, wsb, root, postState, false) + err = service.postBlockProcess(&postBlockProcessConfig{ctx, wsb, root, [32]byte{}, postState, false}) require.NoError(t, err) } @@ -1846,7 +1848,7 @@ func TestNoViableHead_Reboot(t *testing.T) { postState, err := service.validateStateTransition(ctx, preState, wsb) require.NoError(t, err) require.NoError(t, service.savePostStateInfo(ctx, lastValidRoot, wsb, postState)) - err = service.postBlockProcess(ctx, wsb, lastValidRoot, postState, false) + err = service.postBlockProcess(&postBlockProcessConfig{ctx, wsb, lastValidRoot, [32]byte{}, postState, false}) require.NoError(t, err) // save the post state and the payload Hash of this block since it will // be the LVH @@ -1875,7 +1877,7 @@ func TestNoViableHead_Reboot(t *testing.T) { postState, err := service.validateStateTransition(ctx, preState, wsb) require.NoError(t, err) require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState)) - require.NoError(t, service.postBlockProcess(ctx, wsb, root, postState, false)) + require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, wsb, root, [32]byte{}, postState, false})) require.NoError(t, service.updateJustificationOnBlock(ctx, preState, postState, currStoreJustifiedEpoch)) _, err = service.updateFinalizationOnBlock(ctx, preState, postState, currStoreFinalizedEpoch) require.NoError(t, err) @@ -1946,7 +1948,7 @@ func TestNoViableHead_Reboot(t *testing.T) { 
rwsb, err := consensusblocks.NewROBlock(wsb) require.NoError(t, err) // We use onBlockBatch here because the valid chain is missing in forkchoice - require.NoError(t, service.onBlockBatch(ctx, []consensusblocks.ROBlock{rwsb})) + require.NoError(t, service.onBlockBatch(ctx, []consensusblocks.ROBlock{rwsb}, &das.MockAvailabilityStore{})) // Check that the head is now VALID and the node is not optimistic require.Equal(t, genesisRoot, service.ensureRootNotZeros(service.cfg.ForkChoiceStore.CachedHeadRoot())) headRoot, err = service.HeadRoot(ctx) @@ -1990,7 +1992,7 @@ func TestOnBlock_HandleBlockAttestations(t *testing.T) { postState, err := service.validateStateTransition(ctx, preState, wsb) require.NoError(t, err) require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState)) - require.NoError(t, service.postBlockProcess(ctx, wsb, root, postState, false)) + require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, wsb, root, [32]byte{}, postState, false})) st, err = service.HeadState(ctx) require.NoError(t, err) @@ -2049,74 +2051,6 @@ func driftGenesisTime(s *Service, slot, delay int64) { s.SetGenesisTime(time.Unix(time.Now().Unix()-offset, 0)) } -func Test_commitmentsToCheck(t *testing.T) { - windowSlots, err := slots.EpochEnd(params.BeaconConfig().MinEpochsForBlobsSidecarsRequest) - require.NoError(t, err) - commits := [][]byte{ - bytesutil.PadTo([]byte("a"), 48), - bytesutil.PadTo([]byte("b"), 48), - bytesutil.PadTo([]byte("c"), 48), - bytesutil.PadTo([]byte("d"), 48), - } - cases := []struct { - name string - commits [][]byte - block func(*testing.T) consensusblocks.ROBlock - slot primitives.Slot - }{ - { - name: "pre deneb", - block: func(t *testing.T) consensusblocks.ROBlock { - bb := util.NewBeaconBlockBellatrix() - sb, err := consensusblocks.NewSignedBeaconBlock(bb) - require.NoError(t, err) - rb, err := consensusblocks.NewROBlock(sb) - require.NoError(t, err) - return rb - }, - }, - { - name: "commitments within da", - block: func(t 
*testing.T) consensusblocks.ROBlock { - d := util.NewBeaconBlockDeneb() - d.Block.Body.BlobKzgCommitments = commits - d.Block.Slot = 100 - sb, err := consensusblocks.NewSignedBeaconBlock(d) - require.NoError(t, err) - rb, err := consensusblocks.NewROBlock(sb) - require.NoError(t, err) - return rb - }, - commits: commits, - slot: 100, - }, - { - name: "commitments outside da", - block: func(t *testing.T) consensusblocks.ROBlock { - d := util.NewBeaconBlockDeneb() - // block is from slot 0, "current slot" is window size +1 (so outside the window) - d.Block.Body.BlobKzgCommitments = commits - sb, err := consensusblocks.NewSignedBeaconBlock(d) - require.NoError(t, err) - rb, err := consensusblocks.NewROBlock(sb) - require.NoError(t, err) - return rb - }, - slot: windowSlots + 1, - }, - } - for _, c := range cases { - t.Run(c.name, func(t *testing.T) { - b := c.block(t) - co := commitmentsToCheck(b, c.slot) - require.Equal(t, len(c.commits), len(co)) - for i := 0; i < len(c.commits); i++ { - require.Equal(t, true, bytes.Equal(c.commits[i], co[i])) - } - }) - } -} - func TestMissingIndices(t *testing.T) { cases := []struct { name string @@ -2197,6 +2131,35 @@ func TestMissingIndices(t *testing.T) { } } +func Test_getFCUArgs(t *testing.T) { + s, tr := minimalTestService(t) + ctx := tr.ctx + st, keys := util.DeterministicGenesisState(t, 64) + b, err := util.GenerateFullBlock(st, keys, util.DefaultBlockGenConfig(), 1) + require.NoError(t, err) + wsb, err := consensusblocks.NewSignedBeaconBlock(b) + require.NoError(t, err) + + cfg := &postBlockProcessConfig{ + ctx: ctx, + signed: wsb, + blockRoot: [32]byte{'a'}, + postState: st, + isValidPayload: true, + } + // error branch + fcuArgs := &fcuConfig{} + err = s.getFCUArgs(cfg, fcuArgs) + require.ErrorContains(t, "block does not exist", err) + + // canonical branch + cfg.headRoot = cfg.blockRoot + fcuArgs = &fcuConfig{} + err = s.getFCUArgs(cfg, fcuArgs) + require.NoError(t, err) + require.Equal(t, cfg.blockRoot, 
fcuArgs.headRoot) +} + func fakeCommitments(n int) [][]byte { f := make([][]byte, n) for i := range f { diff --git a/beacon-chain/blockchain/receive_attestation.go b/beacon-chain/blockchain/receive_attestation.go index c8ba5bcca47c..db5c5de9c476 100644 --- a/beacon-chain/blockchain/receive_attestation.go +++ b/beacon-chain/blockchain/receive_attestation.go @@ -150,13 +150,12 @@ func (s *Service) UpdateHead(ctx context.Context, proposingSlot primitives.Slot) headBlock: headBlock, proposingSlot: proposingSlot, } - _, tracked := s.trackedProposer(headState, proposingSlot) - if tracked { - if s.shouldOverrideFCU(newHeadRoot, proposingSlot) { - return - } + if s.inRegularSync() { fcuArgs.attributes = s.getPayloadAttribute(ctx, headState, proposingSlot, newHeadRoot[:]) } + if fcuArgs.attributes != nil && s.shouldOverrideFCU(newHeadRoot, proposingSlot) { + return + } if err := s.forkchoiceUpdateWithExecution(s.ctx, fcuArgs); err != nil { log.WithError(err).Error("could not update forkchoice") } diff --git a/beacon-chain/blockchain/receive_attestation_test.go b/beacon-chain/blockchain/receive_attestation_test.go index 7ebe332ef537..57d19a12f674 100644 --- a/beacon-chain/blockchain/receive_attestation_test.go +++ b/beacon-chain/blockchain/receive_attestation_test.go @@ -112,7 +112,7 @@ func TestService_ProcessAttestationsAndUpdateHead(t *testing.T) { postState, err := service.validateStateTransition(ctx, preState, wsb) require.NoError(t, err) require.NoError(t, service.savePostStateInfo(ctx, tRoot, wsb, postState)) - require.NoError(t, service.postBlockProcess(ctx, wsb, tRoot, postState, false)) + require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, wsb, tRoot, [32]byte{}, postState, false})) copied, err = service.cfg.StateGen.StateByRoot(ctx, tRoot) require.NoError(t, err) require.Equal(t, 2, fcs.NodeCount()) @@ -168,7 +168,7 @@ func TestService_UpdateHead_NoAtts(t *testing.T) { postState, err := service.validateStateTransition(ctx, preState, wsb) 
require.NoError(t, err) require.NoError(t, service.savePostStateInfo(ctx, tRoot, wsb, postState)) - require.NoError(t, service.postBlockProcess(ctx, wsb, tRoot, postState, false)) + require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, wsb, tRoot, [32]byte{}, postState, false})) require.Equal(t, 2, fcs.NodeCount()) require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wsb)) require.Equal(t, tRoot, service.head.root) diff --git a/beacon-chain/blockchain/receive_block.go b/beacon-chain/blockchain/receive_block.go index 7e2c494c7c07..66d2647c756a 100644 --- a/beacon-chain/blockchain/receive_block.go +++ b/beacon-chain/blockchain/receive_block.go @@ -12,6 +12,7 @@ import ( "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/helpers" coreTime "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/time" "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/transition" + "github.com/prysmaticlabs/prysm/v4/beacon-chain/das" "github.com/prysmaticlabs/prysm/v4/beacon-chain/state" "github.com/prysmaticlabs/prysm/v4/config/features" "github.com/prysmaticlabs/prysm/v4/consensus-types/blocks" @@ -33,8 +34,8 @@ var epochsSinceFinalitySaveHotStateDB = primitives.Epoch(100) // BlockReceiver interface defines the methods of chain service for receiving and processing new blocks. type BlockReceiver interface { - ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock, blockRoot [32]byte) error - ReceiveBlockBatch(ctx context.Context, blocks []blocks.ROBlock) error + ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock, blockRoot [32]byte, avs das.AvailabilityStore) error + ReceiveBlockBatch(ctx context.Context, blocks []blocks.ROBlock, avs das.AvailabilityStore) error HasBlock(ctx context.Context, root [32]byte) bool RecentBlockSlot(root [32]byte) (primitives.Slot, error) BlockBeingSynced([32]byte) bool @@ -56,7 +57,7 @@ type SlashingReceiver interface { // 1. 
Validate block, apply state transition and update checkpoints // 2. Apply fork choice to the processed block // 3. Save latest head info -func (s *Service) ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock, blockRoot [32]byte) error { +func (s *Service) ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock, blockRoot [32]byte, avs das.AvailabilityStore) error { ctx, span := trace.StartSpan(ctx, "blockChain.ReceiveBlock") defer span.End() // Return early if the block has been synced @@ -72,6 +73,10 @@ func (s *Service) ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySig if err != nil { return err } + rob, err := blocks.NewROBlockWithRoot(block, blockRoot) + if err != nil { + return err + } preState, err := s.getBlockPreState(ctx, blockCopy.Block()) if err != nil { @@ -106,20 +111,36 @@ func (s *Service) ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySig if err := eg.Wait(); err != nil { return err } - daStartTime := time.Now() - if err := s.isDataAvailable(ctx, blockRoot, blockCopy); err != nil { - return errors.Wrap(err, "could not validate blob data availability") + daStartTime := time.Now() + if avs != nil { + if err := avs.IsDataAvailable(ctx, s.CurrentSlot(), rob); err != nil { + return errors.Wrap(err, "could not validate blob data availability (AvailabilityStore.IsDataAvailable)") + } + } else { + if err := s.isDataAvailable(ctx, blockRoot, blockCopy); err != nil { + return errors.Wrap(err, "could not validate blob data availability") + } } daWaitedTime := time.Since(daStartTime) + // Defragment the state before continuing block processing. + s.defragmentState(postState) + // The rest of block processing takes a lock on forkchoice. 
s.cfg.ForkChoiceStore.Lock() defer s.cfg.ForkChoiceStore.Unlock() if err := s.savePostStateInfo(ctx, blockRoot, blockCopy, postState); err != nil { return errors.Wrap(err, "could not save post state info") } - if err := s.postBlockProcess(ctx, blockCopy, blockRoot, postState, isValidPayload); err != nil { + args := &postBlockProcessConfig{ + ctx: ctx, + signed: blockCopy, + blockRoot: blockRoot, + postState: postState, + isValidPayload: isValidPayload, + } + if err := s.postBlockProcess(args); err != nil { err := errors.Wrap(err, "could not process block") tracing.AnnotateError(span, err) return err @@ -196,7 +216,7 @@ func (s *Service) ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySig // ReceiveBlockBatch processes the whole block batch at once, assuming the block batch is linear ,transitioning // the state, performing batch verification of all collected signatures and then performing the appropriate // actions for a block post-transition. -func (s *Service) ReceiveBlockBatch(ctx context.Context, blocks []blocks.ROBlock) error { +func (s *Service) ReceiveBlockBatch(ctx context.Context, blocks []blocks.ROBlock, avs das.AvailabilityStore) error { ctx, span := trace.StartSpan(ctx, "blockChain.ReceiveBlockBatch") defer span.End() @@ -204,7 +224,7 @@ func (s *Service) ReceiveBlockBatch(ctx context.Context, blocks []blocks.ROBlock defer s.cfg.ForkChoiceStore.Unlock() // Apply state transition on the incoming newly received block batches, one by one. 
- if err := s.onBlockBatch(ctx, blocks); err != nil { + if err := s.onBlockBatch(ctx, blocks, avs); err != nil { err := errors.Wrap(err, "could not process block in batch") tracing.AnnotateError(span, err) return err diff --git a/beacon-chain/blockchain/receive_block_test.go b/beacon-chain/blockchain/receive_block_test.go index 5f135362243e..1cfccca74ac4 100644 --- a/beacon-chain/blockchain/receive_block_test.go +++ b/beacon-chain/blockchain/receive_block_test.go @@ -8,6 +8,7 @@ import ( blockchainTesting "github.com/prysmaticlabs/prysm/v4/beacon-chain/blockchain/testing" "github.com/prysmaticlabs/prysm/v4/beacon-chain/cache" + "github.com/prysmaticlabs/prysm/v4/beacon-chain/das" "github.com/prysmaticlabs/prysm/v4/beacon-chain/operations/voluntaryexits" "github.com/prysmaticlabs/prysm/v4/config/params" "github.com/prysmaticlabs/prysm/v4/consensus-types/blocks" @@ -146,7 +147,7 @@ func TestService_ReceiveBlock(t *testing.T) { require.NoError(t, err) wsb, err := blocks.NewSignedBeaconBlock(tt.args.block) require.NoError(t, err) - err = s.ReceiveBlock(ctx, wsb, root) + err = s.ReceiveBlock(ctx, wsb, root, nil) if tt.wantedErr != "" { assert.ErrorContains(t, tt.wantedErr, err) } else { @@ -179,7 +180,7 @@ func TestService_ReceiveBlockUpdateHead(t *testing.T) { go func() { wsb, err := blocks.NewSignedBeaconBlock(b) require.NoError(t, err) - require.NoError(t, s.ReceiveBlock(ctx, wsb, root)) + require.NoError(t, s.ReceiveBlock(ctx, wsb, root, nil)) wg.Done() }() wg.Wait() @@ -243,7 +244,7 @@ func TestService_ReceiveBlockBatch(t *testing.T) { require.NoError(t, err) rwsb, err := blocks.NewROBlock(wsb) require.NoError(t, err) - err = s.ReceiveBlockBatch(ctx, []blocks.ROBlock{rwsb}) + err = s.ReceiveBlockBatch(ctx, []blocks.ROBlock{rwsb}, &das.MockAvailabilityStore{}) if tt.wantedErr != "" { assert.ErrorContains(t, tt.wantedErr, err) } else { diff --git a/beacon-chain/blockchain/service.go b/beacon-chain/blockchain/service.go index 33ada7730624..1825d9009498 100644 --- 
a/beacon-chain/blockchain/service.go +++ b/beacon-chain/blockchain/service.go @@ -11,6 +11,8 @@ import ( "time" "github.com/pkg/errors" + "go.opencensus.io/trace" + "github.com/prysmaticlabs/prysm/v4/async/event" "github.com/prysmaticlabs/prysm/v4/beacon-chain/blockchain/kzg" "github.com/prysmaticlabs/prysm/v4/beacon-chain/cache" @@ -37,34 +39,35 @@ import ( "github.com/prysmaticlabs/prysm/v4/config/params" "github.com/prysmaticlabs/prysm/v4/consensus-types/blocks" "github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces" + "github.com/prysmaticlabs/prysm/v4/consensus-types/primitives" "github.com/prysmaticlabs/prysm/v4/encoding/bytesutil" ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1" prysmTime "github.com/prysmaticlabs/prysm/v4/time" "github.com/prysmaticlabs/prysm/v4/time/slots" - "go.opencensus.io/trace" ) // Service represents a service that handles the internal // logic of managing the full PoS beacon chain. type Service struct { - cfg *config - ctx context.Context - cancel context.CancelFunc - genesisTime time.Time - head *head - headLock sync.RWMutex - originBlockRoot [32]byte // genesis root, or weak subjectivity checkpoint root, depending on how the node is initialized - boundaryRoots [][32]byte - checkpointStateCache *cache.CheckpointStateCache - initSyncBlocks map[[32]byte]interfaces.ReadOnlySignedBeaconBlock - initSyncBlocksLock sync.RWMutex - wsVerifier *WeakSubjectivityVerifier - clockSetter startup.ClockSetter - clockWaiter startup.ClockWaiter - syncComplete chan struct{} - blobNotifiers *blobNotifierMap - blockBeingSynced *currentlySyncingBlock - blobStorage *filesystem.BlobStorage + cfg *config + ctx context.Context + cancel context.CancelFunc + genesisTime time.Time + head *head + headLock sync.RWMutex + originBlockRoot [32]byte // genesis root, or weak subjectivity checkpoint root, depending on how the node is initialized + boundaryRoots [][32]byte + checkpointStateCache *cache.CheckpointStateCache + initSyncBlocks 
map[[32]byte]interfaces.ReadOnlySignedBeaconBlock + initSyncBlocksLock sync.RWMutex + wsVerifier *WeakSubjectivityVerifier + clockSetter startup.ClockSetter + clockWaiter startup.ClockWaiter + syncComplete chan struct{} + blobNotifiers *blobNotifierMap + blockBeingSynced *currentlySyncingBlock + blobStorage *filesystem.BlobStorage + lastPublishedLightClientEpoch primitives.Epoch } // config options for the service. diff --git a/beacon-chain/blockchain/setup_test.go b/beacon-chain/blockchain/setup_test.go index bbdae002e827..746d6e25ff95 100644 --- a/beacon-chain/blockchain/setup_test.go +++ b/beacon-chain/blockchain/setup_test.go @@ -10,6 +10,7 @@ import ( "github.com/prysmaticlabs/prysm/v4/beacon-chain/cache/depositcache" statefeed "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed/state" "github.com/prysmaticlabs/prysm/v4/beacon-chain/db" + "github.com/prysmaticlabs/prysm/v4/beacon-chain/db/filesystem" testDB "github.com/prysmaticlabs/prysm/v4/beacon-chain/db/testing" "github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice" doublylinkedtree "github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice/doubly-linked-tree" @@ -116,6 +117,7 @@ func minimalTestService(t *testing.T, opts ...Option) (*Service, *testServiceReq WithBLSToExecPool(req.blsPool), WithDepositCache(dc), WithTrackedValidatorsCache(cache.NewTrackedValidatorsCache()), + WithBlobStorage(filesystem.NewEphemeralBlobStorage(t)), } // append the variadic opts so they override the defaults by being processed afterwards opts = append(defOpts, opts...) 
diff --git a/beacon-chain/blockchain/testing/BUILD.bazel b/beacon-chain/blockchain/testing/BUILD.bazel index a4d0345e7dae..f461160748c9 100644 --- a/beacon-chain/blockchain/testing/BUILD.bazel +++ b/beacon-chain/blockchain/testing/BUILD.bazel @@ -17,6 +17,7 @@ go_library( "//beacon-chain/core/feed/operation:go_default_library", "//beacon-chain/core/feed/state:go_default_library", "//beacon-chain/core/helpers:go_default_library", + "//beacon-chain/das:go_default_library", "//beacon-chain/db:go_default_library", "//beacon-chain/forkchoice:go_default_library", "//beacon-chain/state:go_default_library", diff --git a/beacon-chain/blockchain/testing/mock.go b/beacon-chain/blockchain/testing/mock.go index c47cf5e057a0..c9b21ce43a71 100644 --- a/beacon-chain/blockchain/testing/mock.go +++ b/beacon-chain/blockchain/testing/mock.go @@ -16,6 +16,7 @@ import ( opfeed "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed/operation" statefeed "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed/state" "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/helpers" + "github.com/prysmaticlabs/prysm/v4/beacon-chain/das" "github.com/prysmaticlabs/prysm/v4/beacon-chain/db" "github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice" "github.com/prysmaticlabs/prysm/v4/beacon-chain/state" @@ -208,7 +209,7 @@ func (s *ChainService) ReceiveBlockInitialSync(ctx context.Context, block interf } // ReceiveBlockBatch processes blocks in batches from initial-sync. -func (s *ChainService) ReceiveBlockBatch(ctx context.Context, blks []blocks.ROBlock) error { +func (s *ChainService) ReceiveBlockBatch(ctx context.Context, blks []blocks.ROBlock, _ das.AvailabilityStore) error { if s.State == nil { return ErrNilState } @@ -238,7 +239,7 @@ func (s *ChainService) ReceiveBlockBatch(ctx context.Context, blks []blocks.ROBl } // ReceiveBlock mocks ReceiveBlock method in chain service. 
-func (s *ChainService) ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock, _ [32]byte) error { +func (s *ChainService) ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock, _ [32]byte, _ das.AvailabilityStore) error { if s.ReceiveBlockMockErr != nil { return s.ReceiveBlockMockErr } @@ -320,7 +321,7 @@ func (s *ChainService) PreviousJustifiedCheckpt() *ethpb.Checkpoint { } // ReceiveAttestation mocks ReceiveAttestation method in chain service. -func (_ *ChainService) ReceiveAttestation(_ context.Context, _ *ethpb.Attestation) error { +func (*ChainService) ReceiveAttestation(_ context.Context, _ *ethpb.Attestation) error { return nil } @@ -400,12 +401,12 @@ func (s *ChainService) RecentBlockSlot([32]byte) (primitives.Slot, error) { } // HeadGenesisValidatorsRoot mocks HeadGenesisValidatorsRoot method in chain service. -func (_ *ChainService) HeadGenesisValidatorsRoot() [32]byte { +func (*ChainService) HeadGenesisValidatorsRoot() [32]byte { return [32]byte{} } // VerifyLmdFfgConsistency mocks VerifyLmdFfgConsistency and always returns nil. -func (_ *ChainService) VerifyLmdFfgConsistency(_ context.Context, a *ethpb.Attestation) error { +func (*ChainService) VerifyLmdFfgConsistency(_ context.Context, a *ethpb.Attestation) error { if !bytes.Equal(a.Data.BeaconBlockRoot, a.Data.Target.Root) { return errors.New("LMD and FFG miss matched") } @@ -413,7 +414,7 @@ func (_ *ChainService) VerifyLmdFfgConsistency(_ context.Context, a *ethpb.Attes } // ChainHeads mocks ChainHeads and always return nil. 
-func (_ *ChainService) ChainHeads() ([][32]byte, []primitives.Slot) { +func (*ChainService) ChainHeads() ([][32]byte, []primitives.Slot) { return [][32]byte{ bytesutil.ToBytes32(bytesutil.PadTo([]byte("foo"), 32)), bytesutil.ToBytes32(bytesutil.PadTo([]byte("bar"), 32)), @@ -422,7 +423,7 @@ func (_ *ChainService) ChainHeads() ([][32]byte, []primitives.Slot) { } // HeadPublicKeyToValidatorIndex mocks HeadPublicKeyToValidatorIndex and always return 0 and true. -func (_ *ChainService) HeadPublicKeyToValidatorIndex(_ [fieldparams.BLSPubkeyLength]byte) (primitives.ValidatorIndex, bool) { +func (*ChainService) HeadPublicKeyToValidatorIndex(_ [fieldparams.BLSPubkeyLength]byte) (primitives.ValidatorIndex, bool) { return 0, true } @@ -486,7 +487,7 @@ func (s *ChainService) UpdateHead(ctx context.Context, slot primitives.Slot) { } // ReceiveAttesterSlashing mocks the same method in the chain service. -func (s *ChainService) ReceiveAttesterSlashing(context.Context, *ethpb.AttesterSlashing) {} +func (*ChainService) ReceiveAttesterSlashing(context.Context, *ethpb.AttesterSlashing) {} // IsFinalized mocks the same method in the chain service. 
func (s *ChainService) IsFinalized(_ context.Context, blockRoot [32]byte) bool { @@ -599,12 +600,12 @@ func (s *ChainService) ProposerBoost() [32]byte { } // FinalizedBlockHash mocks the same method in the chain service -func (s *ChainService) FinalizedBlockHash() [32]byte { +func (*ChainService) FinalizedBlockHash() [32]byte { return [32]byte{} } // UnrealizedJustifiedPayloadBlockHash mocks the same method in the chain service -func (s *ChainService) UnrealizedJustifiedPayloadBlockHash() [32]byte { +func (*ChainService) UnrealizedJustifiedPayloadBlockHash() [32]byte { return [32]byte{} } diff --git a/beacon-chain/core/blocks/genesis.go b/beacon-chain/core/blocks/genesis.go index 24ec15155f87..0091183d4d75 100644 --- a/beacon-chain/core/blocks/genesis.go +++ b/beacon-chain/core/blocks/genesis.go @@ -105,6 +105,7 @@ func NewGenesisBlockForState(ctx context.Context, st state.BeaconState) (interfa ReceiptsRoot: make([]byte, 32), LogsBloom: make([]byte, 256), PrevRandao: make([]byte, 32), + ExtraData: make([]byte, 0), BaseFeePerGas: make([]byte, 32), BlockHash: make([]byte, 32), Transactions: make([][]byte, 0), @@ -136,6 +137,7 @@ func NewGenesisBlockForState(ctx context.Context, st state.BeaconState) (interfa ReceiptsRoot: make([]byte, 32), LogsBloom: make([]byte, 256), PrevRandao: make([]byte, 32), + ExtraData: make([]byte, 0), BaseFeePerGas: make([]byte, 32), BlockHash: make([]byte, 32), Transactions: make([][]byte, 0), @@ -168,6 +170,7 @@ func NewGenesisBlockForState(ctx context.Context, st state.BeaconState) (interfa ReceiptsRoot: make([]byte, 32), LogsBloom: make([]byte, 256), PrevRandao: make([]byte, 32), + ExtraData: make([]byte, 0), BaseFeePerGas: make([]byte, 32), BlockHash: make([]byte, 32), Transactions: make([][]byte, 0), diff --git a/beacon-chain/core/blocks/payload_test.go b/beacon-chain/core/blocks/payload_test.go index a8b0e632642e..eab6487e7fd9 100644 --- a/beacon-chain/core/blocks/payload_test.go +++ b/beacon-chain/core/blocks/payload_test.go @@ 
-853,10 +853,10 @@ func emptyPayloadHeader() (interfaces.ExecutionData, error) { ReceiptsRoot: make([]byte, fieldparams.RootLength), LogsBloom: make([]byte, fieldparams.LogsBloomLength), PrevRandao: make([]byte, fieldparams.RootLength), + ExtraData: make([]byte, 0), BaseFeePerGas: make([]byte, fieldparams.RootLength), BlockHash: make([]byte, fieldparams.RootLength), TransactionsRoot: make([]byte, fieldparams.RootLength), - ExtraData: make([]byte, 0), }) } @@ -868,11 +868,11 @@ func emptyPayloadHeaderCapella() (interfaces.ExecutionData, error) { ReceiptsRoot: make([]byte, fieldparams.RootLength), LogsBloom: make([]byte, fieldparams.LogsBloomLength), PrevRandao: make([]byte, fieldparams.RootLength), + ExtraData: make([]byte, 0), BaseFeePerGas: make([]byte, fieldparams.RootLength), BlockHash: make([]byte, fieldparams.RootLength), TransactionsRoot: make([]byte, fieldparams.RootLength), WithdrawalsRoot: make([]byte, fieldparams.RootLength), - ExtraData: make([]byte, 0), }, 0) } @@ -884,10 +884,10 @@ func emptyPayload() *enginev1.ExecutionPayload { ReceiptsRoot: make([]byte, fieldparams.RootLength), LogsBloom: make([]byte, fieldparams.LogsBloomLength), PrevRandao: make([]byte, fieldparams.RootLength), + ExtraData: make([]byte, 0), BaseFeePerGas: make([]byte, fieldparams.RootLength), BlockHash: make([]byte, fieldparams.RootLength), Transactions: make([][]byte, 0), - ExtraData: make([]byte, 0), } } @@ -899,10 +899,10 @@ func emptyPayloadCapella() *enginev1.ExecutionPayloadCapella { ReceiptsRoot: make([]byte, fieldparams.RootLength), LogsBloom: make([]byte, fieldparams.LogsBloomLength), PrevRandao: make([]byte, fieldparams.RootLength), + ExtraData: make([]byte, 0), BaseFeePerGas: make([]byte, fieldparams.RootLength), BlockHash: make([]byte, fieldparams.RootLength), Transactions: make([][]byte, 0), Withdrawals: make([]*enginev1.Withdrawal, 0), - ExtraData: make([]byte, 0), } } diff --git a/beacon-chain/core/capella/upgrade_test.go b/beacon-chain/core/capella/upgrade_test.go 
index c801268062bc..45f09434ec3c 100644 --- a/beacon-chain/core/capella/upgrade_test.go +++ b/beacon-chain/core/capella/upgrade_test.go @@ -84,6 +84,7 @@ func TestUpgradeToCapella(t *testing.T) { GasLimit: prevHeader.GasLimit(), GasUsed: prevHeader.GasUsed(), Timestamp: prevHeader.Timestamp(), + ExtraData: prevHeader.ExtraData(), BaseFeePerGas: prevHeader.BaseFeePerGas(), BlockHash: prevHeader.BlockHash(), TransactionsRoot: txRoot, diff --git a/beacon-chain/core/deneb/upgrade.go b/beacon-chain/core/deneb/upgrade.go index 269bda7bc8ad..4c8c0d6f5d71 100644 --- a/beacon-chain/core/deneb/upgrade.go +++ b/beacon-chain/core/deneb/upgrade.go @@ -57,6 +57,10 @@ func UpgradeToDeneb(state state.BeaconState) (state.BeaconState, error) { if err != nil { return nil, err } + historicalRoots, err := state.HistoricalRoots() + if err != nil { + return nil, err + } s := ðpb.BeaconStateDeneb{ GenesisTime: state.GenesisTime(), @@ -70,7 +74,7 @@ func UpgradeToDeneb(state state.BeaconState) (state.BeaconState, error) { LatestBlockHeader: state.LatestBlockHeader(), BlockRoots: state.BlockRoots(), StateRoots: state.StateRoots(), - HistoricalRoots: [][]byte{}, + HistoricalRoots: historicalRoots, Eth1Data: state.Eth1Data(), Eth1DataVotes: state.Eth1DataVotes(), Eth1DepositIndex: state.Eth1DepositIndex(), @@ -101,10 +105,10 @@ func UpgradeToDeneb(state state.BeaconState) (state.BeaconState, error) { ExtraData: payloadHeader.ExtraData(), BaseFeePerGas: payloadHeader.BaseFeePerGas(), BlockHash: payloadHeader.BlockHash(), - ExcessBlobGas: 0, - BlobGasUsed: 0, TransactionsRoot: txRoot, WithdrawalsRoot: wdRoot, + ExcessBlobGas: 0, + BlobGasUsed: 0, }, NextWithdrawalIndex: wi, NextWithdrawalValidatorIndex: vi, diff --git a/beacon-chain/core/deneb/upgrade_test.go b/beacon-chain/core/deneb/upgrade_test.go index 003b371940fe..169f1d58289f 100644 --- a/beacon-chain/core/deneb/upgrade_test.go +++ b/beacon-chain/core/deneb/upgrade_test.go @@ -14,6 +14,7 @@ import ( func TestUpgradeToDeneb(t *testing.T) 
{ st, _ := util.DeterministicGenesisStateCapella(t, params.BeaconConfig().MaxValidatorsPerCommittee) + require.NoError(t, st.SetHistoricalRoots([][]byte{{1}})) preForkState := st.Copy() mSt, err := deneb.UpgradeToDeneb(st) require.NoError(t, err) @@ -46,6 +47,12 @@ func TestUpgradeToDeneb(t *testing.T) { require.NoError(t, err) require.DeepSSZEqual(t, make([]uint64, numValidators), s) + hr1, err := preForkState.HistoricalRoots() + require.NoError(t, err) + hr2, err := mSt.HistoricalRoots() + require.NoError(t, err) + require.DeepEqual(t, hr1, hr2) + f := mSt.Fork() require.DeepSSZEqual(t, ðpb.Fork{ PreviousVersion: st.Fork().CurrentVersion, @@ -85,6 +92,7 @@ func TestUpgradeToDeneb(t *testing.T) { GasLimit: prevHeader.GasLimit(), GasUsed: prevHeader.GasUsed(), Timestamp: prevHeader.Timestamp(), + ExtraData: prevHeader.ExtraData(), BaseFeePerGas: prevHeader.BaseFeePerGas(), BlockHash: prevHeader.BlockHash(), TransactionsRoot: txRoot, diff --git a/beacon-chain/core/execution/upgrade.go b/beacon-chain/core/execution/upgrade.go index 60505750f322..fea264c0f4da 100644 --- a/beacon-chain/core/execution/upgrade.go +++ b/beacon-chain/core/execution/upgrade.go @@ -79,6 +79,7 @@ func UpgradeToBellatrix(state state.BeaconState) (state.BeaconState, error) { GasLimit: 0, GasUsed: 0, Timestamp: 0, + ExtraData: make([]byte, 0), BaseFeePerGas: make([]byte, 32), BlockHash: make([]byte, 32), TransactionsRoot: make([]byte, 32), diff --git a/beacon-chain/core/execution/upgrade_test.go b/beacon-chain/core/execution/upgrade_test.go index 2f07929ab083..a18df89a9b2a 100644 --- a/beacon-chain/core/execution/upgrade_test.go +++ b/beacon-chain/core/execution/upgrade_test.go @@ -79,6 +79,7 @@ func TestUpgradeToBellatrix(t *testing.T) { GasLimit: 0, GasUsed: 0, Timestamp: 0, + ExtraData: make([]byte, 0), BaseFeePerGas: make([]byte, 32), BlockHash: make([]byte, 32), TransactionsRoot: make([]byte, 32), diff --git a/beacon-chain/core/feed/operation/events.go 
b/beacon-chain/core/feed/operation/events.go index 2b9d84f5aad2..65adcb9e3da8 100644 --- a/beacon-chain/core/feed/operation/events.go +++ b/beacon-chain/core/feed/operation/events.go @@ -26,6 +26,12 @@ const ( // BlobSidecarReceived is sent after a blob sidecar is received from gossip or rpc. BlobSidecarReceived = 6 + + // ProposerSlashingReceived is sent after a proposer slashing is received from gossip or rpc + ProposerSlashingReceived = 7 + + // AttesterSlashingReceived is sent after an attester slashing is received from gossip or rpc + AttesterSlashingReceived = 8 ) // UnAggregatedAttReceivedData is the data sent with UnaggregatedAttReceived events. @@ -61,3 +67,13 @@ type BLSToExecutionChangeReceivedData struct { type BlobSidecarReceivedData struct { Blob *blocks.VerifiedROBlob } + +// ProposerSlashingReceivedData is the data sent with ProposerSlashingReceived events. +type ProposerSlashingReceivedData struct { + ProposerSlashing *ethpb.ProposerSlashing +} + +// AttesterSlashingReceivedData is the data sent with AttesterSlashingReceived events. +type AttesterSlashingReceivedData struct { + AttesterSlashing *ethpb.AttesterSlashing +} diff --git a/beacon-chain/core/feed/state/events.go b/beacon-chain/core/feed/state/events.go index c15923eeedb2..b9e16a14ba3e 100644 --- a/beacon-chain/core/feed/state/events.go +++ b/beacon-chain/core/feed/state/events.go @@ -27,6 +27,10 @@ const ( NewHead // MissedSlot is sent when we need to notify users that a slot was missed. MissedSlot + // LightClientFinalityUpdate event + LightClientFinalityUpdate + // LightClientOptimisticUpdate event + LightClientOptimisticUpdate ) // BlockProcessedData is the data sent with BlockProcessed events. 
diff --git a/beacon-chain/core/helpers/BUILD.bazel b/beacon-chain/core/helpers/BUILD.bazel index 0c50256c3c93..c496943c6989 100644 --- a/beacon-chain/core/helpers/BUILD.bazel +++ b/beacon-chain/core/helpers/BUILD.bazel @@ -64,6 +64,7 @@ go_test( deps = [ "//beacon-chain/cache:go_default_library", "//beacon-chain/core/time:go_default_library", + "//beacon-chain/forkchoice/types:go_default_library", "//beacon-chain/state:go_default_library", "//beacon-chain/state/state-native:go_default_library", "//config/fieldparams:go_default_library", diff --git a/beacon-chain/core/helpers/validators.go b/beacon-chain/core/helpers/validators.go index 2fea24d7160d..94019d565136 100644 --- a/beacon-chain/core/helpers/validators.go +++ b/beacon-chain/core/helpers/validators.go @@ -9,6 +9,7 @@ import ( "github.com/prometheus/client_golang/prometheus/promauto" "github.com/prysmaticlabs/prysm/v4/beacon-chain/cache" "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/time" + forkchoicetypes "github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice/types" "github.com/prysmaticlabs/prysm/v4/beacon-chain/state" "github.com/prysmaticlabs/prysm/v4/config/params" "github.com/prysmaticlabs/prysm/v4/consensus-types/primitives" @@ -271,11 +272,22 @@ func BeaconProposerIndex(ctx context.Context, state state.ReadOnlyBeaconState) ( func cachedProposerIndexAtSlot(slot primitives.Slot, root [32]byte) (primitives.ValidatorIndex, error) { proposerIndices, has := proposerIndicesCache.ProposerIndices(slots.ToEpoch(slot), root) if !has { - cache.ProposerIndicesCacheMiss.Inc() return 0, errProposerIndexMiss } if len(proposerIndices) != int(params.BeaconConfig().SlotsPerEpoch) { - cache.ProposerIndicesCacheMiss.Inc() + return 0, errProposerIndexMiss + } + return proposerIndices[slot%params.BeaconConfig().SlotsPerEpoch], nil +} + +// ProposerIndexAtSlotFromCheckpoint returns the proposer index at the given +// slot from the cache at the given checkpoint +func ProposerIndexAtSlotFromCheckpoint(c 
*forkchoicetypes.Checkpoint, slot primitives.Slot) (primitives.ValidatorIndex, error) { + proposerIndices, has := proposerIndicesCache.IndicesFromCheckpoint(*c) + if !has { + return 0, errProposerIndexMiss + } + if len(proposerIndices) != int(params.BeaconConfig().SlotsPerEpoch) { return 0, errProposerIndexMiss } return proposerIndices[slot%params.BeaconConfig().SlotsPerEpoch], nil diff --git a/beacon-chain/core/helpers/validators_test.go b/beacon-chain/core/helpers/validators_test.go index b8b6b90b01e5..0f40df2e9765 100644 --- a/beacon-chain/core/helpers/validators_test.go +++ b/beacon-chain/core/helpers/validators_test.go @@ -7,6 +7,7 @@ import ( "github.com/prysmaticlabs/prysm/v4/beacon-chain/cache" "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/time" + forkchoicetypes "github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice/types" state_native "github.com/prysmaticlabs/prysm/v4/beacon-chain/state/state-native" fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams" "github.com/prysmaticlabs/prysm/v4/config/params" @@ -802,3 +803,18 @@ func TestLastActivatedValidatorIndex_OK(t *testing.T) { require.NoError(t, err) require.Equal(t, index, primitives.ValidatorIndex(3)) } + +func TestProposerIndexFromCheckpoint(t *testing.T) { + e := primitives.Epoch(2) + r := [32]byte{'a'} + root := [32]byte{'b'} + ids := [32]primitives.ValidatorIndex{} + slot := primitives.Slot(69) // slot 5 in the Epoch + ids[5] = primitives.ValidatorIndex(19) + proposerIndicesCache.Set(e, r, ids) + c := &forkchoicetypes.Checkpoint{Root: root, Epoch: e - 1} + proposerIndicesCache.SetCheckpoint(*c, r) + id, err := ProposerIndexAtSlotFromCheckpoint(c, slot) + require.NoError(t, err) + require.Equal(t, ids[5], id) +} diff --git a/beacon-chain/core/transition/bellatrix_transition_no_verify_sig_test.go b/beacon-chain/core/transition/bellatrix_transition_no_verify_sig_test.go index ecc3529d9cba..ec9f3d5428ba 100644 --- 
a/beacon-chain/core/transition/bellatrix_transition_no_verify_sig_test.go +++ b/beacon-chain/core/transition/bellatrix_transition_no_verify_sig_test.go @@ -245,10 +245,10 @@ func createFullBellatrixBlockWithOperations(t *testing.T) (state.BeaconState, ReceiptsRoot: make([]byte, fieldparams.RootLength), LogsBloom: make([]byte, fieldparams.LogsBloomLength), PrevRandao: make([]byte, fieldparams.RootLength), + ExtraData: make([]byte, 0), BaseFeePerGas: bytesutil.PadTo([]byte{1, 2, 3, 4}, fieldparams.RootLength), BlockHash: make([]byte, fieldparams.RootLength), Transactions: make([][]byte, 0), - ExtraData: make([]byte, 0), }, }, }, @@ -284,11 +284,11 @@ func createFullCapellaBlockWithOperations(t *testing.T) (state.BeaconState, ReceiptsRoot: make([]byte, fieldparams.RootLength), LogsBloom: make([]byte, fieldparams.LogsBloomLength), PrevRandao: make([]byte, fieldparams.RootLength), + ExtraData: make([]byte, 0), BaseFeePerGas: bytesutil.PadTo([]byte{1, 2, 3, 4}, fieldparams.RootLength), BlockHash: make([]byte, fieldparams.RootLength), Transactions: make([][]byte, 0), Withdrawals: make([]*enginev1.Withdrawal, 0), - ExtraData: make([]byte, 0), }, }, }, diff --git a/beacon-chain/core/transition/state-bellatrix.go b/beacon-chain/core/transition/state-bellatrix.go index 4f4e34623721..9a4f098dc2ed 100644 --- a/beacon-chain/core/transition/state-bellatrix.go +++ b/beacon-chain/core/transition/state-bellatrix.go @@ -209,6 +209,7 @@ func OptimizedGenesisBeaconStateBellatrix(genesisTime uint64, preState state.Bea ReceiptsRoot: make([]byte, 32), LogsBloom: make([]byte, 256), PrevRandao: make([]byte, 32), + ExtraData: make([]byte, 0), BaseFeePerGas: make([]byte, 32), BlockHash: make([]byte, 32), Transactions: make([][]byte, 0), @@ -269,6 +270,7 @@ func EmptyGenesisStateBellatrix() (state.BeaconState, error) { ReceiptsRoot: make([]byte, 32), LogsBloom: make([]byte, 256), PrevRandao: make([]byte, 32), + ExtraData: make([]byte, 0), BaseFeePerGas: make([]byte, 32), BlockHash: 
make([]byte, 32), TransactionsRoot: make([]byte, 32), diff --git a/beacon-chain/das/BUILD.bazel b/beacon-chain/das/BUILD.bazel new file mode 100644 index 000000000000..8666d00439a2 --- /dev/null +++ b/beacon-chain/das/BUILD.bazel @@ -0,0 +1,49 @@ +load("@prysm//tools/go:def.bzl", "go_library", "go_test") + +go_library( + name = "go_default_library", + srcs = [ + "availability.go", + "cache.go", + "iface.go", + "mock.go", + ], + importpath = "github.com/prysmaticlabs/prysm/v4/beacon-chain/das", + visibility = ["//visibility:public"], + deps = [ + "//beacon-chain/db/filesystem:go_default_library", + "//beacon-chain/verification:go_default_library", + "//config/fieldparams:go_default_library", + "//config/params:go_default_library", + "//consensus-types/blocks:go_default_library", + "//consensus-types/primitives:go_default_library", + "//runtime/logging:go_default_library", + "//runtime/version:go_default_library", + "//time/slots:go_default_library", + "@com_github_pkg_errors//:go_default_library", + "@com_github_sirupsen_logrus//:go_default_library", + ], +) + +go_test( + name = "go_default_test", + srcs = [ + "availability_test.go", + "cache_test.go", + ], + embed = [":go_default_library"], + deps = [ + "//beacon-chain/db/filesystem:go_default_library", + "//beacon-chain/verification:go_default_library", + "//config/fieldparams:go_default_library", + "//config/params:go_default_library", + "//consensus-types/blocks:go_default_library", + "//consensus-types/primitives:go_default_library", + "//encoding/bytesutil:go_default_library", + "//proto/prysm/v1alpha1:go_default_library", + "//testing/require:go_default_library", + "//testing/util:go_default_library", + "//time/slots:go_default_library", + "@com_github_pkg_errors//:go_default_library", + ], +) diff --git a/beacon-chain/das/availability.go b/beacon-chain/das/availability.go new file mode 100644 index 000000000000..0f0e23077238 --- /dev/null +++ b/beacon-chain/das/availability.go @@ -0,0 +1,149 @@ +package das 
+ +import ( + "context" + "fmt" + + errors "github.com/pkg/errors" + "github.com/prysmaticlabs/prysm/v4/beacon-chain/db/filesystem" + "github.com/prysmaticlabs/prysm/v4/beacon-chain/verification" + "github.com/prysmaticlabs/prysm/v4/config/params" + "github.com/prysmaticlabs/prysm/v4/consensus-types/blocks" + "github.com/prysmaticlabs/prysm/v4/consensus-types/primitives" + "github.com/prysmaticlabs/prysm/v4/runtime/logging" + "github.com/prysmaticlabs/prysm/v4/runtime/version" + "github.com/prysmaticlabs/prysm/v4/time/slots" + log "github.com/sirupsen/logrus" +) + +var ( + errMixedRoots = errors.New("BlobSidecars must all be for the same block") +) + +// LazilyPersistentStore is an implementation of AvailabilityStore to be used when batch syncing. +// This implementation will hold any blobs passed to Persist until the IsDataAvailable is called for their +// block, at which time they will undergo full verification and be saved to the disk. +type LazilyPersistentStore struct { + store *filesystem.BlobStorage + cache *cache + verifier BlobBatchVerifier +} + +var _ AvailabilityStore = &LazilyPersistentStore{} + +// BlobBatchVerifier enables LazyAvailabilityStore to manage the verification process +// going from ROBlob->VerifiedROBlob, while avoiding the decision of which individual verifications +// to run and in what order. Since LazilyPersistentStore always tries to verify and save blobs only when +// they are all available, the interface takes a slice of blobs, enabling the implementation to optimize +// batch verification. +type BlobBatchVerifier interface { + VerifiedROBlobs(ctx context.Context, blk blocks.ROBlock, sc []blocks.ROBlob) ([]blocks.VerifiedROBlob, error) +} + +// NewLazilyPersistentStore creates a new LazilyPersistentStore. This constructor should always be used +// when creating a LazilyPersistentStore because it needs to initialize the cache under the hood. 
+func NewLazilyPersistentStore(store *filesystem.BlobStorage, verifier BlobBatchVerifier) *LazilyPersistentStore { + return &LazilyPersistentStore{ + store: store, + cache: newCache(), + verifier: verifier, + } +} + +// Persist adds blobs to the working blob cache. Blobs stored in this cache will be persisted +// for at least as long as the node is running. Once IsDataAvailable succeeds, all blobs referenced +// by the given block are guaranteed to be persisted for the remainder of the retention period. +func (s *LazilyPersistentStore) Persist(current primitives.Slot, sc ...blocks.ROBlob) error { + if len(sc) == 0 { + return nil + } + if len(sc) > 1 { + first := sc[0].BlockRoot() + for i := 1; i < len(sc); i++ { + if first != sc[i].BlockRoot() { + return errMixedRoots + } + } + } + if !params.WithinDAPeriod(slots.ToEpoch(sc[0].Slot()), slots.ToEpoch(current)) { + return nil + } + key := keyFromSidecar(sc[0]) + entry := s.cache.ensure(key) + for i := range sc { + if err := entry.stash(&sc[i]); err != nil { + return err + } + } + return nil +} + +// IsDataAvailable returns nil if all the commitments in the given block are persisted to the db and have been verified. +// BlobSidecars already in the db are assumed to have been previously verified against the block. +func (s *LazilyPersistentStore) IsDataAvailable(ctx context.Context, current primitives.Slot, b blocks.ROBlock) error { + blockCommitments, err := commitmentsToCheck(b, current) + if err != nil { + return errors.Wrapf(err, "could check data availability for block %#x", b.Root()) + } + // Return early for blocks that are pre-deneb or which do not have any commitments. + if blockCommitments.count() == 0 { + return nil + } + + key := keyFromBlock(b) + entry := s.cache.ensure(key) + defer s.cache.delete(key) + root := b.Root() + // Verify we have all the expected sidecars, and fail fast if any are missing or inconsistent. 
+ // We don't try to salvage problematic batches because this indicates a misbehaving peer and we'd rather + // ignore their response and decrease their peer score. + sidecars, err := entry.filter(root, blockCommitments) + if err != nil { + return errors.Wrap(err, "incomplete BlobSidecar batch") + } + // Do thorough verifications of each BlobSidecar for the block. + // Same as above, we don't save BlobSidecars if there are any problems with the batch. + vscs, err := s.verifier.VerifiedROBlobs(ctx, b, sidecars) + if err != nil { + var me verification.VerificationMultiError + ok := errors.As(err, &me) + if ok { + fails := me.Failures() + lf := make(log.Fields, len(fails)) + for i := range fails { + lf[fmt.Sprintf("fail_%d", i)] = fails[i].Error() + } + log.WithFields(lf).WithFields(logging.BlockFieldsFromBlob(sidecars[0])). + Debug("invalid BlobSidecars received") + } + return errors.Wrapf(err, "invalid BlobSidecars received for block %#x", root) + } + // Ensure that each BlobSidecar is written to disk. + for i := range vscs { + if err := s.store.Save(vscs[i]); err != nil { + return errors.Wrapf(err, "failed to save BlobSidecar index %d for block %#x", vscs[i].Index, root) + } + } + // All BlobSidecars are persisted - da check succeeds. 
+ return nil +} + +func commitmentsToCheck(b blocks.ROBlock, current primitives.Slot) (safeCommitmentArray, error) { + var ar safeCommitmentArray + if b.Version() < version.Deneb { + return ar, nil + } + // We are only required to check within MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS + if !params.WithinDAPeriod(slots.ToEpoch(b.Block().Slot()), slots.ToEpoch(current)) { + return ar, nil + } + kc, err := b.Block().Body().BlobKzgCommitments() + if err != nil { + return ar, err + } + if len(kc) > len(ar) { + return ar, errIndexOutOfBounds + } + copy(ar[:], kc) + return ar, nil +} diff --git a/beacon-chain/das/availability_test.go b/beacon-chain/das/availability_test.go new file mode 100644 index 000000000000..203816ce07ff --- /dev/null +++ b/beacon-chain/das/availability_test.go @@ -0,0 +1,214 @@ +package das + +import ( + "bytes" + "context" + "testing" + + errors "github.com/pkg/errors" + "github.com/prysmaticlabs/prysm/v4/beacon-chain/db/filesystem" + "github.com/prysmaticlabs/prysm/v4/beacon-chain/verification" + fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams" + "github.com/prysmaticlabs/prysm/v4/config/params" + "github.com/prysmaticlabs/prysm/v4/consensus-types/blocks" + "github.com/prysmaticlabs/prysm/v4/consensus-types/primitives" + "github.com/prysmaticlabs/prysm/v4/encoding/bytesutil" + ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1" + "github.com/prysmaticlabs/prysm/v4/testing/require" + "github.com/prysmaticlabs/prysm/v4/testing/util" + "github.com/prysmaticlabs/prysm/v4/time/slots" +) + +func Test_commitmentsToCheck(t *testing.T) { + windowSlots, err := slots.EpochEnd(params.BeaconConfig().MinEpochsForBlobsSidecarsRequest) + require.NoError(t, err) + commits := [][]byte{ + bytesutil.PadTo([]byte("a"), 48), + bytesutil.PadTo([]byte("b"), 48), + bytesutil.PadTo([]byte("c"), 48), + bytesutil.PadTo([]byte("d"), 48), + } + cases := []struct { + name string + commits [][]byte + block func(*testing.T) blocks.ROBlock + slot 
primitives.Slot + err error + }{ + { + name: "pre deneb", + block: func(t *testing.T) blocks.ROBlock { + bb := util.NewBeaconBlockBellatrix() + sb, err := blocks.NewSignedBeaconBlock(bb) + require.NoError(t, err) + rb, err := blocks.NewROBlock(sb) + require.NoError(t, err) + return rb + }, + }, + { + name: "commitments within da", + block: func(t *testing.T) blocks.ROBlock { + d := util.NewBeaconBlockDeneb() + d.Block.Body.BlobKzgCommitments = commits + d.Block.Slot = 100 + sb, err := blocks.NewSignedBeaconBlock(d) + require.NoError(t, err) + rb, err := blocks.NewROBlock(sb) + require.NoError(t, err) + return rb + }, + commits: commits, + slot: 100, + }, + { + name: "commitments outside da", + block: func(t *testing.T) blocks.ROBlock { + d := util.NewBeaconBlockDeneb() + // block is from slot 0, "current slot" is window size +1 (so outside the window) + d.Block.Body.BlobKzgCommitments = commits + sb, err := blocks.NewSignedBeaconBlock(d) + require.NoError(t, err) + rb, err := blocks.NewROBlock(sb) + require.NoError(t, err) + return rb + }, + slot: windowSlots + 1, + }, + { + name: "excessive commitments", + block: func(t *testing.T) blocks.ROBlock { + d := util.NewBeaconBlockDeneb() + d.Block.Slot = 100 + // block is from slot 0, "current slot" is window size +1 (so outside the window) + d.Block.Body.BlobKzgCommitments = commits + // Double the number of commitments, assert that this is over the limit + d.Block.Body.BlobKzgCommitments = append(commits, d.Block.Body.BlobKzgCommitments...) 
+ sb, err := blocks.NewSignedBeaconBlock(d) + require.NoError(t, err) + rb, err := blocks.NewROBlock(sb) + require.NoError(t, err) + c, err := rb.Block().Body().BlobKzgCommitments() + require.NoError(t, err) + require.Equal(t, true, len(c) > fieldparams.MaxBlobsPerBlock) + return rb + }, + slot: windowSlots + 1, + err: errIndexOutOfBounds, + }, + } + for _, c := range cases { + t.Run(c.name, func(t *testing.T) { + b := c.block(t) + co, err := commitmentsToCheck(b, c.slot) + if c.err != nil { + require.ErrorIs(t, err, c.err) + } else { + require.NoError(t, err) + } + require.Equal(t, len(c.commits), co.count()) + for i := 0; i < len(c.commits); i++ { + require.Equal(t, true, bytes.Equal(c.commits[i], co[i])) + } + }) + } +} + +func daAlwaysSucceeds(_ [][]byte, _ []*ethpb.BlobSidecar) error { + return nil +} + +type mockDA struct { + t *testing.T + scs []blocks.ROBlob + err error +} + +func TestLazilyPersistent_Missing(t *testing.T) { + ctx := context.Background() + store := filesystem.NewEphemeralBlobStorage(t) + + blk, scs := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, 1, 3) + + mbv := &mockBlobBatchVerifier{t: t, scs: scs} + as := NewLazilyPersistentStore(store, mbv) + + // Only one commitment persisted, should return error with other indices + require.NoError(t, as.Persist(1, scs[2])) + err := as.IsDataAvailable(ctx, 1, blk) + require.ErrorIs(t, err, errMissingSidecar) + + // All but one persisted, return missing idx + require.NoError(t, as.Persist(1, scs[0])) + err = as.IsDataAvailable(ctx, 1, blk) + require.ErrorIs(t, err, errMissingSidecar) + + // All persisted, return nil + require.NoError(t, as.Persist(1, scs...)) + + require.NoError(t, as.IsDataAvailable(ctx, 1, blk)) +} + +func TestLazilyPersistent_Mismatch(t *testing.T) { + ctx := context.Background() + store := filesystem.NewEphemeralBlobStorage(t) + + blk, scs := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, 1, 3) + + mbv := &mockBlobBatchVerifier{t: t, err: errors.New("kzg check 
should not run")} + scs[0].KzgCommitment = bytesutil.PadTo([]byte("nope"), 48) + as := NewLazilyPersistentStore(store, mbv) + + // Only one commitment persisted, should return error with other indices + require.NoError(t, as.Persist(1, scs[0])) + err := as.IsDataAvailable(ctx, 1, blk) + require.NotNil(t, err) + require.ErrorIs(t, err, errCommitmentMismatch) +} + +func TestLazyPersistOnceCommitted(t *testing.T) { + _, scs := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, 1, 6) + as := NewLazilyPersistentStore(filesystem.NewEphemeralBlobStorage(t), &mockBlobBatchVerifier{}) + // stashes as expected + require.NoError(t, as.Persist(1, scs...)) + // ignores duplicates + require.ErrorIs(t, as.Persist(1, scs...), ErrDuplicateSidecar) + + // ignores index out of bound + scs[0].Index = 6 + require.ErrorIs(t, as.Persist(1, scs[0]), errIndexOutOfBounds) + + _, more := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, 1, 4) + // ignores sidecars before the retention period + slotOOB, err := slots.EpochStart(params.BeaconConfig().MinEpochsForBlobsSidecarsRequest) + require.NoError(t, err) + require.NoError(t, as.Persist(32+slotOOB, more[0])) + + // doesn't ignore new sidecars with a different block root + require.NoError(t, as.Persist(1, more...)) +} + +type mockBlobBatchVerifier struct { + t *testing.T + scs []blocks.ROBlob + err error + verified map[[32]byte]primitives.Slot +} + +var _ BlobBatchVerifier = &mockBlobBatchVerifier{} + +func (m *mockBlobBatchVerifier) VerifiedROBlobs(_ context.Context, _ blocks.ROBlock, scs []blocks.ROBlob) ([]blocks.VerifiedROBlob, error) { + require.Equal(m.t, len(scs), len(m.scs)) + for i := range m.scs { + require.Equal(m.t, m.scs[i], scs[i]) + } + vscs := verification.FakeVerifySliceForTest(m.t, scs) + return vscs, m.err +} + +func (m *mockBlobBatchVerifier) MarkVerified(root [32]byte, slot primitives.Slot) { + if m.verified == nil { + m.verified = make(map[[32]byte]primitives.Slot) + } + m.verified[root] = slot +} diff --git 
a/beacon-chain/das/cache.go b/beacon-chain/das/cache.go new file mode 100644 index 000000000000..9953f2b5c924 --- /dev/null +++ b/beacon-chain/das/cache.go @@ -0,0 +1,117 @@ +package das + +import ( + "bytes" + + "github.com/pkg/errors" + fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams" + "github.com/prysmaticlabs/prysm/v4/consensus-types/blocks" + "github.com/prysmaticlabs/prysm/v4/consensus-types/primitives" +) + +var ( + ErrDuplicateSidecar = errors.New("duplicate sidecar stashed in AvailabilityStore") + errIndexOutOfBounds = errors.New("sidecar.index > MAX_BLOBS_PER_BLOCK") + errCommitmentMismatch = errors.New("KzgCommitment of sidecar in cache did not match block commitment") + errMissingSidecar = errors.New("no sidecar in cache for block commitment") +) + +// cacheKey includes the slot so that we can easily iterate through the cache and compare +// slots for eviction purposes. Whether the input is the block or the sidecar, we always have +// the root+slot when interacting with the cache, so it isn't an inconvenience to use both. +type cacheKey struct { + slot primitives.Slot + root [32]byte +} + +type cache struct { + entries map[cacheKey]*cacheEntry +} + +func newCache() *cache { + return &cache{entries: make(map[cacheKey]*cacheEntry)} +} + +// keyFromSidecar is a convenience method for constructing a cacheKey from a BlobSidecar value. +func keyFromSidecar(sc blocks.ROBlob) cacheKey { + return cacheKey{slot: sc.Slot(), root: sc.BlockRoot()} +} + +// keyFromBlock is a convenience method for constructing a cacheKey from a ROBlock value. +func keyFromBlock(b blocks.ROBlock) cacheKey { + return cacheKey{slot: b.Block().Slot(), root: b.Root()} +} + +// ensure returns the entry for the given key, creating it if it isn't already present. +func (c *cache) ensure(key cacheKey) *cacheEntry { + e, ok := c.entries[key] + if !ok { + e = &cacheEntry{} + c.entries[key] = e + } + return e +} + +// delete removes the cache entry from the cache. 
+func (c *cache) delete(key cacheKey) { + delete(c.entries, key) +} + +// cacheEntry holds a fixed-length cache of BlobSidecars. +type cacheEntry struct { + scs [fieldparams.MaxBlobsPerBlock]*blocks.ROBlob +} + +// stash adds an item to the in-memory cache of BlobSidecars. +// Only the first BlobSidecar of a given Index will be kept in the cache. +// stash will return an error if the given blob is already in the cache, or if the Index is out of bounds. +func (e *cacheEntry) stash(sc *blocks.ROBlob) error { + if sc.Index >= fieldparams.MaxBlobsPerBlock { + return errors.Wrapf(errIndexOutOfBounds, "index=%d", sc.Index) + } + if e.scs[sc.Index] != nil { + return errors.Wrapf(ErrDuplicateSidecar, "root=%#x, index=%d, commitment=%#x", sc.BlockRoot(), sc.Index, sc.KzgCommitment) + } + e.scs[sc.Index] = sc + return nil +} + +// filter evicts sidecars that are not committed to by the block and returns custom +// errors if the cache is missing any of the commitments, or if the commitments in +// the cache do not match those found in the block. If err is nil, then all expected +// commitments were found in the cache and the sidecar slice return value can be used +// to perform a DA check against the cached sidecars. 
+func (e *cacheEntry) filter(root [32]byte, kc safeCommitmentArray) ([]blocks.ROBlob, error) { + scs := make([]blocks.ROBlob, kc.count()) + for i := uint64(0); i < fieldparams.MaxBlobsPerBlock; i++ { + if kc[i] == nil { + if e.scs[i] != nil { + return nil, errors.Wrapf(errCommitmentMismatch, "root=%#x, index=%#x, commitment=%#x, no block commitment", root, i, e.scs[i].KzgCommitment) + } + continue + } + + if e.scs[i] == nil { + return nil, errors.Wrapf(errMissingSidecar, "root=%#x, index=%#x", root, i) + } + if !bytes.Equal(kc[i], e.scs[i].KzgCommitment) { + return nil, errors.Wrapf(errCommitmentMismatch, "root=%#x, index=%#x, commitment=%#x, block commitment=%#x", root, i, e.scs[i].KzgCommitment, kc[i]) + } + scs[i] = *e.scs[i] + } + + return scs, nil +} + +// safeCommitmentArray is a fixed size array of commitment byte slices. This is helpful for avoiding +// gratuitous bounds checks. +type safeCommitmentArray [fieldparams.MaxBlobsPerBlock][]byte + +func (s safeCommitmentArray) count() int { + for i := range s { + if s[i] == nil { + return i + } + } + return fieldparams.MaxBlobsPerBlock +} diff --git a/beacon-chain/das/cache_test.go b/beacon-chain/das/cache_test.go new file mode 100644 index 000000000000..afec2ad7c19c --- /dev/null +++ b/beacon-chain/das/cache_test.go @@ -0,0 +1,25 @@ +package das + +import ( + "testing" + + "github.com/prysmaticlabs/prysm/v4/consensus-types/primitives" + "github.com/prysmaticlabs/prysm/v4/encoding/bytesutil" + "github.com/prysmaticlabs/prysm/v4/testing/require" +) + +func TestCacheEnsureDelete(t *testing.T) { + c := newCache() + require.Equal(t, 0, len(c.entries)) + root := bytesutil.ToBytes32([]byte("root")) + slot := primitives.Slot(1234) + k := cacheKey{root: root, slot: slot} + entry := c.ensure(k) + require.Equal(t, 1, len(c.entries)) + require.Equal(t, c.entries[k], entry) + + c.delete(k) + require.Equal(t, 0, len(c.entries)) + var nilEntry *cacheEntry + require.Equal(t, nilEntry, c.entries[k]) +} diff --git 
a/beacon-chain/das/iface.go b/beacon-chain/das/iface.go new file mode 100644 index 000000000000..1e15f4778a02 --- /dev/null +++ b/beacon-chain/das/iface.go @@ -0,0 +1,19 @@ +package das + +import ( + "context" + + "github.com/prysmaticlabs/prysm/v4/consensus-types/blocks" + "github.com/prysmaticlabs/prysm/v4/consensus-types/primitives" +) + +// AvailabilityStore describes a component that can verify and save sidecars for a given block, and confirm previously +// verified and saved sidecars. +// Persist guarantees that the sidecar will be available to perform a DA check +// for the life of the beacon node process. +// IsDataAvailable guarantees that all blobs committed to in the block have been +// durably persisted before returning a non-error value. +type AvailabilityStore interface { + IsDataAvailable(ctx context.Context, current primitives.Slot, b blocks.ROBlock) error + Persist(current primitives.Slot, sc ...blocks.ROBlob) error +} diff --git a/beacon-chain/das/mock.go b/beacon-chain/das/mock.go new file mode 100644 index 000000000000..899af9d1cf7f --- /dev/null +++ b/beacon-chain/das/mock.go @@ -0,0 +1,32 @@ +package das + +import ( + "context" + + "github.com/prysmaticlabs/prysm/v4/consensus-types/blocks" + "github.com/prysmaticlabs/prysm/v4/consensus-types/primitives" +) + +// MockAvailabilityStore is an implementation of AvailabilityStore that can be used by other packages in tests. +type MockAvailabilityStore struct { + VerifyAvailabilityCallback func(ctx context.Context, current primitives.Slot, b blocks.ROBlock) error + PersistBlobsCallback func(current primitives.Slot, sc ...blocks.ROBlob) error +} + +var _ AvailabilityStore = &MockAvailabilityStore{} + +// IsDataAvailable satisfies the corresponding method of the AvailabilityStore interface in a way that is useful for tests. 
+func (m *MockAvailabilityStore) IsDataAvailable(ctx context.Context, current primitives.Slot, b blocks.ROBlock) error { + if m.VerifyAvailabilityCallback != nil { + return m.VerifyAvailabilityCallback(ctx, current, b) + } + return nil +} + +// Persist satisfies the corresponding method of the AvailabilityStore interface in a way that is useful for tests. +func (m *MockAvailabilityStore) Persist(current primitives.Slot, sc ...blocks.ROBlob) error { + if m.PersistBlobsCallback != nil { + return m.PersistBlobsCallback(current, sc...) + } + return nil +} diff --git a/beacon-chain/db/filesystem/BUILD.bazel b/beacon-chain/db/filesystem/BUILD.bazel index 2d186a74fd5e..fe1b4fdac51a 100644 --- a/beacon-chain/db/filesystem/BUILD.bazel +++ b/beacon-chain/db/filesystem/BUILD.bazel @@ -6,6 +6,7 @@ go_library( "blob.go", "ephemeral.go", "metrics.go", + "pruner.go", ], importpath = "github.com/prysmaticlabs/prysm/v4/beacon-chain/db/filesystem", visibility = ["//visibility:public"], @@ -19,7 +20,6 @@ go_library( "//proto/prysm/v1alpha1:go_default_library", "//runtime/logging:go_default_library", "//time/slots:go_default_library", - "@com_github_ethereum_go_ethereum//common/hexutil:go_default_library", "@com_github_pkg_errors//:go_default_library", "@com_github_prometheus_client_golang//prometheus:go_default_library", "@com_github_prometheus_client_golang//prometheus/promauto:go_default_library", @@ -30,7 +30,10 @@ go_library( go_test( name = "go_default_test", - srcs = ["blob_test.go"], + srcs = [ + "blob_test.go", + "pruner_test.go", + ], embed = [":go_default_library"], deps = [ "//beacon-chain/verification:go_default_library", @@ -40,7 +43,6 @@ go_test( "//proto/prysm/v1alpha1:go_default_library", "//testing/require:go_default_library", "//testing/util:go_default_library", - "@com_github_ethereum_go_ethereum//common/hexutil:go_default_library", "@com_github_pkg_errors//:go_default_library", "@com_github_prysmaticlabs_fastssz//:go_default_library", 
"@com_github_spf13_afero//:go_default_library", diff --git a/beacon-chain/db/filesystem/blob.go b/beacon-chain/db/filesystem/blob.go index eaab7fcf4f44..adc07f329dbc 100644 --- a/beacon-chain/db/filesystem/blob.go +++ b/beacon-chain/db/filesystem/blob.go @@ -1,28 +1,21 @@ package filesystem import ( - "encoding/binary" "fmt" - "io" "os" "path" - "path/filepath" "strconv" "strings" - "sync/atomic" "time" - "github.com/ethereum/go-ethereum/common/hexutil" "github.com/pkg/errors" "github.com/prysmaticlabs/prysm/v4/beacon-chain/verification" fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams" - "github.com/prysmaticlabs/prysm/v4/config/params" "github.com/prysmaticlabs/prysm/v4/consensus-types/blocks" "github.com/prysmaticlabs/prysm/v4/consensus-types/primitives" "github.com/prysmaticlabs/prysm/v4/io/file" ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1" "github.com/prysmaticlabs/prysm/v4/runtime/logging" - "github.com/prysmaticlabs/prysm/v4/time/slots" log "github.com/sirupsen/logrus" "github.com/spf13/afero" ) @@ -35,7 +28,6 @@ const ( sszExt = "ssz" partExt = "part" - bufferEpochs = 2 directoryPermissions = 0700 ) @@ -45,11 +37,11 @@ type BlobStorageOption func(*BlobStorage) error // WithBlobRetentionEpochs is an option that changes the number of epochs blobs will be persisted. 
func WithBlobRetentionEpochs(e primitives.Epoch) BlobStorageOption { return func(b *BlobStorage) error { - s, err := slots.EpochStart(e + bufferEpochs) + pruner, err := newBlobPruner(b.fs, e) if err != nil { - return errors.Wrap(err, "could not set retentionSlots") + return err } - b.retentionSlots = s + b.pruner = pruner return nil } } @@ -71,14 +63,29 @@ func NewBlobStorage(base string, opts ...BlobStorageOption) (*BlobStorage, error return nil, fmt.Errorf("failed to create blob storage at %s: %w", base, err) } } + if b.pruner == nil { + log.Warn("Initializing blob filesystem storage with pruning disabled") + } return b, nil } // BlobStorage is the concrete implementation of the filesystem backend for saving and retrieving BlobSidecars. type BlobStorage struct { - fs afero.Fs - retentionSlots primitives.Slot - prunedBefore atomic.Uint64 + fs afero.Fs + pruner *blobPruner +} + +// WarmCache runs the prune routine with an expiration of slot of 0, so nothing will be pruned, but the pruner's cache +// will be populated at node startup, avoiding a costly cold prune (~4s in syscalls) during syncing. +func (bs *BlobStorage) WarmCache() { + if bs.pruner == nil { + return + } + go func() { + if err := bs.pruner.prune(0); err != nil { + log.WithError(err).Error("Error encountered while warming up blob pruner cache.") + } + }() } // Save saves blobs given a list of sidecars. @@ -94,7 +101,9 @@ func (bs *BlobStorage) Save(sidecar blocks.VerifiedROBlob) error { log.WithFields(logging.BlobFields(sidecar.ROBlob)).Debug("ignoring a duplicate blob sidecar Save attempt") return nil } - bs.tryPrune(sidecar.Slot()) + if bs.pruner != nil { + bs.pruner.notify(sidecar.BlockRoot(), sidecar.Slot()) + } // Serialize the ethpb.BlobSidecar to binary data using SSZ. sidecarData, err := sidecar.MarshalSSZ() @@ -106,8 +115,12 @@ func (bs *BlobStorage) Save(sidecar blocks.VerifiedROBlob) error { } partPath := fname.partPath() + partialMoved := false // Ensure the partial file is deleted. 
defer func() { + if partialMoved { + return + } // It's expected to error if the save is successful. err = bs.fs.Remove(partPath) if err == nil { @@ -141,8 +154,9 @@ func (bs *BlobStorage) Save(sidecar blocks.VerifiedROBlob) error { if err != nil { return errors.Wrap(err, "failed to rename partial file to final name") } - blobsTotalGauge.Inc() - blobSaveLatency.Observe(time.Since(startTime).Seconds()) + partialMoved = true + blobsWrittenCounter.Inc() + blobSaveLatency.Observe(float64(time.Since(startTime).Milliseconds())) return nil } @@ -166,11 +180,17 @@ func (bs *BlobStorage) Get(root [32]byte, idx uint64) (blocks.VerifiedROBlob, er return blocks.VerifiedROBlob{}, err } defer func() { - blobFetchLatency.Observe(time.Since(startTime).Seconds()) + blobFetchLatency.Observe(float64(time.Since(startTime).Milliseconds())) }() return verification.BlobSidecarNoop(ro) } +// Remove removes all blobs for a given root. +func (bs *BlobStorage) Remove(root [32]byte) error { + rootDir := blobNamer{root: root}.dir() + return bs.fs.RemoveAll(rootDir) +} + // Indices generates a bitmap representing which BlobSidecar.Index values are present on disk for a given root. // This value can be compared to the commitments observed in a block to determine which indices need to be found // on the network to confirm data availability. @@ -218,7 +238,7 @@ func namerForSidecar(sc blocks.VerifiedROBlob) blobNamer { } func (p blobNamer) dir() string { - return fmt.Sprintf("%#x", p.root) + return rootString(p.root) } func (p blobNamer) fname(ext string) string { @@ -233,118 +253,6 @@ func (p blobNamer) path() string { return p.fname(sszExt) } -// Prune prunes blobs in the base directory based on the retention epoch. -// It deletes blobs older than currentEpoch - (retentionEpochs+bufferEpochs). -// This is so that we keep a slight buffer and blobs are deleted after n+2 epochs. 
-func (bs *BlobStorage) Prune(pruneBefore primitives.Slot) error { - t := time.Now() - - var dirs []string - err := afero.Walk(bs.fs, ".", func(path string, info os.FileInfo, err error) error { - if err != nil { - return errors.Wrap(err, "failed to walk blob storage directory") - } - if info.IsDir() && path != "." { - dirs = append(dirs, path) - } - return nil - }) - if err != nil { - return errors.Wrap(err, "failed to build directories list") - } - - var totalPruned int - for _, dir := range dirs { - num, err := bs.processFolder(dir, pruneBefore) - if err != nil { - return errors.Wrapf(err, "failed to process folder %s", dir) - } - blobsPrunedCounter.Add(float64(num)) - blobsTotalGauge.Add(-float64(num)) - totalPruned += num - } - - if totalPruned > 0 { - pruneTime := time.Since(t) - log.WithFields(log.Fields{ - "lastPrunedEpoch": slots.ToEpoch(pruneBefore), - "pruneTime": pruneTime, - "numberBlobsPruned": totalPruned, - }).Debug("Pruned old blobs") - } - - return nil -} - -// processFolder will delete the folder of blobs if the blob slot is outside the -// retention period. We determine the slot by looking at the first blob in the folder. 
-func (bs *BlobStorage) processFolder(folder string, pruneBefore primitives.Slot) (int, error) { - f, err := bs.fs.Open(filepath.Join(folder, "0."+sszExt)) - if err != nil { - return 0, err - } - defer func() { - if err := f.Close(); err != nil { - log.WithError(err).Errorf("Could not close blob file") - } - }() - - slot, err := slotFromBlob(f) - if err != nil { - return 0, err - } - var num int - if slot < pruneBefore { - num, err = bs.countFiles(folder) - if err != nil { - return 0, err - } - if err = bs.fs.RemoveAll(folder); err != nil { - return 0, errors.Wrapf(err, "failed to delete blob %s", f.Name()) - } - } - return num, nil -} - -// slotFromBlob reads the ssz data of a file at the specified offset (8 + 131072 + 48 + 48 = 131176 bytes), -// which is calculated based on the size of the BlobSidecar struct and is based on the size of the fields -// preceding the slot information within SignedBeaconBlockHeader. -func slotFromBlob(at io.ReaderAt) (primitives.Slot, error) { - b := make([]byte, 8) - _, err := at.ReadAt(b, 131176) - if err != nil { - return 0, err - } - rawSlot := binary.LittleEndian.Uint64(b) - return primitives.Slot(rawSlot), nil -} - -// Delete removes the directory matching the provided block root and all the blobs it contains. 
-func (bs *BlobStorage) Delete(root [32]byte) error { - if err := bs.fs.RemoveAll(hexutil.Encode(root[:])); err != nil { - return fmt.Errorf("failed to delete blobs for root %#x: %w", root, err) - } - return nil -} - -// tryPrune checks whether we should prune and then calls prune -func (bs *BlobStorage) tryPrune(latest primitives.Slot) { - pruned := uint64(pruneBefore(latest, bs.retentionSlots)) - if bs.prunedBefore.Swap(pruned) == pruned { - return - } - go func() { - if err := bs.Prune(primitives.Slot(pruned)); err != nil { - log.WithError(err).Errorf("failed to prune blobs from slot %d", latest) - } - }() -} - -func pruneBefore(latest primitives.Slot, offset primitives.Slot) primitives.Slot { - // Safely compute the first slot in the epoch for the latest slot - latest = latest - latest%params.BeaconConfig().SlotsPerEpoch - if latest < offset { - return 0 - } - return latest - offset +func rootString(root [32]byte) string { + return fmt.Sprintf("%#x", root) } diff --git a/beacon-chain/db/filesystem/blob_test.go b/beacon-chain/db/filesystem/blob_test.go index 6a7362df40cf..74300de7588a 100644 --- a/beacon-chain/db/filesystem/blob_test.go +++ b/beacon-chain/db/filesystem/blob_test.go @@ -7,7 +7,6 @@ import ( "testing" "time" - "github.com/ethereum/go-ethereum/common/hexutil" "github.com/pkg/errors" ssz "github.com/prysmaticlabs/fastssz" "github.com/prysmaticlabs/prysm/v4/beacon-chain/verification" @@ -74,47 +73,20 @@ func TestBlobStorage_SaveBlobData(t *testing.T) { require.NoError(t, err) require.DeepSSZEqual(t, expected, actual) }) - t.Run("check pruning", func(t *testing.T) { - fs, bs, err := NewEphemeralBlobStorageWithFs(t) - require.NoError(t, err) - // Slot in first half of epoch therefore should not prune - bs.tryPrune(testSidecars[0].Slot()) - err = bs.Save(testSidecars[0]) - require.NoError(t, err) - actual, err := bs.Get(testSidecars[0].BlockRoot(), testSidecars[0].Index) - require.NoError(t, err) - require.DeepSSZEqual(t, testSidecars[0], actual) - err 
= pollUntil(t, fs, 1) + t.Run("round trip write, read and delete", func(t *testing.T) { + bs := NewEphemeralBlobStorage(t) + err := bs.Save(testSidecars[0]) require.NoError(t, err) - _, sidecars = util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, 33, fieldparams.MaxBlobsPerBlock) - testSidecars1, err := verification.BlobSidecarSliceNoop(sidecars) - require.NoError(t, err) - // Slot in first half of epoch therefore should not prune - bs.tryPrune(testSidecars1[0].Slot()) - err = bs.Save(testSidecars1[0]) - require.NoError(t, err) - // Check previous saved sidecar was not pruned - actual, err = bs.Get(testSidecars[0].BlockRoot(), testSidecars[0].Index) - require.NoError(t, err) - require.DeepSSZEqual(t, testSidecars[0], actual) - // Check latest sidecar exists - actual, err = bs.Get(testSidecars1[0].BlockRoot(), testSidecars1[0].Index) - require.NoError(t, err) - require.DeepSSZEqual(t, testSidecars1[0], actual) - err = pollUntil(t, fs, 2) // Check correct number of files + expected := testSidecars[0] + actual, err := bs.Get(expected.BlockRoot(), expected.Index) require.NoError(t, err) + require.DeepSSZEqual(t, expected, actual) - _, sidecars = util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, 131187, fieldparams.MaxBlobsPerBlock) - testSidecars2, err := verification.BlobSidecarSliceNoop(sidecars) - // Slot in second half of epoch therefore should prune - bs.tryPrune(testSidecars2[0].Slot()) - require.NoError(t, err) - err = bs.Save(testSidecars2[0]) - require.NoError(t, err) - err = pollUntil(t, fs, 3) - require.NoError(t, err) + require.NoError(t, bs.Remove(expected.BlockRoot())) + _, err = bs.Get(expected.BlockRoot(), expected.Index) + require.ErrorContains(t, "file does not exist", err) }) } @@ -188,7 +160,22 @@ func TestBlobStoragePrune(t *testing.T) { require.NoError(t, bs.Save(sidecar)) } - require.NoError(t, bs.Prune(currentSlot-bs.retentionSlots)) + require.NoError(t, bs.pruner.prune(currentSlot-bs.pruner.windowSize)) + + remainingFolders, err := 
afero.ReadDir(fs, ".") + require.NoError(t, err) + require.Equal(t, 0, len(remainingFolders)) + }) + t.Run("Prune dangling blob", func(t *testing.T) { + _, sidecars := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, 299, fieldparams.MaxBlobsPerBlock) + testSidecars, err := verification.BlobSidecarSliceNoop(sidecars) + require.NoError(t, err) + + for _, sidecar := range testSidecars[4:] { + require.NoError(t, bs.Save(sidecar)) + } + + require.NoError(t, bs.pruner.prune(currentSlot-bs.pruner.windowSize)) remainingFolders, err := afero.ReadDir(fs, ".") require.NoError(t, err) @@ -196,7 +183,7 @@ func TestBlobStoragePrune(t *testing.T) { }) t.Run("PruneMany", func(t *testing.T) { blockQty := 10 - slot := primitives.Slot(0) + slot := primitives.Slot(1) for j := 0; j <= blockQty; j++ { root := bytesutil.ToBytes32(bytesutil.ToBytes(uint64(slot), 32)) @@ -208,7 +195,7 @@ func TestBlobStoragePrune(t *testing.T) { slot += 10000 } - require.NoError(t, bs.Prune(currentSlot-bs.retentionSlots)) + require.NoError(t, bs.pruner.prune(currentSlot-bs.pruner.windowSize)) remainingFolders, err := afero.ReadDir(fs, ".") require.NoError(t, err) @@ -237,41 +224,11 @@ func BenchmarkPruning(b *testing.B) { b.ReportAllocs() b.ResetTimer() for i := 0; i < b.N; i++ { - err := bs.Prune(currentSlot) + err := bs.pruner.prune(currentSlot) require.NoError(b, err) } } -func TestBlobStorageDelete(t *testing.T) { - fs, bs, err := NewEphemeralBlobStorageWithFs(t) - require.NoError(t, err) - rawRoot := "0xcf9bb70c98f58092c9d6459227c9765f984d240be9690e85179bc5a6f60366ad" - blockRoot, err := hexutil.Decode(rawRoot) - require.NoError(t, err) - - _, sidecars := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, 1, fieldparams.MaxBlobsPerBlock) - testSidecars, err := verification.BlobSidecarSliceNoop(sidecars) - require.NoError(t, err) - for _, sidecar := range testSidecars { - require.NoError(t, bs.Save(sidecar)) - } - - exists, err := afero.DirExists(fs, hexutil.Encode(blockRoot)) - 
require.NoError(t, err) - require.Equal(t, true, exists) - - // Delete the directory corresponding to the block root - require.NoError(t, bs.Delete(bytesutil.ToBytes32(blockRoot))) - - // Ensure that the directory no longer exists after deletion - exists, err = afero.DirExists(fs, hexutil.Encode(blockRoot)) - require.NoError(t, err) - require.Equal(t, false, exists) - - // Deleting a non-existent root does not return an error. - require.NoError(t, bs.Delete(bytesutil.ToBytes32([]byte{0x1}))) -} - func TestNewBlobStorage(t *testing.T) { _, err := NewBlobStorage(path.Join(t.TempDir(), "good")) require.NoError(t, err) diff --git a/beacon-chain/db/filesystem/ephemeral.go b/beacon-chain/db/filesystem/ephemeral.go index 5518651a8192..98b83c2ac9e1 100644 --- a/beacon-chain/db/filesystem/ephemeral.go +++ b/beacon-chain/db/filesystem/ephemeral.go @@ -4,26 +4,30 @@ import ( "testing" "github.com/prysmaticlabs/prysm/v4/config/params" - "github.com/prysmaticlabs/prysm/v4/time/slots" "github.com/spf13/afero" ) // NewEphemeralBlobStorage should only be used for tests. // The instance of BlobStorage returned is backed by an in-memory virtual filesystem, // improving test performance and simplifying cleanup. -func NewEphemeralBlobStorage(_ testing.TB) *BlobStorage { - return &BlobStorage{fs: afero.NewMemMapFs()} +func NewEphemeralBlobStorage(t testing.TB) *BlobStorage { + fs := afero.NewMemMapFs() + pruner, err := newBlobPruner(fs, params.BeaconConfig().MinEpochsForBlobsSidecarsRequest) + if err != nil { + t.Fatal("test setup issue", err) + } + return &BlobStorage{fs: fs, pruner: pruner} } // NewEphemeralBlobStorageWithFs can be used by tests that want access to the virtual filesystem // in order to interact with it outside the parameters of the BlobStorage api. 
-func NewEphemeralBlobStorageWithFs(_ testing.TB) (afero.Fs, *BlobStorage, error) { +func NewEphemeralBlobStorageWithFs(t testing.TB) (afero.Fs, *BlobStorage, error) { fs := afero.NewMemMapFs() - s, err := slots.EpochStart(params.BeaconConfig().MinEpochsForBlobsSidecarsRequest) + pruner, err := newBlobPruner(fs, params.BeaconConfig().MinEpochsForBlobsSidecarsRequest) if err != nil { - return fs, &BlobStorage{}, err + t.Fatal("test setup issue", err) } - return fs, &BlobStorage{fs: fs, retentionSlots: s}, nil + return fs, &BlobStorage{fs: fs, pruner: pruner}, nil } type BlobMocker struct { diff --git a/beacon-chain/db/filesystem/metrics.go b/beacon-chain/db/filesystem/metrics.go index 088be6445f1a..1e8791c879bd 100644 --- a/beacon-chain/db/filesystem/metrics.go +++ b/beacon-chain/db/filesystem/metrics.go @@ -1,66 +1,28 @@ package filesystem import ( - "fmt" - "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promauto" - "github.com/spf13/afero" ) var ( - blobBuckets = []float64{0.00003, 0.00005, 0.00007, 0.00009, 0.00011, 0.00013, 0.00015} + blobBuckets = []float64{3, 5, 7, 9, 11, 13} blobSaveLatency = promauto.NewHistogram(prometheus.HistogramOpts{ Name: "blob_storage_save_latency", - Help: "Latency of blob storage save operations in seconds", + Help: "Latency of BlobSidecar storage save operations in milliseconds", Buckets: blobBuckets, }) blobFetchLatency = promauto.NewHistogram(prometheus.HistogramOpts{ Name: "blob_storage_get_latency", - Help: "Latency of blob storage get operations in seconds", + Help: "Latency of BlobSidecar storage get operations in milliseconds", Buckets: blobBuckets, }) blobsPrunedCounter = promauto.NewCounter(prometheus.CounterOpts{ - Name: "blob_pruned_blobs_total", - Help: "Total number of pruned blobs.", + Name: "blob_pruned", + Help: "Number of BlobSidecar files pruned.", }) - blobsTotalGauge = promauto.NewGauge(prometheus.GaugeOpts{ - Name: "blobs_on_disk_total", - Help: "Total number of 
blobs in filesystem.", + blobsWrittenCounter = promauto.NewCounter(prometheus.CounterOpts{ + Name: "blobs_written", + Help: "Number of BlobSidecar files written.", }) ) - -func (bs *BlobStorage) Initialize() error { - if err := bs.collectTotalBlobMetric(); err != nil { - return fmt.Errorf("failed to initialize blob metrics: %w", err) - } - return nil -} - -// CollectTotalBlobMetric set the number of blobs currently present in the filesystem -// to the blobsTotalGauge metric. -func (bs *BlobStorage) collectTotalBlobMetric() error { - totalBlobs := 0 - folders, err := afero.ReadDir(bs.fs, ".") - if err != nil { - return err - } - for _, folder := range folders { - num, err := bs.countFiles(folder.Name()) - if err != nil { - return err - } - totalBlobs = totalBlobs + num - } - blobsTotalGauge.Set(float64(totalBlobs)) - return nil -} - -// countFiles returns the length of blob files for a given directory. -func (bs *BlobStorage) countFiles(folderName string) (int, error) { - files, err := afero.ReadDir(bs.fs, folderName) - if err != nil { - return 0, err - } - return len(files), nil -} diff --git a/beacon-chain/db/filesystem/pruner.go b/beacon-chain/db/filesystem/pruner.go new file mode 100644 index 000000000000..a4a26742fadb --- /dev/null +++ b/beacon-chain/db/filesystem/pruner.go @@ -0,0 +1,274 @@ +package filesystem + +import ( + "encoding/binary" + "io" + "path" + "path/filepath" + "strings" + "sync" + "sync/atomic" + "time" + + "github.com/pkg/errors" + fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams" + "github.com/prysmaticlabs/prysm/v4/config/params" + "github.com/prysmaticlabs/prysm/v4/consensus-types/primitives" + "github.com/prysmaticlabs/prysm/v4/time/slots" + log "github.com/sirupsen/logrus" + "github.com/spf13/afero" +) + +const retentionBuffer primitives.Epoch = 2 + +var ( + errPruningFailures = errors.New("blobs could not be pruned for some roots") +) + +type blobPruner struct { + sync.Mutex + prunedBefore atomic.Uint64 + windowSize 
primitives.Slot + slotMap *slotForRoot + fs afero.Fs +} + +func newBlobPruner(fs afero.Fs, retain primitives.Epoch) (*blobPruner, error) { + r, err := slots.EpochStart(retain + retentionBuffer) + if err != nil { + return nil, errors.Wrap(err, "could not set retentionSlots") + } + return &blobPruner{fs: fs, windowSize: r, slotMap: newSlotForRoot()}, nil +} + +// notify updates the pruner's view of root->blob mappings. This allows the pruner to build a cache +// of root->slot mappings and decide when to evict old blobs based on the age of present blobs. +func (p *blobPruner) notify(root [32]byte, latest primitives.Slot) { + p.slotMap.ensure(rootString(root), latest) + pruned := uint64(windowMin(latest, p.windowSize)) + if p.prunedBefore.Swap(pruned) == pruned { + return + } + go func() { + if err := p.prune(primitives.Slot(pruned)); err != nil { + log.WithError(err).Errorf("Failed to prune blobs from slot %d", latest) + } + }() +} + +func windowMin(latest primitives.Slot, offset primitives.Slot) primitives.Slot { + // Safely compute the first slot in the epoch for the latest slot + latest = latest - latest%params.BeaconConfig().SlotsPerEpoch + if latest < offset { + return 0 + } + return latest - offset +} + +// Prune prunes blobs in the base directory based on the retention epoch. +// It deletes blobs older than currentEpoch - (retentionEpochs+bufferEpochs). +// This is so that we keep a slight buffer and blobs are deleted after n+2 epochs. +func (p *blobPruner) prune(pruneBefore primitives.Slot) error { + p.Lock() + defer p.Unlock() + start := time.Now() + totalPruned, totalErr := 0, 0 + // Customize logging/metrics behavior for the initial cache warmup when slot=0. + // We'll never see a prune request for slot 0, unless this is the initial call to warm up the cache. 
+ if pruneBefore == 0 { + defer func() { + log.WithField("duration", time.Since(start).String()).Debug("Warmed up pruner cache") + }() + } else { + defer func() { + log.WithFields(log.Fields{ + "upToEpoch": slots.ToEpoch(pruneBefore), + "duration": time.Since(start).String(), + "filesRemoved": totalPruned, + }).Debug("Pruned old blobs") + blobsPrunedCounter.Add(float64(totalPruned)) + }() + } + + entries, err := listDir(p.fs, ".") + if err != nil { + return errors.Wrap(err, "unable to list root blobs directory") + } + dirs := filter(entries, filterRoot) + for _, dir := range dirs { + pruned, err := p.tryPruneDir(dir, pruneBefore) + if err != nil { + totalErr += 1 + log.WithError(err).WithField("directory", dir).Error("Unable to prune directory") + } + totalPruned += pruned + } + + if totalErr > 0 { + return errors.Wrapf(errPruningFailures, "pruning failed for %d root directories", totalErr) + } + return nil +} + +func shouldRetain(slot, pruneBefore primitives.Slot) bool { + return slot >= pruneBefore +} + +func (p *blobPruner) tryPruneDir(dir string, pruneBefore primitives.Slot) (int, error) { + root := rootFromDir(dir) + slot, slotCached := p.slotMap.slot(root) + // Return early if the slot is cached and doesn't need pruning. + if slotCached && shouldRetain(slot, pruneBefore) { + return 0, nil + } + + // entries will include things that aren't ssz files, like dangling .part files. We need these to + // completely clean up the directory. + entries, err := listDir(p.fs, dir) + if err != nil { + return 0, errors.Wrapf(err, "failed to list blobs in directory %s", dir) + } + // scFiles filters the dir listing down to the ssz encoded BlobSidecar files. This allows us to peek + // at the first one in the list to figure out the slot. 
+ scFiles := filter(entries, filterSsz) + if len(scFiles) == 0 { + log.WithField("dir", dir).Warn("Pruner ignoring directory with no blob files") + return 0, nil + } + if !slotCached { + slot, err = slotFromFile(path.Join(dir, scFiles[0]), p.fs) + if err != nil { + return 0, errors.Wrapf(err, "slot could not be read from blob file %s", scFiles[0]) + } + p.slotMap.ensure(root, slot) + if shouldRetain(slot, pruneBefore) { + return 0, nil + } + } + + removed := 0 + for _, fname := range entries { + fullName := path.Join(dir, fname) + if err := p.fs.Remove(fullName); err != nil { + return removed, errors.Wrapf(err, "unable to remove %s", fullName) + } + // Don't count other files that happen to be in the dir, like dangling .part files. + if filterSsz(fname) { + removed += 1 + } + // Log a warning whenever we clean up a .part file + if filterPart(fullName) { + log.WithField("file", fullName).Warn("Deleting abandoned blob .part file") + } + } + if err := p.fs.Remove(dir); err != nil { + return removed, errors.Wrapf(err, "unable to remove blob directory %s", dir) + } + + p.slotMap.evict(rootFromDir(dir)) + return len(scFiles), nil +} + +func rootFromDir(dir string) string { + return filepath.Base(dir) // end of the path should be the blob directory, named by hex encoding of root +} + +// Read slot from marshaled BlobSidecar data in the given file. See slotFromBlob for details. +func slotFromFile(file string, fs afero.Fs) (primitives.Slot, error) { + f, err := fs.Open(file) + if err != nil { + return 0, err + } + defer func() { + if err := f.Close(); err != nil { + log.WithError(err).Errorf("Could not close blob file") + } + }() + return slotFromBlob(f) +} + +// slotFromBlob reads the ssz data of a file at the specified offset (8 + 131072 + 48 + 48 = 131176 bytes), +// which is calculated based on the size of the BlobSidecar struct and is based on the size of the fields +// preceding the slot information within SignedBeaconBlockHeader. 
+func slotFromBlob(at io.ReaderAt) (primitives.Slot, error) { + b := make([]byte, 8) + _, err := at.ReadAt(b, 131176) + if err != nil { + return 0, err + } + rawSlot := binary.LittleEndian.Uint64(b) + return primitives.Slot(rawSlot), nil +} + +func listDir(fs afero.Fs, dir string) ([]string, error) { + top, err := fs.Open(dir) + if err != nil { + return nil, errors.Wrap(err, "failed to open directory descriptor") + } + defer func() { + if err := top.Close(); err != nil { + log.WithError(err).Errorf("Could not close file %s", dir) + } + }() + // re the -1 param: "If n <= 0, Readdirnames returns all the names from the directory in a single slice" + dirs, err := top.Readdirnames(-1) + if err != nil { + return nil, errors.Wrap(err, "failed to read directory listing") + } + return dirs, nil +} + +func filter(entries []string, filt func(string) bool) []string { + filtered := make([]string, 0, len(entries)) + for i := range entries { + if filt(entries[i]) { + filtered = append(filtered, entries[i]) + } + } + return filtered +} + +func filterRoot(s string) bool { + return strings.HasPrefix(s, "0x") +} + +var dotSszExt = "." + sszExt +var dotPartExt = "." 
+ partExt + +func filterSsz(s string) bool { + return filepath.Ext(s) == dotSszExt +} + +func filterPart(s string) bool { + return filepath.Ext(s) == dotPartExt +} + +func newSlotForRoot() *slotForRoot { + return &slotForRoot{ + cache: make(map[string]primitives.Slot, params.BeaconConfig().MinEpochsForBlobsSidecarsRequest*fieldparams.SlotsPerEpoch), + } +} + +type slotForRoot struct { + sync.RWMutex + cache map[string]primitives.Slot +} + +func (s *slotForRoot) ensure(key string, slot primitives.Slot) { + s.Lock() + defer s.Unlock() + s.cache[key] = slot +} + +func (s *slotForRoot) slot(key string) (primitives.Slot, bool) { + s.RLock() + defer s.RUnlock() + slot, ok := s.cache[key] + return slot, ok +} + +func (s *slotForRoot) evict(key string) { + s.Lock() + defer s.Unlock() + delete(s.cache, key) +} diff --git a/beacon-chain/db/filesystem/pruner_test.go b/beacon-chain/db/filesystem/pruner_test.go new file mode 100644 index 000000000000..e67068414771 --- /dev/null +++ b/beacon-chain/db/filesystem/pruner_test.go @@ -0,0 +1,327 @@ +package filesystem + +import ( + "bytes" + "fmt" + "math" + "os" + "path" + "sort" + "testing" + + "github.com/prysmaticlabs/prysm/v4/beacon-chain/verification" + fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams" + "github.com/prysmaticlabs/prysm/v4/consensus-types/primitives" + "github.com/prysmaticlabs/prysm/v4/testing/require" + "github.com/prysmaticlabs/prysm/v4/testing/util" + "github.com/spf13/afero" +) + +func TestTryPruneDir_CachedNotExpired(t *testing.T) { + fs := afero.NewMemMapFs() + pr, err := newBlobPruner(fs, 0) + require.NoError(t, err) + slot := pr.windowSize + _, sidecars := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, slot, fieldparams.MaxBlobsPerBlock) + sc, err := verification.BlobSidecarNoop(sidecars[0]) + require.NoError(t, err) + root := fmt.Sprintf("%#x", sc.BlockRoot()) + // This slot is right on the edge of what would need to be pruned, so by adding it to the cache and + // skipping any 
other test setup, we can be certain the hot cache path never touches the filesystem. + pr.slotMap.ensure(root, sc.Slot()) + pruned, err := pr.tryPruneDir(root, pr.windowSize) + require.NoError(t, err) + require.Equal(t, 0, pruned) +} + +func TestTryPruneDir_CachedExpired(t *testing.T) { + t.Run("empty directory", func(t *testing.T) { + fs := afero.NewMemMapFs() + pr, err := newBlobPruner(fs, 0) + require.NoError(t, err) + var slot primitives.Slot = 0 + _, sidecars := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, slot, 1) + sc, err := verification.BlobSidecarNoop(sidecars[0]) + require.NoError(t, err) + root := fmt.Sprintf("%#x", sc.BlockRoot()) + require.NoError(t, fs.Mkdir(root, directoryPermissions)) // make empty directory + pr.slotMap.ensure(root, sc.Slot()) + pruned, err := pr.tryPruneDir(root, slot+1) + require.NoError(t, err) + require.Equal(t, 0, pruned) + }) + t.Run("blobs to delete", func(t *testing.T) { + fs, bs, err := NewEphemeralBlobStorageWithFs(t) + require.NoError(t, err) + var slot primitives.Slot = 0 + _, sidecars := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, slot, 2) + scs, err := verification.BlobSidecarSliceNoop(sidecars) + require.NoError(t, err) + + require.NoError(t, bs.Save(scs[0])) + require.NoError(t, bs.Save(scs[1])) + + // check that the root->slot is cached + root := fmt.Sprintf("%#x", scs[0].BlockRoot()) + cs, cok := bs.pruner.slotMap.slot(root) + require.Equal(t, true, cok) + require.Equal(t, slot, cs) + + // ensure that we see the saved files in the filesystem + files, err := listDir(fs, root) + require.NoError(t, err) + require.Equal(t, 2, len(files)) + + pruned, err := bs.pruner.tryPruneDir(root, slot+1) + require.NoError(t, err) + require.Equal(t, 2, pruned) + files, err = listDir(fs, root) + require.ErrorIs(t, err, os.ErrNotExist) + require.Equal(t, 0, len(files)) + }) +} + +func TestTryPruneDir_SlotFromFile(t *testing.T) { + t.Run("expired blobs deleted", func(t *testing.T) { + fs, bs, err := 
NewEphemeralBlobStorageWithFs(t) + require.NoError(t, err) + var slot primitives.Slot = 0 + _, sidecars := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, slot, 2) + scs, err := verification.BlobSidecarSliceNoop(sidecars) + require.NoError(t, err) + + require.NoError(t, bs.Save(scs[0])) + require.NoError(t, bs.Save(scs[1])) + + // check that the root->slot is cached + root := fmt.Sprintf("%#x", scs[0].BlockRoot()) + cs, ok := bs.pruner.slotMap.slot(root) + require.Equal(t, true, ok) + require.Equal(t, slot, cs) + // evict it from the cache so that we trigger the file read path + bs.pruner.slotMap.evict(root) + _, ok = bs.pruner.slotMap.slot(root) + require.Equal(t, false, ok) + + // ensure that we see the saved files in the filesystem + files, err := listDir(fs, root) + require.NoError(t, err) + require.Equal(t, 2, len(files)) + + pruned, err := bs.pruner.tryPruneDir(root, slot+1) + require.NoError(t, err) + require.Equal(t, 2, pruned) + files, err = listDir(fs, root) + require.ErrorIs(t, err, os.ErrNotExist) + require.Equal(t, 0, len(files)) + }) + t.Run("not expired, intact", func(t *testing.T) { + fs, bs, err := NewEphemeralBlobStorageWithFs(t) + require.NoError(t, err) + // Set slot equal to the window size, so it should be retained. + var slot primitives.Slot = bs.pruner.windowSize + _, sidecars := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, slot, 2) + scs, err := verification.BlobSidecarSliceNoop(sidecars) + require.NoError(t, err) + + require.NoError(t, bs.Save(scs[0])) + require.NoError(t, bs.Save(scs[1])) + + // Evict slot mapping from the cache so that we trigger the file read path. + root := fmt.Sprintf("%#x", scs[0].BlockRoot()) + bs.pruner.slotMap.evict(root) + _, ok := bs.pruner.slotMap.slot(root) + require.Equal(t, false, ok) + + // Ensure that we see the saved files in the filesystem. 
+ files, err := listDir(fs, root) + require.NoError(t, err) + require.Equal(t, 2, len(files)) + + // This should use the slotFromFile code (simulating restart). + // Setting pruneBefore == slot, so that the slot will be outside the window (at the boundary). + pruned, err := bs.pruner.tryPruneDir(root, slot) + require.NoError(t, err) + require.Equal(t, 0, pruned) + + // Ensure files are still present. + files, err = listDir(fs, root) + require.NoError(t, err) + require.Equal(t, 2, len(files)) + }) +} + +func TestSlotFromBlob(t *testing.T) { + cases := []struct { + slot primitives.Slot + }{ + {slot: 0}, + {slot: 2}, + {slot: 1123581321}, + {slot: math.MaxUint64}, + } + for _, c := range cases { + t.Run(fmt.Sprintf("slot %d", c.slot), func(t *testing.T) { + _, sidecars := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, c.slot, 1) + sc := sidecars[0] + enc, err := sc.MarshalSSZ() + require.NoError(t, err) + slot, err := slotFromBlob(bytes.NewReader(enc)) + require.NoError(t, err) + require.Equal(t, c.slot, slot) + }) + } +} + +func TestSlotFromFile(t *testing.T) { + cases := []struct { + slot primitives.Slot + }{ + {slot: 0}, + {slot: 2}, + {slot: 1123581321}, + {slot: math.MaxUint64}, + } + for _, c := range cases { + t.Run(fmt.Sprintf("slot %d", c.slot), func(t *testing.T) { + fs, bs, err := NewEphemeralBlobStorageWithFs(t) + require.NoError(t, err) + _, sidecars := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, c.slot, 1) + sc, err := verification.BlobSidecarNoop(sidecars[0]) + require.NoError(t, err) + require.NoError(t, bs.Save(sc)) + fname := namerForSidecar(sc) + sszPath := fname.path() + slot, err := slotFromFile(sszPath, fs) + require.NoError(t, err) + require.Equal(t, c.slot, slot) + }) + } +} + +type dirFiles struct { + name string + isDir bool + children []dirFiles +} + +func (df dirFiles) reify(t *testing.T, fs afero.Fs, base string) { + fullPath := path.Join(base, df.name) + if df.isDir { + if df.name != "" { + require.NoError(t, 
fs.Mkdir(fullPath, directoryPermissions)) + } + for _, c := range df.children { + c.reify(t, fs, fullPath) + } + } else { + fp, err := fs.Create(fullPath) + require.NoError(t, err) + _, err = fp.WriteString("derp") + require.NoError(t, err) + } +} + +func (df dirFiles) childNames() []string { + cn := make([]string, len(df.children)) + for i := range df.children { + cn[i] = df.children[i].name + } + return cn +} + +func TestListDir(t *testing.T) { + fs := afero.NewMemMapFs() + + // parent directory + fsLayout := dirFiles{isDir: true} + // break out each subdir for easier assertions + notABlob := dirFiles{name: "notABlob", isDir: true} + childlessBlob := dirFiles{name: "0x0987654321", isDir: true} + blobWithSsz := dirFiles{name: "0x1123581321", isDir: true, + children: []dirFiles{{name: "1.ssz"}, {name: "2.ssz"}}, + } + blobWithSszAndTmp := dirFiles{name: "0x1234567890", isDir: true, + children: []dirFiles{{name: "5.ssz"}, {name: "0.part"}}} + fsLayout.children = append(fsLayout.children, notABlob) + fsLayout.children = append(fsLayout.children, childlessBlob) + fsLayout.children = append(fsLayout.children, blobWithSsz) + fsLayout.children = append(fsLayout.children, blobWithSszAndTmp) + + topChildren := make([]string, len(fsLayout.children)) + for i := range fsLayout.children { + topChildren[i] = fsLayout.children[i].name + } + + fsLayout.reify(t, fs, "") + cases := []struct { + name string + dirPath string + expected []string + filter func(string) bool + err error + }{ + { + name: "non-existent", + dirPath: "derp", + expected: []string{}, + err: os.ErrNotExist, + }, + { + name: "empty", + dirPath: childlessBlob.name, + expected: []string{}, + }, + { + name: "top", + dirPath: ".", + expected: topChildren, + }, + { + name: "custom filter: only notABlob", + dirPath: ".", + expected: []string{notABlob.name}, + filter: func(s string) bool { + if s == notABlob.name { + return true + } + return false + }, + }, + { + name: "root filter", + dirPath: ".", + expected: 
[]string{childlessBlob.name, blobWithSsz.name, blobWithSszAndTmp.name}, + filter: filterRoot, + }, + { + name: "ssz filter", + dirPath: blobWithSsz.name, + expected: blobWithSsz.childNames(), + filter: filterSsz, + }, + { + name: "ssz mixed filter", + dirPath: blobWithSszAndTmp.name, + expected: []string{"5.ssz"}, + filter: filterSsz, + }, + } + for _, c := range cases { + t.Run(c.name, func(t *testing.T) { + result, err := listDir(fs, c.dirPath) + if c.filter != nil { + result = filter(result, c.filter) + } + if c.err != nil { + require.ErrorIs(t, err, c.err) + require.Equal(t, 0, len(result)) + } else { + require.NoError(t, err) + sort.Strings(c.expected) + sort.Strings(result) + require.DeepEqual(t, c.expected, result) + } + }) + } +} diff --git a/beacon-chain/db/iface/interface.go b/beacon-chain/db/iface/interface.go index e80f34d2b5a9..50934100456a 100644 --- a/beacon-chain/db/iface/interface.go +++ b/beacon-chain/db/iface/interface.go @@ -55,9 +55,6 @@ type ReadOnlyDatabase interface { FeeRecipientByValidatorID(ctx context.Context, id primitives.ValidatorIndex) (common.Address, error) RegistrationByValidatorID(ctx context.Context, id primitives.ValidatorIndex) (*ethpb.ValidatorRegistrationV1, error) - // Blob operations. - BlobSidecarsByRoot(ctx context.Context, beaconBlockRoot [32]byte, indices ...uint64) ([]*ethpb.DeprecatedBlobSidecar, error) - BlobSidecarsBySlot(ctx context.Context, slot primitives.Slot, indices ...uint64) ([]*ethpb.DeprecatedBlobSidecar, error) // origin checkpoint sync support OriginCheckpointBlockRoot(ctx context.Context) ([32]byte, error) BackfillBlockRoot(ctx context.Context) ([32]byte, error) @@ -93,9 +90,6 @@ type NoHeadAccessDatabase interface { SaveFeeRecipientsByValidatorIDs(ctx context.Context, ids []primitives.ValidatorIndex, addrs []common.Address) error SaveRegistrationsByValidatorIDs(ctx context.Context, ids []primitives.ValidatorIndex, regs []*ethpb.ValidatorRegistrationV1) error - // Blob operations. 
- DeleteBlobSidecars(ctx context.Context, beaconBlockRoot [32]byte) error - CleanUpDirtyStates(ctx context.Context, slotsPerArchivedPoint primitives.Slot) error } diff --git a/beacon-chain/db/kv/BUILD.bazel b/beacon-chain/db/kv/BUILD.bazel index 48c9d50b825c..896ad73c0ee3 100644 --- a/beacon-chain/db/kv/BUILD.bazel +++ b/beacon-chain/db/kv/BUILD.bazel @@ -5,7 +5,6 @@ go_library( srcs = [ "archived_point.go", "backup.go", - "blob.go", "blocks.go", "checkpoint.go", "deposit_contract.go", @@ -39,7 +38,6 @@ go_library( "//beacon-chain/state/genesis:go_default_library", "//beacon-chain/state/state-native:go_default_library", "//config/features:go_default_library", - "//config/fieldparams:go_default_library", "//config/params:go_default_library", "//consensus-types/blocks:go_default_library", "//consensus-types/interfaces:go_default_library", @@ -76,7 +74,6 @@ go_test( srcs = [ "archived_point_test.go", "backup_test.go", - "blob_test.go", "blocks_test.go", "checkpoint_test.go", "deposit_contract_test.go", @@ -114,7 +111,6 @@ go_test( "//proto/prysm/v1alpha1:go_default_library", "//proto/testing:go_default_library", "//testing/assert:go_default_library", - "//testing/assertions:go_default_library", "//testing/require:go_default_library", "//testing/util:go_default_library", "@com_github_ethereum_go_ethereum//common:go_default_library", diff --git a/beacon-chain/db/kv/blob.go b/beacon-chain/db/kv/blob.go deleted file mode 100644 index 322df4f3274a..000000000000 --- a/beacon-chain/db/kv/blob.go +++ /dev/null @@ -1,320 +0,0 @@ -package kv - -import ( - "bytes" - "context" - "sort" - - "github.com/pkg/errors" - fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams" - "github.com/prysmaticlabs/prysm/v4/config/params" - types "github.com/prysmaticlabs/prysm/v4/consensus-types/primitives" - "github.com/prysmaticlabs/prysm/v4/encoding/bytesutil" - ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1" - bolt "go.etcd.io/bbolt" - "go.opencensus.io/trace" -) - 
-var ( - errBlobSlotMismatch = errors.New("sidecar slot mismatch") - errBlobParentMismatch = errors.New("sidecar parent root mismatch") - errBlobRootMismatch = errors.New("sidecar root mismatch") - errBlobProposerMismatch = errors.New("sidecar proposer index mismatch") - errBlobSidecarLimit = errors.New("sidecar exceeds maximum number of blobs") - errEmptySidecar = errors.New("nil or empty blob sidecars") - errNewerBlobExists = errors.New("Will not overwrite newer blobs in db") -) - -// A blob rotating key is represented as bytes(slot_to_rotating_buffer(blob.slot)) ++ bytes(blob.slot) ++ blob.block_root -type blobRotatingKey []byte - -// BufferPrefix returns the first 8 bytes of the rotating key. -// This represents bytes(slot_to_rotating_buffer(blob.slot)) in the rotating key. -func (rk blobRotatingKey) BufferPrefix() []byte { - return rk[0:8] -} - -// Slot returns the information from the key. -func (rk blobRotatingKey) Slot() types.Slot { - slotBytes := rk[8:16] - return bytesutil.BytesToSlotBigEndian(slotBytes) -} - -// BlockRoot returns the block root information from the key. -func (rk blobRotatingKey) BlockRoot() []byte { - return rk[16:] -} - -// SaveBlobSidecar saves the blobs for a given epoch in the sidecar bucket. When we receive a blob: -// -// 1. Convert slot using a modulo operator to [0, maxSlots] where maxSlots = MAX_EPOCHS_TO_PERSIST_BLOBS*SLOTS_PER_EPOCH -// -// 2. Compute key for blob as bytes(slot_to_rotating_buffer(blob.slot)) ++ bytes(blob.slot) ++ blob.block_root -// -// 3. Begin the save algorithm: If the incoming blob has a slot bigger than the saved slot at the spot -// in the rotating keys buffer, we overwrite all elements for that slot. Otherwise, we merge the blob with an existing one. -// Trying to replace a newer blob with an older one is an error. 
-func (s *Store) SaveBlobSidecar(ctx context.Context, scs []*ethpb.DeprecatedBlobSidecar) error { - if len(scs) == 0 { - return errEmptySidecar - } - ctx, span := trace.StartSpan(ctx, "BeaconDB.SaveBlobSidecar") - defer span.End() - - first := scs[0] - newKey := s.blobSidecarKey(first) - prefix := newKey.BufferPrefix() - var prune []blobRotatingKey - return s.db.Update(func(tx *bolt.Tx) error { - var existing []byte - sc := ðpb.DeprecatedBlobSidecars{} - bkt := tx.Bucket(blobsBucket) - c := bkt.Cursor() - for k, v := c.Seek(prefix); bytes.HasPrefix(k, prefix); k, v = c.Next() { - key := blobRotatingKey(k) - ks := key.Slot() - if ks < first.Slot { - // Mark older blobs at the same position of the ring buffer for deletion. - prune = append(prune, key) - continue - } - if ks > first.Slot { - // We shouldn't be overwriting newer blobs with older blobs. Something is wrong. - return errNewerBlobExists - } - // The slot isn't older or newer, so it must be equal. - // If the roots match, then we want to merge the new sidecars with the existing data. - if bytes.Equal(first.BlockRoot, key.BlockRoot()) { - existing = v - if err := decode(ctx, v, sc); err != nil { - return err - } - } - // If the slot is equal but the roots don't match, leave the existing key alone and allow the sidecar - // to be written to the new key with the same prefix. In this case sc will be empty, so it will just - // contain the incoming sidecars when we write it. - } - sc.Sidecars = append(sc.Sidecars, scs...) - sortSidecars(sc.Sidecars) - var err error - sc.Sidecars, err = validUniqueSidecars(sc.Sidecars) - if err != nil { - return err - } - encoded, err := encode(ctx, sc) - if err != nil { - return err - } - // don't write if the merged result is the same as before - if len(existing) == len(encoded) && bytes.Equal(existing, encoded) { - return nil - } - // Only prune if we're actually going through with the update. 
- for _, k := range prune { - if err := bkt.Delete(k); err != nil { - // note: attempting to delete a key that does not exist should not return an error. - log.WithError(err).Warnf("Could not delete blob key %#x.", k) - } - } - return bkt.Put(newKey, encoded) - }) -} - -// validUniqueSidecars ensures that all sidecars have the same slot, parent root, block root, and proposer index, and -// there are no more than MAX_BLOBS_PER_BLOCK sidecars. -func validUniqueSidecars(scs []*ethpb.DeprecatedBlobSidecar) ([]*ethpb.DeprecatedBlobSidecar, error) { - if len(scs) == 0 { - return nil, errEmptySidecar - } - - // If there's only 1 sidecar, we've got nothing to compare. - if len(scs) == 1 { - return scs, nil - } - - prev := scs[0] - didx := 1 - for i := 1; i < len(scs); i++ { - sc := scs[i] - if sc.Slot != prev.Slot { - return nil, errors.Wrapf(errBlobSlotMismatch, "%d != %d", sc.Slot, prev.Slot) - } - if !bytes.Equal(sc.BlockParentRoot, prev.BlockParentRoot) { - return nil, errors.Wrapf(errBlobParentMismatch, "%x != %x", sc.BlockParentRoot, prev.BlockParentRoot) - } - if !bytes.Equal(sc.BlockRoot, prev.BlockRoot) { - return nil, errors.Wrapf(errBlobRootMismatch, "%x != %x", sc.BlockRoot, prev.BlockRoot) - } - if sc.ProposerIndex != prev.ProposerIndex { - return nil, errors.Wrapf(errBlobProposerMismatch, "%d != %d", sc.ProposerIndex, prev.ProposerIndex) - } - // skip duplicate - if sc.Index == prev.Index { - continue - } - if didx != i { - scs[didx] = scs[i] - } - prev = scs[i] - didx += 1 - } - - if didx > fieldparams.MaxBlobsPerBlock { - return nil, errors.Wrapf(errBlobSidecarLimit, "%d > %d", didx, fieldparams.MaxBlobsPerBlock) - } - return scs[0:didx], nil -} - -// sortSidecars sorts the sidecars by their index. -func sortSidecars(scs []*ethpb.DeprecatedBlobSidecar) { - sort.Slice(scs, func(i, j int) bool { - return scs[i].Index < scs[j].Index - }) -} - -// BlobSidecarsByRoot retrieves the blobs for the given beacon block root. 
-// If the `indices` argument is omitted, all blobs for the root will be returned. -// Otherwise, the result will be filtered to only include the specified indices. -// An error will result if an invalid index is specified. -// The bucket size is bounded by 131072 entries. That's the most blobs a node will keep before rotating it out. -func (s *Store) BlobSidecarsByRoot(ctx context.Context, root [32]byte, indices ...uint64) ([]*ethpb.DeprecatedBlobSidecar, error) { - ctx, span := trace.StartSpan(ctx, "BeaconDB.BlobSidecarsByRoot") - defer span.End() - - var enc []byte - if err := s.db.View(func(tx *bolt.Tx) error { - c := tx.Bucket(blobsBucket).Cursor() - // Bucket size is bounded and bolt cursors are fast. Moreover, a thin caching layer can be added. - for k, v := c.First(); k != nil; k, v = c.Next() { - if bytes.HasSuffix(k, root[:]) { - enc = v - break - } - } - return nil - }); err != nil { - return nil, err - } - if enc == nil { - return nil, ErrNotFound - } - sc := ðpb.DeprecatedBlobSidecars{} - if err := decode(ctx, enc, sc); err != nil { - return nil, err - } - - return filterForIndices(sc, indices...) -} - -func filterForIndices(sc *ethpb.DeprecatedBlobSidecars, indices ...uint64) ([]*ethpb.DeprecatedBlobSidecar, error) { - if len(indices) == 0 { - return sc.Sidecars, nil - } - // This loop assumes that the BlobSidecars value stores the complete set of blobs for a block - // in ascending order from eg 0..3, without gaps. This allows us to assume the indices argument - // maps 1:1 with indices in the BlobSidecars storage object. - maxIdx := uint64(len(sc.Sidecars)) - 1 - sidecars := make([]*ethpb.DeprecatedBlobSidecar, len(indices)) - for i, idx := range indices { - if idx > maxIdx { - return nil, errors.Wrapf(ErrNotFound, "BlobSidecars missing index: index %d", idx) - } - sidecars[i] = sc.Sidecars[idx] - } - return sidecars, nil -} - -// BlobSidecarsBySlot retrieves BlobSidecars for the given slot. 
-// If the `indices` argument is omitted, all blobs for the slot will be returned. -// Otherwise, the result will be filtered to only include the specified indices. -// An error will result if an invalid index is specified. -// The bucket size is bounded by 131072 entries. That's the most blobs a node will keep before rotating it out. -func (s *Store) BlobSidecarsBySlot(ctx context.Context, slot types.Slot, indices ...uint64) ([]*ethpb.DeprecatedBlobSidecar, error) { - ctx, span := trace.StartSpan(ctx, "BeaconDB.BlobSidecarsBySlot") - defer span.End() - - var enc []byte - sk := s.slotKey(slot) - if err := s.db.View(func(tx *bolt.Tx) error { - c := tx.Bucket(blobsBucket).Cursor() - // Bucket size is bounded and bolt cursors are fast. Moreover, a thin caching layer can be added. - for k, v := c.Seek(sk); bytes.HasPrefix(k, sk); k, _ = c.Next() { - slotInKey := bytesutil.BytesToSlotBigEndian(k[8:16]) - if slotInKey == slot { - enc = v - break - } - } - return nil - }); err != nil { - return nil, err - } - if enc == nil { - return nil, ErrNotFound - } - sc := ðpb.DeprecatedBlobSidecars{} - if err := decode(ctx, enc, sc); err != nil { - return nil, err - } - - return filterForIndices(sc, indices...) -} - -// DeleteBlobSidecars returns true if the blobs are in the db. -func (s *Store) DeleteBlobSidecars(ctx context.Context, beaconBlockRoot [32]byte) error { - _, span := trace.StartSpan(ctx, "BeaconDB.DeleteBlobSidecar") - defer span.End() - return s.db.Update(func(tx *bolt.Tx) error { - bkt := tx.Bucket(blobsBucket) - c := bkt.Cursor() - for k, _ := c.First(); k != nil; k, _ = c.Next() { - if bytes.HasSuffix(k, beaconBlockRoot[:]) { - if err := bkt.Delete(k); err != nil { - return err - } - } - } - return nil - }) -} - -// We define a blob sidecar key as: bytes(slot_to_rotating_buffer(blob.slot)) ++ bytes(blob.slot) ++ blob.block_root -// where slot_to_rotating_buffer(slot) = slot % MAX_SLOTS_TO_PERSIST_BLOBS. 
-func (s *Store) blobSidecarKey(blob *ethpb.DeprecatedBlobSidecar) blobRotatingKey { - key := s.slotKey(blob.Slot) - key = append(key, bytesutil.SlotToBytesBigEndian(blob.Slot)...) - key = append(key, blob.BlockRoot...) - return key -} - -func (s *Store) slotKey(slot types.Slot) []byte { - return bytesutil.SlotToBytesBigEndian(slot.ModSlot(s.blobRetentionSlots())) -} - -func (s *Store) blobRetentionSlots() types.Slot { - return types.Slot(s.blobRetentionEpochs.Mul(uint64(params.BeaconConfig().SlotsPerEpoch))) -} - -var errBlobRetentionEpochMismatch = errors.New("epochs for blobs request value in DB does not match runtime config") - -func (s *Store) checkEpochsForBlobSidecarsRequestBucket(db *bolt.DB) error { - uRetentionEpochs := uint64(s.blobRetentionEpochs) - if err := db.Update(func(tx *bolt.Tx) error { - b := tx.Bucket(chainMetadataBucket) - v := b.Get(blobRetentionEpochsKey) - if v == nil { - if err := b.Put(blobRetentionEpochsKey, bytesutil.Uint64ToBytesBigEndian(uRetentionEpochs)); err != nil { - return err - } - return nil - } - e := bytesutil.BytesToUint64BigEndian(v) - if e != uRetentionEpochs { - return errors.Wrapf(errBlobRetentionEpochMismatch, "db=%d, config=%d", e, uRetentionEpochs) - } - return nil - }); err != nil { - return err - } - return nil -} diff --git a/beacon-chain/db/kv/blob_test.go b/beacon-chain/db/kv/blob_test.go deleted file mode 100644 index 527f39b2ef3e..000000000000 --- a/beacon-chain/db/kv/blob_test.go +++ /dev/null @@ -1,532 +0,0 @@ -package kv - -import ( - "context" - "crypto/rand" - "fmt" - "testing" - - "github.com/pkg/errors" - fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams" - "github.com/prysmaticlabs/prysm/v4/config/params" - "github.com/prysmaticlabs/prysm/v4/consensus-types/primitives" - types "github.com/prysmaticlabs/prysm/v4/consensus-types/primitives" - "github.com/prysmaticlabs/prysm/v4/encoding/bytesutil" - ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1" - 
"github.com/prysmaticlabs/prysm/v4/testing/assertions" - "github.com/prysmaticlabs/prysm/v4/testing/require" - bolt "go.etcd.io/bbolt" -) - -func equalBlobSlices(expect []*ethpb.DeprecatedBlobSidecar, got []*ethpb.DeprecatedBlobSidecar) error { - if len(expect) != len(got) { - return fmt.Errorf("mismatched lengths, expect=%d, got=%d", len(expect), len(got)) - } - for i := 0; i < len(expect); i++ { - es := expect[i] - gs := got[i] - var e string - assertions.DeepEqual(assertions.SprintfAssertionLoggerFn(&e), es, gs) - if e != "" { - return errors.New(e) - } - } - return nil -} - -func TestStore_BlobSidecars(t *testing.T) { - ctx := context.Background() - - t.Run("empty", func(t *testing.T) { - db := setupDB(t) - scs := generateBlobSidecars(t, 0) - require.ErrorContains(t, "nil or empty blob sidecars", db.SaveBlobSidecar(ctx, scs)) - }) - t.Run("empty by root", func(t *testing.T) { - db := setupDB(t) - got, err := db.BlobSidecarsByRoot(ctx, [32]byte{}) - require.ErrorIs(t, ErrNotFound, err) - require.Equal(t, 0, len(got)) - }) - t.Run("empty by slot", func(t *testing.T) { - db := setupDB(t) - got, err := db.BlobSidecarsBySlot(ctx, 1) - require.ErrorIs(t, ErrNotFound, err) - require.Equal(t, 0, len(got)) - }) - t.Run("save and retrieve by root (one)", func(t *testing.T) { - db := setupDB(t) - scs := generateBlobSidecars(t, 1) - require.NoError(t, db.SaveBlobSidecar(ctx, scs)) - require.Equal(t, 1, len(scs)) - got, err := db.BlobSidecarsByRoot(ctx, bytesutil.ToBytes32(scs[0].BlockRoot)) - require.NoError(t, err) - require.NoError(t, equalBlobSlices(scs, got)) - }) - t.Run("save and retrieve by root (max), per batch", func(t *testing.T) { - db := setupDB(t) - scs := generateBlobSidecars(t, fieldparams.MaxBlobsPerBlock) - require.NoError(t, db.SaveBlobSidecar(ctx, scs)) - require.Equal(t, fieldparams.MaxBlobsPerBlock, len(scs)) - got, err := db.BlobSidecarsByRoot(ctx, bytesutil.ToBytes32(scs[0].BlockRoot)) - require.NoError(t, err) - require.NoError(t, 
equalBlobSlices(scs, got)) - }) - t.Run("save and retrieve by root, max and individually", func(t *testing.T) { - db := setupDB(t) - scs := generateBlobSidecars(t, fieldparams.MaxBlobsPerBlock) - for _, sc := range scs { - require.NoError(t, db.SaveBlobSidecar(ctx, []*ethpb.DeprecatedBlobSidecar{sc})) - } - require.Equal(t, fieldparams.MaxBlobsPerBlock, len(scs)) - got, err := db.BlobSidecarsByRoot(ctx, bytesutil.ToBytes32(scs[0].BlockRoot)) - require.NoError(t, err) - require.NoError(t, equalBlobSlices(scs, got)) - }) - t.Run("save and retrieve valid subset by root", func(t *testing.T) { - db := setupDB(t) - scs := generateBlobSidecars(t, fieldparams.MaxBlobsPerBlock) - require.NoError(t, db.SaveBlobSidecar(ctx, scs)) - require.Equal(t, fieldparams.MaxBlobsPerBlock, len(scs)) - - // we'll request indices 0 and 3, so make a slice with those indices for comparison - expect := make([]*ethpb.DeprecatedBlobSidecar, 2) - expect[0] = scs[0] - expect[1] = scs[3] - - got, err := db.BlobSidecarsByRoot(ctx, bytesutil.ToBytes32(scs[0].BlockRoot), 0, 3) - require.NoError(t, err) - require.NoError(t, equalBlobSlices(expect, got)) - require.Equal(t, uint64(0), got[0].Index) - require.Equal(t, uint64(3), got[1].Index) - }) - t.Run("error for invalid index when retrieving by root", func(t *testing.T) { - db := setupDB(t) - scs := generateBlobSidecars(t, fieldparams.MaxBlobsPerBlock) - require.NoError(t, db.SaveBlobSidecar(ctx, scs)) - require.Equal(t, fieldparams.MaxBlobsPerBlock, len(scs)) - - got, err := db.BlobSidecarsByRoot(ctx, bytesutil.ToBytes32(scs[0].BlockRoot), uint64(len(scs))) - require.ErrorIs(t, err, ErrNotFound) - require.Equal(t, 0, len(got)) - }) - t.Run("save and retrieve by slot (one)", func(t *testing.T) { - db := setupDB(t) - scs := generateBlobSidecars(t, 1) - require.NoError(t, db.SaveBlobSidecar(ctx, scs)) - require.Equal(t, 1, len(scs)) - got, err := db.BlobSidecarsBySlot(ctx, scs[0].Slot) - require.NoError(t, err) - require.NoError(t, equalBlobSlices(scs, 
got)) - }) - t.Run("save and retrieve by slot (max)", func(t *testing.T) { - db := setupDB(t) - scs := generateBlobSidecars(t, fieldparams.MaxBlobsPerBlock) - require.NoError(t, db.SaveBlobSidecar(ctx, scs)) - require.Equal(t, fieldparams.MaxBlobsPerBlock, len(scs)) - got, err := db.BlobSidecarsBySlot(ctx, scs[0].Slot) - require.NoError(t, err) - require.NoError(t, equalBlobSlices(scs, got)) - }) - t.Run("save and retrieve by slot, max and individually", func(t *testing.T) { - db := setupDB(t) - scs := generateBlobSidecars(t, fieldparams.MaxBlobsPerBlock) - for _, sc := range scs { - require.NoError(t, db.SaveBlobSidecar(ctx, []*ethpb.DeprecatedBlobSidecar{sc})) - } - require.Equal(t, fieldparams.MaxBlobsPerBlock, len(scs)) - got, err := db.BlobSidecarsBySlot(ctx, scs[0].Slot) - require.NoError(t, err) - require.NoError(t, equalBlobSlices(scs, got)) - }) - t.Run("save and retrieve valid subset by slot", func(t *testing.T) { - db := setupDB(t) - scs := generateBlobSidecars(t, fieldparams.MaxBlobsPerBlock) - require.NoError(t, db.SaveBlobSidecar(ctx, scs)) - require.Equal(t, fieldparams.MaxBlobsPerBlock, len(scs)) - - // we'll request indices 0 and 3, so make a slice with those indices for comparison - expect := make([]*ethpb.DeprecatedBlobSidecar, 2) - expect[0] = scs[0] - expect[1] = scs[3] - - got, err := db.BlobSidecarsBySlot(ctx, scs[0].Slot, 0, 3) - require.NoError(t, err) - require.NoError(t, equalBlobSlices(expect, got)) - - require.Equal(t, uint64(0), got[0].Index) - require.Equal(t, uint64(3), got[1].Index) - }) - t.Run("error for invalid index when retrieving by slot", func(t *testing.T) { - db := setupDB(t) - scs := generateBlobSidecars(t, fieldparams.MaxBlobsPerBlock) - require.NoError(t, db.SaveBlobSidecar(ctx, scs)) - require.Equal(t, fieldparams.MaxBlobsPerBlock, len(scs)) - - got, err := db.BlobSidecarsBySlot(ctx, scs[0].Slot, uint64(len(scs))) - require.ErrorIs(t, err, ErrNotFound) - require.Equal(t, 0, len(got)) - }) - t.Run("delete works", func(t 
*testing.T) { - db := setupDB(t) - scs := generateBlobSidecars(t, fieldparams.MaxBlobsPerBlock) - require.NoError(t, db.SaveBlobSidecar(ctx, scs)) - require.Equal(t, fieldparams.MaxBlobsPerBlock, len(scs)) - got, err := db.BlobSidecarsByRoot(ctx, bytesutil.ToBytes32(scs[0].BlockRoot)) - require.NoError(t, err) - require.NoError(t, equalBlobSlices(scs, got)) - require.NoError(t, db.DeleteBlobSidecars(ctx, bytesutil.ToBytes32(scs[0].BlockRoot))) - got, err = db.BlobSidecarsByRoot(ctx, bytesutil.ToBytes32(scs[0].BlockRoot)) - require.ErrorIs(t, ErrNotFound, err) - require.Equal(t, 0, len(got)) - }) - t.Run("saving blob different times", func(t *testing.T) { - db := setupDB(t) - scs := generateBlobSidecars(t, fieldparams.MaxBlobsPerBlock) - - for i := 0; i < fieldparams.MaxBlobsPerBlock; i++ { - scs[i].Slot = primitives.Slot(i) - scs[i].BlockRoot = bytesutil.PadTo([]byte{byte(i)}, 32) - require.NoError(t, db.SaveBlobSidecar(ctx, []*ethpb.DeprecatedBlobSidecar{scs[i]})) - br := bytesutil.ToBytes32(scs[i].BlockRoot) - saved, err := db.BlobSidecarsByRoot(ctx, br) - require.NoError(t, err) - require.NoError(t, equalBlobSlices([]*ethpb.DeprecatedBlobSidecar{scs[i]}, saved)) - } - }) - t.Run("saving a new blob for rotation (batch)", func(t *testing.T) { - db := setupDB(t) - scs := generateBlobSidecars(t, fieldparams.MaxBlobsPerBlock) - require.NoError(t, db.SaveBlobSidecar(ctx, scs)) - require.Equal(t, fieldparams.MaxBlobsPerBlock, len(scs)) - oldBlockRoot := scs[0].BlockRoot - got, err := db.BlobSidecarsByRoot(ctx, bytesutil.ToBytes32(oldBlockRoot)) - require.NoError(t, err) - require.NoError(t, equalBlobSlices(scs, got)) - - newScs := generateBlobSidecars(t, fieldparams.MaxBlobsPerBlock) - newRetentionSlot := primitives.Slot(params.BeaconConfig().MinEpochsForBlobsSidecarsRequest.Mul(uint64(params.BeaconConfig().SlotsPerEpoch))) - for _, sc := range newScs { - sc.Slot = sc.Slot + newRetentionSlot - } - require.NoError(t, db.SaveBlobSidecar(ctx, newScs)) - - _, err = 
db.BlobSidecarsBySlot(ctx, 100) - require.ErrorIs(t, ErrNotFound, err) - - got, err = db.BlobSidecarsByRoot(ctx, bytesutil.ToBytes32(newScs[0].BlockRoot)) - require.NoError(t, err) - require.NoError(t, equalBlobSlices(newScs, got)) - }) - t.Run("save multiple blobs after new rotation (individually)", func(t *testing.T) { - db := setupDB(t) - scs := generateBlobSidecars(t, fieldparams.MaxBlobsPerBlock) - for _, sc := range scs { - require.NoError(t, db.SaveBlobSidecar(ctx, []*ethpb.DeprecatedBlobSidecar{sc})) - } - got, err := db.BlobSidecarsByRoot(ctx, bytesutil.ToBytes32(scs[0].BlockRoot)) - require.NoError(t, err) - require.NoError(t, equalBlobSlices(scs, got)) - - scs = generateBlobSidecars(t, fieldparams.MaxBlobsPerBlock) - newRetentionSlot := primitives.Slot(params.BeaconConfig().MinEpochsForBlobsSidecarsRequest.Mul(uint64(params.BeaconConfig().SlotsPerEpoch))) - for _, sc := range scs { - sc.Slot = sc.Slot + newRetentionSlot - } - for _, sc := range scs { - require.NoError(t, db.SaveBlobSidecar(ctx, []*ethpb.DeprecatedBlobSidecar{sc})) - } - - _, err = db.BlobSidecarsBySlot(ctx, 100) - require.ErrorIs(t, ErrNotFound, err) - - got, err = db.BlobSidecarsByRoot(ctx, bytesutil.ToBytes32(scs[0].BlockRoot)) - require.NoError(t, err) - require.NoError(t, equalBlobSlices(scs, got)) - }) - t.Run("save multiple blobs after new rotation (batch then individually)", func(t *testing.T) { - db := setupDB(t) - scs := generateBlobSidecars(t, fieldparams.MaxBlobsPerBlock) - require.NoError(t, db.SaveBlobSidecar(ctx, scs)) - require.Equal(t, fieldparams.MaxBlobsPerBlock, len(scs)) - oldBlockRoot := scs[0].BlockRoot - got, err := db.BlobSidecarsByRoot(ctx, bytesutil.ToBytes32(oldBlockRoot)) - require.NoError(t, err) - require.NoError(t, equalBlobSlices(scs, got)) - - scs = generateBlobSidecars(t, fieldparams.MaxBlobsPerBlock) - newRetentionSlot := primitives.Slot(params.BeaconConfig().MinEpochsForBlobsSidecarsRequest.Mul(uint64(params.BeaconConfig().SlotsPerEpoch))) - for _, sc 
:= range scs { - sc.Slot = sc.Slot + newRetentionSlot - } - for _, sc := range scs { - require.NoError(t, db.SaveBlobSidecar(ctx, []*ethpb.DeprecatedBlobSidecar{sc})) - } - - _, err = db.BlobSidecarsBySlot(ctx, 100) - require.ErrorIs(t, ErrNotFound, err) - - got, err = db.BlobSidecarsByRoot(ctx, bytesutil.ToBytes32(scs[0].BlockRoot)) - require.NoError(t, err) - require.NoError(t, equalBlobSlices(scs, got)) - }) - t.Run("save multiple blobs after new rotation (individually then batch)", func(t *testing.T) { - db := setupDB(t) - scs := generateBlobSidecars(t, fieldparams.MaxBlobsPerBlock) - for _, sc := range scs { - require.NoError(t, db.SaveBlobSidecar(ctx, []*ethpb.DeprecatedBlobSidecar{sc})) - } - got, err := db.BlobSidecarsByRoot(ctx, bytesutil.ToBytes32(scs[0].BlockRoot)) - require.NoError(t, err) - require.NoError(t, equalBlobSlices(scs, got)) - - scs = generateBlobSidecars(t, fieldparams.MaxBlobsPerBlock) - newRetentionSlot := primitives.Slot(params.BeaconConfig().MinEpochsForBlobsSidecarsRequest.Mul(uint64(params.BeaconConfig().SlotsPerEpoch))) - for _, sc := range scs { - sc.Slot = sc.Slot + newRetentionSlot - } - require.NoError(t, db.SaveBlobSidecar(ctx, scs)) - - _, err = db.BlobSidecarsBySlot(ctx, 100) - require.ErrorIs(t, ErrNotFound, err) - - got, err = db.BlobSidecarsByRoot(ctx, bytesutil.ToBytes32(scs[0].BlockRoot)) - require.NoError(t, err) - require.NoError(t, equalBlobSlices(scs, got)) - }) - t.Run("save equivocating blobs", func(t *testing.T) { - db := setupDB(t) - scs := generateBlobSidecars(t, fieldparams.MaxBlobsPerBlock/2) - eScs := generateEquivocatingBlobSidecars(t, fieldparams.MaxBlobsPerBlock/2) - - for i, sc := range scs { - require.NoError(t, db.SaveBlobSidecar(ctx, []*ethpb.DeprecatedBlobSidecar{sc})) - require.NoError(t, db.SaveBlobSidecar(ctx, []*ethpb.DeprecatedBlobSidecar{eScs[i]})) - } - - got, err := db.BlobSidecarsByRoot(ctx, bytesutil.ToBytes32(scs[0].BlockRoot)) - require.NoError(t, err) - require.NoError(t, 
equalBlobSlices(scs, got)) - - got, err = db.BlobSidecarsByRoot(ctx, bytesutil.ToBytes32(eScs[0].BlockRoot)) - require.NoError(t, err) - require.NoError(t, equalBlobSlices(eScs, got)) - }) -} - -func generateBlobSidecars(t *testing.T, n uint64) []*ethpb.DeprecatedBlobSidecar { - blobSidecars := make([]*ethpb.DeprecatedBlobSidecar, n) - for i := uint64(0); i < n; i++ { - blobSidecars[i] = generateBlobSidecar(t, i) - } - return blobSidecars -} - -func generateBlobSidecar(t *testing.T, index uint64) *ethpb.DeprecatedBlobSidecar { - blob := make([]byte, 131072) - _, err := rand.Read(blob) - require.NoError(t, err) - kzgCommitment := make([]byte, 48) - _, err = rand.Read(kzgCommitment) - require.NoError(t, err) - kzgProof := make([]byte, 48) - _, err = rand.Read(kzgProof) - require.NoError(t, err) - return ðpb.DeprecatedBlobSidecar{ - BlockRoot: bytesutil.PadTo([]byte{'a'}, 32), - Index: index, - Slot: 100, - BlockParentRoot: bytesutil.PadTo([]byte{'b'}, 32), - ProposerIndex: 101, - Blob: blob, - KzgCommitment: kzgCommitment, - KzgProof: kzgProof, - } -} - -func generateEquivocatingBlobSidecars(t *testing.T, n uint64) []*ethpb.DeprecatedBlobSidecar { - blobSidecars := make([]*ethpb.DeprecatedBlobSidecar, n) - for i := uint64(0); i < n; i++ { - blobSidecars[i] = generateEquivocatingBlobSidecar(t, i) - } - return blobSidecars -} - -func generateEquivocatingBlobSidecar(t *testing.T, index uint64) *ethpb.DeprecatedBlobSidecar { - blob := make([]byte, 131072) - _, err := rand.Read(blob) - require.NoError(t, err) - kzgCommitment := make([]byte, 48) - _, err = rand.Read(kzgCommitment) - require.NoError(t, err) - kzgProof := make([]byte, 48) - _, err = rand.Read(kzgProof) - require.NoError(t, err) - - return ðpb.DeprecatedBlobSidecar{ - BlockRoot: bytesutil.PadTo([]byte{'c'}, 32), - Index: index, - Slot: 100, - BlockParentRoot: bytesutil.PadTo([]byte{'b'}, 32), - ProposerIndex: 102, - Blob: blob, - KzgCommitment: kzgCommitment, - KzgProof: kzgProof, - } -} - -func 
Test_validUniqueSidecars_validation(t *testing.T) { - tests := []struct { - name string - scs []*ethpb.DeprecatedBlobSidecar - err error - }{ - {name: "empty", scs: []*ethpb.DeprecatedBlobSidecar{}, err: errEmptySidecar}, - {name: "too many sidecars", scs: generateBlobSidecars(t, fieldparams.MaxBlobsPerBlock+1), err: errBlobSidecarLimit}, - {name: "invalid slot", scs: []*ethpb.DeprecatedBlobSidecar{{Slot: 1}, {Slot: 2}}, err: errBlobSlotMismatch}, - {name: "invalid proposer index", scs: []*ethpb.DeprecatedBlobSidecar{{ProposerIndex: 1}, {ProposerIndex: 2}}, err: errBlobProposerMismatch}, - {name: "invalid root", scs: []*ethpb.DeprecatedBlobSidecar{{BlockRoot: []byte{1}}, {BlockRoot: []byte{2}}}, err: errBlobRootMismatch}, - {name: "invalid parent root", scs: []*ethpb.DeprecatedBlobSidecar{{BlockParentRoot: []byte{1}}, {BlockParentRoot: []byte{2}}}, err: errBlobParentMismatch}, - {name: "happy path", scs: []*ethpb.DeprecatedBlobSidecar{{Index: 0}, {Index: 1}}}, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - _, err := validUniqueSidecars(tt.scs) - if tt.err != nil { - require.ErrorIs(t, err, tt.err) - } else { - require.NoError(t, err) - } - }) - } -} - -func Test_validUniqueSidecars_dedup(t *testing.T) { - cases := []struct { - name string - scs []*ethpb.DeprecatedBlobSidecar - expected []*ethpb.DeprecatedBlobSidecar - err error - }{ - { - name: "duplicate sidecar", - scs: []*ethpb.DeprecatedBlobSidecar{{Index: 1}, {Index: 1}}, - expected: []*ethpb.DeprecatedBlobSidecar{{Index: 1}}, - }, - { - name: "single sidecar", - scs: []*ethpb.DeprecatedBlobSidecar{{Index: 1}}, - expected: []*ethpb.DeprecatedBlobSidecar{{Index: 1}}, - }, - { - name: "multiple duplicates", - scs: []*ethpb.DeprecatedBlobSidecar{{Index: 1}, {Index: 2}, {Index: 2}, {Index: 3}, {Index: 3}}, - expected: []*ethpb.DeprecatedBlobSidecar{{Index: 1}, {Index: 2}, {Index: 3}}, - }, - { - name: "ok number after de-dupe, > 6 before", - scs: []*ethpb.DeprecatedBlobSidecar{{Index: 1}, 
{Index: 2}, {Index: 2}, {Index: 2}, {Index: 2}, {Index: 3}, {Index: 3}}, - expected: []*ethpb.DeprecatedBlobSidecar{{Index: 1}, {Index: 2}, {Index: 3}}, - }, - { - name: "max unique, no dupes", - scs: []*ethpb.DeprecatedBlobSidecar{{Index: 1}, {Index: 2}, {Index: 3}, {Index: 4}, {Index: 5}, {Index: 6}}, - expected: []*ethpb.DeprecatedBlobSidecar{{Index: 1}, {Index: 2}, {Index: 3}, {Index: 4}, {Index: 5}, {Index: 6}}, - }, - { - name: "too many unique", - scs: []*ethpb.DeprecatedBlobSidecar{{Index: 1}, {Index: 2}, {Index: 3}, {Index: 4}, {Index: 5}, {Index: 6}, {Index: 7}}, - err: errBlobSidecarLimit, - }, - { - name: "too many unique with dupes", - scs: []*ethpb.DeprecatedBlobSidecar{{Index: 1}, {Index: 1}, {Index: 1}, {Index: 2}, {Index: 3}, {Index: 4}, {Index: 5}, {Index: 6}, {Index: 7}}, - err: errBlobSidecarLimit, - }, - } - for _, c := range cases { - t.Run(c.name, func(t *testing.T) { - u, err := validUniqueSidecars(c.scs) - if c.err != nil { - require.ErrorIs(t, err, c.err) - } else { - require.NoError(t, err) - } - require.Equal(t, len(c.expected), len(u)) - }) - } -} - -func TestStore_sortSidecars(t *testing.T) { - scs := []*ethpb.DeprecatedBlobSidecar{ - {Index: 6}, - {Index: 4}, - {Index: 2}, - {Index: 1}, - {Index: 3}, - {Index: 5}, - {}, - } - sortSidecars(scs) - for i := 0; i < len(scs)-1; i++ { - require.Equal(t, uint64(i), scs[i].Index) - } -} - -func BenchmarkStore_BlobSidecarsByRoot(b *testing.B) { - s := setupDB(b) - ctx := context.Background() - require.NoError(b, s.SaveBlobSidecar(ctx, []*ethpb.DeprecatedBlobSidecar{ - {BlockRoot: bytesutil.PadTo([]byte{'a'}, 32), Slot: 0}, - })) - - err := s.db.Update(func(tx *bolt.Tx) error { - bkt := tx.Bucket(blobsBucket) - for i := 1; i < 131071; i++ { - r := make([]byte, 32) - _, err := rand.Read(r) - require.NoError(b, err) - scs := []*ethpb.DeprecatedBlobSidecar{ - {BlockRoot: r, Slot: primitives.Slot(i)}, - } - k := s.blobSidecarKey(scs[0]) - encodedBlobSidecar, err := encode(ctx, 
ðpb.DeprecatedBlobSidecars{Sidecars: scs}) - require.NoError(b, err) - require.NoError(b, bkt.Put(k, encodedBlobSidecar)) - } - return nil - }) - require.NoError(b, err) - - require.NoError(b, s.SaveBlobSidecar(ctx, []*ethpb.DeprecatedBlobSidecar{ - {BlockRoot: bytesutil.PadTo([]byte{'b'}, 32), Slot: 131071}, - })) - - b.ResetTimer() - for i := 0; i < b.N; i++ { - _, err := s.BlobSidecarsByRoot(ctx, [32]byte{'b'}) - require.NoError(b, err) - } -} - -func Test_checkEpochsForBlobSidecarsRequestBucket(t *testing.T) { - s := setupDB(t) - - require.NoError(t, s.checkEpochsForBlobSidecarsRequestBucket(s.db)) // First write - require.NoError(t, s.checkEpochsForBlobSidecarsRequestBucket(s.db)) // First check - - s.blobRetentionEpochs += 1 - require.ErrorIs(t, s.checkEpochsForBlobSidecarsRequestBucket(s.db), errBlobRetentionEpochMismatch) -} - -func TestBlobRotatingKey(t *testing.T) { - s := setupDB(t) - k := s.blobSidecarKey(ðpb.DeprecatedBlobSidecar{ - Slot: 1, - BlockRoot: []byte{2}, - }) - - require.Equal(t, types.Slot(1), k.Slot()) - require.DeepEqual(t, []byte{2}, k.BlockRoot()) - require.DeepEqual(t, s.slotKey(types.Slot(1)), k.BufferPrefix()) -} diff --git a/beacon-chain/db/kv/kv.go b/beacon-chain/db/kv/kv.go index a75c82f30f15..4edd992fdc4a 100644 --- a/beacon-chain/db/kv/kv.go +++ b/beacon-chain/db/kv/kv.go @@ -18,7 +18,6 @@ import ( "github.com/prysmaticlabs/prysm/v4/config/features" "github.com/prysmaticlabs/prysm/v4/config/params" "github.com/prysmaticlabs/prysm/v4/consensus-types/blocks" - "github.com/prysmaticlabs/prysm/v4/consensus-types/primitives" "github.com/prysmaticlabs/prysm/v4/io/file" bolt "go.etcd.io/bbolt" ) @@ -91,7 +90,6 @@ type Store struct { validatorEntryCache *ristretto.Cache stateSummaryCache *stateSummaryCache ctx context.Context - blobRetentionEpochs primitives.Epoch } // StoreDatafilePath is the canonical construction of a full @@ -138,13 +136,6 @@ var Buckets = [][]byte{ // KVStoreOption is a functional option that modifies a kv.Store. 
type KVStoreOption func(*Store) -// WithBlobRetentionEpochs sets the variable configuring the blob retention window. -func WithBlobRetentionEpochs(e primitives.Epoch) KVStoreOption { - return func(s *Store) { - s.blobRetentionEpochs = e - } -} - // NewKVStore initializes a new boltDB key-value store at the directory // path specified, creates the kv-buckets based on the schema, and stores // an open connection db object as a property of the Store struct. @@ -217,14 +208,6 @@ func NewKVStore(ctx context.Context, dirPath string, opts ...KVStoreOption) (*St return nil, err } - if err := kv.checkEpochsForBlobSidecarsRequestBucket(boltDB); err != nil { - return nil, errors.Wrap(err, "failed to check epochs for blob sidecars request bucket") - } - - // set a default so that tests don't break - if kv.blobRetentionEpochs == 0 { - kv.blobRetentionEpochs = params.BeaconConfig().MinEpochsForBlobsSidecarsRequest - } return kv, nil } diff --git a/beacon-chain/db/kv/kv_test.go b/beacon-chain/db/kv/kv_test.go index d20d8430d210..3941d468cbb0 100644 --- a/beacon-chain/db/kv/kv_test.go +++ b/beacon-chain/db/kv/kv_test.go @@ -6,7 +6,6 @@ import ( "testing" "github.com/prysmaticlabs/prysm/v4/config/features" - "github.com/prysmaticlabs/prysm/v4/config/params" "github.com/prysmaticlabs/prysm/v4/consensus-types/blocks" ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1" "github.com/prysmaticlabs/prysm/v4/testing/require" @@ -16,8 +15,7 @@ import ( // setupDB instantiates and returns a Store instance. 
func setupDB(t testing.TB) *Store { - opt := WithBlobRetentionEpochs(params.BeaconConfig().MinEpochsForBlobsSidecarsRequest) - db, err := NewKVStore(context.Background(), t.TempDir(), opt) + db, err := NewKVStore(context.Background(), t.TempDir()) require.NoError(t, err, "Failed to instantiate DB") t.Cleanup(func() { require.NoError(t, db.Close(), "Failed to close database") diff --git a/beacon-chain/db/kv/schema.go b/beacon-chain/db/kv/schema.go index b7b1f1af5c51..a5c89230abee 100644 --- a/beacon-chain/db/kv/schema.go +++ b/beacon-chain/db/kv/schema.go @@ -47,10 +47,6 @@ var ( finalizedCheckpointKey = []byte("finalized-checkpoint") powchainDataKey = []byte("powchain-data") lastValidatedCheckpointKey = []byte("last-validated-checkpoint") - // blobRetentionEpochsKey determines the size of the blob circular buffer and how the keys in that buffer are - // determined. If this value changes, the existing data is invalidated, so storing it in the db - // allows us to assert at runtime that the db state is still consistent with the runtime state. - blobRetentionEpochsKey = []byte("blob-retention-epochs") // Below keys are used to identify objects are to be fork compatible. // Objects that are only compatible with specific forks should be prefixed with such keys. 
diff --git a/beacon-chain/execution/BUILD.bazel b/beacon-chain/execution/BUILD.bazel index d0d3cda67ca7..47fbf228f6ee 100644 --- a/beacon-chain/execution/BUILD.bazel +++ b/beacon-chain/execution/BUILD.bazel @@ -105,7 +105,6 @@ go_test( "//beacon-chain/execution/types:go_default_library", "//beacon-chain/forkchoice/doubly-linked-tree:go_default_library", "//beacon-chain/state/stategen:go_default_library", - "//config/features:go_default_library", "//config/fieldparams:go_default_library", "//config/params:go_default_library", "//consensus-types/blocks:go_default_library", diff --git a/beacon-chain/execution/engine_client.go b/beacon-chain/execution/engine_client.go index 87d9a98e4ee6..4de713b03ee8 100644 --- a/beacon-chain/execution/engine_client.go +++ b/beacon-chain/execution/engine_client.go @@ -15,7 +15,6 @@ import ( "github.com/holiman/uint256" "github.com/pkg/errors" "github.com/prysmaticlabs/prysm/v4/beacon-chain/execution/types" - "github.com/prysmaticlabs/prysm/v4/config/features" fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams" "github.com/prysmaticlabs/prysm/v4/config/params" "github.com/prysmaticlabs/prysm/v4/consensus-types/blocks" @@ -489,6 +488,10 @@ func (s *Service) GetPayloadBodiesByHash(ctx context.Context, executionBlockHash defer span.End() result := make([]*pb.ExecutionPayloadBodyV1, 0) + // Exit early if there are no execution hashes. 
+ if len(executionBlockHashes) == 0 { + return result, nil + } err := s.rpcClient.CallContext(ctx, &result, GetPayloadBodiesByHashV1, executionBlockHashes) for i, item := range result { @@ -621,31 +624,15 @@ func (s *Service) ReconstructFullBellatrixBlockBatch( } func (s *Service) retrievePayloadFromExecutionHash(ctx context.Context, executionBlockHash common.Hash, header interfaces.ExecutionData, version int) (interfaces.ExecutionData, error) { - if features.Get().EnableOptionalEngineMethods { - pBodies, err := s.GetPayloadBodiesByHash(ctx, []common.Hash{executionBlockHash}) - if err != nil { - return nil, fmt.Errorf("could not get payload body by hash %#x: %v", executionBlockHash, err) - } - if len(pBodies) != 1 { - return nil, errors.Errorf("could not retrieve the correct number of payload bodies: wanted 1 but got %d", len(pBodies)) - } - bdy := pBodies[0] - return fullPayloadFromPayloadBody(header, bdy, version) - } - - executionBlock, err := s.ExecutionBlockByHash(ctx, executionBlockHash, true /* with txs */) + pBodies, err := s.GetPayloadBodiesByHash(ctx, []common.Hash{executionBlockHash}) if err != nil { - return nil, fmt.Errorf("could not fetch execution block with txs by hash %#x: %v", executionBlockHash, err) - } - if executionBlock == nil { - return nil, fmt.Errorf("received nil execution block for request by hash %#x", executionBlockHash) + return nil, fmt.Errorf("could not get payload body by hash %#x: %v", executionBlockHash, err) } - if bytes.Equal(executionBlock.Hash.Bytes(), []byte{}) { - return nil, ErrEmptyBlockHash + if len(pBodies) != 1 { + return nil, errors.Errorf("could not retrieve the correct number of payload bodies: wanted 1 but got %d", len(pBodies)) } - - executionBlock.Version = version - return fullPayloadFromExecutionBlock(version, header, executionBlock) + bdy := pBodies[0] + return fullPayloadFromPayloadBody(header, bdy, version) } func (s *Service) retrievePayloadsFromExecutionHashes( @@ -654,19 +641,12 @@ func (s *Service) 
retrievePayloadsFromExecutionHashes( validExecPayloads []int, blindedBlocks []interfaces.ReadOnlySignedBeaconBlock) ([]interfaces.SignedBeaconBlock, error) { fullBlocks := make([]interfaces.SignedBeaconBlock, len(blindedBlocks)) - var execBlocks []*pb.ExecutionBlock var payloadBodies []*pb.ExecutionPayloadBodyV1 var err error - if features.Get().EnableOptionalEngineMethods { - payloadBodies, err = s.GetPayloadBodiesByHash(ctx, executionHashes) - if err != nil { - return nil, fmt.Errorf("could not fetch payload bodies by hash %#x: %v", executionHashes, err) - } - } else { - execBlocks, err = s.ExecutionBlocksByHashes(ctx, executionHashes, true /* with txs*/) - if err != nil { - return nil, fmt.Errorf("could not fetch execution blocks with txs by hash %#x: %v", executionHashes, err) - } + + payloadBodies, err = s.GetPayloadBodiesByHash(ctx, executionHashes) + if err != nil { + return nil, fmt.Errorf("could not fetch payload bodies by hash %#x: %v", executionHashes, err) } // For each valid payload, we reconstruct the full block from it with the @@ -674,32 +654,17 @@ func (s *Service) retrievePayloadsFromExecutionHashes( for sliceIdx, realIdx := range validExecPayloads { var payload interfaces.ExecutionData bblock := blindedBlocks[realIdx] - if features.Get().EnableOptionalEngineMethods { - b := payloadBodies[sliceIdx] - if b == nil { - return nil, fmt.Errorf("received nil payload body for request by hash %#x", executionHashes[sliceIdx]) - } - header, err := bblock.Block().Body().Execution() - if err != nil { - return nil, err - } - payload, err = fullPayloadFromPayloadBody(header, b, bblock.Version()) - if err != nil { - return nil, err - } - } else { - b := execBlocks[sliceIdx] - if b == nil { - return nil, fmt.Errorf("received nil execution block for request by hash %#x", executionHashes[sliceIdx]) - } - header, err := bblock.Block().Body().Execution() - if err != nil { - return nil, err - } - payload, err = fullPayloadFromExecutionBlock(bblock.Version(), header, 
b) - if err != nil { - return nil, err - } + b := payloadBodies[sliceIdx] + if b == nil { + return nil, fmt.Errorf("received nil payload body for request by hash %#x", executionHashes[sliceIdx]) + } + header, err := bblock.Block().Body().Execution() + if err != nil { + return nil, err + } + payload, err = fullPayloadFromPayloadBody(header, b, bblock.Version()) + if err != nil { + return nil, err } fullBlock, err := blocks.BuildSignedBeaconBlockFromExecutionPayload(bblock, payload.Proto()) if err != nil { @@ -796,8 +761,8 @@ func fullPayloadFromExecutionBlock( BlockHash: blockHash[:], Transactions: txs, Withdrawals: block.Withdrawals, - ExcessBlobGas: ebg, BlobGasUsed: bgu, + ExcessBlobGas: ebg, }, 0) // We can't get the block value and don't care about the block value for this instance default: return nil, fmt.Errorf("unknown execution block version %d", block.Version) @@ -976,10 +941,10 @@ func buildEmptyExecutionPayload(v int) (proto.Message, error) { ReceiptsRoot: make([]byte, fieldparams.RootLength), LogsBloom: make([]byte, fieldparams.LogsBloomLength), PrevRandao: make([]byte, fieldparams.RootLength), + ExtraData: make([]byte, 0), BaseFeePerGas: make([]byte, fieldparams.RootLength), BlockHash: make([]byte, fieldparams.RootLength), Transactions: make([][]byte, 0), - ExtraData: make([]byte, 0), }, nil case version.Capella: return &pb.ExecutionPayloadCapella{ @@ -989,10 +954,10 @@ func buildEmptyExecutionPayload(v int) (proto.Message, error) { ReceiptsRoot: make([]byte, fieldparams.RootLength), LogsBloom: make([]byte, fieldparams.LogsBloomLength), PrevRandao: make([]byte, fieldparams.RootLength), + ExtraData: make([]byte, 0), BaseFeePerGas: make([]byte, fieldparams.RootLength), BlockHash: make([]byte, fieldparams.RootLength), Transactions: make([][]byte, 0), - ExtraData: make([]byte, 0), Withdrawals: make([]*pb.Withdrawal, 0), }, nil case version.Deneb: @@ -1003,10 +968,10 @@ func buildEmptyExecutionPayload(v int) (proto.Message, error) { ReceiptsRoot: 
make([]byte, fieldparams.RootLength), LogsBloom: make([]byte, fieldparams.LogsBloomLength), PrevRandao: make([]byte, fieldparams.RootLength), + ExtraData: make([]byte, 0), BaseFeePerGas: make([]byte, fieldparams.RootLength), BlockHash: make([]byte, fieldparams.RootLength), Transactions: make([][]byte, 0), - ExtraData: make([]byte, 0), Withdrawals: make([]*pb.Withdrawal, 0), }, nil default: diff --git a/beacon-chain/execution/engine_client_fuzz_test.go b/beacon-chain/execution/engine_client_fuzz_test.go index 363983df58fa..c84a8137e789 100644 --- a/beacon-chain/execution/engine_client_fuzz_test.go +++ b/beacon-chain/execution/engine_client_fuzz_test.go @@ -85,7 +85,7 @@ func FuzzExecutionPayload(f *testing.F) { GasLimit: math.MaxUint64, GasUsed: math.MaxUint64, Timestamp: 100, - ExtraData: nil, + ExtraData: []byte{}, BaseFeePerGas: big.NewInt(math.MaxInt), BlockHash: common.Hash([32]byte{0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01}), Transactions: [][]byte{{0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01}, {0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01}, {0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01}, {0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01}}, diff --git a/beacon-chain/execution/engine_client_test.go b/beacon-chain/execution/engine_client_test.go index c4074adfee2d..0ba5ed5d4468 100644 --- a/beacon-chain/execution/engine_client_test.go +++ b/beacon-chain/execution/engine_client_test.go @@ -20,7 +20,6 @@ import ( "github.com/holiman/uint256" "github.com/pkg/errors" mocks "github.com/prysmaticlabs/prysm/v4/beacon-chain/execution/testing" - "github.com/prysmaticlabs/prysm/v4/config/features" fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams" "github.com/prysmaticlabs/prysm/v4/config/params" 
"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks" @@ -758,26 +757,7 @@ func TestReconstructFullBellatrixBlock(t *testing.T) { encodedBinaryTxs[0], err = txs[0].MarshalBinary() require.NoError(t, err) payload.Transactions = encodedBinaryTxs - jsonPayload["transactions"] = txs - num := big.NewInt(1) - encodedNum := hexutil.EncodeBig(num) - jsonPayload["hash"] = hexutil.Encode(payload.BlockHash) - jsonPayload["parentHash"] = common.BytesToHash([]byte("parent")) - jsonPayload["sha3Uncles"] = common.BytesToHash([]byte("uncles")) - jsonPayload["miner"] = common.BytesToAddress([]byte("miner")) - jsonPayload["stateRoot"] = common.BytesToHash([]byte("state")) - jsonPayload["transactionsRoot"] = common.BytesToHash([]byte("txs")) - jsonPayload["receiptsRoot"] = common.BytesToHash([]byte("receipts")) - jsonPayload["logsBloom"] = gethtypes.BytesToBloom([]byte("bloom")) - jsonPayload["gasLimit"] = hexutil.EncodeUint64(1) - jsonPayload["gasUsed"] = hexutil.EncodeUint64(2) - jsonPayload["timestamp"] = hexutil.EncodeUint64(3) - jsonPayload["number"] = encodedNum - jsonPayload["extraData"] = common.BytesToHash([]byte("extra")) - jsonPayload["totalDifficulty"] = "0x123456" - jsonPayload["difficulty"] = encodedNum - jsonPayload["size"] = encodedNum - jsonPayload["baseFeePerGas"] = encodedNum + jsonPayload["transactions"] = []hexutil.Bytes{encodedBinaryTxs[0]} wrappedPayload, err := blocks.WrappedExecutionPayload(payload) require.NoError(t, err) @@ -792,7 +772,7 @@ func TestReconstructFullBellatrixBlock(t *testing.T) { respJSON := map[string]interface{}{ "jsonrpc": "2.0", "id": 1, - "result": jsonPayload, + "result": []map[string]interface{}{jsonPayload}, } require.NoError(t, json.NewEncoder(w).Encode(respJSON)) })) @@ -869,26 +849,7 @@ func TestReconstructFullBellatrixBlockBatch(t *testing.T) { encodedBinaryTxs[0], err = txs[0].MarshalBinary() require.NoError(t, err) payload.Transactions = encodedBinaryTxs - jsonPayload["transactions"] = txs - num := big.NewInt(1) - 
encodedNum := hexutil.EncodeBig(num) - jsonPayload["hash"] = hexutil.Encode(payload.BlockHash) - jsonPayload["parentHash"] = common.BytesToHash([]byte("parent")) - jsonPayload["sha3Uncles"] = common.BytesToHash([]byte("uncles")) - jsonPayload["miner"] = common.BytesToAddress([]byte("miner")) - jsonPayload["stateRoot"] = common.BytesToHash([]byte("state")) - jsonPayload["transactionsRoot"] = common.BytesToHash([]byte("txs")) - jsonPayload["receiptsRoot"] = common.BytesToHash([]byte("receipts")) - jsonPayload["logsBloom"] = gethtypes.BytesToBloom([]byte("bloom")) - jsonPayload["gasLimit"] = hexutil.EncodeUint64(1) - jsonPayload["gasUsed"] = hexutil.EncodeUint64(2) - jsonPayload["timestamp"] = hexutil.EncodeUint64(3) - jsonPayload["number"] = encodedNum - jsonPayload["extraData"] = common.BytesToHash([]byte("extra")) - jsonPayload["totalDifficulty"] = "0x123456" - jsonPayload["difficulty"] = encodedNum - jsonPayload["size"] = encodedNum - jsonPayload["baseFeePerGas"] = encodedNum + jsonPayload["transactions"] = []hexutil.Bytes{encodedBinaryTxs[0]} wrappedPayload, err := blocks.WrappedExecutionPayload(payload) require.NoError(t, err) @@ -912,20 +873,12 @@ func TestReconstructFullBellatrixBlockBatch(t *testing.T) { require.NoError(t, r.Body.Close()) }() - respJSON := []map[string]interface{}{ - { - "jsonrpc": "2.0", - "id": 1, - "result": jsonPayload, - }, - { - "jsonrpc": "2.0", - "id": 2, - "result": jsonPayload, - }, + respJSON := map[string]interface{}{ + "jsonrpc": "2.0", + "id": 1, + "result": []map[string]interface{}{jsonPayload, jsonPayload}, } require.NoError(t, json.NewEncoder(w).Encode(respJSON)) - require.NoError(t, json.NewEncoder(w).Encode(respJSON)) })) defer srv.Close() @@ -1288,6 +1241,10 @@ func fixtures() map[string]interface{} { BlockHash: foo[:], Transactions: [][]byte{foo[:]}, } + executionPayloadBodyFixture := &pb.ExecutionPayloadBodyV1{ + Transactions: [][]byte{foo[:]}, + Withdrawals: []*pb.Withdrawal{}, + } executionPayloadFixtureCapella := 
&pb.ExecutionPayloadCapella{ ParentHash: foo[:], FeeRecipient: bar, @@ -1459,6 +1416,7 @@ func fixtures() map[string]interface{} { } return map[string]interface{}{ "ExecutionBlock": executionBlock, + "ExecutionPayloadBody": executionPayloadBodyFixture, "ExecutionPayload": executionPayloadFixture, "ExecutionPayloadCapella": executionPayloadFixtureCapella, "ExecutionPayloadDeneb": executionPayloadFixtureDeneb, @@ -2007,10 +1965,6 @@ func newPayloadV3Setup(t *testing.T, status *pb.PayloadStatus, payload *pb.Execu } func TestCapella_PayloadBodiesByHash(t *testing.T) { - resetFn := features.InitWithReset(&features.Flags{ - EnableOptionalEngineMethods: true, - }) - defer resetFn() t.Run("empty response works", func(t *testing.T) { srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { w.Header().Set("Content-Type", "application/json") @@ -2067,7 +2021,9 @@ func TestCapella_PayloadBodiesByHash(t *testing.T) { service := &Service{} service.rpcClient = rpcClient - results, err := service.GetPayloadBodiesByHash(ctx, []common.Hash{}) + bRoot := [32]byte{} + copy(bRoot[:], "hash") + results, err := service.GetPayloadBodiesByHash(ctx, []common.Hash{bRoot}) require.NoError(t, err) require.Equal(t, 1, len(results)) @@ -2113,7 +2069,9 @@ func TestCapella_PayloadBodiesByHash(t *testing.T) { service := &Service{} service.rpcClient = rpcClient - results, err := service.GetPayloadBodiesByHash(ctx, []common.Hash{}) + bRoot := [32]byte{} + copy(bRoot[:], "hash") + results, err := service.GetPayloadBodiesByHash(ctx, []common.Hash{bRoot, bRoot, bRoot}) require.NoError(t, err) require.Equal(t, 3, len(results)) @@ -2154,7 +2112,9 @@ func TestCapella_PayloadBodiesByHash(t *testing.T) { service := &Service{} service.rpcClient = rpcClient - results, err := service.GetPayloadBodiesByHash(ctx, []common.Hash{}) + bRoot := [32]byte{} + copy(bRoot[:], "hash") + results, err := service.GetPayloadBodiesByHash(ctx, []common.Hash{bRoot}) require.NoError(t, err) 
require.Equal(t, 1, len(results)) @@ -2204,7 +2164,9 @@ func TestCapella_PayloadBodiesByHash(t *testing.T) { service := &Service{} service.rpcClient = rpcClient - results, err := service.GetPayloadBodiesByHash(ctx, []common.Hash{}) + bRoot := [32]byte{} + copy(bRoot[:], "hash") + results, err := service.GetPayloadBodiesByHash(ctx, []common.Hash{bRoot}) require.NoError(t, err) require.Equal(t, 2, len(results)) @@ -2247,7 +2209,9 @@ func TestCapella_PayloadBodiesByHash(t *testing.T) { service := &Service{} service.rpcClient = rpcClient - results, err := service.GetPayloadBodiesByHash(ctx, []common.Hash{}) + bRoot := [32]byte{} + copy(bRoot[:], "hash") + results, err := service.GetPayloadBodiesByHash(ctx, []common.Hash{bRoot, bRoot, bRoot}) require.NoError(t, err) require.Equal(t, 3, len(results)) @@ -2258,10 +2222,6 @@ func TestCapella_PayloadBodiesByHash(t *testing.T) { } func TestCapella_PayloadBodiesByRange(t *testing.T) { - resetFn := features.InitWithReset(&features.Flags{ - EnableOptionalEngineMethods: true, - }) - defer resetFn() t.Run("empty response works", func(t *testing.T) { srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { w.Header().Set("Content-Type", "application/json") @@ -2509,10 +2469,6 @@ func TestCapella_PayloadBodiesByRange(t *testing.T) { } func Test_ExchangeCapabilities(t *testing.T) { - resetFn := features.InitWithReset(&features.Flags{ - EnableOptionalEngineMethods: true, - }) - defer resetFn() t.Run("empty response works", func(t *testing.T) { srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { w.Header().Set("Content-Type", "application/json") diff --git a/beacon-chain/execution/log.go b/beacon-chain/execution/log.go index 301778f2fa61..1d1840a9e477 100644 --- a/beacon-chain/execution/log.go +++ b/beacon-chain/execution/log.go @@ -2,4 +2,4 @@ package execution import "github.com/sirupsen/logrus" -var log = logrus.WithField("prefix", "powchain") +var log = 
logrus.WithField("prefix", "execution") diff --git a/beacon-chain/forkchoice/doubly-linked-tree/store.go b/beacon-chain/forkchoice/doubly-linked-tree/store.go index 79eb00f9ae0d..da98f1b8cbaf 100644 --- a/beacon-chain/forkchoice/doubly-linked-tree/store.go +++ b/beacon-chain/forkchoice/doubly-linked-tree/store.go @@ -240,6 +240,20 @@ func (f *ForkChoice) HighestReceivedBlockSlot() primitives.Slot { return f.store.highestReceivedNode.slot } +// HighestReceivedBlockSlotDelay returns the number of slots that the highest +// received block was late when receiving it +func (f *ForkChoice) HighestReceivedBlockDelay() primitives.Slot { + n := f.store.highestReceivedNode + if n == nil { + return 0 + } + secs, err := slots.SecondsSinceSlotStart(n.slot, f.store.genesisTime, n.timestamp) + if err != nil { + return 0 + } + return primitives.Slot(secs / params.BeaconConfig().SecondsPerSlot) +} + // ReceivedBlocksLastEpoch returns the number of blocks received in the last epoch func (f *ForkChoice) ReceivedBlocksLastEpoch() (uint64, error) { count := uint64(0) diff --git a/beacon-chain/forkchoice/doubly-linked-tree/store_test.go b/beacon-chain/forkchoice/doubly-linked-tree/store_test.go index b38d3686c4a3..580154a318a7 100644 --- a/beacon-chain/forkchoice/doubly-linked-tree/store_test.go +++ b/beacon-chain/forkchoice/doubly-linked-tree/store_test.go @@ -333,26 +333,29 @@ func TestForkChoice_ReceivedBlocksLastEpoch(t *testing.T) { require.NoError(t, err) require.Equal(t, uint64(1), count) require.Equal(t, primitives.Slot(1), f.HighestReceivedBlockSlot()) + require.Equal(t, primitives.Slot(0), f.HighestReceivedBlockDelay()) // 64 // Received block last epoch is 1 _, err = s.insert(context.Background(), 64, [32]byte{'A'}, b, b, 1, 1) require.NoError(t, err) - s.genesisTime = uint64(time.Now().Add(time.Duration(-64*int64(params.BeaconConfig().SecondsPerSlot)) * time.Second).Unix()) + s.genesisTime = 
uint64(time.Now().Add(time.Duration((-64*int64(params.BeaconConfig().SecondsPerSlot))-1) * time.Second).Unix()) count, err = f.ReceivedBlocksLastEpoch() require.NoError(t, err) require.Equal(t, uint64(1), count) require.Equal(t, primitives.Slot(64), f.HighestReceivedBlockSlot()) + require.Equal(t, primitives.Slot(0), f.HighestReceivedBlockDelay()) // 64 65 // Received block last epoch is 2 _, err = s.insert(context.Background(), 65, [32]byte{'B'}, b, b, 1, 1) require.NoError(t, err) - s.genesisTime = uint64(time.Now().Add(time.Duration(-65*int64(params.BeaconConfig().SecondsPerSlot)) * time.Second).Unix()) + s.genesisTime = uint64(time.Now().Add(time.Duration(-66*int64(params.BeaconConfig().SecondsPerSlot)) * time.Second).Unix()) count, err = f.ReceivedBlocksLastEpoch() require.NoError(t, err) require.Equal(t, uint64(2), count) require.Equal(t, primitives.Slot(65), f.HighestReceivedBlockSlot()) + require.Equal(t, primitives.Slot(1), f.HighestReceivedBlockDelay()) // 64 65 66 // Received block last epoch is 3 diff --git a/beacon-chain/forkchoice/interfaces.go b/beacon-chain/forkchoice/interfaces.go index 8ce0a80504cf..dd4bb65c5701 100644 --- a/beacon-chain/forkchoice/interfaces.go +++ b/beacon-chain/forkchoice/interfaces.go @@ -64,6 +64,7 @@ type FastGetter interface { FinalizedPayloadBlockHash() [32]byte HasNode([32]byte) bool HighestReceivedBlockSlot() primitives.Slot + HighestReceivedBlockDelay() primitives.Slot IsCanonical(root [32]byte) bool IsOptimistic(root [32]byte) (bool, error) IsViableForCheckpoint(*forkchoicetypes.Checkpoint) (bool, error) diff --git a/beacon-chain/forkchoice/ro.go b/beacon-chain/forkchoice/ro.go index 995bcd33e946..6d9caddd2fc0 100644 --- a/beacon-chain/forkchoice/ro.go +++ b/beacon-chain/forkchoice/ro.go @@ -114,6 +114,13 @@ func (ro *ROForkChoice) HighestReceivedBlockSlot() primitives.Slot { return ro.getter.HighestReceivedBlockSlot() } +// HighestReceivedBlockDelay delegates to the underlying forkchoice call, under a lock. 
+func (ro *ROForkChoice) HighestReceivedBlockDelay() primitives.Slot { + ro.l.RLock() + defer ro.l.RUnlock() + return ro.getter.HighestReceivedBlockDelay() +} + // ReceivedBlocksLastEpoch delegates to the underlying forkchoice call, under a lock. func (ro *ROForkChoice) ReceivedBlocksLastEpoch() (uint64, error) { ro.l.RLock() diff --git a/beacon-chain/forkchoice/ro_test.go b/beacon-chain/forkchoice/ro_test.go index 5cbbb34ef4ca..8bf94071e222 100644 --- a/beacon-chain/forkchoice/ro_test.go +++ b/beacon-chain/forkchoice/ro_test.go @@ -29,6 +29,7 @@ const ( unrealizedJustifiedPayloadBlockHashCalled nodeCountCalled highestReceivedBlockSlotCalled + highestReceivedBlockDelayCalled receivedBlocksLastEpochCalled weightCalled isOptimisticCalled @@ -113,6 +114,11 @@ func TestROLocking(t *testing.T) { call: highestReceivedBlockSlotCalled, cb: func(g FastGetter) { g.HighestReceivedBlockSlot() }, }, + { + name: "highestReceivedBlockDelayCalled", + call: highestReceivedBlockDelayCalled, + cb: func(g FastGetter) { g.HighestReceivedBlockDelay() }, + }, { name: "receivedBlocksLastEpochCalled", call: receivedBlocksLastEpochCalled, @@ -245,6 +251,11 @@ func (ro *mockROForkchoice) HighestReceivedBlockSlot() primitives.Slot { return 0 } +func (ro *mockROForkchoice) HighestReceivedBlockDelay() primitives.Slot { + ro.calls = append(ro.calls, highestReceivedBlockDelayCalled) + return 0 +} + func (ro *mockROForkchoice) ReceivedBlocksLastEpoch() (uint64, error) { ro.calls = append(ro.calls, receivedBlocksLastEpochCalled) return 0, nil diff --git a/beacon-chain/gateway/BUILD.bazel b/beacon-chain/gateway/BUILD.bazel index f0bb36f59bd7..5ea81d383e80 100644 --- a/beacon-chain/gateway/BUILD.bazel +++ b/beacon-chain/gateway/BUILD.bazel @@ -6,7 +6,6 @@ go_library( importpath = "github.com/prysmaticlabs/prysm/v4/beacon-chain/gateway", visibility = ["//beacon-chain:__subpackages__"], deps = [ - "//api:go_default_library", "//api/gateway:go_default_library", 
"//cmd/beacon-chain/flags:go_default_library", "//proto/prysm/v1alpha1:go_default_library", diff --git a/beacon-chain/gateway/helpers.go b/beacon-chain/gateway/helpers.go index d8abfe1dfdef..b51777b03c59 100644 --- a/beacon-chain/gateway/helpers.go +++ b/beacon-chain/gateway/helpers.go @@ -2,7 +2,6 @@ package gateway import ( gwruntime "github.com/grpc-ecosystem/grpc-gateway/v2/runtime" - "github.com/prysmaticlabs/prysm/v4/api" "github.com/prysmaticlabs/prysm/v4/api/gateway" "github.com/prysmaticlabs/prysm/v4/cmd/beacon-chain/flags" ethpbalpha "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1" @@ -41,7 +40,7 @@ func DefaultConfig(enableDebugRPCEndpoints bool, httpModules string) MuxConfig { }, }), gwruntime.WithMarshalerOption( - api.EventStreamMediaType, &gwruntime.EventSourceJSONPb{}, + "text/event-stream", &gwruntime.EventSourceJSONPb{}, ), ) v1AlphaPbHandler = &gateway.PbMux{ diff --git a/beacon-chain/node/node.go b/beacon-chain/node/node.go index fd5153172a3d..916481074467 100644 --- a/beacon-chain/node/node.go +++ b/beacon-chain/node/node.go @@ -116,6 +116,7 @@ type BeaconNode struct { initialSyncComplete chan struct{} BlobStorage *filesystem.BlobStorage blobRetentionEpochs primitives.Epoch + verifyInitWaiter *verification.InitializerWaiter } // New creates a new node instance, sets up configuration options, and registers @@ -207,6 +208,7 @@ func New(cliCtx *cli.Context, cancel context.CancelFunc, opts ...Option) (*Beaco if err := beacon.startDB(cliCtx, depositAddress); err != nil { return nil, err } + beacon.BlobStorage.WarmCache() log.Debugln("Starting Slashing DB") if err := beacon.startSlasherDB(cliCtx); err != nil { @@ -228,9 +230,8 @@ func New(cliCtx *cli.Context, cancel context.CancelFunc, opts ...Option) (*Beaco return nil, err } - if err := beacon.BlobStorage.Initialize(); err != nil { - return nil, fmt.Errorf("failed to initialize blob storage: %w", err) - } + beacon.verifyInitWaiter = verification.NewInitializerWaiter( + beacon.clockWaiter, 
forkchoice.NewROForkChoice(beacon.forkChoicer), beacon.stateGen) log.Debugln("Registering P2P Service") if err := beacon.registerP2P(cliCtx); err != nil { @@ -397,7 +398,7 @@ func (b *BeaconNode) startDB(cliCtx *cli.Context, depositAddress string) error { log.WithField("database-path", dbPath).Info("Checking DB") - d, err := kv.NewKVStore(b.ctx, dbPath, kv.WithBlobRetentionEpochs(b.blobRetentionEpochs)) + d, err := kv.NewKVStore(b.ctx, dbPath) if err != nil { return err } @@ -420,7 +421,7 @@ func (b *BeaconNode) startDB(cliCtx *cli.Context, depositAddress string) error { return errors.Wrap(err, "could not clear database") } - d, err = kv.NewKVStore(b.ctx, dbPath, kv.WithBlobRetentionEpochs(b.blobRetentionEpochs)) + d, err = kv.NewKVStore(b.ctx, dbPath) if err != nil { return errors.Wrap(err, "could not create new database") } @@ -742,7 +743,7 @@ func (b *BeaconNode) registerSyncService(initialSyncComplete chan struct{}) erro regularsync.WithInitialSyncComplete(initialSyncComplete), regularsync.WithStateNotifier(b), regularsync.WithBlobStorage(b.BlobStorage), - regularsync.WithVerifierWaiter(verification.NewInitializerWaiter(b.clockWaiter, b.forkChoicer, b.stateGen)), + regularsync.WithVerifierWaiter(b.verifyInitWaiter), ) return b.services.RegisterService(rs) } @@ -753,6 +754,9 @@ func (b *BeaconNode) registerInitialSyncService(complete chan struct{}) error { return err } + opts := []initialsync.Option{ + initialsync.WithVerifierWaiter(b.verifyInitWaiter), + } is := initialsync.NewService(b.ctx, &initialsync.Config{ DB: b.db, Chain: chainService, @@ -762,7 +766,7 @@ func (b *BeaconNode) registerInitialSyncService(complete chan struct{}) error { ClockWaiter: b.clockWaiter, InitialSyncComplete: complete, BlobStorage: b.BlobStorage, - }) + }, opts...) 
return b.services.RegisterService(is) } diff --git a/beacon-chain/operations/attestations/BUILD.bazel b/beacon-chain/operations/attestations/BUILD.bazel index 25eb6af0f6de..4f856751351b 100644 --- a/beacon-chain/operations/attestations/BUILD.bazel +++ b/beacon-chain/operations/attestations/BUILD.bazel @@ -47,7 +47,6 @@ go_test( deps = [ "//async:go_default_library", "//beacon-chain/operations/attestations/kv:go_default_library", - "//config/features:go_default_library", "//config/fieldparams:go_default_library", "//config/params:go_default_library", "//crypto/bls:go_default_library", diff --git a/beacon-chain/operations/attestations/kv/BUILD.bazel b/beacon-chain/operations/attestations/kv/BUILD.bazel index a787ebd6c77c..aab6e082324a 100644 --- a/beacon-chain/operations/attestations/kv/BUILD.bazel +++ b/beacon-chain/operations/attestations/kv/BUILD.bazel @@ -14,7 +14,6 @@ go_library( visibility = ["//beacon-chain:__subpackages__"], deps = [ "//beacon-chain/core/helpers:go_default_library", - "//config/features:go_default_library", "//config/params:go_default_library", "//consensus-types/primitives:go_default_library", "//crypto/hash:go_default_library", @@ -39,7 +38,6 @@ go_test( ], embed = [":go_default_library"], deps = [ - "//config/features:go_default_library", "//config/fieldparams:go_default_library", "//crypto/bls:go_default_library", "//proto/prysm/v1alpha1:go_default_library", diff --git a/beacon-chain/operations/attestations/kv/aggregated.go b/beacon-chain/operations/attestations/kv/aggregated.go index a322b37dd381..97817b246b9f 100644 --- a/beacon-chain/operations/attestations/kv/aggregated.go +++ b/beacon-chain/operations/attestations/kv/aggregated.go @@ -7,7 +7,6 @@ import ( "github.com/pkg/errors" "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/helpers" - "github.com/prysmaticlabs/prysm/v4/config/features" "github.com/prysmaticlabs/prysm/v4/consensus-types/primitives" ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1" attaggregation 
"github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1/attestation/aggregation/attestations" @@ -46,30 +45,7 @@ func (c *AttCaches) aggregateUnaggregatedAtts(ctx context.Context, unaggregatedA // Track the unaggregated attestations that aren't able to aggregate. leftOverUnaggregatedAtt := make(map[[32]byte]bool) - if features.Get().AggregateParallel { - leftOverUnaggregatedAtt = c.aggregateParallel(attsByDataRoot, leftOverUnaggregatedAtt) - } else { - for _, atts := range attsByDataRoot { - aggregated, err := attaggregation.AggregateDisjointOneBitAtts(atts) - if err != nil { - return errors.Wrap(err, "could not aggregate unaggregated attestations") - } - if aggregated == nil { - return errors.New("could not aggregate unaggregated attestations") - } - if helpers.IsAggregated(aggregated) { - if err := c.SaveAggregatedAttestations([]*ethpb.Attestation{aggregated}); err != nil { - return err - } - } else { - h, err := hashFn(aggregated) - if err != nil { - return err - } - leftOverUnaggregatedAtt[h] = true - } - } - } + leftOverUnaggregatedAtt = c.aggregateParallel(attsByDataRoot, leftOverUnaggregatedAtt) // Remove the unaggregated attestations from the pool that were successfully aggregated. 
for _, att := range unaggregatedAtts { diff --git a/beacon-chain/operations/attestations/kv/aggregated_test.go b/beacon-chain/operations/attestations/kv/aggregated_test.go index b83f1303a179..e6ce172e472d 100644 --- a/beacon-chain/operations/attestations/kv/aggregated_test.go +++ b/beacon-chain/operations/attestations/kv/aggregated_test.go @@ -9,7 +9,6 @@ import ( "github.com/pkg/errors" fssz "github.com/prysmaticlabs/fastssz" "github.com/prysmaticlabs/go-bitfield" - "github.com/prysmaticlabs/prysm/v4/config/features" "github.com/prysmaticlabs/prysm/v4/crypto/bls" ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1" "github.com/prysmaticlabs/prysm/v4/testing/assert" @@ -18,11 +17,6 @@ import ( ) func TestKV_Aggregated_AggregateUnaggregatedAttestations(t *testing.T) { - resetFn := features.InitWithReset(&features.Flags{ - AggregateParallel: true, - }) - defer resetFn() - cache := NewAttCaches() priv, err := bls.RandKey() require.NoError(t, err) diff --git a/beacon-chain/operations/attestations/prepare_forkchoice.go b/beacon-chain/operations/attestations/prepare_forkchoice.go index 1fc610cd1888..8f6242efe2dc 100644 --- a/beacon-chain/operations/attestations/prepare_forkchoice.go +++ b/beacon-chain/operations/attestations/prepare_forkchoice.go @@ -42,7 +42,7 @@ func (s *Service) prepareForkChoiceAtts() { switch slotInterval.Interval { case 0: duration := time.Since(t) - log.WithField("Duration", duration).Debug("aggregated unaggregated attestations") + log.WithField("Duration", duration).Debug("Aggregated unaggregated attestations") batchForkChoiceAttsT1.Observe(float64(duration.Milliseconds())) case 1: batchForkChoiceAttsT2.Observe(float64(time.Since(t).Milliseconds())) diff --git a/beacon-chain/operations/attestations/prepare_forkchoice_test.go b/beacon-chain/operations/attestations/prepare_forkchoice_test.go index 174947fc8849..eef9df5b4355 100644 --- a/beacon-chain/operations/attestations/prepare_forkchoice_test.go +++ 
b/beacon-chain/operations/attestations/prepare_forkchoice_test.go @@ -7,7 +7,6 @@ import ( "testing" "github.com/prysmaticlabs/go-bitfield" - "github.com/prysmaticlabs/prysm/v4/config/features" "github.com/prysmaticlabs/prysm/v4/crypto/bls" ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1" attaggregation "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1/attestation/aggregation/attestations" @@ -18,11 +17,6 @@ import ( ) func TestBatchAttestations_Multiple(t *testing.T) { - resetFn := features.InitWithReset(&features.Flags{ - AggregateParallel: true, - }) - defer resetFn() - s, err := NewService(context.Background(), &Config{Pool: NewPool()}) require.NoError(t, err) diff --git a/beacon-chain/rpc/core/BUILD.bazel b/beacon-chain/rpc/core/BUILD.bazel index cdca729eb62f..f13da5ab56a7 100644 --- a/beacon-chain/rpc/core/BUILD.bazel +++ b/beacon-chain/rpc/core/BUILD.bazel @@ -38,6 +38,7 @@ go_library( "//time/slots:go_default_library", "@com_github_pkg_errors//:go_default_library", "@com_github_sirupsen_logrus//:go_default_library", + "@io_opencensus_go//trace:go_default_library", "@org_golang_google_grpc//codes:go_default_library", "@org_golang_x_sync//errgroup:go_default_library", ], diff --git a/beacon-chain/rpc/core/service.go b/beacon-chain/rpc/core/service.go index aba524899890..5a9facb4b4bc 100644 --- a/beacon-chain/rpc/core/service.go +++ b/beacon-chain/rpc/core/service.go @@ -11,14 +11,15 @@ import ( ) type Service struct { - HeadFetcher blockchain.HeadFetcher - FinalizedFetcher blockchain.FinalizationFetcher - GenesisTimeFetcher blockchain.TimeFetcher - SyncChecker sync.Checker - Broadcaster p2p.Broadcaster - SyncCommitteePool synccommittee.Pool - OperationNotifier opfeed.Notifier - AttestationCache *cache.AttestationCache - StateGen stategen.StateManager - P2P p2p.Broadcaster + HeadFetcher blockchain.HeadFetcher + FinalizedFetcher blockchain.FinalizationFetcher + GenesisTimeFetcher blockchain.TimeFetcher + SyncChecker sync.Checker + 
Broadcaster p2p.Broadcaster + SyncCommitteePool synccommittee.Pool + OperationNotifier opfeed.Notifier + AttestationCache *cache.AttestationCache + StateGen stategen.StateManager + P2P p2p.Broadcaster + OptimisticModeFetcher blockchain.OptimisticModeFetcher } diff --git a/beacon-chain/rpc/core/validator.go b/beacon-chain/rpc/core/validator.go index 12b39bd1479e..8f966387de6d 100644 --- a/beacon-chain/rpc/core/validator.go +++ b/beacon-chain/rpc/core/validator.go @@ -29,9 +29,12 @@ import ( prysmTime "github.com/prysmaticlabs/prysm/v4/time" "github.com/prysmaticlabs/prysm/v4/time/slots" "github.com/sirupsen/logrus" + "go.opencensus.io/trace" "golang.org/x/sync/errgroup" ) +var errOptimisticMode = errors.New("the node is currently optimistic and cannot serve validators") + // AggregateBroadcastFailedError represents an error scenario where // broadcasting an aggregate selection proof failed. type AggregateBroadcastFailedError struct { @@ -56,6 +59,9 @@ func (s *Service) ComputeValidatorPerformance( ctx context.Context, req *ethpb.ValidatorPerformanceRequest, ) (*ethpb.ValidatorPerformanceResponse, *RpcError) { + ctx, span := trace.StartSpan(ctx, "coreService.ComputeValidatorPerformance") + defer span.End() + if s.SyncChecker.Syncing() { return nil, &RpcError{Reason: Unavailable, Err: errors.New("Syncing to latest head, not ready to respond")} } @@ -211,6 +217,9 @@ func (s *Service) SubmitSignedContributionAndProof( ctx context.Context, req *ethpb.SignedContributionAndProof, ) *RpcError { + ctx, span := trace.StartSpan(ctx, "coreService.SubmitSignedContributionAndProof") + defer span.End() + errs, ctx := errgroup.WithContext(ctx) // Broadcasting and saving contribution into the pool in parallel. As one fail should not affect another. 
@@ -243,6 +252,9 @@ func (s *Service) SubmitSignedAggregateSelectionProof( ctx context.Context, req *ethpb.SignedAggregateSubmitRequest, ) *RpcError { + ctx, span := trace.StartSpan(ctx, "coreService.SubmitSignedAggregateSelectionProof") + defer span.End() + if req.SignedAggregateAndProof == nil || req.SignedAggregateAndProof.Message == nil || req.SignedAggregateAndProof.Message.Aggregate == nil || req.SignedAggregateAndProof.Message.Aggregate.Data == nil { return &RpcError{Err: errors.New("signed aggregate request can't be nil"), Reason: BadRequest} @@ -315,6 +327,9 @@ func (s *Service) AggregatedSigAndAggregationBits( func (s *Service) GetAttestationData( ctx context.Context, req *ethpb.AttestationDataRequest, ) (*ethpb.AttestationData, *RpcError) { + ctx, span := trace.StartSpan(ctx, "coreService.GetAttestationData") + defer span.End() + if req.Slot != s.GenesisTimeFetcher.CurrentSlot() { return nil, &RpcError{Reason: BadRequest, Err: errors.Errorf("invalid request: slot %d is not the current slot %d", req.Slot, s.GenesisTimeFetcher.CurrentSlot())} } @@ -368,6 +383,14 @@ func (s *Service) GetAttestationData( }, }, nil } + // cache miss, we need to check for optimistic status before proceeding + optimistic, err := s.OptimisticModeFetcher.IsOptimistic(ctx) + if err != nil { + return nil, &RpcError{Reason: Internal, Err: err} + } + if optimistic { + return nil, &RpcError{Reason: Unavailable, Err: errOptimisticMode} + } headRoot, err := s.HeadFetcher.HeadRoot(ctx) if err != nil { @@ -412,6 +435,9 @@ func (s *Service) GetAttestationData( // SubmitSyncMessage submits the sync committee message to the network. // It also saves the sync committee message into the pending pool for block inclusion. 
func (s *Service) SubmitSyncMessage(ctx context.Context, msg *ethpb.SyncCommitteeMessage) *RpcError { + ctx, span := trace.StartSpan(ctx, "coreService.SubmitSyncMessage") + defer span.End() + errs, ctx := errgroup.WithContext(ctx) headSyncCommitteeIndices, err := s.HeadFetcher.HeadSyncCommitteeIndices(ctx, msg.ValidatorIndex, msg.Slot) diff --git a/beacon-chain/rpc/eth/beacon/handlers_pool.go b/beacon-chain/rpc/eth/beacon/handlers_pool.go index dd5776da6046..9bd254e41434 100644 --- a/beacon-chain/rpc/eth/beacon/handlers_pool.go +++ b/beacon-chain/rpc/eth/beacon/handlers_pool.go @@ -501,6 +501,13 @@ func (s *Server) SubmitAttesterSlashing(w http.ResponseWriter, r *http.Request) httputil.HandleError(w, "Could not insert attester slashing into pool: "+err.Error(), http.StatusInternalServerError) return } + // notify events + s.OperationNotifier.OperationFeed().Send(&feed.Event{ + Type: operation.AttesterSlashingReceived, + Data: &operation.AttesterSlashingReceivedData{ + AttesterSlashing: slashing, + }, + }) if !features.Get().DisableBroadcastSlashings { if err = s.Broadcaster.Broadcast(ctx, slashing); err != nil { httputil.HandleError(w, "Could not broadcast slashing object: "+err.Error(), http.StatusInternalServerError) @@ -569,6 +576,15 @@ func (s *Server) SubmitProposerSlashing(w http.ResponseWriter, r *http.Request) httputil.HandleError(w, "Could not insert proposer slashing into pool: "+err.Error(), http.StatusInternalServerError) return } + + // notify events + s.OperationNotifier.OperationFeed().Send(&feed.Event{ + Type: operation.ProposerSlashingReceived, + Data: &operation.ProposerSlashingReceivedData{ + ProposerSlashing: slashing, + }, + }) + if !features.Get().DisableBroadcastSlashings { if err = s.Broadcaster.Broadcast(ctx, slashing); err != nil { httputil.HandleError(w, "Could not broadcast slashing object: "+err.Error(), http.StatusInternalServerError) diff --git a/beacon-chain/rpc/eth/beacon/handlers_pool_test.go 
b/beacon-chain/rpc/eth/beacon/handlers_pool_test.go index 0ec7840abd3b..7e8c688c19cd 100644 --- a/beacon-chain/rpc/eth/beacon/handlers_pool_test.go +++ b/beacon-chain/rpc/eth/beacon/handlers_pool_test.go @@ -1205,10 +1205,12 @@ func TestSubmitAttesterSlashing_Ok(t *testing.T) { } broadcaster := &p2pMock.MockBroadcaster{} + chainmock := &blockchainmock.ChainService{State: bs} s := &Server{ - ChainInfoFetcher: &blockchainmock.ChainService{State: bs}, - SlashingsPool: &slashingsmock.PoolMock{}, - Broadcaster: broadcaster, + ChainInfoFetcher: chainmock, + SlashingsPool: &slashingsmock.PoolMock{}, + Broadcaster: broadcaster, + OperationNotifier: chainmock.OperationNotifier(), } toSubmit := shared.AttesterSlashingsFromConsensus([]*ethpbv1alpha1.AttesterSlashing{slashing}) @@ -1295,10 +1297,12 @@ func TestSubmitAttesterSlashing_AcrossFork(t *testing.T) { } broadcaster := &p2pMock.MockBroadcaster{} + chainmock := &blockchainmock.ChainService{State: bs} s := &Server{ - ChainInfoFetcher: &blockchainmock.ChainService{State: bs}, - SlashingsPool: &slashingsmock.PoolMock{}, - Broadcaster: broadcaster, + ChainInfoFetcher: chainmock, + SlashingsPool: &slashingsmock.PoolMock{}, + Broadcaster: broadcaster, + OperationNotifier: chainmock.OperationNotifier(), } toSubmit := shared.AttesterSlashingsFromConsensus([]*ethpbv1alpha1.AttesterSlashing{slashing}) @@ -1404,10 +1408,12 @@ func TestSubmitProposerSlashing_Ok(t *testing.T) { } broadcaster := &p2pMock.MockBroadcaster{} + chainmock := &blockchainmock.ChainService{State: bs} s := &Server{ - ChainInfoFetcher: &blockchainmock.ChainService{State: bs}, - SlashingsPool: &slashingsmock.PoolMock{}, - Broadcaster: broadcaster, + ChainInfoFetcher: chainmock, + SlashingsPool: &slashingsmock.PoolMock{}, + Broadcaster: broadcaster, + OperationNotifier: chainmock.OperationNotifier(), } toSubmit := shared.ProposerSlashingsFromConsensus([]*ethpbv1alpha1.ProposerSlashing{slashing}) @@ -1486,10 +1492,12 @@ func TestSubmitProposerSlashing_AcrossFork(t 
*testing.T) { } broadcaster := &p2pMock.MockBroadcaster{} + chainmock := &blockchainmock.ChainService{State: bs} s := &Server{ - ChainInfoFetcher: &blockchainmock.ChainService{State: bs}, - SlashingsPool: &slashingsmock.PoolMock{}, - Broadcaster: broadcaster, + ChainInfoFetcher: chainmock, + SlashingsPool: &slashingsmock.PoolMock{}, + Broadcaster: broadcaster, + OperationNotifier: chainmock.OperationNotifier(), } toSubmit := shared.ProposerSlashingsFromConsensus([]*ethpbv1alpha1.ProposerSlashing{slashing}) diff --git a/beacon-chain/rpc/eth/events/BUILD.bazel b/beacon-chain/rpc/eth/events/BUILD.bazel index 535f71045c35..449cce79d4e3 100644 --- a/beacon-chain/rpc/eth/events/BUILD.bazel +++ b/beacon-chain/rpc/eth/events/BUILD.bazel @@ -8,9 +8,8 @@ go_library( "structs.go", ], importpath = "github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/eth/events", - visibility = ["//visibility:public"], + visibility = ["//beacon-chain:__subpackages__"], deps = [ - "//api:go_default_library", "//beacon-chain/blockchain:go_default_library", "//beacon-chain/core/feed:go_default_library", "//beacon-chain/core/feed/operation:go_default_library", @@ -19,9 +18,9 @@ go_library( "//beacon-chain/core/time:go_default_library", "//beacon-chain/core/transition:go_default_library", "//beacon-chain/rpc/eth/shared:go_default_library", - "//config/params:go_default_library", "//network/httputil:go_default_library", "//proto/eth/v1:go_default_library", + "//proto/eth/v2:go_default_library", "//runtime/version:go_default_library", "//time/slots:go_default_library", "@com_github_ethereum_go_ethereum//common/hexutil:go_default_library", @@ -40,6 +39,7 @@ go_test( "//beacon-chain/core/feed/operation:go_default_library", "//beacon-chain/core/feed/state:go_default_library", "//beacon-chain/state:go_default_library", + "//config/fieldparams:go_default_library", "//consensus-types/blocks:go_default_library", "//consensus-types/interfaces:go_default_library", 
"//consensus-types/primitives:go_default_library", diff --git a/beacon-chain/rpc/eth/events/events.go b/beacon-chain/rpc/eth/events/events.go index bcd85c3f9483..dc5c3c98a97f 100644 --- a/beacon-chain/rpc/eth/events/events.go +++ b/beacon-chain/rpc/eth/events/events.go @@ -5,10 +5,11 @@ import ( "encoding/json" "fmt" "net/http" - time2 "time" "github.com/ethereum/go-ethereum/common/hexutil" - "github.com/prysmaticlabs/prysm/v4/api" + log "github.com/sirupsen/logrus" + "go.opencensus.io/trace" + "github.com/prysmaticlabs/prysm/v4/beacon-chain/blockchain" "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed" "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed/operation" @@ -17,13 +18,11 @@ import ( "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/time" "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/transition" "github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/eth/shared" - "github.com/prysmaticlabs/prysm/v4/config/params" "github.com/prysmaticlabs/prysm/v4/network/httputil" ethpb "github.com/prysmaticlabs/prysm/v4/proto/eth/v1" + ethpbv2 "github.com/prysmaticlabs/prysm/v4/proto/eth/v2" "github.com/prysmaticlabs/prysm/v4/runtime/version" "github.com/prysmaticlabs/prysm/v4/time/slots" - log "github.com/sirupsen/logrus" - "go.opencensus.io/trace" ) const ( @@ -47,6 +46,14 @@ const ( PayloadAttributesTopic = "payload_attributes" // BlobSidecarTopic represents a new blob sidecar event topic BlobSidecarTopic = "blob_sidecar" + // ProposerSlashingTopic represents a new proposer slashing event topic + ProposerSlashingTopic = "proposer_slashing" + // AttesterSlashingTopic represents a new attester slashing event topic + AttesterSlashingTopic = "attester_slashing" + // LightClientFinalityUpdateTopic represents a new light client finality update event topic. + LightClientFinalityUpdateTopic = "light_client_finality_update" + // LightClientOptimisticUpdateTopic represents a new light client optimistic update event topic. 
+ LightClientOptimisticUpdateTopic = "light_client_optimistic_update" ) const topicDataMismatch = "Event data type %T does not correspond to event topic %s" @@ -54,16 +61,20 @@ const topicDataMismatch = "Event data type %T does not correspond to event topic const chanBuffer = 1000 var casesHandled = map[string]bool{ - HeadTopic: true, - BlockTopic: true, - AttestationTopic: true, - VoluntaryExitTopic: true, - FinalizedCheckpointTopic: true, - ChainReorgTopic: true, - SyncCommitteeContributionTopic: true, - BLSToExecutionChangeTopic: true, - PayloadAttributesTopic: true, - BlobSidecarTopic: true, + HeadTopic: true, + BlockTopic: true, + AttestationTopic: true, + VoluntaryExitTopic: true, + FinalizedCheckpointTopic: true, + ChainReorgTopic: true, + SyncCommitteeContributionTopic: true, + BLSToExecutionChangeTopic: true, + PayloadAttributesTopic: true, + BlobSidecarTopic: true, + ProposerSlashingTopic: true, + AttesterSlashingTopic: true, + LightClientFinalityUpdateTopic: true, + LightClientOptimisticUpdateTopic: true, } // StreamEvents provides an endpoint to subscribe to the beacon node Server-Sent-Events stream. @@ -103,24 +114,16 @@ func (s *Server) StreamEvents(w http.ResponseWriter, r *http.Request) { defer stateSub.Unsubscribe() // Set up SSE response headers - w.Header().Set("Content-Type", api.EventStreamMediaType) + w.Header().Set("Content-Type", "text/event-stream") w.Header().Set("Connection", "keep-alive") // Handle each event received and context cancellation. - // We send a keepalive dummy message immediately to prevent clients - // stalling while waiting for the first response chunk. - // After that we send a keepalive dummy message every SECONDS_PER_SLOT - // to prevent anyone (e.g. proxy servers) from closing connections. 
- sendKeepalive(w, flusher) - keepaliveTicker := time2.NewTicker(time2.Duration(params.BeaconConfig().SecondsPerSlot) * time2.Second) for { select { case event := <-opsChan: handleBlockOperationEvents(w, flusher, topicsMap, event) case event := <-stateChan: s.handleStateEvents(ctx, w, flusher, topicsMap, event) - case <-keepaliveTicker.C: - sendKeepalive(w, flusher) case <-ctx.Done(): return } @@ -201,6 +204,26 @@ func handleBlockOperationEvents(w http.ResponseWriter, flusher http.Flusher, req KzgCommitment: hexutil.Encode(blobData.Blob.KzgCommitment), } send(w, flusher, BlobSidecarTopic, blobEvent) + case operation.AttesterSlashingReceived: + if _, ok := requestedTopics[AttesterSlashingTopic]; !ok { + return + } + attesterSlashingData, ok := event.Data.(*operation.AttesterSlashingReceivedData) + if !ok { + write(w, flusher, topicDataMismatch, event.Data, AttesterSlashingTopic) + return + } + send(w, flusher, AttesterSlashingTopic, shared.AttesterSlashingFromConsensus(attesterSlashingData.AttesterSlashing)) + case operation.ProposerSlashingReceived: + if _, ok := requestedTopics[ProposerSlashingTopic]; !ok { + return + } + proposerSlashingData, ok := event.Data.(*operation.ProposerSlashingReceivedData) + if !ok { + write(w, flusher, topicDataMismatch, event.Data, ProposerSlashingTopic) + return + } + send(w, flusher, ProposerSlashingTopic, shared.ProposerSlashingFromConsensus(proposerSlashingData.ProposerSlashing)) } } @@ -247,6 +270,72 @@ func (s *Server) handleStateEvents(ctx context.Context, w http.ResponseWriter, f ExecutionOptimistic: checkpointData.ExecutionOptimistic, } send(w, flusher, FinalizedCheckpointTopic, checkpoint) + case statefeed.LightClientFinalityUpdate: + if _, ok := requestedTopics[LightClientFinalityUpdateTopic]; !ok { + return + } + updateData, ok := event.Data.(*ethpbv2.LightClientFinalityUpdateWithVersion) + if !ok { + write(w, flusher, topicDataMismatch, event.Data, LightClientFinalityUpdateTopic) + return + } + + var finalityBranch 
[]string + for _, b := range updateData.Data.FinalityBranch { + finalityBranch = append(finalityBranch, hexutil.Encode(b)) + } + update := &LightClientFinalityUpdateEvent{ + Version: version.String(int(updateData.Version)), + Data: &LightClientFinalityUpdate{ + AttestedHeader: &shared.BeaconBlockHeader{ + Slot: fmt.Sprintf("%d", updateData.Data.AttestedHeader.Slot), + ProposerIndex: fmt.Sprintf("%d", updateData.Data.AttestedHeader.ProposerIndex), + ParentRoot: hexutil.Encode(updateData.Data.AttestedHeader.ParentRoot), + StateRoot: hexutil.Encode(updateData.Data.AttestedHeader.StateRoot), + BodyRoot: hexutil.Encode(updateData.Data.AttestedHeader.BodyRoot), + }, + FinalizedHeader: &shared.BeaconBlockHeader{ + Slot: fmt.Sprintf("%d", updateData.Data.FinalizedHeader.Slot), + ProposerIndex: fmt.Sprintf("%d", updateData.Data.FinalizedHeader.ProposerIndex), + ParentRoot: hexutil.Encode(updateData.Data.FinalizedHeader.ParentRoot), + StateRoot: hexutil.Encode(updateData.Data.FinalizedHeader.StateRoot), + }, + FinalityBranch: finalityBranch, + SyncAggregate: &shared.SyncAggregate{ + SyncCommitteeBits: hexutil.Encode(updateData.Data.SyncAggregate.SyncCommitteeBits), + SyncCommitteeSignature: hexutil.Encode(updateData.Data.SyncAggregate.SyncCommitteeSignature), + }, + SignatureSlot: fmt.Sprintf("%d", updateData.Data.SignatureSlot), + }, + } + send(w, flusher, LightClientFinalityUpdateTopic, update) + case statefeed.LightClientOptimisticUpdate: + if _, ok := requestedTopics[LightClientOptimisticUpdateTopic]; !ok { + return + } + updateData, ok := event.Data.(*ethpbv2.LightClientOptimisticUpdateWithVersion) + if !ok { + write(w, flusher, topicDataMismatch, event.Data, LightClientOptimisticUpdateTopic) + return + } + update := &LightClientOptimisticUpdateEvent{ + Version: version.String(int(updateData.Version)), + Data: &LightClientOptimisticUpdate{ + AttestedHeader: &shared.BeaconBlockHeader{ + Slot: fmt.Sprintf("%d", updateData.Data.AttestedHeader.Slot), + ProposerIndex: 
fmt.Sprintf("%d", updateData.Data.AttestedHeader.ProposerIndex), + ParentRoot: hexutil.Encode(updateData.Data.AttestedHeader.ParentRoot), + StateRoot: hexutil.Encode(updateData.Data.AttestedHeader.StateRoot), + BodyRoot: hexutil.Encode(updateData.Data.AttestedHeader.BodyRoot), + }, + SyncAggregate: &shared.SyncAggregate{ + SyncCommitteeBits: hexutil.Encode(updateData.Data.SyncAggregate.SyncCommitteeBits), + SyncCommitteeSignature: hexutil.Encode(updateData.Data.SyncAggregate.SyncCommitteeSignature), + }, + SignatureSlot: fmt.Sprintf("%d", updateData.Data.SignatureSlot), + }, + } + send(w, flusher, LightClientOptimisticUpdateTopic, update) case statefeed.Reorg: if _, ok := requestedTopics[ChainReorgTopic]; !ok { return @@ -416,10 +505,6 @@ func send(w http.ResponseWriter, flusher http.Flusher, name string, data interfa write(w, flusher, "event: %s\ndata: %s\n\n", name, string(j)) } -func sendKeepalive(w http.ResponseWriter, flusher http.Flusher) { - write(w, flusher, ":\n\n") -} - func write(w http.ResponseWriter, flusher http.Flusher, format string, a ...any) { _, err := fmt.Fprintf(w, format, a...) 
if err != nil { diff --git a/beacon-chain/rpc/eth/events/events_test.go b/beacon-chain/rpc/eth/events/events_test.go index a06838944e53..1acb9244560e 100644 --- a/beacon-chain/rpc/eth/events/events_test.go +++ b/beacon-chain/rpc/eth/events/events_test.go @@ -15,6 +15,7 @@ import ( "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed/operation" statefeed "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed/state" "github.com/prysmaticlabs/prysm/v4/beacon-chain/state" + fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams" "github.com/prysmaticlabs/prysm/v4/consensus-types/blocks" "github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces" "github.com/prysmaticlabs/prysm/v4/consensus-types/primitives" @@ -41,7 +42,15 @@ func TestStreamEvents_OperationsEvents(t *testing.T) { OperationNotifier: &mockChain.MockOperationNotifier{}, } - topics := []string{AttestationTopic, VoluntaryExitTopic, SyncCommitteeContributionTopic, BLSToExecutionChangeTopic, BlobSidecarTopic} + topics := []string{ + AttestationTopic, + VoluntaryExitTopic, + SyncCommitteeContributionTopic, + BLSToExecutionChangeTopic, + BlobSidecarTopic, + AttesterSlashingTopic, + ProposerSlashingTopic, + } for i, topic := range topics { topics[i] = "topics=" + topic } @@ -124,6 +133,65 @@ func TestStreamEvents_OperationsEvents(t *testing.T) { Blob: &vblob, }, }) + + s.OperationNotifier.OperationFeed().Send(&feed.Event{ + Type: operation.AttesterSlashingReceived, + Data: &operation.AttesterSlashingReceivedData{ + AttesterSlashing: ð.AttesterSlashing{ + Attestation_1: ð.IndexedAttestation{ + AttestingIndices: []uint64{0, 1}, + Data: ð.AttestationData{ + BeaconBlockRoot: make([]byte, fieldparams.RootLength), + Source: ð.Checkpoint{ + Root: make([]byte, fieldparams.RootLength), + }, + Target: ð.Checkpoint{ + Root: make([]byte, fieldparams.RootLength), + }, + }, + Signature: make([]byte, fieldparams.BLSSignatureLength), + }, + Attestation_2: ð.IndexedAttestation{ + AttestingIndices: 
[]uint64{0, 1}, + Data: ð.AttestationData{ + BeaconBlockRoot: make([]byte, fieldparams.RootLength), + Source: ð.Checkpoint{ + Root: make([]byte, fieldparams.RootLength), + }, + Target: ð.Checkpoint{ + Root: make([]byte, fieldparams.RootLength), + }, + }, + Signature: make([]byte, fieldparams.BLSSignatureLength), + }, + }, + }, + }) + + s.OperationNotifier.OperationFeed().Send(&feed.Event{ + Type: operation.ProposerSlashingReceived, + Data: &operation.ProposerSlashingReceivedData{ + ProposerSlashing: ð.ProposerSlashing{ + Header_1: ð.SignedBeaconBlockHeader{ + Header: ð.BeaconBlockHeader{ + ParentRoot: make([]byte, fieldparams.RootLength), + StateRoot: make([]byte, fieldparams.RootLength), + BodyRoot: make([]byte, fieldparams.RootLength), + }, + Signature: make([]byte, fieldparams.BLSSignatureLength), + }, + Header_2: ð.SignedBeaconBlockHeader{ + Header: ð.BeaconBlockHeader{ + ParentRoot: make([]byte, fieldparams.RootLength), + StateRoot: make([]byte, fieldparams.RootLength), + BodyRoot: make([]byte, fieldparams.RootLength), + }, + Signature: make([]byte, fieldparams.BLSSignatureLength), + }, + }, + }, + }) + time.Sleep(1 * time.Second) request.Context().Done() @@ -307,9 +375,7 @@ func TestStreamEvents_OperationsEvents(t *testing.T) { }) } -const operationsResult = `: - -event: attestation +const operationsResult = `event: attestation data: {"aggregation_bits":"0x00","data":{"slot":"0","index":"0","beacon_block_root":"0x0000000000000000000000000000000000000000000000000000000000000000","source":{"epoch":"0","root":"0x0000000000000000000000000000000000000000000000000000000000000000"},"target":{"epoch":"0","root":"0x0000000000000000000000000000000000000000000000000000000000000000"}},"signature":"0x000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000"} event: attestation @@ -327,11 +393,15 @@ data: 
{"message":{"validator_index":"0","from_bls_pubkey":"0x0000000000000000000 event: blob_sidecar data: {"block_root":"0xc78009fdf07fc56a11f122370658a353aaa542ed63e44c4bc15ff4cd105ab33c","index":"0","slot":"0","kzg_commitment":"0x000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000","versioned_hash":"0x01b0761f87b081d5cf10757ccc89f12be355c70e2e29df288b65b30710dcbcd1"} -` +event: attester_slashing +data: {"attestation_1":{"attesting_indices":["0","1"],"data":{"slot":"0","index":"0","beacon_block_root":"0x0000000000000000000000000000000000000000000000000000000000000000","source":{"epoch":"0","root":"0x0000000000000000000000000000000000000000000000000000000000000000"},"target":{"epoch":"0","root":"0x0000000000000000000000000000000000000000000000000000000000000000"}},"signature":"0x000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000"},"attestation_2":{"attesting_indices":["0","1"],"data":{"slot":"0","index":"0","beacon_block_root":"0x0000000000000000000000000000000000000000000000000000000000000000","source":{"epoch":"0","root":"0x0000000000000000000000000000000000000000000000000000000000000000"},"target":{"epoch":"0","root":"0x0000000000000000000000000000000000000000000000000000000000000000"}},"signature":"0x000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000"}} -const stateResult = `: +event: proposer_slashing +data: 
{"signed_header_1":{"message":{"slot":"0","proposer_index":"0","parent_root":"0x0000000000000000000000000000000000000000000000000000000000000000","state_root":"0x0000000000000000000000000000000000000000000000000000000000000000","body_root":"0x0000000000000000000000000000000000000000000000000000000000000000"},"signature":"0x000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000"},"signed_header_2":{"message":{"slot":"0","proposer_index":"0","parent_root":"0x0000000000000000000000000000000000000000000000000000000000000000","state_root":"0x0000000000000000000000000000000000000000000000000000000000000000","body_root":"0x0000000000000000000000000000000000000000000000000000000000000000"},"signature":"0x000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000"}} + +` -event: head +const stateResult = `event: head data: {"slot":"0","block":"0x0000000000000000000000000000000000000000000000000000000000000000","state":"0x0000000000000000000000000000000000000000000000000000000000000000","epoch_transition":true,"execution_optimistic":false,"previous_duty_dependent_root":"0x0000000000000000000000000000000000000000000000000000000000000000","current_duty_dependent_root":"0x0000000000000000000000000000000000000000000000000000000000000000"} event: finalized_checkpoint @@ -345,23 +415,17 @@ data: {"slot":"0","block":"0xeade62f0457b2fdf48e7d3fc4b60736688286be7c7a3ac4c9a1 ` -const payloadAttributesBellatrixResult = `: - -event: payload_attributes +const payloadAttributesBellatrixResult = `event: payload_attributes data: 
{"version":"bellatrix","data":{"proposer_index":"0","proposal_slot":"1","parent_block_number":"0","parent_block_root":"0x0000000000000000000000000000000000000000000000000000000000000000","parent_block_hash":"0x0000000000000000000000000000000000000000000000000000000000000000","payload_attributes":{"timestamp":"12","prev_randao":"0x0000000000000000000000000000000000000000000000000000000000000000","suggested_fee_recipient":"0x0000000000000000000000000000000000000000"}}} ` -const payloadAttributesCapellaResult = `: - -event: payload_attributes +const payloadAttributesCapellaResult = `event: payload_attributes data: {"version":"capella","data":{"proposer_index":"0","proposal_slot":"1","parent_block_number":"0","parent_block_root":"0x0000000000000000000000000000000000000000000000000000000000000000","parent_block_hash":"0x0000000000000000000000000000000000000000000000000000000000000000","payload_attributes":{"timestamp":"12","prev_randao":"0x0000000000000000000000000000000000000000000000000000000000000000","suggested_fee_recipient":"0x0000000000000000000000000000000000000000","withdrawals":[]}}} ` -const payloadAttributesDenebResult = `: - -event: payload_attributes +const payloadAttributesDenebResult = `event: payload_attributes data: {"version":"deneb","data":{"proposer_index":"0","proposal_slot":"1","parent_block_number":"0","parent_block_root":"0x0000000000000000000000000000000000000000000000000000000000000000","parent_block_hash":"0x0000000000000000000000000000000000000000000000000000000000000000","payload_attributes":{"timestamp":"12","prev_randao":"0x0000000000000000000000000000000000000000000000000000000000000000","suggested_fee_recipient":"0x0000000000000000000000000000000000000000","withdrawals":[],"parent_beacon_block_root":"0xbef96cb938fd48b2403d3e662664325abb0102ed12737cbb80d717520e50cf4a"}}} ` diff --git a/beacon-chain/rpc/eth/events/structs.go b/beacon-chain/rpc/eth/events/structs.go index 81e10edd41c2..e66327d26efa 100644 --- 
a/beacon-chain/rpc/eth/events/structs.go +++ b/beacon-chain/rpc/eth/events/structs.go @@ -92,3 +92,27 @@ type BlobSidecarEvent struct { KzgCommitment string `json:"kzg_commitment"` VersionedHash string `json:"versioned_hash"` } + +type LightClientFinalityUpdateEvent struct { + Version string `json:"version"` + Data *LightClientFinalityUpdate `json:"data"` +} + +type LightClientFinalityUpdate struct { + AttestedHeader *shared.BeaconBlockHeader `json:"attested_header"` + FinalizedHeader *shared.BeaconBlockHeader `json:"finalized_header"` + FinalityBranch []string `json:"finality_branch"` + SyncAggregate *shared.SyncAggregate `json:"sync_aggregate"` + SignatureSlot string `json:"signature_slot"` +} + +type LightClientOptimisticUpdateEvent struct { + Version string `json:"version"` + Data *LightClientOptimisticUpdate `json:"data"` +} + +type LightClientOptimisticUpdate struct { + AttestedHeader *shared.BeaconBlockHeader `json:"attested_header"` + SyncAggregate *shared.SyncAggregate `json:"sync_aggregate"` + SignatureSlot string `json:"signature_slot"` +} diff --git a/beacon-chain/rpc/eth/light-client/handlers.go b/beacon-chain/rpc/eth/light-client/handlers.go index 0bee138cc993..30beb9318ac7 100644 --- a/beacon-chain/rpc/eth/light-client/handlers.go +++ b/beacon-chain/rpc/eth/light-client/handlers.go @@ -8,9 +8,10 @@ import ( "github.com/ethereum/go-ethereum/common/hexutil" "github.com/gorilla/mux" - "github.com/wealdtech/go-bytesutil" "go.opencensus.io/trace" + "github.com/wealdtech/go-bytesutil" + "github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/eth/shared" "github.com/prysmaticlabs/prysm/v4/beacon-chain/state" "github.com/prysmaticlabs/prysm/v4/config/params" @@ -219,11 +220,7 @@ func (s *Server) GetLightClientUpdatesByRange(w http.ResponseWriter, req *http.R return } - response := &LightClientUpdatesByRangeResponse{ - Updates: updates, - } - - httputil.WriteJson(w, response) + httputil.WriteJson(w, updates) } // GetLightClientFinalityUpdate - implements 
https://github.com/ethereum/beacon-APIs/blob/263f4ed6c263c967f13279c7a9f5629b51c5fc55/apis/beacon/light_client/finality_update.yaml diff --git a/beacon-chain/rpc/eth/light-client/handlers_test.go b/beacon-chain/rpc/eth/light-client/handlers_test.go index 9ce75cf8172e..c66e50bb8a02 100644 --- a/beacon-chain/rpc/eth/light-client/handlers_test.go +++ b/beacon-chain/rpc/eth/light-client/handlers_test.go @@ -171,12 +171,12 @@ func TestLightClientHandler_GetLightClientUpdatesByRange(t *testing.T) { s.GetLightClientUpdatesByRange(writer, request) require.Equal(t, http.StatusOK, writer.Code) - resp := &LightClientUpdatesByRangeResponse{} - require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp)) - require.Equal(t, 1, len(resp.Updates)) - require.Equal(t, "capella", resp.Updates[0].Version) - require.Equal(t, hexutil.Encode(attestedHeader.BodyRoot), resp.Updates[0].Data.AttestedHeader.BodyRoot) - require.NotNil(t, resp.Updates) + var resp []LightClientUpdateWithVersion + require.NoError(t, json.Unmarshal(writer.Body.Bytes(), &resp)) + require.Equal(t, 1, len(resp)) + require.Equal(t, "capella", resp[0].Version) + require.Equal(t, hexutil.Encode(attestedHeader.BodyRoot), resp[0].Data.AttestedHeader.BodyRoot) + require.NotNil(t, resp) } func TestLightClientHandler_GetLightClientUpdatesByRange_TooBigInputCount(t *testing.T) { @@ -274,12 +274,12 @@ func TestLightClientHandler_GetLightClientUpdatesByRange_TooBigInputCount(t *tes s.GetLightClientUpdatesByRange(writer, request) require.Equal(t, http.StatusOK, writer.Code) - resp := &LightClientUpdatesByRangeResponse{} - require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp)) - require.Equal(t, 1, len(resp.Updates)) // Even with big count input, the response is still the max available period, which is 1 in test case. 
- require.Equal(t, "capella", resp.Updates[0].Version) - require.Equal(t, hexutil.Encode(attestedHeader.BodyRoot), resp.Updates[0].Data.AttestedHeader.BodyRoot) - require.NotNil(t, resp.Updates) + var resp []LightClientUpdateWithVersion + require.NoError(t, json.Unmarshal(writer.Body.Bytes(), &resp)) + require.Equal(t, 1, len(resp)) // Even with big count input, the response is still the max available period, which is 1 in test case. + require.Equal(t, "capella", resp[0].Version) + require.Equal(t, hexutil.Encode(attestedHeader.BodyRoot), resp[0].Data.AttestedHeader.BodyRoot) + require.NotNil(t, resp) } func TestLightClientHandler_GetLightClientUpdatesByRange_TooEarlyPeriod(t *testing.T) { @@ -377,12 +377,12 @@ func TestLightClientHandler_GetLightClientUpdatesByRange_TooEarlyPeriod(t *testi s.GetLightClientUpdatesByRange(writer, request) require.Equal(t, http.StatusOK, writer.Code) - resp := &LightClientUpdatesByRangeResponse{} - require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp)) - require.Equal(t, 1, len(resp.Updates)) - require.Equal(t, "capella", resp.Updates[0].Version) - require.Equal(t, hexutil.Encode(attestedHeader.BodyRoot), resp.Updates[0].Data.AttestedHeader.BodyRoot) - require.NotNil(t, resp.Updates) + var resp []LightClientUpdateWithVersion + require.NoError(t, json.Unmarshal(writer.Body.Bytes(), &resp)) + require.Equal(t, 1, len(resp)) + require.Equal(t, "capella", resp[0].Version) + require.Equal(t, hexutil.Encode(attestedHeader.BodyRoot), resp[0].Data.AttestedHeader.BodyRoot) + require.NotNil(t, resp) } func TestLightClientHandler_GetLightClientUpdatesByRange_TooBigCount(t *testing.T) { @@ -480,12 +480,12 @@ func TestLightClientHandler_GetLightClientUpdatesByRange_TooBigCount(t *testing. 
s.GetLightClientUpdatesByRange(writer, request) require.Equal(t, http.StatusOK, writer.Code) - resp := &LightClientUpdatesByRangeResponse{} - require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp)) - require.Equal(t, 1, len(resp.Updates)) - require.Equal(t, "capella", resp.Updates[0].Version) - require.Equal(t, hexutil.Encode(attestedHeader.BodyRoot), resp.Updates[0].Data.AttestedHeader.BodyRoot) - require.NotNil(t, resp.Updates) + var resp []LightClientUpdateWithVersion + require.NoError(t, json.Unmarshal(writer.Body.Bytes(), &resp)) + require.Equal(t, 1, len(resp)) + require.Equal(t, "capella", resp[0].Version) + require.Equal(t, hexutil.Encode(attestedHeader.BodyRoot), resp[0].Data.AttestedHeader.BodyRoot) + require.NotNil(t, resp) } func TestLightClientHandler_GetLightClientUpdatesByRange_BeforeAltair(t *testing.T) { @@ -583,9 +583,6 @@ func TestLightClientHandler_GetLightClientUpdatesByRange_BeforeAltair(t *testing s.GetLightClientUpdatesByRange(writer, request) require.Equal(t, http.StatusNotFound, writer.Code) - resp := &LightClientUpdatesByRangeResponse{} - require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp)) - require.Equal(t, 0, len(resp.Updates)) } func TestLightClientHandler_GetLightClientFinalityUpdate(t *testing.T) { diff --git a/beacon-chain/rpc/eth/light-client/helpers.go b/beacon-chain/rpc/eth/light-client/helpers.go index 277fcafcd6b4..98e210654780 100644 --- a/beacon-chain/rpc/eth/light-client/helpers.go +++ b/beacon-chain/rpc/eth/light-client/helpers.go @@ -296,11 +296,16 @@ func newLightClientUpdateToJSON(input *v2.LightClientUpdate) *LightClientUpdate nextSyncCommittee = shared.SyncCommitteeFromConsensus(migration.V2SyncCommitteeToV1Alpha1(input.NextSyncCommittee)) } + var finalizedHeader *shared.BeaconBlockHeader + if input.FinalizedHeader != nil { + finalizedHeader = shared.BeaconBlockHeaderFromConsensus(migration.V1HeaderToV1Alpha1(input.FinalizedHeader)) + } + return &LightClientUpdate{ AttestedHeader: 
shared.BeaconBlockHeaderFromConsensus(migration.V1HeaderToV1Alpha1(input.AttestedHeader)), NextSyncCommittee: nextSyncCommittee, NextSyncCommitteeBranch: branchToJSON(input.NextSyncCommitteeBranch), - FinalizedHeader: shared.BeaconBlockHeaderFromConsensus(migration.V1HeaderToV1Alpha1(input.FinalizedHeader)), + FinalizedHeader: finalizedHeader, FinalityBranch: branchToJSON(input.FinalityBranch), SyncAggregate: syncAggregateToJSON(input.SyncAggregate), SignatureSlot: strconv.FormatUint(uint64(input.SignatureSlot), 10), diff --git a/beacon-chain/rpc/eth/light-client/structs.go b/beacon-chain/rpc/eth/light-client/structs.go index 662262ebdb93..822545505faa 100644 --- a/beacon-chain/rpc/eth/light-client/structs.go +++ b/beacon-chain/rpc/eth/light-client/structs.go @@ -17,11 +17,11 @@ type LightClientBootstrap struct { type LightClientUpdate struct { AttestedHeader *shared.BeaconBlockHeader `json:"attested_header"` - NextSyncCommittee *shared.SyncCommittee `json:"next_sync_committee"` - FinalizedHeader *shared.BeaconBlockHeader `json:"finalized_header"` + NextSyncCommittee *shared.SyncCommittee `json:"next_sync_committee,omitempty"` + FinalizedHeader *shared.BeaconBlockHeader `json:"finalized_header,omitempty"` SyncAggregate *shared.SyncAggregate `json:"sync_aggregate"` - NextSyncCommitteeBranch []string `json:"next_sync_committee_branch"` - FinalityBranch []string `json:"finality_branch"` + NextSyncCommitteeBranch []string `json:"next_sync_committee_branch,omitempty"` + FinalityBranch []string `json:"finality_branch,omitempty"` SignatureSlot string `json:"signature_slot"` } diff --git a/beacon-chain/rpc/eth/shared/conversions.go b/beacon-chain/rpc/eth/shared/conversions.go index fab700fa865d..3cb9a7ed8c3a 100644 --- a/beacon-chain/rpc/eth/shared/conversions.go +++ b/beacon-chain/rpc/eth/shared/conversions.go @@ -769,30 +769,34 @@ func ProposerSlashingsToConsensus(src []*ProposerSlashing) ([]*eth.ProposerSlash func ProposerSlashingsFromConsensus(src 
[]*eth.ProposerSlashing) []*ProposerSlashing { proposerSlashings := make([]*ProposerSlashing, len(src)) for i, s := range src { - proposerSlashings[i] = &ProposerSlashing{ - SignedHeader1: &SignedBeaconBlockHeader{ - Message: &BeaconBlockHeader{ - Slot: fmt.Sprintf("%d", s.Header_1.Header.Slot), - ProposerIndex: fmt.Sprintf("%d", s.Header_1.Header.ProposerIndex), - ParentRoot: hexutil.Encode(s.Header_1.Header.ParentRoot), - StateRoot: hexutil.Encode(s.Header_1.Header.StateRoot), - BodyRoot: hexutil.Encode(s.Header_1.Header.BodyRoot), - }, - Signature: hexutil.Encode(s.Header_1.Signature), + proposerSlashings[i] = ProposerSlashingFromConsensus(s) + } + return proposerSlashings +} + +func ProposerSlashingFromConsensus(src *eth.ProposerSlashing) *ProposerSlashing { + return &ProposerSlashing{ + SignedHeader1: &SignedBeaconBlockHeader{ + Message: &BeaconBlockHeader{ + Slot: fmt.Sprintf("%d", src.Header_1.Header.Slot), + ProposerIndex: fmt.Sprintf("%d", src.Header_1.Header.ProposerIndex), + ParentRoot: hexutil.Encode(src.Header_1.Header.ParentRoot), + StateRoot: hexutil.Encode(src.Header_1.Header.StateRoot), + BodyRoot: hexutil.Encode(src.Header_1.Header.BodyRoot), }, - SignedHeader2: &SignedBeaconBlockHeader{ - Message: &BeaconBlockHeader{ - Slot: fmt.Sprintf("%d", s.Header_2.Header.Slot), - ProposerIndex: fmt.Sprintf("%d", s.Header_2.Header.ProposerIndex), - ParentRoot: hexutil.Encode(s.Header_2.Header.ParentRoot), - StateRoot: hexutil.Encode(s.Header_2.Header.StateRoot), - BodyRoot: hexutil.Encode(s.Header_2.Header.BodyRoot), - }, - Signature: hexutil.Encode(s.Header_2.Signature), + Signature: hexutil.Encode(src.Header_1.Signature), + }, + SignedHeader2: &SignedBeaconBlockHeader{ + Message: &BeaconBlockHeader{ + Slot: fmt.Sprintf("%d", src.Header_2.Header.Slot), + ProposerIndex: fmt.Sprintf("%d", src.Header_2.Header.ProposerIndex), + ParentRoot: hexutil.Encode(src.Header_2.Header.ParentRoot), + StateRoot: hexutil.Encode(src.Header_2.Header.StateRoot), + BodyRoot: 
hexutil.Encode(src.Header_2.Header.BodyRoot), }, - } + Signature: hexutil.Encode(src.Header_2.Signature), + }, } - return proposerSlashings } func AttesterSlashingsToConsensus(src []*AttesterSlashing) ([]*eth.AttesterSlashing, error) { @@ -875,52 +879,56 @@ func AttesterSlashingsToConsensus(src []*AttesterSlashing) ([]*eth.AttesterSlash func AttesterSlashingsFromConsensus(src []*eth.AttesterSlashing) []*AttesterSlashing { attesterSlashings := make([]*AttesterSlashing, len(src)) for i, s := range src { - a1AttestingIndices := make([]string, len(s.Attestation_1.AttestingIndices)) - for j, ix := range s.Attestation_1.AttestingIndices { - a1AttestingIndices[j] = fmt.Sprintf("%d", ix) - } - a2AttestingIndices := make([]string, len(s.Attestation_2.AttestingIndices)) - for j, ix := range s.Attestation_2.AttestingIndices { - a2AttestingIndices[j] = fmt.Sprintf("%d", ix) - } - attesterSlashings[i] = &AttesterSlashing{ - Attestation1: &IndexedAttestation{ - AttestingIndices: a1AttestingIndices, - Data: &AttestationData{ - Slot: fmt.Sprintf("%d", s.Attestation_1.Data.Slot), - CommitteeIndex: fmt.Sprintf("%d", s.Attestation_1.Data.CommitteeIndex), - BeaconBlockRoot: hexutil.Encode(s.Attestation_1.Data.BeaconBlockRoot), - Source: &Checkpoint{ - Epoch: fmt.Sprintf("%d", s.Attestation_1.Data.Source.Epoch), - Root: hexutil.Encode(s.Attestation_1.Data.Source.Root), - }, - Target: &Checkpoint{ - Epoch: fmt.Sprintf("%d", s.Attestation_1.Data.Target.Epoch), - Root: hexutil.Encode(s.Attestation_1.Data.Target.Root), - }, + attesterSlashings[i] = AttesterSlashingFromConsensus(s) + } + return attesterSlashings +} + +func AttesterSlashingFromConsensus(src *eth.AttesterSlashing) *AttesterSlashing { + a1AttestingIndices := make([]string, len(src.Attestation_1.AttestingIndices)) + for j, ix := range src.Attestation_1.AttestingIndices { + a1AttestingIndices[j] = fmt.Sprintf("%d", ix) + } + a2AttestingIndices := make([]string, len(src.Attestation_2.AttestingIndices)) + for j, ix := range 
src.Attestation_2.AttestingIndices { + a2AttestingIndices[j] = fmt.Sprintf("%d", ix) + } + return &AttesterSlashing{ + Attestation1: &IndexedAttestation{ + AttestingIndices: a1AttestingIndices, + Data: &AttestationData{ + Slot: fmt.Sprintf("%d", src.Attestation_1.Data.Slot), + CommitteeIndex: fmt.Sprintf("%d", src.Attestation_1.Data.CommitteeIndex), + BeaconBlockRoot: hexutil.Encode(src.Attestation_1.Data.BeaconBlockRoot), + Source: &Checkpoint{ + Epoch: fmt.Sprintf("%d", src.Attestation_1.Data.Source.Epoch), + Root: hexutil.Encode(src.Attestation_1.Data.Source.Root), + }, + Target: &Checkpoint{ + Epoch: fmt.Sprintf("%d", src.Attestation_1.Data.Target.Epoch), + Root: hexutil.Encode(src.Attestation_1.Data.Target.Root), }, - Signature: hexutil.Encode(s.Attestation_1.Signature), }, - Attestation2: &IndexedAttestation{ - AttestingIndices: a2AttestingIndices, - Data: &AttestationData{ - Slot: fmt.Sprintf("%d", s.Attestation_2.Data.Slot), - CommitteeIndex: fmt.Sprintf("%d", s.Attestation_2.Data.CommitteeIndex), - BeaconBlockRoot: hexutil.Encode(s.Attestation_2.Data.BeaconBlockRoot), - Source: &Checkpoint{ - Epoch: fmt.Sprintf("%d", s.Attestation_2.Data.Source.Epoch), - Root: hexutil.Encode(s.Attestation_2.Data.Source.Root), - }, - Target: &Checkpoint{ - Epoch: fmt.Sprintf("%d", s.Attestation_2.Data.Target.Epoch), - Root: hexutil.Encode(s.Attestation_2.Data.Target.Root), - }, + Signature: hexutil.Encode(src.Attestation_1.Signature), + }, + Attestation2: &IndexedAttestation{ + AttestingIndices: a2AttestingIndices, + Data: &AttestationData{ + Slot: fmt.Sprintf("%d", src.Attestation_2.Data.Slot), + CommitteeIndex: fmt.Sprintf("%d", src.Attestation_2.Data.CommitteeIndex), + BeaconBlockRoot: hexutil.Encode(src.Attestation_2.Data.BeaconBlockRoot), + Source: &Checkpoint{ + Epoch: fmt.Sprintf("%d", src.Attestation_2.Data.Source.Epoch), + Root: hexutil.Encode(src.Attestation_2.Data.Source.Root), + }, + Target: &Checkpoint{ + Epoch: fmt.Sprintf("%d", 
src.Attestation_2.Data.Target.Epoch), + Root: hexutil.Encode(src.Attestation_2.Data.Target.Root), }, - Signature: hexutil.Encode(s.Attestation_2.Signature), }, - } + Signature: hexutil.Encode(src.Attestation_2.Signature), + }, } - return attesterSlashings } func AttsToConsensus(src []*Attestation) ([]*eth.Attestation, error) { diff --git a/beacon-chain/rpc/eth/shared/conversions_block.go b/beacon-chain/rpc/eth/shared/conversions_block.go index b0c5f9479889..2eae9907a2fd 100644 --- a/beacon-chain/rpc/eth/shared/conversions_block.go +++ b/beacon-chain/rpc/eth/shared/conversions_block.go @@ -2333,10 +2333,10 @@ func ExecutionPayloadHeaderDenebFromConsensus(payload *enginev1.ExecutionPayload Timestamp: fmt.Sprintf("%d", payload.Timestamp), ExtraData: hexutil.Encode(payload.ExtraData), BaseFeePerGas: baseFeePerGas, - BlobGasUsed: fmt.Sprintf("%d", payload.BlobGasUsed), - ExcessBlobGas: fmt.Sprintf("%d", payload.ExcessBlobGas), BlockHash: hexutil.Encode(payload.BlockHash), TransactionsRoot: hexutil.Encode(payload.TransactionsRoot), WithdrawalsRoot: hexutil.Encode(payload.WithdrawalsRoot), + BlobGasUsed: fmt.Sprintf("%d", payload.BlobGasUsed), + ExcessBlobGas: fmt.Sprintf("%d", payload.ExcessBlobGas), }, nil } diff --git a/beacon-chain/rpc/eth/shared/structs_block.go b/beacon-chain/rpc/eth/shared/structs_block.go index 0dfccb06781a..c911cac57135 100644 --- a/beacon-chain/rpc/eth/shared/structs_block.go +++ b/beacon-chain/rpc/eth/shared/structs_block.go @@ -325,11 +325,11 @@ type ExecutionPayloadDeneb struct { Timestamp string `json:"timestamp"` ExtraData string `json:"extra_data"` BaseFeePerGas string `json:"base_fee_per_gas"` - BlobGasUsed string `json:"blob_gas_used"` - ExcessBlobGas string `json:"excess_blob_gas"` BlockHash string `json:"block_hash"` Transactions []string `json:"transactions"` Withdrawals []*Withdrawal `json:"withdrawals"` + BlobGasUsed string `json:"blob_gas_used"` + ExcessBlobGas string `json:"excess_blob_gas"` } type ExecutionPayloadHeaderDeneb 
struct { @@ -345,9 +345,9 @@ type ExecutionPayloadHeaderDeneb struct { Timestamp string `json:"timestamp"` ExtraData string `json:"extra_data"` BaseFeePerGas string `json:"base_fee_per_gas"` - BlobGasUsed string `json:"blob_gas_used"` - ExcessBlobGas string `json:"excess_blob_gas"` BlockHash string `json:"block_hash"` TransactionsRoot string `json:"transactions_root"` WithdrawalsRoot string `json:"withdrawals_root"` + BlobGasUsed string `json:"blob_gas_used"` + ExcessBlobGas string `json:"excess_blob_gas"` } diff --git a/beacon-chain/rpc/eth/validator/handlers.go b/beacon-chain/rpc/eth/validator/handlers.go index bdb06e70e685..cecd6b8f37bc 100644 --- a/beacon-chain/rpc/eth/validator/handlers.go +++ b/beacon-chain/rpc/eth/validator/handlers.go @@ -388,10 +388,6 @@ func (s *Server) GetAttestationData(w http.ResponseWriter, r *http.Request) { return } - if isOptimistic, err := shared.IsOptimistic(ctx, w, s.OptimisticModeFetcher); isOptimistic || err != nil { - return - } - _, slot, ok := shared.UintFromQuery(w, r, "slot", true) if !ok { return diff --git a/beacon-chain/rpc/eth/validator/handlers_test.go b/beacon-chain/rpc/eth/validator/handlers_test.go index 9178c520b91c..1cfeb2350cda 100644 --- a/beacon-chain/rpc/eth/validator/handlers_test.go +++ b/beacon-chain/rpc/eth/validator/handlers_test.go @@ -831,10 +831,11 @@ func TestGetAttestationData(t *testing.T) { TimeFetcher: chain, OptimisticModeFetcher: chain, CoreService: &core.Service{ - HeadFetcher: chain, - GenesisTimeFetcher: chain, - FinalizedFetcher: chain, - AttestationCache: cache.NewAttestationCache(), + HeadFetcher: chain, + GenesisTimeFetcher: chain, + FinalizedFetcher: chain, + AttestationCache: cache.NewAttestationCache(), + OptimisticModeFetcher: chain, }, } @@ -914,10 +915,11 @@ func TestGetAttestationData(t *testing.T) { TimeFetcher: chain, OptimisticModeFetcher: chain, CoreService: &core.Service{ - AttestationCache: cache.NewAttestationCache(), - GenesisTimeFetcher: chain, - HeadFetcher: chain, - 
FinalizedFetcher: chain, + AttestationCache: cache.NewAttestationCache(), + GenesisTimeFetcher: chain, + HeadFetcher: chain, + FinalizedFetcher: chain, + OptimisticModeFetcher: chain, }, } @@ -959,8 +961,9 @@ func TestGetAttestationData(t *testing.T) { TimeFetcher: chain, OptimisticModeFetcher: chain, CoreService: &core.Service{ - GenesisTimeFetcher: chain, - FinalizedFetcher: chain, + GenesisTimeFetcher: chain, + OptimisticModeFetcher: chain, + FinalizedFetcher: chain, }, } @@ -1017,10 +1020,11 @@ func TestGetAttestationData(t *testing.T) { TimeFetcher: chain, OptimisticModeFetcher: chain, CoreService: &core.Service{ - HeadFetcher: chain, - GenesisTimeFetcher: chain, - StateGen: stategen.New(db, doublylinkedtree.New()), - FinalizedFetcher: chain, + HeadFetcher: chain, + GenesisTimeFetcher: chain, + OptimisticModeFetcher: chain, + StateGen: stategen.New(db, doublylinkedtree.New()), + FinalizedFetcher: chain, }, } @@ -1065,10 +1069,11 @@ func TestGetAttestationData(t *testing.T) { TimeFetcher: chain, OptimisticModeFetcher: chain, CoreService: &core.Service{ - AttestationCache: cache.NewAttestationCache(), - HeadFetcher: chain, - GenesisTimeFetcher: chain, - FinalizedFetcher: chain, + AttestationCache: cache.NewAttestationCache(), + OptimisticModeFetcher: chain, + HeadFetcher: chain, + GenesisTimeFetcher: chain, + FinalizedFetcher: chain, }, } @@ -1151,10 +1156,11 @@ func TestGetAttestationData(t *testing.T) { TimeFetcher: chain, OptimisticModeFetcher: chain, CoreService: &core.Service{ - AttestationCache: cache.NewAttestationCache(), - HeadFetcher: chain, - GenesisTimeFetcher: chain, - FinalizedFetcher: chain, + AttestationCache: cache.NewAttestationCache(), + OptimisticModeFetcher: chain, + HeadFetcher: chain, + GenesisTimeFetcher: chain, + FinalizedFetcher: chain, }, } diff --git a/beacon-chain/rpc/prysm/v1alpha1/validator/BUILD.bazel b/beacon-chain/rpc/prysm/v1alpha1/validator/BUILD.bazel index 471320cf27bf..e9aefbeed7de 100644 --- 
a/beacon-chain/rpc/prysm/v1alpha1/validator/BUILD.bazel +++ b/beacon-chain/rpc/prysm/v1alpha1/validator/BUILD.bazel @@ -38,7 +38,6 @@ go_library( "//beacon-chain/builder:go_default_library", "//beacon-chain/cache:go_default_library", "//beacon-chain/cache/depositcache:go_default_library", - "//beacon-chain/core/altair:go_default_library", "//beacon-chain/core/blocks:go_default_library", "//beacon-chain/core/feed:go_default_library", "//beacon-chain/core/feed/block:go_default_library", @@ -66,7 +65,6 @@ go_library( "//config/features:go_default_library", "//config/fieldparams:go_default_library", "//config/params:go_default_library", - "//consensus-types:go_default_library", "//consensus-types/blocks:go_default_library", "//consensus-types/interfaces:go_default_library", "//consensus-types/payload-attribute:go_default_library", @@ -91,13 +89,13 @@ go_library( "//time/slots:go_default_library", "@com_github_ethereum_go_ethereum//common:go_default_library", "@com_github_ethereum_go_ethereum//common/hexutil:go_default_library", + "@com_github_golang_protobuf//ptypes/empty", "@com_github_pkg_errors//:go_default_library", "@com_github_prometheus_client_golang//prometheus:go_default_library", "@com_github_prometheus_client_golang//prometheus/promauto:go_default_library", "@com_github_prysmaticlabs_fastssz//:go_default_library", "@com_github_prysmaticlabs_go_bitfield//:go_default_library", "@com_github_sirupsen_logrus//:go_default_library", - "@io_bazel_rules_go//proto/wkt:empty_go_proto", "@io_opencensus_go//trace:go_default_library", "@org_golang_google_grpc//codes:go_default_library", "@org_golang_google_grpc//status:go_default_library", @@ -141,6 +139,7 @@ common_deps = [ "//beacon-chain/sync/initial-sync/testing:go_default_library", "//config/fieldparams:go_default_library", "//config/params:go_default_library", + "//consensus-types:go_default_library", "//consensus-types/blocks:go_default_library", "//consensus-types/interfaces:go_default_library", 
"//consensus-types/primitives:go_default_library", diff --git a/beacon-chain/rpc/prysm/v1alpha1/validator/attester.go b/beacon-chain/rpc/prysm/v1alpha1/validator/attester.go index ea97088b1fc0..7e402d0b6168 100644 --- a/beacon-chain/rpc/prysm/v1alpha1/validator/attester.go +++ b/beacon-chain/rpc/prysm/v1alpha1/validator/attester.go @@ -31,12 +31,6 @@ func (vs *Server) GetAttestationData(ctx context.Context, req *ethpb.Attestation if vs.SyncChecker.Syncing() { return nil, status.Errorf(codes.Unavailable, "Syncing to latest head, not ready to respond") } - - // An optimistic validator MUST NOT participate in attestation. (i.e., sign across the DOMAIN_BEACON_ATTESTER, DOMAIN_SELECTION_PROOF or DOMAIN_AGGREGATE_AND_PROOF domains). - if err := vs.optimisticStatus(ctx); err != nil { - return nil, err - } - res, err := vs.CoreService.GetAttestationData(ctx, req) if err != nil { return nil, status.Errorf(core.ErrorReasonToGRPC(err.Reason), "Could not get attestation data: %v", err.Err) diff --git a/beacon-chain/rpc/prysm/v1alpha1/validator/attester_mainnet_test.go b/beacon-chain/rpc/prysm/v1alpha1/validator/attester_mainnet_test.go index d4e13e0ee540..be6495386436 100644 --- a/beacon-chain/rpc/prysm/v1alpha1/validator/attester_mainnet_test.go +++ b/beacon-chain/rpc/prysm/v1alpha1/validator/attester_mainnet_test.go @@ -61,10 +61,11 @@ func TestAttestationDataAtSlot_HandlesFarAwayJustifiedEpoch(t *testing.T) { OptimisticModeFetcher: &mock.ChainService{Optimistic: false}, TimeFetcher: &mock.ChainService{Genesis: time.Now().Add(time.Duration(-1*offset) * time.Second)}, CoreService: &core.Service{ - AttestationCache: cache.NewAttestationCache(), - HeadFetcher: &mock.ChainService{TargetRoot: blockRoot, Root: blockRoot[:]}, - GenesisTimeFetcher: &mock.ChainService{Genesis: time.Now().Add(time.Duration(-1*offset) * time.Second)}, - FinalizedFetcher: &mock.ChainService{CurrentJustifiedCheckPoint: justifiedCheckpoint}, + AttestationCache: cache.NewAttestationCache(), + HeadFetcher: 
&mock.ChainService{TargetRoot: blockRoot, Root: blockRoot[:]}, + GenesisTimeFetcher: &mock.ChainService{Genesis: time.Now().Add(time.Duration(-1*offset) * time.Second)}, + FinalizedFetcher: &mock.ChainService{CurrentJustifiedCheckPoint: justifiedCheckpoint}, + OptimisticModeFetcher: &mock.ChainService{Optimistic: false}, }, } diff --git a/beacon-chain/rpc/prysm/v1alpha1/validator/attester_test.go b/beacon-chain/rpc/prysm/v1alpha1/validator/attester_test.go index e1a5cfd5c60f..9a238addc3aa 100644 --- a/beacon-chain/rpc/prysm/v1alpha1/validator/attester_test.go +++ b/beacon-chain/rpc/prysm/v1alpha1/validator/attester_test.go @@ -116,8 +116,9 @@ func TestGetAttestationData_OK(t *testing.T) { GenesisTimeFetcher: &mock.ChainService{ Genesis: time.Now().Add(time.Duration(-1*offset) * time.Second), }, - FinalizedFetcher: &mock.ChainService{CurrentJustifiedCheckPoint: justifiedCheckpoint}, - AttestationCache: cache.NewAttestationCache(), + FinalizedFetcher: &mock.ChainService{CurrentJustifiedCheckPoint: justifiedCheckpoint}, + AttestationCache: cache.NewAttestationCache(), + OptimisticModeFetcher: &mock.ChainService{Optimistic: false}, }, } @@ -176,7 +177,8 @@ func BenchmarkGetAttestationDataConcurrent(b *testing.B) { GenesisTimeFetcher: &mock.ChainService{ Genesis: time.Now().Add(time.Duration(-1*offset) * time.Second), }, - FinalizedFetcher: &mock.ChainService{CurrentJustifiedCheckPoint: justifiedCheckpoint}, + OptimisticModeFetcher: &mock.ChainService{Optimistic: false}, + FinalizedFetcher: &mock.ChainService{CurrentJustifiedCheckPoint: justifiedCheckpoint}, }, } @@ -222,9 +224,10 @@ func TestGetAttestationData_Optimistic(t *testing.T) { OptimisticModeFetcher: &mock.ChainService{Optimistic: true}, TimeFetcher: &mock.ChainService{Genesis: time.Now()}, CoreService: &core.Service{ - GenesisTimeFetcher: &mock.ChainService{Genesis: time.Now()}, - HeadFetcher: &mock.ChainService{}, - AttestationCache: cache.NewAttestationCache(), + GenesisTimeFetcher: 
&mock.ChainService{Genesis: time.Now()}, + HeadFetcher: &mock.ChainService{}, + AttestationCache: cache.NewAttestationCache(), + OptimisticModeFetcher: &mock.ChainService{Optimistic: true}, }, } _, err := as.GetAttestationData(context.Background(), ðpb.AttestationDataRequest{}) @@ -240,10 +243,11 @@ func TestGetAttestationData_Optimistic(t *testing.T) { OptimisticModeFetcher: &mock.ChainService{Optimistic: false}, TimeFetcher: &mock.ChainService{Genesis: time.Now()}, CoreService: &core.Service{ - AttestationCache: cache.NewAttestationCache(), - GenesisTimeFetcher: &mock.ChainService{Genesis: time.Now()}, - HeadFetcher: &mock.ChainService{Optimistic: false, State: beaconState}, - FinalizedFetcher: &mock.ChainService{CurrentJustifiedCheckPoint: ðpb.Checkpoint{}}, + AttestationCache: cache.NewAttestationCache(), + GenesisTimeFetcher: &mock.ChainService{Genesis: time.Now()}, + HeadFetcher: &mock.ChainService{Optimistic: false, State: beaconState}, + FinalizedFetcher: &mock.ChainService{CurrentJustifiedCheckPoint: ðpb.Checkpoint{}}, + OptimisticModeFetcher: &mock.ChainService{Optimistic: false}, }, } _, err = as.GetAttestationData(context.Background(), ðpb.AttestationDataRequest{}) @@ -260,7 +264,8 @@ func TestServer_GetAttestationData_InvalidRequestSlot(t *testing.T) { OptimisticModeFetcher: &mock.ChainService{Optimistic: false}, TimeFetcher: &mock.ChainService{Genesis: time.Now().Add(time.Duration(-1*offset) * time.Second)}, CoreService: &core.Service{ - GenesisTimeFetcher: &mock.ChainService{Genesis: time.Now().Add(time.Duration(-1*offset) * time.Second)}, + GenesisTimeFetcher: &mock.ChainService{Genesis: time.Now().Add(time.Duration(-1*offset) * time.Second)}, + OptimisticModeFetcher: &mock.ChainService{Optimistic: false}, }, } @@ -301,10 +306,11 @@ func TestServer_GetAttestationData_RequestSlotIsDifferentThanCurrentSlot(t *test OptimisticModeFetcher: &mock.ChainService{Optimistic: false}, TimeFetcher: &mock.ChainService{Genesis: 
time.Now().Add(time.Duration(-1*offset) * time.Second)}, CoreService: &core.Service{ - HeadFetcher: &mock.ChainService{TargetRoot: blockRoot2, Root: blockRoot[:]}, - GenesisTimeFetcher: &mock.ChainService{Genesis: time.Now().Add(time.Duration(-1*offset) * time.Second)}, - StateGen: stategen.New(db, doublylinkedtree.New()), - FinalizedFetcher: &mock.ChainService{CurrentJustifiedCheckPoint: justifiedCheckpoint}, + HeadFetcher: &mock.ChainService{TargetRoot: blockRoot2, Root: blockRoot[:]}, + GenesisTimeFetcher: &mock.ChainService{Genesis: time.Now().Add(time.Duration(-1*offset) * time.Second)}, + StateGen: stategen.New(db, doublylinkedtree.New()), + FinalizedFetcher: &mock.ChainService{CurrentJustifiedCheckPoint: justifiedCheckpoint}, + OptimisticModeFetcher: &mock.ChainService{Optimistic: false}, }, } util.SaveBlock(t, ctx, db, block) @@ -346,8 +352,9 @@ func TestGetAttestationData_SucceedsInFirstEpoch(t *testing.T) { HeadFetcher: &mock.ChainService{ TargetRoot: targetRoot, Root: blockRoot[:], }, - GenesisTimeFetcher: &mock.ChainService{Genesis: prysmTime.Now().Add(time.Duration(-1*offset) * time.Second)}, - FinalizedFetcher: &mock.ChainService{CurrentJustifiedCheckPoint: justifiedCheckpoint}, + GenesisTimeFetcher: &mock.ChainService{Genesis: prysmTime.Now().Add(time.Duration(-1*offset) * time.Second)}, + FinalizedFetcher: &mock.ChainService{CurrentJustifiedCheckPoint: justifiedCheckpoint}, + OptimisticModeFetcher: &mock.ChainService{Optimistic: false}, }, } diff --git a/beacon-chain/rpc/prysm/v1alpha1/validator/proposer.go b/beacon-chain/rpc/prysm/v1alpha1/validator/proposer.go index 744151d26edd..384f0d4fb045 100644 --- a/beacon-chain/rpc/prysm/v1alpha1/validator/proposer.go +++ b/beacon-chain/rpc/prysm/v1alpha1/validator/proposer.go @@ -308,7 +308,7 @@ func (vs *Server) broadcastReceiveBlock(ctx context.Context, block interfaces.Si Type: blockfeed.ReceivedBlock, Data: &blockfeed.ReceivedBlockData{SignedBlock: block}, }) - return vs.BlockReceiver.ReceiveBlock(ctx, 
block, root) + return vs.BlockReceiver.ReceiveBlock(ctx, block, root, nil) } // broadcastAndReceiveBlobs handles the broadcasting and reception of blob sidecars. diff --git a/beacon-chain/rpc/prysm/v1alpha1/validator/proposer_bellatrix.go b/beacon-chain/rpc/prysm/v1alpha1/validator/proposer_bellatrix.go index c1e64fad3072..2ae08869ba7e 100644 --- a/beacon-chain/rpc/prysm/v1alpha1/validator/proposer_bellatrix.go +++ b/beacon-chain/rpc/prysm/v1alpha1/validator/proposer_bellatrix.go @@ -225,7 +225,7 @@ func (vs *Server) getPayloadHeaderFromBuilder(ctx context.Context, slot primitiv } for _, c := range kzgCommitments { if len(c) != fieldparams.BLSPubkeyLength { - return nil, nil, fmt.Errorf("builder returned invalid kzg commitment lenth: %d", len(c)) + return nil, nil, fmt.Errorf("builder returned invalid kzg commitment length: %d", len(c)) } } } diff --git a/beacon-chain/rpc/prysm/v1alpha1/validator/proposer_bellatrix_test.go b/beacon-chain/rpc/prysm/v1alpha1/validator/proposer_bellatrix_test.go index a35a646119f4..04d28777805a 100644 --- a/beacon-chain/rpc/prysm/v1alpha1/validator/proposer_bellatrix_test.go +++ b/beacon-chain/rpc/prysm/v1alpha1/validator/proposer_bellatrix_test.go @@ -113,17 +113,18 @@ func TestServer_setExecutionData(t *testing.T) { require.NoError(t, err) bid := ðpb.BuilderBidCapella{ Header: &v1.ExecutionPayloadHeaderCapella{ + ParentHash: params.BeaconConfig().ZeroHash[:], FeeRecipient: make([]byte, fieldparams.FeeRecipientLength), StateRoot: make([]byte, fieldparams.RootLength), ReceiptsRoot: make([]byte, fieldparams.RootLength), LogsBloom: make([]byte, fieldparams.LogsBloomLength), PrevRandao: make([]byte, fieldparams.RootLength), + BlockNumber: 2, + Timestamp: uint64(ti.Unix()), + ExtraData: make([]byte, 0), BaseFeePerGas: make([]byte, fieldparams.RootLength), BlockHash: make([]byte, fieldparams.RootLength), TransactionsRoot: bytesutil.PadTo([]byte{1}, fieldparams.RootLength), - ParentHash: params.BeaconConfig().ZeroHash[:], - Timestamp: 
uint64(ti.Unix()), - BlockNumber: 2, WithdrawalsRoot: make([]byte, fieldparams.RootLength), }, Pubkey: sk.PublicKey().Marshal(), @@ -176,17 +177,18 @@ func TestServer_setExecutionData(t *testing.T) { builderValue := bytesutil.ReverseByteOrder(big.NewInt(1e9).Bytes()) bid := ðpb.BuilderBidCapella{ Header: &v1.ExecutionPayloadHeaderCapella{ + ParentHash: params.BeaconConfig().ZeroHash[:], FeeRecipient: make([]byte, fieldparams.FeeRecipientLength), StateRoot: make([]byte, fieldparams.RootLength), ReceiptsRoot: make([]byte, fieldparams.RootLength), LogsBloom: make([]byte, fieldparams.LogsBloomLength), PrevRandao: make([]byte, fieldparams.RootLength), + BlockNumber: 2, + Timestamp: uint64(ti.Unix()), + ExtraData: make([]byte, 0), BaseFeePerGas: make([]byte, fieldparams.RootLength), BlockHash: make([]byte, fieldparams.RootLength), TransactionsRoot: bytesutil.PadTo([]byte{1}, fieldparams.RootLength), - ParentHash: params.BeaconConfig().ZeroHash[:], - Timestamp: uint64(ti.Unix()), - BlockNumber: 2, WithdrawalsRoot: wr[:], }, Pubkey: sk.PublicKey().Marshal(), diff --git a/beacon-chain/rpc/prysm/v1alpha1/validator/proposer_execution_payload.go b/beacon-chain/rpc/prysm/v1alpha1/validator/proposer_execution_payload.go index cf35fbad27a9..f949236a645e 100644 --- a/beacon-chain/rpc/prysm/v1alpha1/validator/proposer_execution_payload.go +++ b/beacon-chain/rpc/prysm/v1alpha1/validator/proposer_execution_payload.go @@ -336,6 +336,7 @@ func emptyPayload() *enginev1.ExecutionPayload { ReceiptsRoot: make([]byte, fieldparams.RootLength), LogsBloom: make([]byte, fieldparams.LogsBloomLength), PrevRandao: make([]byte, fieldparams.RootLength), + ExtraData: make([]byte, 0), BaseFeePerGas: make([]byte, fieldparams.RootLength), BlockHash: make([]byte, fieldparams.RootLength), Transactions: make([][]byte, 0), @@ -350,6 +351,7 @@ func emptyPayloadCapella() *enginev1.ExecutionPayloadCapella { ReceiptsRoot: make([]byte, fieldparams.RootLength), LogsBloom: make([]byte, 
fieldparams.LogsBloomLength), PrevRandao: make([]byte, fieldparams.RootLength), + ExtraData: make([]byte, 0), BaseFeePerGas: make([]byte, fieldparams.RootLength), BlockHash: make([]byte, fieldparams.RootLength), Transactions: make([][]byte, 0), @@ -365,6 +367,7 @@ func emptyPayloadDeneb() *enginev1.ExecutionPayloadDeneb { ReceiptsRoot: make([]byte, fieldparams.RootLength), LogsBloom: make([]byte, fieldparams.LogsBloomLength), PrevRandao: make([]byte, fieldparams.RootLength), + ExtraData: make([]byte, 0), BaseFeePerGas: make([]byte, fieldparams.RootLength), BlockHash: make([]byte, fieldparams.RootLength), Transactions: make([][]byte, 0), diff --git a/beacon-chain/rpc/prysm/v1alpha1/validator/proposer_test.go b/beacon-chain/rpc/prysm/v1alpha1/validator/proposer_test.go index c705797592c8..9338064ff909 100644 --- a/beacon-chain/rpc/prysm/v1alpha1/validator/proposer_test.go +++ b/beacon-chain/rpc/prysm/v1alpha1/validator/proposer_test.go @@ -154,7 +154,6 @@ func TestServer_GetBeaconBlock_Altair(t *testing.T) { SyncAggregate: ðpb.SyncAggregate{SyncCommitteeBits: scBits[:], SyncCommitteeSignature: make([]byte, 96)}, }, }, - Signature: genesis.Signature, } blkRoot, err := genAltair.Block.HashTreeRoot() @@ -244,7 +243,6 @@ func TestServer_GetBeaconBlock_Bellatrix(t *testing.T) { }, }, }, - Signature: genesis.Signature, } blkRoot, err := blk.Block.HashTreeRoot() @@ -363,12 +361,14 @@ func TestServer_GetBeaconBlock_Capella(t *testing.T) { ReceiptsRoot: make([]byte, fieldparams.RootLength), LogsBloom: make([]byte, fieldparams.LogsBloomLength), PrevRandao: make([]byte, fieldparams.RootLength), + ExtraData: make([]byte, 0), BaseFeePerGas: make([]byte, fieldparams.RootLength), BlockHash: make([]byte, fieldparams.RootLength), + Transactions: make([][]byte, 0), + Withdrawals: make([]*enginev1.Withdrawal, 0), }, }, }, - Signature: genesis.Signature, } blkRoot, err := blk.Block.HashTreeRoot() @@ -388,14 +388,15 @@ func TestServer_GetBeaconBlock_Capella(t *testing.T) { ReceiptsRoot: 
make([]byte, fieldparams.RootLength), LogsBloom: make([]byte, fieldparams.LogsBloomLength), PrevRandao: random, - BaseFeePerGas: make([]byte, fieldparams.RootLength), - BlockHash: make([]byte, fieldparams.RootLength), - Transactions: make([][]byte, 0), - ExtraData: make([]byte, 0), BlockNumber: 1, GasLimit: 2, GasUsed: 3, Timestamp: uint64(timeStamp.Unix()), + ExtraData: make([]byte, 0), + BaseFeePerGas: make([]byte, fieldparams.RootLength), + BlockHash: make([]byte, fieldparams.RootLength), + Transactions: make([][]byte, 0), + Withdrawals: make([]*enginev1.Withdrawal, 0), } proposerServer := getProposerServer(db, beaconState, parentRoot[:]) @@ -479,7 +480,6 @@ func TestServer_GetBeaconBlock_Deneb(t *testing.T) { }, }, }, - Signature: genesis.Signature, } blkRoot, err := blk.Block.HashTreeRoot() @@ -731,7 +731,7 @@ func TestProposer_ProposeBlock_OK(t *testing.T) { }, }, { - name: "deneb block some blobs (kzg and blob count missmatch)", + name: "deneb block some blobs (kzg and blob count mismatch)", block: func(parent [32]byte) *ethpb.GenericSignedBeaconBlock { blockToPropose := util.NewBeaconBlockContentsDeneb() blockToPropose.Block.Block.Slot = 5 diff --git a/beacon-chain/rpc/service.go b/beacon-chain/rpc/service.go index 524e29780770..1a7baeefc507 100644 --- a/beacon-chain/rpc/service.go +++ b/beacon-chain/rpc/service.go @@ -359,16 +359,17 @@ func (s *Service) Start() { }) coreService := &core.Service{ - HeadFetcher: s.cfg.HeadFetcher, - GenesisTimeFetcher: s.cfg.GenesisTimeFetcher, - SyncChecker: s.cfg.SyncService, - Broadcaster: s.cfg.Broadcaster, - SyncCommitteePool: s.cfg.SyncCommitteeObjectPool, - OperationNotifier: s.cfg.OperationNotifier, - AttestationCache: cache.NewAttestationCache(), - StateGen: s.cfg.StateGen, - P2P: s.cfg.Broadcaster, - FinalizedFetcher: s.cfg.FinalizationFetcher, + HeadFetcher: s.cfg.HeadFetcher, + GenesisTimeFetcher: s.cfg.GenesisTimeFetcher, + SyncChecker: s.cfg.SyncService, + Broadcaster: s.cfg.Broadcaster, + SyncCommitteePool: 
s.cfg.SyncCommitteeObjectPool, + OperationNotifier: s.cfg.OperationNotifier, + AttestationCache: cache.NewAttestationCache(), + StateGen: s.cfg.StateGen, + P2P: s.cfg.Broadcaster, + FinalizedFetcher: s.cfg.FinalizationFetcher, + OptimisticModeFetcher: s.cfg.OptimisticModeFetcher, } validatorServer := &validatorv1alpha1.Server{ diff --git a/beacon-chain/state/interfaces.go b/beacon-chain/state/interfaces.go index 37a08fe9f2d4..10e93d65bd09 100644 --- a/beacon-chain/state/interfaces.go +++ b/beacon-chain/state/interfaces.go @@ -22,6 +22,7 @@ type BeaconState interface { WriteOnlyBeaconState Copy() BeaconState CopyAllTries() + Defragment() HashTreeRoot(ctx context.Context) ([32]byte, error) Prover json.Marshaler @@ -66,6 +67,7 @@ type ReadOnlyBeaconState interface { HistoricalSummaries() ([]*ethpb.HistoricalSummary, error) Slashings() []uint64 FieldReferencesCount() map[string]uint64 + RecordStateMetrics() MarshalSSZ() ([]byte, error) IsNil() bool Version() int diff --git a/beacon-chain/state/state-native/multi_value_slices.go b/beacon-chain/state/state-native/multi_value_slices.go index 80ea0297f263..0e2385edc7e8 100644 --- a/beacon-chain/state/state-native/multi_value_slices.go +++ b/beacon-chain/state/state-native/multi_value_slices.go @@ -5,6 +5,7 @@ import ( "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promauto" + "github.com/prysmaticlabs/prysm/v4/beacon-chain/state/state-native/types" fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams" multi_value_slice "github.com/prysmaticlabs/prysm/v4/container/multi-value-slice" "github.com/prysmaticlabs/prysm/v4/encoding/bytesutil" @@ -12,24 +13,26 @@ import ( ) var ( - multiValueRandaoMixesCountGauge = promauto.NewGauge(prometheus.GaugeOpts{ - Name: "multi_value_randao_mixes_count", - }) - multiValueBlockRootsCountGauge = promauto.NewGauge(prometheus.GaugeOpts{ - Name: "multi_value_block_roots_count", - }) - multiValueStateRootsCountGauge = 
promauto.NewGauge(prometheus.GaugeOpts{ - Name: "multi_value_state_roots_count", - }) - multiValueBalancesCountGauge = promauto.NewGauge(prometheus.GaugeOpts{ - Name: "multi_value_balances_count", - }) - multiValueValidatorsCountGauge = promauto.NewGauge(prometheus.GaugeOpts{ - Name: "multi_value_validators_count", - }) - multiValueInactivityScoresCountGauge = promauto.NewGauge(prometheus.GaugeOpts{ - Name: "multi_value_inactivity_scores_count", - }) + multiValueCountGauge = promauto.NewGaugeVec(prometheus.GaugeOpts{ + Name: "multi_value_object_count", + Help: "The number of instances that exist for the multivalue slice for a particular field.", + }, []string{"field"}) + multiValueIndividualElementsCountGauge = promauto.NewGaugeVec(prometheus.GaugeOpts{ + Name: "multi_value_individual_elements_count", + Help: "The number of individual elements that exist for the multivalue slice object.", + }, []string{"field"}) + multiValueIndividualElementReferencesCountGauge = promauto.NewGaugeVec(prometheus.GaugeOpts{ + Name: "multi_value_individual_element_references_count", + Help: "The number of individual element references that exist for the multivalue slice object.", + }, []string{"field"}) + multiValueAppendedElementsCountGauge = promauto.NewGaugeVec(prometheus.GaugeOpts{ + Name: "multi_value_appended_elements_count", + Help: "The number of appended elements that exist for the multivalue slice object.", + }, []string{"field"}) + multiValueAppendedElementReferencesCountGauge = promauto.NewGaugeVec(prometheus.GaugeOpts{ + Name: "multi_value_appended_element_references_count", + Help: "The number of appended element references that exist for the multivalue slice object.", + }, []string{"field"}) ) // MultiValueRandaoMixes is a multi-value slice of randao mixes. 
@@ -43,7 +46,7 @@ func NewMultiValueRandaoMixes(mixes [][]byte) *MultiValueRandaoMixes { } mv := &MultiValueRandaoMixes{} mv.Init(items) - multiValueRandaoMixesCountGauge.Inc() + multiValueCountGauge.WithLabelValues(types.RandaoMixes.String()).Inc() runtime.SetFinalizer(mv, randaoMixesFinalizer) return mv } @@ -59,7 +62,7 @@ func NewMultiValueBlockRoots(roots [][]byte) *MultiValueBlockRoots { } mv := &MultiValueBlockRoots{} mv.Init(items) - multiValueBlockRootsCountGauge.Inc() + multiValueCountGauge.WithLabelValues(types.BlockRoots.String()).Inc() runtime.SetFinalizer(mv, blockRootsFinalizer) return mv } @@ -75,7 +78,7 @@ func NewMultiValueStateRoots(roots [][]byte) *MultiValueStateRoots { } mv := &MultiValueStateRoots{} mv.Init(items) - multiValueStateRootsCountGauge.Inc() + multiValueCountGauge.WithLabelValues(types.StateRoots.String()).Inc() runtime.SetFinalizer(mv, stateRootsFinalizer) return mv } @@ -89,7 +92,7 @@ func NewMultiValueBalances(balances []uint64) *MultiValueBalances { copy(items, balances) mv := &MultiValueBalances{} mv.Init(items) - multiValueBalancesCountGauge.Inc() + multiValueCountGauge.WithLabelValues(types.Balances.String()).Inc() runtime.SetFinalizer(mv, balancesFinalizer) return mv } @@ -103,7 +106,7 @@ func NewMultiValueInactivityScores(scores []uint64) *MultiValueInactivityScores copy(items, scores) mv := &MultiValueInactivityScores{} mv.Init(items) - multiValueInactivityScoresCountGauge.Inc() + multiValueCountGauge.WithLabelValues(types.InactivityScores.String()).Inc() runtime.SetFinalizer(mv, inactivityScoresFinalizer) return mv } @@ -115,31 +118,80 @@ type MultiValueValidators = multi_value_slice.Slice[*ethpb.Validator] func NewMultiValueValidators(vals []*ethpb.Validator) *MultiValueValidators { mv := &MultiValueValidators{} mv.Init(vals) - multiValueValidatorsCountGauge.Inc() + multiValueCountGauge.WithLabelValues(types.Validators.String()).Inc() runtime.SetFinalizer(mv, validatorsFinalizer) return mv } +// Defragment checks whether 
each individual multi-value field in our state is fragmented +// and if it is, it will 'reset' the field to create a new multivalue object. +func (b *BeaconState) Defragment() { + b.lock.Lock() + defer b.lock.Unlock() + if b.blockRootsMultiValue != nil && b.blockRootsMultiValue.IsFragmented() { + initialMVslice := b.blockRootsMultiValue + b.blockRootsMultiValue = b.blockRootsMultiValue.Reset(b) + initialMVslice.Detach(b) + multiValueCountGauge.WithLabelValues(types.BlockRoots.String()).Inc() + runtime.SetFinalizer(b.blockRootsMultiValue, blockRootsFinalizer) + } + if b.stateRootsMultiValue != nil && b.stateRootsMultiValue.IsFragmented() { + initialMVslice := b.stateRootsMultiValue + b.stateRootsMultiValue = b.stateRootsMultiValue.Reset(b) + initialMVslice.Detach(b) + multiValueCountGauge.WithLabelValues(types.StateRoots.String()).Inc() + runtime.SetFinalizer(b.stateRootsMultiValue, stateRootsFinalizer) + } + if b.randaoMixesMultiValue != nil && b.randaoMixesMultiValue.IsFragmented() { + initialMVslice := b.randaoMixesMultiValue + b.randaoMixesMultiValue = b.randaoMixesMultiValue.Reset(b) + initialMVslice.Detach(b) + multiValueCountGauge.WithLabelValues(types.RandaoMixes.String()).Inc() + runtime.SetFinalizer(b.randaoMixesMultiValue, randaoMixesFinalizer) + } + if b.balancesMultiValue != nil && b.balancesMultiValue.IsFragmented() { + initialMVslice := b.balancesMultiValue + b.balancesMultiValue = b.balancesMultiValue.Reset(b) + initialMVslice.Detach(b) + multiValueCountGauge.WithLabelValues(types.Balances.String()).Inc() + runtime.SetFinalizer(b.balancesMultiValue, balancesFinalizer) + } + if b.validatorsMultiValue != nil && b.validatorsMultiValue.IsFragmented() { + initialMVslice := b.validatorsMultiValue + b.validatorsMultiValue = b.validatorsMultiValue.Reset(b) + initialMVslice.Detach(b) + multiValueCountGauge.WithLabelValues(types.Validators.String()).Inc() + runtime.SetFinalizer(b.validatorsMultiValue, validatorsFinalizer) + } + if b.inactivityScoresMultiValue 
!= nil && b.inactivityScoresMultiValue.IsFragmented() { + initialMVslice := b.inactivityScoresMultiValue + b.inactivityScoresMultiValue = b.inactivityScoresMultiValue.Reset(b) + initialMVslice.Detach(b) + multiValueCountGauge.WithLabelValues(types.InactivityScores.String()).Inc() + runtime.SetFinalizer(b.inactivityScoresMultiValue, inactivityScoresFinalizer) + } +} + func randaoMixesFinalizer(m *MultiValueRandaoMixes) { - multiValueRandaoMixesCountGauge.Dec() + multiValueCountGauge.WithLabelValues(types.RandaoMixes.String()).Dec() } func blockRootsFinalizer(m *MultiValueBlockRoots) { - multiValueBlockRootsCountGauge.Dec() + multiValueCountGauge.WithLabelValues(types.BlockRoots.String()).Dec() } func stateRootsFinalizer(m *MultiValueStateRoots) { - multiValueStateRootsCountGauge.Dec() + multiValueCountGauge.WithLabelValues(types.StateRoots.String()).Dec() } func balancesFinalizer(m *MultiValueBalances) { - multiValueBalancesCountGauge.Dec() + multiValueCountGauge.WithLabelValues(types.Balances.String()).Dec() } func validatorsFinalizer(m *MultiValueValidators) { - multiValueValidatorsCountGauge.Dec() + multiValueCountGauge.WithLabelValues(types.Validators.String()).Dec() } func inactivityScoresFinalizer(m *MultiValueInactivityScores) { - multiValueInactivityScoresCountGauge.Dec() + multiValueCountGauge.WithLabelValues(types.InactivityScores.String()).Dec() } diff --git a/beacon-chain/state/state-native/state_trie.go b/beacon-chain/state/state-native/state_trie.go index dcbe87fea6b5..296e461bd9cf 100644 --- a/beacon-chain/state/state-native/state_trie.go +++ b/beacon-chain/state/state-native/state_trie.go @@ -932,6 +932,68 @@ func (b *BeaconState) FieldReferencesCount() map[string]uint64 { return refMap } +// RecordStateMetrics proceeds to record any state related metrics data. +func (b *BeaconState) RecordStateMetrics() { + b.lock.RLock() + defer b.lock.RUnlock() + // Only run this for nodes running with the experimental state. 
+ if !features.Get().EnableExperimentalState { + return + } + + // Validators + if b.validatorsMultiValue != nil { + stats := b.validatorsMultiValue.MultiValueStatistics() + multiValueIndividualElementsCountGauge.WithLabelValues(types.Validators.String()).Set(float64(stats.TotalIndividualElements)) + multiValueIndividualElementReferencesCountGauge.WithLabelValues(types.Validators.String()).Set(float64(stats.TotalIndividualElemReferences)) + multiValueAppendedElementsCountGauge.WithLabelValues(types.Validators.String()).Set(float64(stats.TotalAppendedElements)) + multiValueAppendedElementReferencesCountGauge.WithLabelValues(types.Validators.String()).Set(float64(stats.TotalAppendedElemReferences)) + } + + // Balances + if b.balancesMultiValue != nil { + stats := b.balancesMultiValue.MultiValueStatistics() + multiValueIndividualElementsCountGauge.WithLabelValues(types.Balances.String()).Set(float64(stats.TotalIndividualElements)) + multiValueIndividualElementReferencesCountGauge.WithLabelValues(types.Balances.String()).Set(float64(stats.TotalIndividualElemReferences)) + multiValueAppendedElementsCountGauge.WithLabelValues(types.Balances.String()).Set(float64(stats.TotalAppendedElements)) + multiValueAppendedElementReferencesCountGauge.WithLabelValues(types.Balances.String()).Set(float64(stats.TotalAppendedElemReferences)) + } + + // InactivityScores + if b.inactivityScoresMultiValue != nil { + stats := b.inactivityScoresMultiValue.MultiValueStatistics() + multiValueIndividualElementsCountGauge.WithLabelValues(types.InactivityScores.String()).Set(float64(stats.TotalIndividualElements)) + multiValueIndividualElementReferencesCountGauge.WithLabelValues(types.InactivityScores.String()).Set(float64(stats.TotalIndividualElemReferences)) + multiValueAppendedElementsCountGauge.WithLabelValues(types.InactivityScores.String()).Set(float64(stats.TotalAppendedElements)) + 
multiValueAppendedElementReferencesCountGauge.WithLabelValues(types.InactivityScores.String()).Set(float64(stats.TotalAppendedElemReferences)) + } + // BlockRoots + if b.blockRootsMultiValue != nil { + stats := b.blockRootsMultiValue.MultiValueStatistics() + multiValueIndividualElementsCountGauge.WithLabelValues(types.BlockRoots.String()).Set(float64(stats.TotalIndividualElements)) + multiValueIndividualElementReferencesCountGauge.WithLabelValues(types.BlockRoots.String()).Set(float64(stats.TotalIndividualElemReferences)) + multiValueAppendedElementsCountGauge.WithLabelValues(types.BlockRoots.String()).Set(float64(stats.TotalAppendedElements)) + multiValueAppendedElementReferencesCountGauge.WithLabelValues(types.BlockRoots.String()).Set(float64(stats.TotalAppendedElemReferences)) + } + + // StateRoots + if b.stateRootsMultiValue != nil { + stats := b.stateRootsMultiValue.MultiValueStatistics() + multiValueIndividualElementsCountGauge.WithLabelValues(types.StateRoots.String()).Set(float64(stats.TotalIndividualElements)) + multiValueIndividualElementReferencesCountGauge.WithLabelValues(types.StateRoots.String()).Set(float64(stats.TotalIndividualElemReferences)) + multiValueAppendedElementsCountGauge.WithLabelValues(types.StateRoots.String()).Set(float64(stats.TotalAppendedElements)) + multiValueAppendedElementReferencesCountGauge.WithLabelValues(types.StateRoots.String()).Set(float64(stats.TotalAppendedElemReferences)) + } + // RandaoMixes + if b.randaoMixesMultiValue != nil { + stats := b.randaoMixesMultiValue.MultiValueStatistics() + multiValueIndividualElementsCountGauge.WithLabelValues(types.RandaoMixes.String()).Set(float64(stats.TotalIndividualElements)) + multiValueIndividualElementReferencesCountGauge.WithLabelValues(types.RandaoMixes.String()).Set(float64(stats.TotalIndividualElemReferences)) + multiValueAppendedElementsCountGauge.WithLabelValues(types.RandaoMixes.String()).Set(float64(stats.TotalAppendedElements)) + 
multiValueAppendedElementReferencesCountGauge.WithLabelValues(types.RandaoMixes.String()).Set(float64(stats.TotalAppendedElemReferences)) + } +} + // IsNil checks if the state and the underlying proto // object are nil. func (b *BeaconState) IsNil() bool { diff --git a/beacon-chain/sync/BUILD.bazel b/beacon-chain/sync/BUILD.bazel index cf88e4c5584a..bfe8a44ed0cc 100644 --- a/beacon-chain/sync/BUILD.bazel +++ b/beacon-chain/sync/BUILD.bazel @@ -15,7 +15,6 @@ go_library( "fuzz_exports.go", # keep "log.go", "metrics.go", - "mock_blob_verifier.go", "options.go", "pending_attestations_queue.go", "pending_blocks_queue.go", diff --git a/beacon-chain/sync/blobs_test.go b/beacon-chain/sync/blobs_test.go index 6d2ba38c7a96..68b4b7483dd4 100644 --- a/beacon-chain/sync/blobs_test.go +++ b/beacon-chain/sync/blobs_test.go @@ -338,7 +338,7 @@ func TestRoundTripDenebSave(t *testing.T) { require.NoError(t, undo()) }() parentRoot := [32]byte{} - c := blobsTestCase{nblocks: 10} + c := blobsTestCase{} chain, clock := defaultMockChain(t) c.chain = chain c.clock = clock diff --git a/beacon-chain/sync/initial-sync/BUILD.bazel b/beacon-chain/sync/initial-sync/BUILD.bazel index 193edd52e68d..e598432e8e8e 100644 --- a/beacon-chain/sync/initial-sync/BUILD.bazel +++ b/beacon-chain/sync/initial-sync/BUILD.bazel @@ -12,15 +12,18 @@ go_library( "log.go", "round_robin.go", "service.go", + "verification.go", ], importpath = "github.com/prysmaticlabs/prysm/v4/beacon-chain/sync/initial-sync", visibility = ["//beacon-chain:__subpackages__"], deps = [ "//async/abool:go_default_library", "//beacon-chain/blockchain:go_default_library", + "//beacon-chain/blockchain/kzg:go_default_library", "//beacon-chain/core/feed/block:go_default_library", "//beacon-chain/core/feed/state:go_default_library", "//beacon-chain/core/transition:go_default_library", + "//beacon-chain/das:go_default_library", "//beacon-chain/db:go_default_library", "//beacon-chain/db/filesystem:go_default_library", 
"//beacon-chain/p2p:go_default_library", @@ -38,6 +41,7 @@ go_library( "//consensus-types/primitives:go_default_library", "//container/leaky-bucket:go_default_library", "//crypto/rand:go_default_library", + "//encoding/bytesutil:go_default_library", "//math:go_default_library", "//proto/prysm/v1alpha1:go_default_library", "//runtime:go_default_library", @@ -69,6 +73,7 @@ go_test( deps = [ "//async/abool:go_default_library", "//beacon-chain/blockchain/testing:go_default_library", + "//beacon-chain/das:go_default_library", "//beacon-chain/db:go_default_library", "//beacon-chain/db/filesystem:go_default_library", "//beacon-chain/db/testing:go_default_library", @@ -80,6 +85,7 @@ go_test( "//beacon-chain/startup:go_default_library", "//beacon-chain/sync:go_default_library", "//beacon-chain/sync/verify:go_default_library", + "//beacon-chain/verification:go_default_library", "//cmd/beacon-chain/flags:go_default_library", "//config/features:go_default_library", "//config/params:go_default_library", diff --git a/beacon-chain/sync/initial-sync/blocks_fetcher.go b/beacon-chain/sync/initial-sync/blocks_fetcher.go index 20a391f6a5ef..e42452e6c4b1 100644 --- a/beacon-chain/sync/initial-sync/blocks_fetcher.go +++ b/beacon-chain/sync/initial-sync/blocks_fetcher.go @@ -120,7 +120,7 @@ type fetchRequestResponse struct { pid peer.ID start primitives.Slot count uint64 - bwb []blocks2.BlockWithVerifiedBlobs + bwb []blocks2.BlockWithROBlobs err error } @@ -263,7 +263,7 @@ func (f *blocksFetcher) handleRequest(ctx context.Context, start primitives.Slot response := &fetchRequestResponse{ start: start, count: count, - bwb: []blocks2.BlockWithVerifiedBlobs{}, + bwb: []blocks2.BlockWithROBlobs{}, err: nil, } @@ -304,7 +304,7 @@ func (f *blocksFetcher) fetchBlocksFromPeer( ctx context.Context, start primitives.Slot, count uint64, peers []peer.ID, -) ([]blocks2.BlockWithVerifiedBlobs, peer.ID, error) { +) ([]blocks2.BlockWithROBlobs, peer.ID, error) { ctx, span := trace.StartSpan(ctx, 
"initialsync.fetchBlocksFromPeer") defer span.End() @@ -332,20 +332,20 @@ func (f *blocksFetcher) fetchBlocksFromPeer( return nil, "", errNoPeersAvailable } -func sortedBlockWithVerifiedBlobSlice(blocks []interfaces.ReadOnlySignedBeaconBlock) ([]blocks2.BlockWithVerifiedBlobs, error) { - rb := make([]blocks2.BlockWithVerifiedBlobs, len(blocks)) +func sortedBlockWithVerifiedBlobSlice(blocks []interfaces.ReadOnlySignedBeaconBlock) ([]blocks2.BlockWithROBlobs, error) { + rb := make([]blocks2.BlockWithROBlobs, len(blocks)) for i, b := range blocks { ro, err := blocks2.NewROBlock(b) if err != nil { return nil, err } - rb[i] = blocks2.BlockWithVerifiedBlobs{Block: ro} + rb[i] = blocks2.BlockWithROBlobs{Block: ro} } - sort.Sort(blocks2.BlockWithVerifiedBlobsSlice(rb)) + sort.Sort(blocks2.BlockWithROBlobsSlice(rb)) return rb, nil } -func blobRequest(bwb []blocks2.BlockWithVerifiedBlobs, blobWindowStart primitives.Slot) *p2ppb.BlobSidecarsByRangeRequest { +func blobRequest(bwb []blocks2.BlockWithROBlobs, blobWindowStart primitives.Slot) *p2ppb.BlobSidecarsByRangeRequest { if len(bwb) == 0 { return nil } @@ -360,7 +360,7 @@ func blobRequest(bwb []blocks2.BlockWithVerifiedBlobs, blobWindowStart primitive } } -func lowestSlotNeedsBlob(retentionStart primitives.Slot, bwb []blocks2.BlockWithVerifiedBlobs) *primitives.Slot { +func lowestSlotNeedsBlob(retentionStart primitives.Slot, bwb []blocks2.BlockWithROBlobs) *primitives.Slot { if len(bwb) == 0 { return nil } @@ -398,7 +398,7 @@ func sortBlobs(blobs []blocks.ROBlob) []blocks.ROBlob { var errBlobVerification = errors.New("peer unable to serve aligned BlobSidecarsByRange and BeaconBlockSidecarsByRange responses") var errMissingBlobsForBlockCommitments = errors.Wrap(errBlobVerification, "blobs unavailable for processing block with kzg commitments") -func verifyAndPopulateBlobs(bwb []blocks2.BlockWithVerifiedBlobs, blobs []blocks.ROBlob, blobWindowStart primitives.Slot) ([]blocks2.BlockWithVerifiedBlobs, error) { +func 
verifyAndPopulateBlobs(bwb []blocks2.BlockWithROBlobs, blobs []blocks.ROBlob, blobWindowStart primitives.Slot) ([]blocks2.BlockWithROBlobs, error) { // Assumes bwb has already been sorted by sortedBlockWithVerifiedBlobSlice. blobs = sortBlobs(blobs) blobi := 0 @@ -450,7 +450,7 @@ func missingCommitError(root [32]byte, slot primitives.Slot, missing [][]byte) e } // fetchBlobsFromPeer fetches blocks from a single randomly selected peer. -func (f *blocksFetcher) fetchBlobsFromPeer(ctx context.Context, bwb []blocks2.BlockWithVerifiedBlobs, pid peer.ID) ([]blocks2.BlockWithVerifiedBlobs, error) { +func (f *blocksFetcher) fetchBlobsFromPeer(ctx context.Context, bwb []blocks2.BlockWithROBlobs, pid peer.ID) ([]blocks2.BlockWithROBlobs, error) { ctx, span := trace.StartSpan(ctx, "initialsync.fetchBlobsFromPeer") defer span.End() if slots.ToEpoch(f.clock.CurrentSlot()) < params.BeaconConfig().DenebForkEpoch { diff --git a/beacon-chain/sync/initial-sync/blocks_fetcher_test.go b/beacon-chain/sync/initial-sync/blocks_fetcher_test.go index c25efbaf41ca..8ccb74dac1a5 100644 --- a/beacon-chain/sync/initial-sync/blocks_fetcher_test.go +++ b/beacon-chain/sync/initial-sync/blocks_fetcher_test.go @@ -306,9 +306,9 @@ func TestBlocksFetcher_RoundRobin(t *testing.T) { fetcher.stop() }() - processFetchedBlocks := func() ([]blocks.BlockWithVerifiedBlobs, error) { + processFetchedBlocks := func() ([]blocks.BlockWithROBlobs, error) { defer cancel() - var unionRespBlocks []blocks.BlockWithVerifiedBlobs + var unionRespBlocks []blocks.BlockWithROBlobs for { select { @@ -347,7 +347,7 @@ func TestBlocksFetcher_RoundRobin(t *testing.T) { bwb, err := processFetchedBlocks() assert.NoError(t, err) - sort.Sort(blocks.BlockWithVerifiedBlobsSlice(bwb)) + sort.Sort(blocks.BlockWithROBlobsSlice(bwb)) ss := make([]primitives.Slot, len(bwb)) for i, b := range bwb { ss[i] = b.Block.Block().Slot() @@ -454,7 +454,7 @@ func TestBlocksFetcher_handleRequest(t *testing.T) { } }() - var bwb 
[]blocks.BlockWithVerifiedBlobs + var bwb []blocks.BlockWithROBlobs select { case <-ctx.Done(): t.Error(ctx.Err()) @@ -606,9 +606,7 @@ func TestBlocksFetcher_WaitForBandwidth(t *testing.T) { p1.Connect(p2) require.Equal(t, 1, len(p1.BHost.Network().Peers()), "Expected peers to be connected") req := ðpb.BeaconBlocksByRangeRequest{ - StartSlot: 100, - Step: 1, - Count: 64, + Count: 64, } topic := p2pm.RPCBlocksByRangeTopicV1 @@ -1015,7 +1013,7 @@ func TestLowestSlotNeedsBlob(t *testing.T) { func TestBlobRequest(t *testing.T) { var nilReq *ethpb.BlobSidecarsByRangeRequest // no blocks - req := blobRequest([]blocks.BlockWithVerifiedBlobs{}, 0) + req := blobRequest([]blocks.BlockWithROBlobs{}, 0) require.Equal(t, nilReq, req) blks, _ := util.ExtendBlocksPlusBlobs(t, []blocks.ROBlock{}, 10) sbbs := make([]interfaces.ReadOnlySignedBeaconBlock, len(blks)) @@ -1047,7 +1045,7 @@ func TestBlobRequest(t *testing.T) { require.Equal(t, len(allAfter), int(req.Count)) } -func testSequenceBlockWithBlob(t *testing.T, nblocks int) ([]blocks.BlockWithVerifiedBlobs, []blocks.ROBlob) { +func testSequenceBlockWithBlob(t *testing.T, nblocks int) ([]blocks.BlockWithROBlobs, []blocks.ROBlob) { blks, blobs := util.ExtendBlocksPlusBlobs(t, []blocks.ROBlock{}, nblocks) sbbs := make([]interfaces.ReadOnlySignedBeaconBlock, len(blks)) for i := range blks { diff --git a/beacon-chain/sync/initial-sync/blocks_fetcher_utils.go b/beacon-chain/sync/initial-sync/blocks_fetcher_utils.go index b80c54635057..57c63cc33476 100644 --- a/beacon-chain/sync/initial-sync/blocks_fetcher_utils.go +++ b/beacon-chain/sync/initial-sync/blocks_fetcher_utils.go @@ -23,7 +23,7 @@ import ( // either in DB or initial sync cache. type forkData struct { peer peer.ID - bwb []blocks.BlockWithVerifiedBlobs + bwb []blocks.BlockWithROBlobs } // nonSkippedSlotAfter checks slots after the given one in an attempt to find a non-empty future slot. 
diff --git a/beacon-chain/sync/initial-sync/blocks_queue.go b/beacon-chain/sync/initial-sync/blocks_queue.go index 628be0eb1744..242b9adfdace 100644 --- a/beacon-chain/sync/initial-sync/blocks_queue.go +++ b/beacon-chain/sync/initial-sync/blocks_queue.go @@ -92,7 +92,7 @@ type blocksQueue struct { // blocksQueueFetchedData is a data container that is returned from a queue on each step. type blocksQueueFetchedData struct { pid peer.ID - bwb []blocks.BlockWithVerifiedBlobs + bwb []blocks.BlockWithROBlobs } // newBlocksQueue creates initialized priority queue. diff --git a/beacon-chain/sync/initial-sync/blocks_queue_test.go b/beacon-chain/sync/initial-sync/blocks_queue_test.go index 87610368bfdd..6154c102b4c3 100644 --- a/beacon-chain/sync/initial-sync/blocks_queue_test.go +++ b/beacon-chain/sync/initial-sync/blocks_queue_test.go @@ -263,7 +263,7 @@ func TestBlocksQueue_Loop(t *testing.T) { highestExpectedSlot: tt.highestExpectedSlot, }) assert.NoError(t, queue.start()) - processBlock := func(b blocks.BlockWithVerifiedBlobs) error { + processBlock := func(b blocks.BlockWithROBlobs) error { block := b.Block if !beaconDB.HasBlock(ctx, block.Block().ParentRoot()) { return fmt.Errorf("%w: %#x", errParentDoesNotExist, block.Block().ParentRoot()) @@ -272,10 +272,10 @@ func TestBlocksQueue_Loop(t *testing.T) { if err != nil { return err } - return mc.ReceiveBlock(ctx, block, root) + return mc.ReceiveBlock(ctx, block, root, nil) } - var blocks []blocks.BlockWithVerifiedBlobs + var blocks []blocks.BlockWithROBlobs for data := range queue.fetchedData { for _, b := range data.bwb { if err := processBlock(b); err != nil { @@ -538,7 +538,7 @@ func TestBlocksQueue_onDataReceivedEvent(t *testing.T) { require.NoError(t, err) response := &fetchRequestResponse{ pid: "abc", - bwb: []blocks.BlockWithVerifiedBlobs{ + bwb: []blocks.BlockWithROBlobs{ {Block: blocks.ROBlock{ReadOnlySignedBeaconBlock: wsb}}, {Block: blocks.ROBlock{ReadOnlySignedBeaconBlock: wsbCopy}}, }, @@ -638,7 +638,7 @@ 
func TestBlocksQueue_onReadyToSendEvent(t *testing.T) { queue.smm.machines[256].pid = pidDataParsed rwsb, err := blocks.NewROBlock(wsb) require.NoError(t, err) - queue.smm.machines[256].bwb = []blocks.BlockWithVerifiedBlobs{ + queue.smm.machines[256].bwb = []blocks.BlockWithROBlobs{ {Block: rwsb}, } @@ -672,7 +672,7 @@ func TestBlocksQueue_onReadyToSendEvent(t *testing.T) { queue.smm.machines[320].pid = pidDataParsed rwsb, err := blocks.NewROBlock(wsb) require.NoError(t, err) - queue.smm.machines[320].bwb = []blocks.BlockWithVerifiedBlobs{ + queue.smm.machines[320].bwb = []blocks.BlockWithROBlobs{ {Block: rwsb}, } @@ -703,7 +703,7 @@ func TestBlocksQueue_onReadyToSendEvent(t *testing.T) { queue.smm.machines[320].pid = pidDataParsed rwsb, err := blocks.NewROBlock(wsb) require.NoError(t, err) - queue.smm.machines[320].bwb = []blocks.BlockWithVerifiedBlobs{ + queue.smm.machines[320].bwb = []blocks.BlockWithROBlobs{ {Block: rwsb}, } diff --git a/beacon-chain/sync/initial-sync/fsm.go b/beacon-chain/sync/initial-sync/fsm.go index 9f3be7f561d3..ab6ec0db757e 100644 --- a/beacon-chain/sync/initial-sync/fsm.go +++ b/beacon-chain/sync/initial-sync/fsm.go @@ -46,7 +46,7 @@ type stateMachine struct { start primitives.Slot state stateID pid peer.ID - bwb []blocks.BlockWithVerifiedBlobs + bwb []blocks.BlockWithROBlobs updated time.Time } @@ -78,7 +78,7 @@ func (smm *stateMachineManager) addStateMachine(startSlot primitives.Slot) *stat smm: smm, start: startSlot, state: stateNew, - bwb: []blocks.BlockWithVerifiedBlobs{}, + bwb: []blocks.BlockWithROBlobs{}, updated: prysmTime.Now(), } smm.recalculateMachineAttribs() diff --git a/beacon-chain/sync/initial-sync/round_robin.go b/beacon-chain/sync/initial-sync/round_robin.go index 5f313eab48f8..d6551b9bf72a 100644 --- a/beacon-chain/sync/initial-sync/round_robin.go +++ b/beacon-chain/sync/initial-sync/round_robin.go @@ -10,8 +10,8 @@ import ( "github.com/paulbellamy/ratecounter" "github.com/pkg/errors" 
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/transition" + "github.com/prysmaticlabs/prysm/v4/beacon-chain/das" "github.com/prysmaticlabs/prysm/v4/beacon-chain/sync" - "github.com/prysmaticlabs/prysm/v4/beacon-chain/verification" "github.com/prysmaticlabs/prysm/v4/consensus-types/blocks" "github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces" "github.com/prysmaticlabs/prysm/v4/consensus-types/primitives" @@ -25,10 +25,10 @@ const ( ) // blockReceiverFn defines block receiving function. -type blockReceiverFn func(ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock, blockRoot [32]byte) error +type blockReceiverFn func(ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock, blockRoot [32]byte, avs das.AvailabilityStore) error // batchBlockReceiverFn defines batch receiving function. -type batchBlockReceiverFn func(ctx context.Context, blks []blocks.ROBlock) error +type batchBlockReceiverFn func(ctx context.Context, blks []blocks.ROBlock, avs das.AvailabilityStore) error // Round Robin sync looks at the latest peer statuses and syncs up to the highest known epoch. // @@ -154,52 +154,48 @@ func (s *Service) processFetchedData( } } -// processFetchedData processes data received from queue. +// processFetchedDataRegSync processes data received from queue. 
func (s *Service) processFetchedDataRegSync( ctx context.Context, genesis time.Time, startSlot primitives.Slot, data *blocksQueueFetchedData) { defer s.updatePeerScorerStats(data.pid, startSlot) - blockReceiver := s.cfg.Chain.ReceiveBlock - invalidBlocks := 0 - blksWithoutParentCount := 0 - for _, b := range data.bwb { - if len(b.Blobs) > 0 { - verified, err := verification.BlobSidecarSliceNoop(b.Blobs) - if err != nil { - log.WithField("root", b.Block.Root()).WithError(err).Error("blobs failed verification") - continue - } - for i := range verified { - if err := s.cfg.BlobStorage.Save(verified[i]); err != nil { - log.WithError(err).Warn("Failed to save blob sidecar") - } - } + bwb, err := validUnprocessed(ctx, data.bwb, s.cfg.Chain.HeadSlot(), s.isProcessedBlock) + if err != nil { + log.WithError(err).Debug("batch did not contain a valid sequence of unprocessed blocks") + return + } + if len(bwb) == 0 { + return + } + bv := newBlobBatchVerifier(s.newBlobVerifier) + avs := das.NewLazilyPersistentStore(s.cfg.BlobStorage, bv) + batchFields := logrus.Fields{ + "firstSlot": data.bwb[0].Block.Block().Slot(), + "firstUnprocessed": bwb[0].Block.Block().Slot(), + } + for _, b := range bwb { + if err := avs.Persist(s.clock.CurrentSlot(), b.Blobs...); err != nil { + log.WithError(err).WithFields(batchFields).WithFields(syncFields(b.Block)).Warn("Batch failure due to BlobSidecar issues") + return } - - if err := s.processBlock(ctx, genesis, b, blockReceiver); err != nil { + if err := s.processBlock(ctx, genesis, b, s.cfg.Chain.ReceiveBlock, avs); err != nil { switch { - case errors.Is(err, errBlockAlreadyProcessed): - log.WithError(err).Debug("Block is not processed") - invalidBlocks++ case errors.Is(err, errParentDoesNotExist): - blksWithoutParentCount++ - invalidBlocks++ + log.WithFields(batchFields).WithField("missingParent", fmt.Sprintf("%#x", b.Block.Block().ParentRoot())). 
+ WithFields(syncFields(b.Block)).Debug("Could not process batch blocks due to missing parent") + return default: - log.WithError(err).Warn("Block is not processed") + log.WithError(err).WithFields(batchFields).WithFields(syncFields(b.Block)).Warn("Block processing failure") + return } - continue } } - if blksWithoutParentCount > 0 { - log.WithFields(logrus.Fields{ - "missingParent": fmt.Sprintf("%#x", data.bwb[0].Block.Block().ParentRoot()), - "firstSlot": data.bwb[0].Block.Block().Slot(), - "lastSlot": data.bwb[blksWithoutParentCount-1].Block.Block().Slot(), - }).Debug("Could not process batch blocks due to missing parent") - } - // Add more visible logging if all blocks cannot be processed. - if len(data.bwb) == invalidBlocks { - log.WithField("error", "Range had no valid blocks to process").Warn("Range is not processed") +} + +func syncFields(b blocks.ROBlock) logrus.Fields { + return logrus.Fields{ + "root": fmt.Sprintf("%#x", b.Root()), + "lastSlot": b.Block().Slot(), } } @@ -260,8 +256,9 @@ func (s *Service) logBatchSyncStatus(genesis time.Time, firstBlk blocks.ROBlock, func (s *Service) processBlock( ctx context.Context, genesis time.Time, - bwb blocks.BlockWithVerifiedBlobs, + bwb blocks.BlockWithROBlobs, blockReceiver blockReceiverFn, + avs das.AvailabilityStore, ) error { blk := bwb.Block blkRoot := blk.Root() @@ -273,12 +270,12 @@ func (s *Service) processBlock( if !s.cfg.Chain.HasBlock(ctx, blk.Block().ParentRoot()) { return fmt.Errorf("%w: (in processBlock, slot=%d) %#x", errParentDoesNotExist, blk.Block().Slot(), blk.Block().ParentRoot()) } - return blockReceiver(ctx, blk, blkRoot) + return blockReceiver(ctx, blk, blkRoot, avs) } type processedChecker func(context.Context, blocks.ROBlock) bool -func validUnprocessed(ctx context.Context, bwb []blocks.BlockWithVerifiedBlobs, headSlot primitives.Slot, isProc processedChecker) ([]blocks.BlockWithVerifiedBlobs, error) { +func validUnprocessed(ctx context.Context, bwb []blocks.BlockWithROBlobs, headSlot 
primitives.Slot, isProc processedChecker) ([]blocks.BlockWithROBlobs, error) { // use a pointer to avoid confusing the zero-value with the case where the first element is processed. var processed *int for i := range bwb { @@ -292,7 +289,7 @@ func validUnprocessed(ctx context.Context, bwb []blocks.BlockWithVerifiedBlobs, parent := bwb[i-1].Block if parent.Root() != b.Block().ParentRoot() { return nil, fmt.Errorf("expected linear block list with parent root of %#x (slot %d) but received %#x (slot %d)", - parent, parent.Block().Slot(), b.Block().ParentRoot(), b.Block().Slot()) + parent.Root(), parent.Block().Slot(), b.Block().ParentRoot(), b.Block().Slot()) } } } @@ -302,14 +299,14 @@ func validUnprocessed(ctx context.Context, bwb []blocks.BlockWithVerifiedBlobs, if *processed+1 == len(bwb) { maxIncoming := bwb[len(bwb)-1].Block maxRoot := maxIncoming.Root() - return nil, fmt.Errorf("headSlot:%d, blockSlot:%d , root %#x:%w", headSlot, maxIncoming.Block().Slot(), maxRoot, errBlockAlreadyProcessed) + return nil, fmt.Errorf("%w: headSlot=%d, blockSlot=%d, root=%#x", errBlockAlreadyProcessed, headSlot, maxIncoming.Block().Slot(), maxRoot) } nonProcessedIdx := *processed + 1 return bwb[nonProcessedIdx:], nil } func (s *Service) processBatchedBlocks(ctx context.Context, genesis time.Time, - bwb []blocks.BlockWithVerifiedBlobs, bFunc batchBlockReceiverFn) error { + bwb []blocks.BlockWithROBlobs, bFunc batchBlockReceiverFn) error { if len(bwb) == 0 { return errors.New("0 blocks provided into method") } @@ -328,32 +325,20 @@ func (s *Service) processBatchedBlocks(ctx context.Context, genesis time.Time, return fmt.Errorf("%w: %#x (in processBatchedBlocks, slot=%d)", errParentDoesNotExist, first.Block().ParentRoot(), first.Block().Slot()) } + + bv := newBlobBatchVerifier(s.newBlobVerifier) + avs := das.NewLazilyPersistentStore(s.cfg.BlobStorage, bv) s.logBatchSyncStatus(genesis, first, len(bwb)) - blobCount := 0 for _, bb := range bwb { if len(bb.Blobs) == 0 { continue } - 
verified, err := verification.BlobSidecarSliceNoop(bb.Blobs) - if err != nil { - return errors.Wrapf(err, "blobs for root %#x failed verification", bb.Block.Root()) - } - for i := range verified { - if err := s.cfg.BlobStorage.Save(verified[i]); err != nil { - return errors.Wrapf(err, "failed to save blobs for block %#x", bb.Block.Root()) - } + if err := avs.Persist(s.clock.CurrentSlot(), bb.Blobs...); err != nil { + return err } - blobCount += len(bb.Blobs) - } - if blobCount > 0 { - log.WithFields(logrus.Fields{ - "startSlot": bwb[0].Block.Block().Slot(), - "endSlot": bwb[len(bwb)-1].Block.Block().Slot(), - "count": blobCount, - }).Info("Processed blob sidecars") } - return bFunc(ctx, blocks.BlockWithVerifiedBlobsSlice(bwb).ROBlocks()) + return bFunc(ctx, blocks.BlockWithROBlobsSlice(bwb).ROBlocks(), avs) } // updatePeerScorerStats adjusts monitored metrics for a peer. diff --git a/beacon-chain/sync/initial-sync/round_robin_test.go b/beacon-chain/sync/initial-sync/round_robin_test.go index dab0f3a42a4a..8db187847550 100644 --- a/beacon-chain/sync/initial-sync/round_robin_test.go +++ b/beacon-chain/sync/initial-sync/round_robin_test.go @@ -8,6 +8,7 @@ import ( "github.com/paulbellamy/ratecounter" "github.com/prysmaticlabs/prysm/v4/async/abool" mock "github.com/prysmaticlabs/prysm/v4/beacon-chain/blockchain/testing" + "github.com/prysmaticlabs/prysm/v4/beacon-chain/das" dbtest "github.com/prysmaticlabs/prysm/v4/beacon-chain/db/testing" p2pt "github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p/testing" "github.com/prysmaticlabs/prysm/v4/beacon-chain/startup" @@ -371,11 +372,11 @@ func TestService_processBlock(t *testing.T) { require.NoError(t, err) rowsb, err := blocks.NewROBlock(wsb) require.NoError(t, err) - err = s.processBlock(ctx, genesis, blocks.BlockWithVerifiedBlobs{Block: rowsb}, func( - ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock, blockRoot [32]byte) error { - assert.NoError(t, s.cfg.Chain.ReceiveBlock(ctx, block, blockRoot)) + err = 
s.processBlock(ctx, genesis, blocks.BlockWithROBlobs{Block: rowsb}, func( + ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock, blockRoot [32]byte, _ das.AvailabilityStore) error { + assert.NoError(t, s.cfg.Chain.ReceiveBlock(ctx, block, blockRoot, nil)) return nil - }) + }, nil) assert.NoError(t, err) // Duplicate processing should trigger error. @@ -383,10 +384,10 @@ func TestService_processBlock(t *testing.T) { require.NoError(t, err) rowsb, err = blocks.NewROBlock(wsb) require.NoError(t, err) - err = s.processBlock(ctx, genesis, blocks.BlockWithVerifiedBlobs{Block: rowsb}, func( - ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock, blockRoot [32]byte) error { + err = s.processBlock(ctx, genesis, blocks.BlockWithROBlobs{Block: rowsb}, func( + ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock, blockRoot [32]byte, _ das.AvailabilityStore) error { return nil - }) + }, nil) assert.ErrorContains(t, errBlockAlreadyProcessed.Error(), err) // Continue normal processing, should proceed w/o errors. 
@@ -394,11 +395,11 @@ func TestService_processBlock(t *testing.T) { require.NoError(t, err) rowsb, err = blocks.NewROBlock(wsb) require.NoError(t, err) - err = s.processBlock(ctx, genesis, blocks.BlockWithVerifiedBlobs{Block: rowsb}, func( - ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock, blockRoot [32]byte) error { - assert.NoError(t, s.cfg.Chain.ReceiveBlock(ctx, block, blockRoot)) + err = s.processBlock(ctx, genesis, blocks.BlockWithROBlobs{Block: rowsb}, func( + ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock, blockRoot [32]byte, _ das.AvailabilityStore) error { + assert.NoError(t, s.cfg.Chain.ReceiveBlock(ctx, block, blockRoot, nil)) return nil - }) + }, nil) assert.NoError(t, err) assert.Equal(t, primitives.Slot(2), s.cfg.Chain.HeadSlot(), "Unexpected head slot") }) @@ -429,7 +430,7 @@ func TestService_processBlockBatch(t *testing.T) { genesis := makeGenesisTime(32) t.Run("process non-linear batch", func(t *testing.T) { - var batch []blocks.BlockWithVerifiedBlobs + var batch []blocks.BlockWithROBlobs currBlockRoot := genesisBlkRoot for i := primitives.Slot(1); i < 10; i++ { parentRoot := currBlockRoot @@ -443,11 +444,11 @@ func TestService_processBlockBatch(t *testing.T) { require.NoError(t, err) rowsb, err := blocks.NewROBlock(wsb) require.NoError(t, err) - batch = append(batch, blocks.BlockWithVerifiedBlobs{Block: rowsb}) + batch = append(batch, blocks.BlockWithROBlobs{Block: rowsb}) currBlockRoot = blk1Root } - var batch2 []blocks.BlockWithVerifiedBlobs + var batch2 []blocks.BlockWithROBlobs for i := primitives.Slot(10); i < 20; i++ { parentRoot := currBlockRoot blk1 := util.NewBeaconBlock() @@ -460,19 +461,19 @@ func TestService_processBlockBatch(t *testing.T) { require.NoError(t, err) rowsb, err := blocks.NewROBlock(wsb) require.NoError(t, err) - batch2 = append(batch2, blocks.BlockWithVerifiedBlobs{Block: rowsb}) + batch2 = append(batch2, blocks.BlockWithROBlobs{Block: rowsb}) currBlockRoot = blk1Root } - cbnormal := 
func(ctx context.Context, blks []blocks.ROBlock) error { - assert.NoError(t, s.cfg.Chain.ReceiveBlockBatch(ctx, blks)) + cbnormal := func(ctx context.Context, blks []blocks.ROBlock, avs das.AvailabilityStore) error { + assert.NoError(t, s.cfg.Chain.ReceiveBlockBatch(ctx, blks, avs)) return nil } // Process block normally. err = s.processBatchedBlocks(ctx, genesis, batch, cbnormal) assert.NoError(t, err) - cbnil := func(ctx context.Context, blocks []blocks.ROBlock) error { + cbnil := func(ctx context.Context, blocks []blocks.ROBlock, _ das.AvailabilityStore) error { return nil } @@ -480,7 +481,7 @@ func TestService_processBlockBatch(t *testing.T) { err = s.processBatchedBlocks(ctx, genesis, batch, cbnil) assert.ErrorContains(t, "block is already processed", err) - var badBatch2 []blocks.BlockWithVerifiedBlobs + var badBatch2 []blocks.BlockWithROBlobs for i, b := range batch2 { // create a non-linear batch if i%3 == 0 && i != 0 { @@ -675,7 +676,7 @@ func TestService_ValidUnprocessed(t *testing.T) { require.NoError(t, err) util.SaveBlock(t, context.Background(), beaconDB, genesisBlk) - var batch []blocks.BlockWithVerifiedBlobs + var batch []blocks.BlockWithROBlobs currBlockRoot := genesisBlkRoot for i := primitives.Slot(1); i < 10; i++ { parentRoot := currBlockRoot @@ -689,7 +690,7 @@ func TestService_ValidUnprocessed(t *testing.T) { require.NoError(t, err) rowsb, err := blocks.NewROBlock(wsb) require.NoError(t, err) - batch = append(batch, blocks.BlockWithVerifiedBlobs{Block: rowsb}) + batch = append(batch, blocks.BlockWithROBlobs{Block: rowsb}) currBlockRoot = blk1Root } diff --git a/beacon-chain/sync/initial-sync/service.go b/beacon-chain/sync/initial-sync/service.go index 68c4a7968451..540a7b4a4435 100644 --- a/beacon-chain/sync/initial-sync/service.go +++ b/beacon-chain/sync/initial-sync/service.go @@ -17,6 +17,7 @@ import ( "github.com/prysmaticlabs/prysm/v4/beacon-chain/db/filesystem" "github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p" 
"github.com/prysmaticlabs/prysm/v4/beacon-chain/startup" + "github.com/prysmaticlabs/prysm/v4/beacon-chain/verification" "github.com/prysmaticlabs/prysm/v4/cmd/beacon-chain/flags" "github.com/prysmaticlabs/prysm/v4/config/params" "github.com/prysmaticlabs/prysm/v4/runtime" @@ -47,19 +48,32 @@ type Config struct { // Service service. type Service struct { - cfg *Config - ctx context.Context - cancel context.CancelFunc - synced *abool.AtomicBool - chainStarted *abool.AtomicBool - counter *ratecounter.RateCounter - genesisChan chan time.Time - clock *startup.Clock + cfg *Config + ctx context.Context + cancel context.CancelFunc + synced *abool.AtomicBool + chainStarted *abool.AtomicBool + counter *ratecounter.RateCounter + genesisChan chan time.Time + clock *startup.Clock + verifierWaiter *verification.InitializerWaiter + newBlobVerifier verification.NewBlobVerifier +} + +// Option is a functional option for the initial-sync Service. +type Option func(*Service) + +// WithVerifierWaiter sets the verification.InitializerWaiter +// for the initial-sync Service. +func WithVerifierWaiter(viw *verification.InitializerWaiter) Option { + return func(s *Service) { + s.verifierWaiter = viw + } } // NewService configures the initial sync service responsible for bringing the node up to the // latest head of the blockchain. 
-func NewService(ctx context.Context, cfg *Config) *Service { +func NewService(ctx context.Context, cfg *Config, opts ...Option) *Service { ctx, cancel := context.WithCancel(ctx) s := &Service{ cfg: cfg, @@ -71,7 +85,9 @@ func NewService(ctx context.Context, cfg *Config) *Service { genesisChan: make(chan time.Time), clock: startup.NewClock(time.Unix(0, 0), [32]byte{}), // default clock to prevent panic } - + for _, o := range opts { + o(s) + } return s } @@ -86,6 +102,13 @@ func (s *Service) Start() { s.clock = clock log.Info("Received state initialized event") + v, err := s.verifierWaiter.WaitForInitializer(s.ctx) + if err != nil { + log.WithError(err).Error("Could not get verification initializer") + return + } + s.newBlobVerifier = newBlobVerifierFromInitializer(v) + gt := clock.GenesisTime() if gt.IsZero() { log.Debug("Exiting Initial Sync Service") diff --git a/beacon-chain/sync/initial-sync/service_test.go b/beacon-chain/sync/initial-sync/service_test.go index 0a6f3eb38d2a..027e0be858d9 100644 --- a/beacon-chain/sync/initial-sync/service_test.go +++ b/beacon-chain/sync/initial-sync/service_test.go @@ -13,6 +13,7 @@ import ( dbtest "github.com/prysmaticlabs/prysm/v4/beacon-chain/db/testing" p2pt "github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p/testing" "github.com/prysmaticlabs/prysm/v4/beacon-chain/startup" + "github.com/prysmaticlabs/prysm/v4/beacon-chain/verification" "github.com/prysmaticlabs/prysm/v4/cmd/beacon-chain/flags" "github.com/prysmaticlabs/prysm/v4/config/params" "github.com/prysmaticlabs/prysm/v4/consensus-types/primitives" @@ -158,6 +159,7 @@ func TestService_InitStartStop(t *testing.T) { StateNotifier: &mock.MockStateNotifier{}, InitialSyncComplete: make(chan struct{}), }) + s.verifierWaiter = verification.NewInitializerWaiter(gs, nil, nil) time.Sleep(500 * time.Millisecond) assert.NotNil(t, s) if tt.setGenesis != nil { @@ -200,6 +202,7 @@ func TestService_waitForStateInitialization(t *testing.T) { counter: 
ratecounter.NewRateCounter(counterSeconds * time.Second), genesisChan: make(chan time.Time), } + s.verifierWaiter = verification.NewInitializerWaiter(cs, nil, nil) return s, cs } diff --git a/beacon-chain/sync/initial-sync/verification.go b/beacon-chain/sync/initial-sync/verification.go new file mode 100644 index 000000000000..084d05fbe623 --- /dev/null +++ b/beacon-chain/sync/initial-sync/verification.go @@ -0,0 +1,99 @@ +package initialsync + +import ( + "context" + + "github.com/pkg/errors" + "github.com/prysmaticlabs/prysm/v4/beacon-chain/blockchain/kzg" + "github.com/prysmaticlabs/prysm/v4/beacon-chain/das" + "github.com/prysmaticlabs/prysm/v4/beacon-chain/verification" + "github.com/prysmaticlabs/prysm/v4/consensus-types/blocks" + "github.com/prysmaticlabs/prysm/v4/encoding/bytesutil" +) + +var ( + // ErrBatchSignatureMismatch is returned by VerifiedROBlobs when any of the blobs in the batch have a signature + // which does not match the signature for the block with a corresponding root. + ErrBatchSignatureMismatch = errors.New("Sidecar block header signature does not match signed block") + // ErrBlockRootMismatch is returned by VerifiedROBlobs in the scenario where the root of the given signed block + // does not match the block header in one of the corresponding sidecars. + ErrBatchBlockRootMismatch = errors.New("Sidecar block header root does not match signed block") +) + +func newBlobVerifierFromInitializer(ini *verification.Initializer) verification.NewBlobVerifier { + return func(b blocks.ROBlob, reqs []verification.Requirement) verification.BlobVerifier { + return ini.NewBlobVerifier(b, reqs) + } +} + +func newBlobBatchVerifier(newVerifier verification.NewBlobVerifier) *BlobBatchVerifier { + return &BlobBatchVerifier{ + verifyKzg: kzg.Verify, + newVerifier: newVerifier, + } +} + +type kzgVerifier func(b ...blocks.ROBlob) error + +// BlobBatchVerifier solves problems that come from verifying batches of blobs from RPC. 
+// First: we only update forkchoice after the entire batch has completed, so the n+1 elements in the batch +// won't be in forkchoice yet. +// Second: it is more efficient to batch some verifications, like kzg commitment verification. Batch adds a +// method to BlobVerifier to verify the kzg commitments of all blob sidecars for a block together, then using the cached +// result of the batch verification when verifying the individual blobs. +type BlobBatchVerifier struct { + verifyKzg kzgVerifier + newVerifier verification.NewBlobVerifier +} + +var _ das.BlobBatchVerifier = &BlobBatchVerifier{} + +func (batch *BlobBatchVerifier) VerifiedROBlobs(ctx context.Context, blk blocks.ROBlock, scs []blocks.ROBlob) ([]blocks.VerifiedROBlob, error) { + if len(scs) == 0 { + return nil, nil + } + // We assume the proposer was validated wrt the block in batch block processing before performing the DA check. + + // So at this stage we just need to make sure the value being signed and signature bytes match the block. + for i := range scs { + if blk.Signature() != bytesutil.ToBytes96(scs[i].SignedBlockHeader.Signature) { + return nil, ErrBatchSignatureMismatch + } + // Extra defensive check to make sure the roots match. This should be unnecessary in practice since the root from + // the block should be used as the lookup key into the cache of sidecars. + if blk.Root() != scs[i].BlockRoot() { + return nil, ErrBatchBlockRootMismatch + } + } + // Verify commitments for all blobs at once. verifyOneBlob assumes it is only called once this check succeeds. 
+ if err := batch.verifyKzg(scs...); err != nil { + return nil, err + } + vs := make([]blocks.VerifiedROBlob, len(scs)) + for i := range scs { + vb, err := batch.verifyOneBlob(ctx, scs[i]) + if err != nil { + return nil, err + } + vs[i] = vb + } + return vs, nil +} + +func (batch *BlobBatchVerifier) verifyOneBlob(ctx context.Context, sc blocks.ROBlob) (blocks.VerifiedROBlob, error) { + vb := blocks.VerifiedROBlob{} + bv := batch.newVerifier(sc, verification.InitsyncSidecarRequirements) + // We can satisfy the following 2 requirements immediately because VerifiedROBlobs always verifies commitments + // and block signature for all blobs in the batch before calling verifyOneBlob. + bv.SatisfyRequirement(verification.RequireSidecarKzgProofVerified) + bv.SatisfyRequirement(verification.RequireValidProposerSignature) + + if err := bv.BlobIndexInBounds(); err != nil { + return vb, err + } + if err := bv.SidecarInclusionProven(); err != nil { + return vb, err + } + + return bv.VerifiedROBlob() +} diff --git a/beacon-chain/sync/mock_blob_verifier.go b/beacon-chain/sync/mock_blob_verifier.go deleted file mode 100644 index 70b14bff2f28..000000000000 --- a/beacon-chain/sync/mock_blob_verifier.go +++ /dev/null @@ -1,84 +0,0 @@ -package sync - -import ( - "context" - - "github.com/prysmaticlabs/prysm/v4/consensus-types/blocks" -) - -type BlobVerifier interface { - VerifiedROBlob() (blocks.VerifiedROBlob, error) - BlobIndexInBounds() (err error) - SlotNotTooEarly() (err error) - SlotAboveFinalized() (err error) - ValidProposerSignature(ctx context.Context) (err error) - SidecarParentSeen(badParent func([32]byte) bool) (err error) - SidecarParentValid(badParent func([32]byte) bool) (err error) - SidecarParentSlotLower() (err error) - SidecarDescendsFromFinalized() (err error) - SidecarInclusionProven() (err error) - SidecarKzgProofVerified() (err error) - SidecarProposerExpected(ctx context.Context) (err error) -} - -type mockBlobVerifier struct { - errBlobIndexInBounds error - 
errSlotTooEarly error - errSlotAboveFinalized error - errValidProposerSignature error - errSidecarParentSeen error - errSidecarParentValid error - errSidecarParentSlotLower error - errSidecarDescendsFromFinalized error - errSidecarInclusionProven error - errSidecarKzgProofVerified error - errSidecarProposerExpected error -} - -func (m *mockBlobVerifier) VerifiedROBlob() (blocks.VerifiedROBlob, error) { - return blocks.VerifiedROBlob{}, nil -} - -func (m *mockBlobVerifier) BlobIndexInBounds() (err error) { - return m.errBlobIndexInBounds -} - -func (m *mockBlobVerifier) SlotNotTooEarly() (err error) { - return m.errSlotTooEarly -} - -func (m *mockBlobVerifier) SlotAboveFinalized() (err error) { - return m.errSlotAboveFinalized -} - -func (m *mockBlobVerifier) ValidProposerSignature(ctx context.Context) (err error) { - return m.errValidProposerSignature -} - -func (m *mockBlobVerifier) SidecarParentSeen(badParent func([32]byte) bool) (err error) { - return m.errSidecarParentSeen -} - -func (m *mockBlobVerifier) SidecarParentValid(badParent func([32]byte) bool) (err error) { - return m.errSidecarParentValid -} - -func (m *mockBlobVerifier) SidecarParentSlotLower() (err error) { - return m.errSidecarParentSlotLower -} - -func (m *mockBlobVerifier) SidecarDescendsFromFinalized() (err error) { - return m.errSidecarDescendsFromFinalized -} - -func (m *mockBlobVerifier) SidecarInclusionProven() (err error) { - return m.errSidecarInclusionProven -} - -func (m *mockBlobVerifier) SidecarKzgProofVerified() (err error) { - return m.errSidecarKzgProofVerified -} - -func (m *mockBlobVerifier) SidecarProposerExpected(ctx context.Context) (err error) { - return m.errSidecarProposerExpected -} diff --git a/beacon-chain/sync/pending_blocks_queue.go b/beacon-chain/sync/pending_blocks_queue.go index 18fe859ecfcb..428adf09a1e8 100644 --- a/beacon-chain/sync/pending_blocks_queue.go +++ b/beacon-chain/sync/pending_blocks_queue.go @@ -211,7 +211,7 @@ func (s *Service) 
processAndBroadcastBlock(ctx context.Context, b interfaces.Rea } } - if err := s.cfg.chain.ReceiveBlock(ctx, b, blkRoot); err != nil { + if err := s.cfg.chain.ReceiveBlock(ctx, b, blkRoot, nil); err != nil { return err } diff --git a/beacon-chain/sync/rpc_beacon_blocks_by_range.go b/beacon-chain/sync/rpc_beacon_blocks_by_range.go index cc22418e34af..5f603b322ea5 100644 --- a/beacon-chain/sync/rpc_beacon_blocks_by_range.go +++ b/beacon-chain/sync/rpc_beacon_blocks_by_range.go @@ -110,7 +110,7 @@ func validateRangeRequest(r *pb.BeaconBlocksByRangeRequest, current primitives.S if rp.start > maxStart { return rangeParams{}, p2ptypes.ErrInvalidRequest } - rp.end, err = rp.start.SafeAdd((rp.size - 1)) + rp.end, err = rp.start.SafeAdd(rp.size - 1) if err != nil { return rangeParams{}, p2ptypes.ErrInvalidRequest } diff --git a/beacon-chain/sync/rpc_beacon_blocks_by_range_test.go b/beacon-chain/sync/rpc_beacon_blocks_by_range_test.go index 43a1b3f0d639..0d44edc4c06c 100644 --- a/beacon-chain/sync/rpc_beacon_blocks_by_range_test.go +++ b/beacon-chain/sync/rpc_beacon_blocks_by_range_test.go @@ -1074,7 +1074,6 @@ func TestRPCBeaconBlocksByRange_FilterBlocks(t *testing.T) { func TestRPCBeaconBlocksByRange_FilterBlocks_PreviousRoot(t *testing.T) { req := ðpb.BeaconBlocksByRangeRequest{ StartSlot: 100, - Step: 1, Count: uint64(flags.Get().BlockBatchLimit) * 2, } diff --git a/beacon-chain/sync/rpc_send_request.go b/beacon-chain/sync/rpc_send_request.go index cd04b49c230d..a8714445b9e1 100644 --- a/beacon-chain/sync/rpc_send_request.go +++ b/beacon-chain/sync/rpc_send_request.go @@ -31,7 +31,7 @@ var ErrInvalidFetchedData = errors.New("invalid data returned from peer") var errMaxRequestBlobSidecarsExceeded = errors.Wrap(ErrInvalidFetchedData, "peer exceeded req blob chunk tx limit") var errBlobChunkedReadFailure = errors.New("failed to read stream of chunk-encoded blobs") var errBlobUnmarshal = errors.New("Could not unmarshal chunk-encoded blob") -var errUnrequestedRoot = 
errors.New("Received BlobSidecar in response that was not requested") +var errUnrequested = errors.New("Received BlobSidecar in response that was not requested") var errBlobResponseOutOfBounds = errors.New("received BlobSidecar with slot outside BlobSidecarsByRangeRequest bounds") // BeaconBlockProcessor defines a block processing function, which allows to start utilizing @@ -199,13 +199,22 @@ func SendBlobSidecarByRoot( type blobResponseValidation func(blocks.ROBlob) error func blobValidatorFromRootReq(req *p2ptypes.BlobSidecarsByRootReq) blobResponseValidation { - roots := make(map[[32]byte]bool) + blobIds := make(map[[32]byte]map[uint64]bool) for _, sc := range *req { - roots[bytesutil.ToBytes32(sc.BlockRoot)] = true + blockRoot := bytesutil.ToBytes32(sc.BlockRoot) + if blobIds[blockRoot] == nil { + blobIds[blockRoot] = make(map[uint64]bool) + } + blobIds[blockRoot][sc.Index] = true } return func(sc blocks.ROBlob) error { - if requested := roots[sc.BlockRoot()]; !requested { - return errors.Wrapf(errUnrequestedRoot, "root=%#x", sc.BlockRoot()) + blobIndices := blobIds[sc.BlockRoot()] + if blobIndices == nil { + return errors.Wrapf(errUnrequested, "root=%#x", sc.BlockRoot()) + } + requested := blobIndices[sc.Index] + if !requested { + return errors.Wrapf(errUnrequested, "root=%#x index=%d", sc.BlockRoot(), sc.Index) } return nil } diff --git a/beacon-chain/sync/rpc_send_request_test.go b/beacon-chain/sync/rpc_send_request_test.go index 235f8ef6285a..d99de2733b64 100644 --- a/beacon-chain/sync/rpc_send_request_test.go +++ b/beacon-chain/sync/rpc_send_request_test.go @@ -479,11 +479,12 @@ func TestSendRequest_SendBeaconBlocksByRootRequest(t *testing.T) { } func TestBlobValidatorFromRootReq(t *testing.T) { - validRoot := bytesutil.PadTo([]byte("valid"), 32) - invalidRoot := bytesutil.PadTo([]byte("invalid"), 32) + rootA := bytesutil.PadTo([]byte("valid"), 32) + rootB := bytesutil.PadTo([]byte("invalid"), 32) header := ðpb.SignedBeaconBlockHeader{} - validb := 
util.GenerateTestDenebBlobSidecar(t, bytesutil.ToBytes32(validRoot), header, 0, []byte{}, make([][]byte, 0)) - invalidb := util.GenerateTestDenebBlobSidecar(t, bytesutil.ToBytes32(invalidRoot), header, 0, []byte{}, make([][]byte, 0)) + blobSidecarA0 := util.GenerateTestDenebBlobSidecar(t, bytesutil.ToBytes32(rootA), header, 0, []byte{}, make([][]byte, 0)) + blobSidecarA1 := util.GenerateTestDenebBlobSidecar(t, bytesutil.ToBytes32(rootA), header, 1, []byte{}, make([][]byte, 0)) + blobSidecarB0 := util.GenerateTestDenebBlobSidecar(t, bytesutil.ToBytes32(rootB), header, 0, []byte{}, make([][]byte, 0)) cases := []struct { name string ids []*ethpb.BlobIdentifier @@ -491,15 +492,21 @@ func TestBlobValidatorFromRootReq(t *testing.T) { err error }{ { - name: "valid", - ids: []*ethpb.BlobIdentifier{{BlockRoot: validRoot}}, - response: []blocks.ROBlob{validb}, + name: "expected", + ids: []*ethpb.BlobIdentifier{{BlockRoot: rootA, Index: 0}}, + response: []blocks.ROBlob{blobSidecarA0}, }, { - name: "invalid", - ids: []*ethpb.BlobIdentifier{{BlockRoot: validRoot}}, - response: []blocks.ROBlob{invalidb}, - err: errUnrequestedRoot, + name: "wrong root", + ids: []*ethpb.BlobIdentifier{{BlockRoot: rootA, Index: 0}}, + response: []blocks.ROBlob{blobSidecarB0}, + err: errUnrequested, + }, + { + name: "wrong index", + ids: []*ethpb.BlobIdentifier{{BlockRoot: rootA, Index: 0}}, + response: []blocks.ROBlob{blobSidecarA1}, + err: errUnrequested, }, } for _, c := range cases { diff --git a/beacon-chain/sync/service.go b/beacon-chain/sync/service.go index 8b71fbe16cf3..bc664489814e 100644 --- a/beacon-chain/sync/service.go +++ b/beacon-chain/sync/service.go @@ -154,7 +154,7 @@ type Service struct { clockWaiter startup.ClockWaiter initialSyncComplete chan struct{} verifierWaiter *verification.InitializerWaiter - newBlobVerifier NewBlobVerifier + newBlobVerifier verification.NewBlobVerifier } // NewService initializes new regular sync service. 
@@ -205,11 +205,9 @@ func NewService(ctx context.Context, opts ...Option) *Service { return r } -type NewBlobVerifier func(b blocks.ROBlob, reqs ...verification.Requirement) BlobVerifier - -func newBlobVerifierFromInitializer(ini *verification.Initializer) NewBlobVerifier { - return func(b blocks.ROBlob, reqs ...verification.Requirement) BlobVerifier { - return ini.NewBlobVerifier(b, reqs...) +func newBlobVerifierFromInitializer(ini *verification.Initializer) verification.NewBlobVerifier { + return func(b blocks.ROBlob, reqs []verification.Requirement) verification.BlobVerifier { + return ini.NewBlobVerifier(b, reqs) } } diff --git a/beacon-chain/sync/subscriber_beacon_blocks.go b/beacon-chain/sync/subscriber_beacon_blocks.go index 5eb5931a2be6..95c7c23e4332 100644 --- a/beacon-chain/sync/subscriber_beacon_blocks.go +++ b/beacon-chain/sync/subscriber_beacon_blocks.go @@ -27,7 +27,7 @@ func (s *Service) beaconBlockSubscriber(ctx context.Context, msg proto.Message) return err } - if err := s.cfg.chain.ReceiveBlock(ctx, signed, root); err != nil { + if err := s.cfg.chain.ReceiveBlock(ctx, signed, root, nil); err != nil { if blockchain.IsInvalidBlock(err) { r := blockchain.InvalidBlockRoot(err) if r != [32]byte{} { diff --git a/beacon-chain/sync/validate_attester_slashing.go b/beacon-chain/sync/validate_attester_slashing.go index 00342cdb0d8b..fb56960f81b4 100644 --- a/beacon-chain/sync/validate_attester_slashing.go +++ b/beacon-chain/sync/validate_attester_slashing.go @@ -7,6 +7,8 @@ import ( "github.com/libp2p/go-libp2p/core/peer" "github.com/pkg/errors" "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/blocks" + "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed" + "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed/operation" "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/helpers" "github.com/prysmaticlabs/prysm/v4/consensus-types/primitives" "github.com/prysmaticlabs/prysm/v4/container/slice" @@ -74,6 +76,14 @@ func (s *Service) 
validateAttesterSlashing(ctx context.Context, pid peer.ID, msg } s.cfg.chain.ReceiveAttesterSlashing(ctx, slashing) + // notify events + s.cfg.operationNotifier.OperationFeed().Send(&feed.Event{ + Type: operation.AttesterSlashingReceived, + Data: &operation.AttesterSlashingReceivedData{ + AttesterSlashing: slashing, + }, + }) + msg.ValidatorData = slashing // Used in downstream subscriber return pubsub.ValidationAccept, nil } diff --git a/beacon-chain/sync/validate_attester_slashing_test.go b/beacon-chain/sync/validate_attester_slashing_test.go index 3358c865a10f..3fd1ebc1d817 100644 --- a/beacon-chain/sync/validate_attester_slashing_test.go +++ b/beacon-chain/sync/validate_attester_slashing_test.go @@ -84,10 +84,11 @@ func TestValidateAttesterSlashing_ValidSlashing(t *testing.T) { chain := &mock.ChainService{State: s, Genesis: time.Now()} r := &Service{ cfg: &config{ - p2p: p, - chain: chain, - clock: startup.NewClock(chain.Genesis, chain.ValidatorsRoot), - initialSync: &mockSync.Sync{IsSyncing: false}, + p2p: p, + chain: chain, + clock: startup.NewClock(chain.Genesis, chain.ValidatorsRoot), + initialSync: &mockSync.Sync{IsSyncing: false}, + operationNotifier: chain.OperationNotifier(), }, seenAttesterSlashingCache: make(map[uint64]bool), subHandler: newSubTopicHandler(), @@ -129,10 +130,11 @@ func TestValidateAttesterSlashing_InvalidSlashing_WithdrawableEpoch(t *testing.T chain := &mock.ChainService{State: s, Genesis: time.Now()} r := &Service{ cfg: &config{ - p2p: p, - chain: chain, - clock: startup.NewClock(chain.Genesis, chain.ValidatorsRoot), - initialSync: &mockSync.Sync{IsSyncing: false}, + p2p: p, + chain: chain, + clock: startup.NewClock(chain.Genesis, chain.ValidatorsRoot), + initialSync: &mockSync.Sync{IsSyncing: false}, + operationNotifier: chain.OperationNotifier(), }, seenAttesterSlashingCache: make(map[uint64]bool), subHandler: newSubTopicHandler(), diff --git a/beacon-chain/sync/validate_blob.go b/beacon-chain/sync/validate_blob.go index 
22b547596263..bfa2b5c8a579 100644 --- a/beacon-chain/sync/validate_blob.go +++ b/beacon-chain/sync/validate_blob.go @@ -47,7 +47,7 @@ func (s *Service) validateBlob(ctx context.Context, pid peer.ID, msg *pubsub.Mes if err != nil { return pubsub.ValidationReject, errors.Wrap(err, "roblob conversion failure") } - vf := s.newBlobVerifier(blob, verification.GossipSidecarRequirements...) + vf := s.newBlobVerifier(blob, verification.GossipSidecarRequirements) if err := vf.BlobIndexInBounds(); err != nil { return pubsub.ValidationReject, err @@ -60,7 +60,7 @@ func (s *Service) validateBlob(ctx context.Context, pid peer.ID, msg *pubsub.Mes return pubsub.ValidationReject, fmt.Errorf("wrong topic name: %s", *msg.Topic) } - if err := vf.SlotNotTooEarly(); err != nil { + if err := vf.NotFromFutureSlot(); err != nil { return pubsub.ValidationIgnore, err } diff --git a/beacon-chain/sync/validate_blob_test.go b/beacon-chain/sync/validate_blob_test.go index a9d542b06ec6..83b3322ac0eb 100644 --- a/beacon-chain/sync/validate_blob_test.go +++ b/beacon-chain/sync/validate_blob_test.go @@ -172,83 +172,83 @@ func TestValidateBlob_ErrorPathsWithMock(t *testing.T) { tests := []struct { name string error error - verifier NewBlobVerifier + verifier verification.NewBlobVerifier result pubsub.ValidationResult }{ { error: errors.New("blob index out of bound"), - verifier: func(b blocks.ROBlob, reqs ...verification.Requirement) BlobVerifier { - return &mockBlobVerifier{errBlobIndexInBounds: errors.New("blob index out of bound")} + verifier: func(b blocks.ROBlob, reqs []verification.Requirement) verification.BlobVerifier { + return &verification.MockBlobVerifier{ErrBlobIndexInBounds: errors.New("blob index out of bound")} }, result: pubsub.ValidationReject, }, { error: errors.New("slot too early"), - verifier: func(b blocks.ROBlob, reqs ...verification.Requirement) BlobVerifier { - return &mockBlobVerifier{errSlotTooEarly: errors.New("slot too early")} + verifier: func(b blocks.ROBlob, reqs 
[]verification.Requirement) verification.BlobVerifier { + return &verification.MockBlobVerifier{ErrSlotTooEarly: errors.New("slot too early")} }, result: pubsub.ValidationIgnore, }, { error: errors.New("slot above finalized"), - verifier: func(b blocks.ROBlob, reqs ...verification.Requirement) BlobVerifier { - return &mockBlobVerifier{errSlotAboveFinalized: errors.New("slot above finalized")} + verifier: func(b blocks.ROBlob, reqs []verification.Requirement) verification.BlobVerifier { + return &verification.MockBlobVerifier{ErrSlotAboveFinalized: errors.New("slot above finalized")} }, result: pubsub.ValidationIgnore, }, { error: errors.New("valid proposer signature"), - verifier: func(b blocks.ROBlob, reqs ...verification.Requirement) BlobVerifier { - return &mockBlobVerifier{errValidProposerSignature: errors.New("valid proposer signature")} + verifier: func(b blocks.ROBlob, reqs []verification.Requirement) verification.BlobVerifier { + return &verification.MockBlobVerifier{ErrValidProposerSignature: errors.New("valid proposer signature")} }, result: pubsub.ValidationReject, }, { error: errors.New("sidecar parent seen"), - verifier: func(b blocks.ROBlob, reqs ...verification.Requirement) BlobVerifier { - return &mockBlobVerifier{errSidecarParentSeen: errors.New("sidecar parent seen")} + verifier: func(b blocks.ROBlob, reqs []verification.Requirement) verification.BlobVerifier { + return &verification.MockBlobVerifier{ErrSidecarParentSeen: errors.New("sidecar parent seen")} }, result: pubsub.ValidationIgnore, }, { error: errors.New("sidecar parent valid"), - verifier: func(b blocks.ROBlob, reqs ...verification.Requirement) BlobVerifier { - return &mockBlobVerifier{errSidecarParentValid: errors.New("sidecar parent valid")} + verifier: func(b blocks.ROBlob, reqs []verification.Requirement) verification.BlobVerifier { + return &verification.MockBlobVerifier{ErrSidecarParentValid: errors.New("sidecar parent valid")} }, result: pubsub.ValidationReject, }, { error: 
errors.New("sidecar parent slot lower"), - verifier: func(b blocks.ROBlob, reqs ...verification.Requirement) BlobVerifier { - return &mockBlobVerifier{errSidecarParentSlotLower: errors.New("sidecar parent slot lower")} + verifier: func(b blocks.ROBlob, reqs []verification.Requirement) verification.BlobVerifier { + return &verification.MockBlobVerifier{ErrSidecarParentSlotLower: errors.New("sidecar parent slot lower")} }, result: pubsub.ValidationReject, }, { error: errors.New("descends from finalized"), - verifier: func(b blocks.ROBlob, reqs ...verification.Requirement) BlobVerifier { - return &mockBlobVerifier{errSidecarDescendsFromFinalized: errors.New("descends from finalized")} + verifier: func(b blocks.ROBlob, reqs []verification.Requirement) verification.BlobVerifier { + return &verification.MockBlobVerifier{ErrSidecarDescendsFromFinalized: errors.New("descends from finalized")} }, result: pubsub.ValidationReject, }, { error: errors.New("inclusion proven"), - verifier: func(b blocks.ROBlob, reqs ...verification.Requirement) BlobVerifier { - return &mockBlobVerifier{errSidecarInclusionProven: errors.New("inclusion proven")} + verifier: func(b blocks.ROBlob, reqs []verification.Requirement) verification.BlobVerifier { + return &verification.MockBlobVerifier{ErrSidecarInclusionProven: errors.New("inclusion proven")} }, result: pubsub.ValidationReject, }, { error: errors.New("kzg proof verified"), - verifier: func(b blocks.ROBlob, reqs ...verification.Requirement) BlobVerifier { - return &mockBlobVerifier{errSidecarKzgProofVerified: errors.New("kzg proof verified")} + verifier: func(b blocks.ROBlob, reqs []verification.Requirement) verification.BlobVerifier { + return &verification.MockBlobVerifier{ErrSidecarKzgProofVerified: errors.New("kzg proof verified")} }, result: pubsub.ValidationReject, }, { error: errors.New("sidecar proposer expected"), - verifier: func(b blocks.ROBlob, reqs ...verification.Requirement) BlobVerifier { - return 
&mockBlobVerifier{errSidecarProposerExpected: errors.New("sidecar proposer expected")} + verifier: func(b blocks.ROBlob, reqs []verification.Requirement) verification.BlobVerifier { + return &verification.MockBlobVerifier{ErrSidecarProposerExpected: errors.New("sidecar proposer expected")} }, result: pubsub.ValidationReject, }, @@ -285,8 +285,8 @@ func TestValidateBlob_ErrorPathsWithMock(t *testing.T) { } } -func testNewBlobVerifier() NewBlobVerifier { - return func(b blocks.ROBlob, reqs ...verification.Requirement) BlobVerifier { - return &mockBlobVerifier{} +func testNewBlobVerifier() verification.NewBlobVerifier { + return func(b blocks.ROBlob, reqs []verification.Requirement) verification.BlobVerifier { + return &verification.MockBlobVerifier{} } } diff --git a/beacon-chain/sync/validate_proposer_slashing.go b/beacon-chain/sync/validate_proposer_slashing.go index cdfb3ab21549..3fbd311af439 100644 --- a/beacon-chain/sync/validate_proposer_slashing.go +++ b/beacon-chain/sync/validate_proposer_slashing.go @@ -6,6 +6,8 @@ import ( pubsub "github.com/libp2p/go-libp2p-pubsub" "github.com/libp2p/go-libp2p/core/peer" "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/blocks" + "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed" + "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed/operation" "github.com/prysmaticlabs/prysm/v4/consensus-types/primitives" "github.com/prysmaticlabs/prysm/v4/monitoring/tracing" ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1" @@ -55,6 +57,14 @@ func (s *Service) validateProposerSlashing(ctx context.Context, pid peer.ID, msg return pubsub.ValidationReject, err } + // notify events + s.cfg.operationNotifier.OperationFeed().Send(&feed.Event{ + Type: operation.ProposerSlashingReceived, + Data: &operation.ProposerSlashingReceivedData{ + ProposerSlashing: slashing, + }, + }) + msg.ValidatorData = slashing // Used in downstream subscriber return pubsub.ValidationAccept, nil } diff --git 
a/beacon-chain/sync/validate_proposer_slashing_test.go b/beacon-chain/sync/validate_proposer_slashing_test.go index 3aac4a4a8eaa..a484587fb965 100644 --- a/beacon-chain/sync/validate_proposer_slashing_test.go +++ b/beacon-chain/sync/validate_proposer_slashing_test.go @@ -117,10 +117,11 @@ func TestValidateProposerSlashing_ValidSlashing(t *testing.T) { chain := &mock.ChainService{State: s, Genesis: time.Now()} r := &Service{ cfg: &config{ - p2p: p, - chain: chain, - clock: startup.NewClock(chain.Genesis, chain.ValidatorsRoot), - initialSync: &mockSync.Sync{IsSyncing: false}, + p2p: p, + chain: chain, + clock: startup.NewClock(chain.Genesis, chain.ValidatorsRoot), + initialSync: &mockSync.Sync{IsSyncing: false}, + operationNotifier: chain.OperationNotifier(), }, seenProposerSlashingCache: lruwrpr.New(10), } diff --git a/beacon-chain/verification/BUILD.bazel b/beacon-chain/verification/BUILD.bazel index 65247a50ce36..0a77b7868b3d 100644 --- a/beacon-chain/verification/BUILD.bazel +++ b/beacon-chain/verification/BUILD.bazel @@ -8,6 +8,8 @@ go_library( "error.go", "fake.go", "initializer.go", + "interface.go", + "mock.go", "result.go", ], importpath = "github.com/prysmaticlabs/prysm/v4/beacon-chain/verification", diff --git a/beacon-chain/verification/blob.go b/beacon-chain/verification/blob.go index bb91edc79de7..f444b2b41b40 100644 --- a/beacon-chain/verification/blob.go +++ b/beacon-chain/verification/blob.go @@ -4,6 +4,7 @@ import ( "context" "github.com/pkg/errors" + forkchoicetypes "github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice/types" "github.com/prysmaticlabs/prysm/v4/beacon-chain/state" fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams" "github.com/prysmaticlabs/prysm/v4/config/params" @@ -16,7 +17,7 @@ import ( const ( RequireBlobIndexInBounds Requirement = iota - RequireSlotNotTooEarly + RequireNotFromFutureSlot RequireSlotAboveFinalized RequireValidProposerSignature RequireSidecarParentSeen @@ -32,7 +33,7 @@ const ( // must satisfy 
in order to upgrade an ROBlob to a VerifiedROBlob. var GossipSidecarRequirements = []Requirement{ RequireBlobIndexInBounds, - RequireSlotNotTooEarly, + RequireNotFromFutureSlot, RequireSlotAboveFinalized, RequireValidProposerSignature, RequireSidecarParentSeen, @@ -44,12 +45,30 @@ var GossipSidecarRequirements = []Requirement{ RequireSidecarProposerExpected, } +// InitsyncSidecarRequirements is the list of verification requirements to be used by the init-sync service +// for batch-mode syncing. Because we only perform batch verification as part of the IsDataAvailable method +// for blobs after the block has been verified, and the blobs to be verified are keyed in the cache by the +// block root, it is safe to skip the following verifications. +// RequireSidecarProposerExpected, +// RequireNotFromFutureSlot, +// RequireSlotAboveFinalized, +// RequireSidecarParentSeen, +// RequireSidecarParentValid, +// RequireSidecarParentSlotLower, +// RequireSidecarDescendsFromFinalized, +var InitsyncSidecarRequirements = []Requirement{ + RequireValidProposerSignature, + RequireSidecarKzgProofVerified, + RequireBlobIndexInBounds, + RequireSidecarInclusionProven, +} + var ( ErrBlobInvalid = errors.New("blob failed verification") // ErrBlobIndexInvalid means RequireBlobIndexInBounds failed. ErrBlobIndexInvalid = errors.Wrap(ErrBlobInvalid, "incorrect blob sidecar index") - // ErrSlotTooEarly means RequireSlotNotTooEarly failed. - ErrSlotTooEarly = errors.Wrap(ErrBlobInvalid, "slot is too far in the future") + // ErrFromFutureSlot means RequireNotFromFutureSlot failed. + ErrFromFutureSlot = errors.Wrap(ErrBlobInvalid, "slot is too far in the future") // ErrSlotNotAfterFinalized means RequireSlotAboveFinalized failed. ErrSlotNotAfterFinalized = errors.Wrap(ErrBlobInvalid, "slot <= finalized checkpoint") // ErrInvalidProposerSignature means RequireValidProposerSignature failed. 
@@ -70,7 +89,7 @@ var ( ErrSidecarUnexpectedProposer = errors.Wrap(ErrBlobInvalid, "sidecar was not proposed by the expected proposer_index") ) -type BlobVerifier struct { +type ROBlobVerifier struct { *sharedResources results *results blob blocks.ROBlob @@ -78,19 +97,31 @@ type BlobVerifier struct { verifyBlobCommitment roblobCommitmentVerifier } -type roblobCommitmentVerifier func(blocks.ROBlob) error +type roblobCommitmentVerifier func(...blocks.ROBlob) error + +var _ BlobVerifier = &ROBlobVerifier{} // VerifiedROBlob "upgrades" the wrapped ROBlob to a VerifiedROBlob. // If any of the verifications ran against the blob failed, or some required verifications // were not run, an error will be returned. -func (bv *BlobVerifier) VerifiedROBlob() (blocks.VerifiedROBlob, error) { +func (bv *ROBlobVerifier) VerifiedROBlob() (blocks.VerifiedROBlob, error) { if bv.results.allSatisfied() { return blocks.NewVerifiedROBlob(bv.blob), nil } return blocks.VerifiedROBlob{}, bv.results.errors(ErrBlobInvalid) } -func (bv *BlobVerifier) recordResult(req Requirement, err *error) { +// SatisfyRequirement allows the caller to assert that a requirement has been satisfied. +// This gives us a way to tick the box for a requirement where the usual method would be impractical. +// For example, when batch syncing, forkchoice is only updated at the end of the batch. So the checks that use +// forkchoice, like descends from finalized or parent seen, would necessarily fail. Allowing the caller to +// assert the requirement has been satisfied ensures we have an easy way to audit which piece of code is satisfying +// a requirement outside of this package. 
+func (bv *ROBlobVerifier) SatisfyRequirement(req Requirement) { + bv.recordResult(req, nil) +} + +func (bv *ROBlobVerifier) recordResult(req Requirement, err *error) { if err == nil || *err == nil { bv.results.record(req, nil) return @@ -100,7 +131,7 @@ func (bv *BlobVerifier) recordResult(req Requirement, err *error) { // BlobIndexInBounds represents the follow spec verification: // [REJECT] The sidecar's index is consistent with MAX_BLOBS_PER_BLOCK -- i.e. blob_sidecar.index < MAX_BLOBS_PER_BLOCK. -func (bv *BlobVerifier) BlobIndexInBounds() (err error) { +func (bv *ROBlobVerifier) BlobIndexInBounds() (err error) { defer bv.recordResult(RequireBlobIndexInBounds, &err) if bv.blob.Index >= fieldparams.MaxBlobsPerBlock { log.WithFields(logging.BlobFields(bv.blob)).Debug("Sidecar index >= MAX_BLOBS_PER_BLOCK") @@ -109,19 +140,20 @@ func (bv *BlobVerifier) BlobIndexInBounds() (err error) { return nil } -// SlotNotTooEarly represents the spec verification: +// NotFromFutureSlot represents the spec verification: // [IGNORE] The sidecar is not from a future slot (with a MAXIMUM_GOSSIP_CLOCK_DISPARITY allowance) // -- i.e. validate that block_header.slot <= current_slot -func (bv *BlobVerifier) SlotNotTooEarly() (err error) { - defer bv.recordResult(RequireSlotNotTooEarly, &err) +func (bv *ROBlobVerifier) NotFromFutureSlot() (err error) { + defer bv.recordResult(RequireNotFromFutureSlot, &err) if bv.clock.CurrentSlot() == bv.blob.Slot() { return nil } - // Subtract the max clock disparity from the start slot time. - validAfter := bv.clock.SlotStart(bv.blob.Slot()).Add(-1 * params.BeaconConfig().MaximumGossipClockDisparityDuration()) - // If the difference between now and gt is greater than maximum clock disparity, the block is too far in the future. - if bv.clock.Now().Before(validAfter) { - return ErrSlotTooEarly + // earliestStart represents the time the slot starts, lowered by MAXIMUM_GOSSIP_CLOCK_DISPARITY. 
+ // We lower the time by MAXIMUM_GOSSIP_CLOCK_DISPARITY in case system time is running slightly behind real time. + earliestStart := bv.clock.SlotStart(bv.blob.Slot()).Add(-1 * params.BeaconConfig().MaximumGossipClockDisparityDuration()) + // If the system time is still before earliestStart, we consider the blob from a future slot and return an error. + if bv.clock.Now().Before(earliestStart) { + return ErrFromFutureSlot } return nil } @@ -129,7 +161,7 @@ func (bv *BlobVerifier) SlotNotTooEarly() (err error) { // SlotAboveFinalized represents the spec verification: // [IGNORE] The sidecar is from a slot greater than the latest finalized slot // -- i.e. validate that block_header.slot > compute_start_slot_at_epoch(state.finalized_checkpoint.epoch) -func (bv *BlobVerifier) SlotAboveFinalized() (err error) { +func (bv *ROBlobVerifier) SlotAboveFinalized() (err error) { defer bv.recordResult(RequireSlotAboveFinalized, &err) fcp := bv.fc.FinalizedCheckpoint() fSlot, err := slots.EpochStart(fcp.Epoch) @@ -145,7 +177,7 @@ func (bv *BlobVerifier) SlotAboveFinalized() (err error) { // ValidProposerSignature represents the spec verification: // [REJECT] The proposer signature of blob_sidecar.signed_block_header, // is valid with respect to the block_header.proposer_index pubkey. -func (bv *BlobVerifier) ValidProposerSignature(ctx context.Context) (err error) { +func (bv *ROBlobVerifier) ValidProposerSignature(ctx context.Context) (err error) { defer bv.recordResult(RequireValidProposerSignature, &err) sd := blobToSignatureData(bv.blob) // First check if there is a cached verification that can be reused. @@ -175,12 +207,12 @@ func (bv *BlobVerifier) ValidProposerSignature(ctx context.Context) (err error) // SidecarParentSeen represents the spec verification: // [IGNORE] The sidecar's block's parent (defined by block_header.parent_root) has been seen // (via both gossip and non-gossip sources) (a client MAY queue sidecars for processing once the parent block is retrieved). 
-func (bv *BlobVerifier) SidecarParentSeen(badParent func([32]byte) bool) (err error) { +func (bv *ROBlobVerifier) SidecarParentSeen(parentSeen func([32]byte) bool) (err error) { defer bv.recordResult(RequireSidecarParentSeen, &err) - if bv.fc.HasNode(bv.blob.ParentRoot()) { + if parentSeen != nil && parentSeen(bv.blob.ParentRoot()) { return nil } - if badParent != nil && badParent(bv.blob.ParentRoot()) { + if bv.fc.HasNode(bv.blob.ParentRoot()) { return nil } return ErrSidecarParentNotSeen @@ -188,7 +220,7 @@ func (bv *BlobVerifier) SidecarParentSeen(badParent func([32]byte) bool) (err er // SidecarParentValid represents the spec verification: // [REJECT] The sidecar's block's parent (defined by block_header.parent_root) passes validation. -func (bv *BlobVerifier) SidecarParentValid(badParent func([32]byte) bool) (err error) { +func (bv *ROBlobVerifier) SidecarParentValid(badParent func([32]byte) bool) (err error) { defer bv.recordResult(RequireSidecarParentValid, &err) if badParent != nil && badParent(bv.blob.ParentRoot()) { return ErrSidecarParentInvalid @@ -198,7 +230,7 @@ func (bv *BlobVerifier) SidecarParentValid(badParent func([32]byte) bool) (err e // SidecarParentSlotLower represents the spec verification: // [REJECT] The sidecar is from a higher slot than the sidecar's block's parent (defined by block_header.parent_root). -func (bv *BlobVerifier) SidecarParentSlotLower() (err error) { +func (bv *ROBlobVerifier) SidecarParentSlotLower() (err error) { defer bv.recordResult(RequireSidecarParentSlotLower, &err) parentSlot, err := bv.fc.Slot(bv.blob.ParentRoot()) if err != nil { @@ -213,7 +245,7 @@ func (bv *BlobVerifier) SidecarParentSlotLower() (err error) { // SidecarDescendsFromFinalized represents the spec verification: // [REJECT] The current finalized_checkpoint is an ancestor of the sidecar's block // -- i.e. get_checkpoint_block(store, block_header.parent_root, store.finalized_checkpoint.epoch) == store.finalized_checkpoint.root. 
-func (bv *BlobVerifier) SidecarDescendsFromFinalized() (err error) { +func (bv *ROBlobVerifier) SidecarDescendsFromFinalized() (err error) { defer bv.recordResult(RequireSidecarDescendsFromFinalized, &err) if !bv.fc.IsCanonical(bv.blob.ParentRoot()) { return ErrSidecarNotFinalizedDescendent @@ -223,7 +255,7 @@ func (bv *BlobVerifier) SidecarDescendsFromFinalized() (err error) { // SidecarInclusionProven represents the spec verification: // [REJECT] The sidecar's inclusion proof is valid as verified by verify_blob_sidecar_inclusion_proof(blob_sidecar). -func (bv *BlobVerifier) SidecarInclusionProven() (err error) { +func (bv *ROBlobVerifier) SidecarInclusionProven() (err error) { defer bv.recordResult(RequireSidecarInclusionProven, &err) if err = blocks.VerifyKZGInclusionProof(bv.blob); err != nil { log.WithError(err).WithFields(logging.BlobFields(bv.blob)).Debug("sidecar inclusion proof verification failed") @@ -235,7 +267,7 @@ func (bv *BlobVerifier) SidecarInclusionProven() (err error) { // SidecarKzgProofVerified represents the spec verification: // [REJECT] The sidecar's blob is valid as verified by // verify_blob_kzg_proof(blob_sidecar.blob, blob_sidecar.kzg_commitment, blob_sidecar.kzg_proof). -func (bv *BlobVerifier) SidecarKzgProofVerified() (err error) { +func (bv *ROBlobVerifier) SidecarKzgProofVerified() (err error) { defer bv.recordResult(RequireSidecarKzgProofVerified, &err) if err = bv.verifyBlobCommitment(bv.blob); err != nil { log.WithError(err).WithFields(logging.BlobFields(bv.blob)).Debug("kzg commitment proof verification failed") @@ -249,9 +281,18 @@ func (bv *BlobVerifier) SidecarKzgProofVerified() (err error) { // in the context of the current shuffling (defined by block_header.parent_root/block_header.slot). 
// If the proposer_index cannot immediately be verified against the expected shuffling, the sidecar MAY be queued // for later processing while proposers for the block's branch are calculated -- in such a case do not REJECT, instead IGNORE this message. -func (bv *BlobVerifier) SidecarProposerExpected(ctx context.Context) (err error) { +func (bv *ROBlobVerifier) SidecarProposerExpected(ctx context.Context) (err error) { defer bv.recordResult(RequireSidecarProposerExpected, &err) - idx, cached := bv.pc.Proposer(bv.blob.ParentRoot(), bv.blob.Slot()) + e := slots.ToEpoch(bv.blob.Slot()) + if e > 0 { + e = e - 1 + } + r, err := bv.fc.TargetRootForEpoch(bv.blob.ParentRoot(), e) + if err != nil { + return ErrSidecarUnexpectedProposer + } + c := &forkchoicetypes.Checkpoint{Root: r, Epoch: e} + idx, cached := bv.pc.Proposer(c, bv.blob.Slot()) if !cached { pst, err := bv.parentState(ctx) if err != nil { @@ -273,7 +314,7 @@ func (bv *BlobVerifier) SidecarProposerExpected(ctx context.Context) (err error) return nil } -func (bv *BlobVerifier) parentState(ctx context.Context) (state.BeaconState, error) { +func (bv *ROBlobVerifier) parentState(ctx context.Context) (state.BeaconState, error) { if bv.parent != nil { return bv.parent, nil } diff --git a/beacon-chain/verification/blob_test.go b/beacon-chain/verification/blob_test.go index 1c69ec5a9589..e7071da2385f 100644 --- a/beacon-chain/verification/blob_test.go +++ b/beacon-chain/verification/blob_test.go @@ -27,13 +27,13 @@ func TestBlobIndexInBounds(t *testing.T) { _, blobs := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, 0, 1) b := blobs[0] // set Index to a value that is out of bounds - v := ini.NewBlobVerifier(b, GossipSidecarRequirements...) 
+ v := ini.NewBlobVerifier(b, GossipSidecarRequirements) require.NoError(t, v.BlobIndexInBounds()) require.Equal(t, true, v.results.executed(RequireBlobIndexInBounds)) require.NoError(t, v.results.result(RequireBlobIndexInBounds)) b.Index = fieldparams.MaxBlobsPerBlock - v = ini.NewBlobVerifier(b, GossipSidecarRequirements...) + v = ini.NewBlobVerifier(b, GossipSidecarRequirements) require.ErrorIs(t, v.BlobIndexInBounds(), ErrBlobIndexInvalid) require.Equal(t, true, v.results.executed(RequireBlobIndexInBounds)) require.NotNil(t, v.results.result(RequireBlobIndexInBounds)) @@ -52,27 +52,27 @@ func TestSlotNotTooEarly(t *testing.T) { // This clock will give a current slot of 1 on the nose happyClock := startup.NewClock(genesis, [32]byte{}, startup.WithNower(func() time.Time { return now })) ini := Initializer{shared: &sharedResources{clock: happyClock}} - v := ini.NewBlobVerifier(b, GossipSidecarRequirements...) - require.NoError(t, v.SlotNotTooEarly()) - require.Equal(t, true, v.results.executed(RequireSlotNotTooEarly)) - require.NoError(t, v.results.result(RequireSlotNotTooEarly)) + v := ini.NewBlobVerifier(b, GossipSidecarRequirements) + require.NoError(t, v.NotFromFutureSlot()) + require.Equal(t, true, v.results.executed(RequireNotFromFutureSlot)) + require.NoError(t, v.results.result(RequireNotFromFutureSlot)) // Since we have an early return for slots that are directly equal, give a time that is less than max disparity // but still in the previous slot. closeClock := startup.NewClock(genesis, [32]byte{}, startup.WithNower(func() time.Time { return now.Add(-1 * params.BeaconConfig().MaximumGossipClockDisparityDuration() / 2) })) ini = Initializer{shared: &sharedResources{clock: closeClock}} - v = ini.NewBlobVerifier(b, GossipSidecarRequirements...) 
- require.NoError(t, v.SlotNotTooEarly()) + v = ini.NewBlobVerifier(b, GossipSidecarRequirements) + require.NoError(t, v.NotFromFutureSlot()) // This clock will give a current slot of 0, with now coming more than max clock disparity before slot 1 disparate := now.Add(-2 * params.BeaconConfig().MaximumGossipClockDisparityDuration()) dispClock := startup.NewClock(genesis, [32]byte{}, startup.WithNower(func() time.Time { return disparate })) // Set up initializer to use the clock that will set now to a little to far before slot 1 ini = Initializer{shared: &sharedResources{clock: dispClock}} - v = ini.NewBlobVerifier(b, GossipSidecarRequirements...) - require.ErrorIs(t, v.SlotNotTooEarly(), ErrSlotTooEarly) - require.Equal(t, true, v.results.executed(RequireSlotNotTooEarly)) - require.NotNil(t, v.results.result(RequireSlotNotTooEarly)) + v = ini.NewBlobVerifier(b, GossipSidecarRequirements) + require.ErrorIs(t, v.NotFromFutureSlot(), ErrFromFutureSlot) + require.Equal(t, true, v.results.executed(RequireNotFromFutureSlot)) + require.NotNil(t, v.results.result(RequireNotFromFutureSlot)) } func TestSlotAboveFinalized(t *testing.T) { @@ -114,7 +114,7 @@ func TestSlotAboveFinalized(t *testing.T) { _, blobs := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, 0, 1) b := blobs[0] b.SignedBlockHeader.Header.Slot = c.slot - v := ini.NewBlobVerifier(b, GossipSidecarRequirements...) + v := ini.NewBlobVerifier(b, GossipSidecarRequirements) err := v.SlotAboveFinalized() require.Equal(t, true, v.results.executed(RequireSlotAboveFinalized)) if c.err == nil { @@ -146,7 +146,7 @@ func TestValidProposerSignature_Cached(t *testing.T) { }, } ini := Initializer{shared: &sharedResources{sc: sc, sr: &mockStateByRooter{sbr: sbrErrorIfCalled(t)}}} - v := ini.NewBlobVerifier(b, GossipSidecarRequirements...) 
+ v := ini.NewBlobVerifier(b, GossipSidecarRequirements) require.NoError(t, v.ValidProposerSignature(ctx)) require.Equal(t, true, v.results.executed(RequireValidProposerSignature)) require.NoError(t, v.results.result(RequireValidProposerSignature)) @@ -159,7 +159,7 @@ func TestValidProposerSignature_Cached(t *testing.T) { return true, errors.New("derp") } ini = Initializer{shared: &sharedResources{sc: sc, sr: &mockStateByRooter{sbr: sbrErrorIfCalled(t)}}} - v = ini.NewBlobVerifier(b, GossipSidecarRequirements...) + v = ini.NewBlobVerifier(b, GossipSidecarRequirements) require.ErrorIs(t, v.ValidProposerSignature(ctx), ErrInvalidProposerSignature) require.Equal(t, true, v.results.executed(RequireValidProposerSignature)) require.NotNil(t, v.results.result(RequireValidProposerSignature)) @@ -182,14 +182,14 @@ func TestValidProposerSignature_CacheMiss(t *testing.T) { }, } ini := Initializer{shared: &sharedResources{sc: sc, sr: sbrForValOverride(b.ProposerIndex(), ðpb.Validator{})}} - v := ini.NewBlobVerifier(b, GossipSidecarRequirements...) + v := ini.NewBlobVerifier(b, GossipSidecarRequirements) require.NoError(t, v.ValidProposerSignature(ctx)) require.Equal(t, true, v.results.executed(RequireValidProposerSignature)) require.NoError(t, v.results.result(RequireValidProposerSignature)) // simulate state not found ini = Initializer{shared: &sharedResources{sc: sc, sr: sbrNotFound(t, expectedSd.Parent)}} - v = ini.NewBlobVerifier(b, GossipSidecarRequirements...) + v = ini.NewBlobVerifier(b, GossipSidecarRequirements) require.ErrorIs(t, v.ValidProposerSignature(ctx), ErrInvalidProposerSignature) require.Equal(t, true, v.results.executed(RequireValidProposerSignature)) require.NotNil(t, v.results.result(RequireValidProposerSignature)) @@ -206,7 +206,7 @@ func TestValidProposerSignature_CacheMiss(t *testing.T) { }, } ini = Initializer{shared: &sharedResources{sc: sc, sr: sbr}} - v = ini.NewBlobVerifier(b, GossipSidecarRequirements...) 
+ v = ini.NewBlobVerifier(b, GossipSidecarRequirements) // make sure all the histories are clean before calling the method // so we don't get polluted by previous usages @@ -255,14 +255,14 @@ func TestSidecarParentSeen(t *testing.T) { t.Run("happy path", func(t *testing.T) { ini := Initializer{shared: &sharedResources{fc: fcHas}} - v := ini.NewBlobVerifier(b, GossipSidecarRequirements...) + v := ini.NewBlobVerifier(b, GossipSidecarRequirements) require.NoError(t, v.SidecarParentSeen(nil)) require.Equal(t, true, v.results.executed(RequireSidecarParentSeen)) require.NoError(t, v.results.result(RequireSidecarParentSeen)) }) t.Run("HasNode false, no badParent cb, expected error", func(t *testing.T) { ini := Initializer{shared: &sharedResources{fc: fcLacks}} - v := ini.NewBlobVerifier(b, GossipSidecarRequirements...) + v := ini.NewBlobVerifier(b, GossipSidecarRequirements) require.ErrorIs(t, v.SidecarParentSeen(nil), ErrSidecarParentNotSeen) require.Equal(t, true, v.results.executed(RequireSidecarParentSeen)) require.NotNil(t, v.results.result(RequireSidecarParentSeen)) @@ -270,14 +270,14 @@ func TestSidecarParentSeen(t *testing.T) { t.Run("HasNode false, badParent true", func(t *testing.T) { ini := Initializer{shared: &sharedResources{fc: fcLacks}} - v := ini.NewBlobVerifier(b, GossipSidecarRequirements...) + v := ini.NewBlobVerifier(b, GossipSidecarRequirements) require.NoError(t, v.SidecarParentSeen(badParentCb(t, b.ParentRoot(), true))) require.Equal(t, true, v.results.executed(RequireSidecarParentSeen)) require.NoError(t, v.results.result(RequireSidecarParentSeen)) }) t.Run("HasNode false, badParent false", func(t *testing.T) { ini := Initializer{shared: &sharedResources{fc: fcLacks}} - v := ini.NewBlobVerifier(b, GossipSidecarRequirements...) 
+ v := ini.NewBlobVerifier(b, GossipSidecarRequirements) require.ErrorIs(t, v.SidecarParentSeen(badParentCb(t, b.ParentRoot(), false)), ErrSidecarParentNotSeen) require.Equal(t, true, v.results.executed(RequireSidecarParentSeen)) require.NotNil(t, v.results.result(RequireSidecarParentSeen)) @@ -289,14 +289,14 @@ func TestSidecarParentValid(t *testing.T) { b := blobs[0] t.Run("parent valid", func(t *testing.T) { ini := Initializer{shared: &sharedResources{}} - v := ini.NewBlobVerifier(b, GossipSidecarRequirements...) + v := ini.NewBlobVerifier(b, GossipSidecarRequirements) require.NoError(t, v.SidecarParentValid(badParentCb(t, b.ParentRoot(), false))) require.Equal(t, true, v.results.executed(RequireSidecarParentValid)) require.NoError(t, v.results.result(RequireSidecarParentValid)) }) t.Run("parent not valid", func(t *testing.T) { ini := Initializer{shared: &sharedResources{}} - v := ini.NewBlobVerifier(b, GossipSidecarRequirements...) + v := ini.NewBlobVerifier(b, GossipSidecarRequirements) require.ErrorIs(t, v.SidecarParentValid(badParentCb(t, b.ParentRoot(), true)), ErrSidecarParentInvalid) require.Equal(t, true, v.results.executed(RequireSidecarParentValid)) require.NotNil(t, v.results.result(RequireSidecarParentValid)) @@ -340,7 +340,7 @@ func TestSidecarParentSlotLower(t *testing.T) { } return c.fcSlot, c.fcErr }}}} - v := ini.NewBlobVerifier(b, GossipSidecarRequirements...) + v := ini.NewBlobVerifier(b, GossipSidecarRequirements) err := v.SidecarParentSlotLower() require.Equal(t, true, v.results.executed(RequireSidecarParentSlotLower)) if c.err == nil { @@ -364,7 +364,7 @@ func TestSidecarDescendsFromFinalized(t *testing.T) { } return false }}}} - v := ini.NewBlobVerifier(b, GossipSidecarRequirements...) 
+ v := ini.NewBlobVerifier(b, GossipSidecarRequirements) require.ErrorIs(t, v.SidecarDescendsFromFinalized(), ErrSidecarNotFinalizedDescendent) require.Equal(t, true, v.results.executed(RequireSidecarDescendsFromFinalized)) require.NotNil(t, v.results.result(RequireSidecarDescendsFromFinalized)) @@ -376,7 +376,7 @@ func TestSidecarDescendsFromFinalized(t *testing.T) { } return true }}}} - v := ini.NewBlobVerifier(b, GossipSidecarRequirements...) + v := ini.NewBlobVerifier(b, GossipSidecarRequirements) require.NoError(t, v.SidecarDescendsFromFinalized()) require.Equal(t, true, v.results.executed(RequireSidecarDescendsFromFinalized)) require.NoError(t, v.results.result(RequireSidecarDescendsFromFinalized)) @@ -389,7 +389,7 @@ func TestSidecarInclusionProven(t *testing.T) { b := blobs[0] ini := Initializer{} - v := ini.NewBlobVerifier(b, GossipSidecarRequirements...) + v := ini.NewBlobVerifier(b, GossipSidecarRequirements) require.NoError(t, v.SidecarInclusionProven()) require.Equal(t, true, v.results.executed(RequireSidecarInclusionProven)) require.NoError(t, v.results.result(RequireSidecarInclusionProven)) @@ -397,7 +397,7 @@ func TestSidecarInclusionProven(t *testing.T) { // Invert bits of the first byte of the body root to mess up the proof byte0 := b.SignedBlockHeader.Header.BodyRoot[0] b.SignedBlockHeader.Header.BodyRoot[0] = byte0 ^ 255 - v = ini.NewBlobVerifier(b, GossipSidecarRequirements...) 
+ v = ini.NewBlobVerifier(b, GossipSidecarRequirements) require.ErrorIs(t, v.SidecarInclusionProven(), ErrSidecarInclusionProofInvalid) require.Equal(t, true, v.results.executed(RequireSidecarInclusionProven)) require.NotNil(t, v.results.result(RequireSidecarInclusionProven)) @@ -407,20 +407,20 @@ func TestSidecarKzgProofVerified(t *testing.T) { // GenerateTestDenebBlockWithSidecar is supposed to generate valid commitments _, blobs := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, 1, 1) b := blobs[0] - passes := func(vb blocks.ROBlob) error { - require.Equal(t, true, bytes.Equal(b.KzgCommitment, vb.KzgCommitment)) + passes := func(vb ...blocks.ROBlob) error { + require.Equal(t, true, bytes.Equal(b.KzgCommitment, vb[0].KzgCommitment)) return nil } - v := &BlobVerifier{verifyBlobCommitment: passes, results: newResults(), blob: b} + v := &ROBlobVerifier{verifyBlobCommitment: passes, results: newResults(), blob: b} require.NoError(t, v.SidecarKzgProofVerified()) require.Equal(t, true, v.results.executed(RequireSidecarKzgProofVerified)) require.NoError(t, v.results.result(RequireSidecarKzgProofVerified)) - fails := func(vb blocks.ROBlob) error { - require.Equal(t, true, bytes.Equal(b.KzgCommitment, vb.KzgCommitment)) + fails := func(vb ...blocks.ROBlob) error { + require.Equal(t, true, bytes.Equal(b.KzgCommitment, vb[0].KzgCommitment)) return errors.New("bad blob") } - v = &BlobVerifier{verifyBlobCommitment: fails, results: newResults(), blob: b} + v = &ROBlobVerifier{results: newResults(), blob: b, verifyBlobCommitment: fails} require.ErrorIs(t, v.SidecarKzgProofVerified(), ErrSidecarKzgProofInvalid) require.Equal(t, true, v.results.executed(RequireSidecarKzgProofVerified)) require.NotNil(t, v.results.result(RequireSidecarKzgProofVerified)) @@ -431,22 +431,22 @@ func TestSidecarProposerExpected(t *testing.T) { _, blobs := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, 1, 1) b := blobs[0] t.Run("cached, matches", func(t *testing.T) { - ini := 
Initializer{shared: &sharedResources{pc: &mockProposerCache{ProposerCB: pcReturnsIdx(b.ProposerIndex())}}} - v := ini.NewBlobVerifier(b, GossipSidecarRequirements...) + ini := Initializer{shared: &sharedResources{pc: &mockProposerCache{ProposerCB: pcReturnsIdx(b.ProposerIndex())}, fc: &mockForkchoicer{TargetRootForEpochCB: fcReturnsTargetRoot([32]byte{})}}} + v := ini.NewBlobVerifier(b, GossipSidecarRequirements) require.NoError(t, v.SidecarProposerExpected(ctx)) require.Equal(t, true, v.results.executed(RequireSidecarProposerExpected)) require.NoError(t, v.results.result(RequireSidecarProposerExpected)) }) t.Run("cached, does not match", func(t *testing.T) { - ini := Initializer{shared: &sharedResources{pc: &mockProposerCache{ProposerCB: pcReturnsIdx(b.ProposerIndex() + 1)}}} - v := ini.NewBlobVerifier(b, GossipSidecarRequirements...) + ini := Initializer{shared: &sharedResources{pc: &mockProposerCache{ProposerCB: pcReturnsIdx(b.ProposerIndex() + 1)}, fc: &mockForkchoicer{TargetRootForEpochCB: fcReturnsTargetRoot([32]byte{})}}} + v := ini.NewBlobVerifier(b, GossipSidecarRequirements) require.ErrorIs(t, v.SidecarProposerExpected(ctx), ErrSidecarUnexpectedProposer) require.Equal(t, true, v.results.executed(RequireSidecarProposerExpected)) require.NotNil(t, v.results.result(RequireSidecarProposerExpected)) }) t.Run("not cached, state lookup failure", func(t *testing.T) { - ini := Initializer{shared: &sharedResources{sr: sbrNotFound(t, b.ParentRoot()), pc: &mockProposerCache{ProposerCB: pcReturnsNotFound()}}} - v := ini.NewBlobVerifier(b, GossipSidecarRequirements...) 
+ ini := Initializer{shared: &sharedResources{sr: sbrNotFound(t, b.ParentRoot()), pc: &mockProposerCache{ProposerCB: pcReturnsNotFound()}, fc: &mockForkchoicer{TargetRootForEpochCB: fcReturnsTargetRoot([32]byte{})}}} + v := ini.NewBlobVerifier(b, GossipSidecarRequirements) require.ErrorIs(t, v.SidecarProposerExpected(ctx), ErrSidecarUnexpectedProposer) require.Equal(t, true, v.results.executed(RequireSidecarProposerExpected)) require.NotNil(t, v.results.result(RequireSidecarProposerExpected)) @@ -461,8 +461,8 @@ func TestSidecarProposerExpected(t *testing.T) { return b.ProposerIndex(), nil }, } - ini := Initializer{shared: &sharedResources{sr: sbrForValOverride(b.ProposerIndex(), ðpb.Validator{}), pc: pc}} - v := ini.NewBlobVerifier(b, GossipSidecarRequirements...) + ini := Initializer{shared: &sharedResources{sr: sbrForValOverride(b.ProposerIndex(), ðpb.Validator{}), pc: pc, fc: &mockForkchoicer{TargetRootForEpochCB: fcReturnsTargetRoot([32]byte{})}}} + v := ini.NewBlobVerifier(b, GossipSidecarRequirements) require.NoError(t, v.SidecarProposerExpected(ctx)) require.Equal(t, true, v.results.executed(RequireSidecarProposerExpected)) require.NoError(t, v.results.result(RequireSidecarProposerExpected)) @@ -476,8 +476,8 @@ func TestSidecarProposerExpected(t *testing.T) { return b.ProposerIndex() + 1, nil }, } - ini := Initializer{shared: &sharedResources{sr: sbrForValOverride(b.ProposerIndex(), ðpb.Validator{}), pc: pc}} - v := ini.NewBlobVerifier(b, GossipSidecarRequirements...) 
+ ini := Initializer{shared: &sharedResources{sr: sbrForValOverride(b.ProposerIndex(), ðpb.Validator{}), pc: pc, fc: &mockForkchoicer{TargetRootForEpochCB: fcReturnsTargetRoot([32]byte{})}}} + v := ini.NewBlobVerifier(b, GossipSidecarRequirements) require.ErrorIs(t, v.SidecarProposerExpected(ctx), ErrSidecarUnexpectedProposer) require.Equal(t, true, v.results.executed(RequireSidecarProposerExpected)) require.NotNil(t, v.results.result(RequireSidecarProposerExpected)) @@ -491,8 +491,8 @@ func TestSidecarProposerExpected(t *testing.T) { return 0, errors.New("ComputeProposer failed") }, } - ini := Initializer{shared: &sharedResources{sr: sbrForValOverride(b.ProposerIndex(), ðpb.Validator{}), pc: pc}} - v := ini.NewBlobVerifier(b, GossipSidecarRequirements...) + ini := Initializer{shared: &sharedResources{sr: sbrForValOverride(b.ProposerIndex(), ðpb.Validator{}), pc: pc, fc: &mockForkchoicer{TargetRootForEpochCB: fcReturnsTargetRoot([32]byte{})}}} + v := ini.NewBlobVerifier(b, GossipSidecarRequirements) require.ErrorIs(t, v.SidecarProposerExpected(ctx), ErrSidecarUnexpectedProposer) require.Equal(t, true, v.results.executed(RequireSidecarProposerExpected)) require.NotNil(t, v.results.result(RequireSidecarProposerExpected)) @@ -503,7 +503,7 @@ func TestRequirementSatisfaction(t *testing.T) { _, blobs := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, 1, 1) b := blobs[0] ini := Initializer{} - v := ini.NewBlobVerifier(b, GossipSidecarRequirements...) 
+ v := ini.NewBlobVerifier(b, GossipSidecarRequirements) _, err := v.VerifiedROBlob() require.ErrorIs(t, err, ErrBlobInvalid) @@ -529,6 +529,7 @@ type mockForkchoicer struct { HasNodeCB func([32]byte) bool IsCanonicalCB func(root [32]byte) bool SlotCB func([32]byte) (primitives.Slot, error) + TargetRootForEpochCB func([32]byte, primitives.Epoch) ([32]byte, error) } var _ Forkchoicer = &mockForkchoicer{} @@ -549,6 +550,16 @@ func (m *mockForkchoicer) Slot(root [32]byte) (primitives.Slot, error) { return m.SlotCB(root) } +func (m *mockForkchoicer) TargetRootForEpoch(root [32]byte, epoch primitives.Epoch) ([32]byte, error) { + return m.TargetRootForEpochCB(root, epoch) +} + +func fcReturnsTargetRoot(root [32]byte) func([32]byte, primitives.Epoch) ([32]byte, error) { + return func([32]byte, primitives.Epoch) ([32]byte, error) { + return root, nil + } +} + type mockSignatureCache struct { svCalledForSig map[SignatureData]bool svcb func(sig SignatureData) (bool, error) @@ -634,27 +645,27 @@ func (v *validxStateOverride) ValidatorAtIndex(idx primitives.ValidatorIndex) (* type mockProposerCache struct { ComputeProposerCB func(ctx context.Context, root [32]byte, slot primitives.Slot, pst state.BeaconState) (primitives.ValidatorIndex, error) - ProposerCB func(root [32]byte, slot primitives.Slot) (primitives.ValidatorIndex, bool) + ProposerCB func(c *forkchoicetypes.Checkpoint, slot primitives.Slot) (primitives.ValidatorIndex, bool) } func (p *mockProposerCache) ComputeProposer(ctx context.Context, root [32]byte, slot primitives.Slot, pst state.BeaconState) (primitives.ValidatorIndex, error) { return p.ComputeProposerCB(ctx, root, slot, pst) } -func (p *mockProposerCache) Proposer(root [32]byte, slot primitives.Slot) (primitives.ValidatorIndex, bool) { - return p.ProposerCB(root, slot) +func (p *mockProposerCache) Proposer(c *forkchoicetypes.Checkpoint, slot primitives.Slot) (primitives.ValidatorIndex, bool) { + return p.ProposerCB(c, slot) } var _ ProposerCache = 
&mockProposerCache{} -func pcReturnsIdx(idx primitives.ValidatorIndex) func(root [32]byte, slot primitives.Slot) (primitives.ValidatorIndex, bool) { - return func(root [32]byte, slot primitives.Slot) (primitives.ValidatorIndex, bool) { +func pcReturnsIdx(idx primitives.ValidatorIndex) func(c *forkchoicetypes.Checkpoint, slot primitives.Slot) (primitives.ValidatorIndex, bool) { + return func(c *forkchoicetypes.Checkpoint, slot primitives.Slot) (primitives.ValidatorIndex, bool) { return idx, true } } -func pcReturnsNotFound() func(root [32]byte, slot primitives.Slot) (primitives.ValidatorIndex, bool) { - return func(root [32]byte, slot primitives.Slot) (primitives.ValidatorIndex, bool) { +func pcReturnsNotFound() func(c *forkchoicetypes.Checkpoint, slot primitives.Slot) (primitives.ValidatorIndex, bool) { + return func(c *forkchoicetypes.Checkpoint, slot primitives.Slot) (primitives.ValidatorIndex, bool) { return 0, false } } diff --git a/beacon-chain/verification/cache.go b/beacon-chain/verification/cache.go index 4a60914fb7bf..7632a30b2dc2 100644 --- a/beacon-chain/verification/cache.go +++ b/beacon-chain/verification/cache.go @@ -11,6 +11,7 @@ import ( "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/helpers" "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/signing" "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/transition" + forkchoicetypes "github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice/types" "github.com/prysmaticlabs/prysm/v4/beacon-chain/state" "github.com/prysmaticlabs/prysm/v4/config/params" "github.com/prysmaticlabs/prysm/v4/consensus-types/primitives" @@ -137,7 +138,7 @@ func (c *sigCache) SignatureVerified(sig SignatureData) (bool, error) { // across multiple values. 
type ProposerCache interface { ComputeProposer(ctx context.Context, root [32]byte, slot primitives.Slot, pst state.BeaconState) (primitives.ValidatorIndex, error) - Proposer(root [32]byte, slot primitives.Slot) (primitives.ValidatorIndex, bool) + Proposer(c *forkchoicetypes.Checkpoint, slot primitives.Slot) (primitives.ValidatorIndex, bool) } func newPropCache() *propCache { @@ -163,7 +164,10 @@ func (*propCache) ComputeProposer(ctx context.Context, parent [32]byte, slot pri // Proposer returns the validator index if it is found in the cache, along with a boolean indicating // whether the value was present, similar to accessing an lru or go map. -func (*propCache) Proposer(_ [32]byte, _ primitives.Slot) (primitives.ValidatorIndex, bool) { - // TODO: replace with potuz' proposer id cache - return 0, false +func (*propCache) Proposer(c *forkchoicetypes.Checkpoint, slot primitives.Slot) (primitives.ValidatorIndex, bool) { + id, err := helpers.ProposerIndexAtSlotFromCheckpoint(c, slot) + if err != nil { + return 0, false + } + return id, true } diff --git a/beacon-chain/verification/cache_test.go b/beacon-chain/verification/cache_test.go index 3aa769a41a0d..8276aa3583e5 100644 --- a/beacon-chain/verification/cache_test.go +++ b/beacon-chain/verification/cache_test.go @@ -5,6 +5,7 @@ import ( "testing" "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/signing" + forkchoicetypes "github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice/types" "github.com/prysmaticlabs/prysm/v4/consensus-types/blocks" "github.com/prysmaticlabs/prysm/v4/consensus-types/primitives" "github.com/prysmaticlabs/prysm/v4/crypto/bls" @@ -102,7 +103,7 @@ func TestProposerCache(t *testing.T) { st, _ := util.DeterministicGenesisStateDeneb(t, 3) pc := newPropCache() - _, cached := pc.Proposer([32]byte{}, 1) + _, cached := pc.Proposer(&forkchoicetypes.Checkpoint{}, 1) // should not be cached yet require.Equal(t, false, cached) @@ -112,7 +113,7 @@ func TestProposerCache(t *testing.T) { 
require.NoError(t, err) require.Equal(t, primitives.ValidatorIndex(expectedIdx), idx) - idx, cached = pc.Proposer([32]byte{}, 1) + idx, cached = pc.Proposer(&forkchoicetypes.Checkpoint{}, 1) // TODO: update this test when we integrate a proposer id cache require.Equal(t, false, cached) require.Equal(t, primitives.ValidatorIndex(0), idx) diff --git a/beacon-chain/verification/fake.go b/beacon-chain/verification/fake.go index 342b0fcfddf2..128bbad0580c 100644 --- a/beacon-chain/verification/fake.go +++ b/beacon-chain/verification/fake.go @@ -1,6 +1,10 @@ package verification -import "github.com/prysmaticlabs/prysm/v4/consensus-types/blocks" +import ( + "testing" + + "github.com/prysmaticlabs/prysm/v4/consensus-types/blocks" +) // BlobSidecarNoop is a FAKE verification function that simply launders a ROBlob->VerifiedROBlob. // TODO: find all code that uses this method and replace it with full verification. @@ -17,3 +21,24 @@ func BlobSidecarSliceNoop(b []blocks.ROBlob) ([]blocks.VerifiedROBlob, error) { } return vbs, nil } + +// FakeVerifyForTest can be used by tests that need a VerifiedROBlob but don't want to do all the +// expensive set up to perform full validation. +func FakeVerifyForTest(t *testing.T, b blocks.ROBlob) blocks.VerifiedROBlob { + // log so that t is truly required + t.Log("producing fake VerifiedROBlob for a test") + return blocks.NewVerifiedROBlob(b) +} + +// FakeVerifySliceForTest can be used by tests that need a []VerifiedROBlob but don't want to do all the +// expensive set up to perform full validation. +func FakeVerifySliceForTest(t *testing.T, b []blocks.ROBlob) []blocks.VerifiedROBlob { + // log so that t is truly required + t.Log("producing fake []VerifiedROBlob for a test") + // tautological assertion that ensures this function can only be used in tests. 
+ vbs := make([]blocks.VerifiedROBlob, len(b)) + for i := range b { + vbs[i] = blocks.NewVerifiedROBlob(b[i]) + } + return vbs +} diff --git a/beacon-chain/verification/initializer.go b/beacon-chain/verification/initializer.go index caa69a59a234..7abf842c8bda 100644 --- a/beacon-chain/verification/initializer.go +++ b/beacon-chain/verification/initializer.go @@ -20,6 +20,7 @@ type Forkchoicer interface { HasNode([32]byte) bool IsCanonical(root [32]byte) bool Slot([32]byte) (primitives.Slot, error) + TargetRootForEpoch([32]byte, primitives.Epoch) ([32]byte, error) } // StateByRooter describes a stategen-ish type that can produce arbitrary states by their root @@ -45,12 +46,12 @@ type Initializer struct { } // NewBlobVerifier creates a BlobVerifier for a single blob, with the given set of requirements. -func (ini *Initializer) NewBlobVerifier(b blocks.ROBlob, reqs ...Requirement) *BlobVerifier { - return &BlobVerifier{ +func (ini *Initializer) NewBlobVerifier(b blocks.ROBlob, reqs []Requirement) *ROBlobVerifier { + return &ROBlobVerifier{ sharedResources: ini.shared, blob: b, results: newResults(reqs...), - verifyBlobCommitment: kzg.VerifyROBlobCommitment, + verifyBlobCommitment: kzg.Verify, } } diff --git a/beacon-chain/verification/interface.go b/beacon-chain/verification/interface.go new file mode 100644 index 000000000000..94a15655b86b --- /dev/null +++ b/beacon-chain/verification/interface.go @@ -0,0 +1,31 @@ +package verification + +import ( + "context" + + "github.com/prysmaticlabs/prysm/v4/consensus-types/blocks" +) + +// BlobVerifier defines the methods implemented by the ROBlobVerifier. +// It is mainly intended to make mocks and tests more straightforward, and to deal +// with the awkwardness of mocking a concrete type that returns a concrete type +// in tests outside of this package. 
+type BlobVerifier interface { + VerifiedROBlob() (blocks.VerifiedROBlob, error) + BlobIndexInBounds() (err error) + NotFromFutureSlot() (err error) + SlotAboveFinalized() (err error) + ValidProposerSignature(ctx context.Context) (err error) + SidecarParentSeen(parentSeen func([32]byte) bool) (err error) + SidecarParentValid(badParent func([32]byte) bool) (err error) + SidecarParentSlotLower() (err error) + SidecarDescendsFromFinalized() (err error) + SidecarInclusionProven() (err error) + SidecarKzgProofVerified() (err error) + SidecarProposerExpected(ctx context.Context) (err error) + SatisfyRequirement(Requirement) +} + +// NewBlobVerifier is a function signature that can be used by code that needs to be +// able to mock Initializer.NewBlobVerifier without complex setup. +type NewBlobVerifier func(b blocks.ROBlob, reqs []Requirement) BlobVerifier diff --git a/beacon-chain/verification/mock.go b/beacon-chain/verification/mock.go new file mode 100644 index 000000000000..9cf13c6cee54 --- /dev/null +++ b/beacon-chain/verification/mock.go @@ -0,0 +1,74 @@ +package verification + +import ( + "context" + + "github.com/prysmaticlabs/prysm/v4/consensus-types/blocks" +) + +type MockBlobVerifier struct { + ErrBlobIndexInBounds error + ErrSlotTooEarly error + ErrSlotAboveFinalized error + ErrValidProposerSignature error + ErrSidecarParentSeen error + ErrSidecarParentValid error + ErrSidecarParentSlotLower error + ErrSidecarDescendsFromFinalized error + ErrSidecarInclusionProven error + ErrSidecarKzgProofVerified error + ErrSidecarProposerExpected error + cbVerifiedROBlob func() (blocks.VerifiedROBlob, error) +} + +func (m *MockBlobVerifier) VerifiedROBlob() (blocks.VerifiedROBlob, error) { + return m.cbVerifiedROBlob() +} + +func (m *MockBlobVerifier) BlobIndexInBounds() (err error) { + return m.ErrBlobIndexInBounds +} + +func (m *MockBlobVerifier) NotFromFutureSlot() (err error) { + return m.ErrSlotTooEarly +} + +func (m *MockBlobVerifier) SlotAboveFinalized() (err error) 
{ + return m.ErrSlotAboveFinalized +} + +func (m *MockBlobVerifier) ValidProposerSignature(_ context.Context) (err error) { + return m.ErrValidProposerSignature +} + +func (m *MockBlobVerifier) SidecarParentSeen(_ func([32]byte) bool) (err error) { + return m.ErrSidecarParentSeen +} + +func (m *MockBlobVerifier) SidecarParentValid(_ func([32]byte) bool) (err error) { + return m.ErrSidecarParentValid +} + +func (m *MockBlobVerifier) SidecarParentSlotLower() (err error) { + return m.ErrSidecarParentSlotLower +} + +func (m *MockBlobVerifier) SidecarDescendsFromFinalized() (err error) { + return m.ErrSidecarDescendsFromFinalized +} + +func (m *MockBlobVerifier) SidecarInclusionProven() (err error) { + return m.ErrSidecarInclusionProven +} + +func (m *MockBlobVerifier) SidecarKzgProofVerified() (err error) { + return m.ErrSidecarKzgProofVerified +} + +func (m *MockBlobVerifier) SidecarProposerExpected(_ context.Context) (err error) { + return m.ErrSidecarProposerExpected +} + +func (*MockBlobVerifier) SatisfyRequirement(_ Requirement) {} + +var _ BlobVerifier = &MockBlobVerifier{} diff --git a/cmd/beacon-chain/flags/base.go b/cmd/beacon-chain/flags/base.go index f9aa0773edda..fcfb549a5cec 100644 --- a/cmd/beacon-chain/flags/base.go +++ b/cmd/beacon-chain/flags/base.go @@ -12,7 +12,7 @@ var ( // MevRelayEndpoint provides an HTTP access endpoint to a MEV builder network. 
MevRelayEndpoint = &cli.StringFlag{ Name: "http-mev-relay", - Usage: "A MEV builder relay string http endpoint, this wil be used to interact MEV builder network using API defined in: https://ethereum.github.io/builder-specs/#/Builder", + Usage: "A MEV builder relay string http endpoint, this will be used to interact MEV builder network using API defined in: https://ethereum.github.io/builder-specs/#/Builder", Value: "", } MaxBuilderConsecutiveMissedSlots = &cli.IntFlag{ diff --git a/cmd/beacon-chain/storage/options.go b/cmd/beacon-chain/storage/options.go index d4ad6ef5c18c..d69db14a3090 100644 --- a/cmd/beacon-chain/storage/options.go +++ b/cmd/beacon-chain/storage/options.go @@ -53,7 +53,7 @@ func blobStoragePath(c *cli.Context) string { var errInvalidBlobRetentionEpochs = errors.New("value is smaller than spec minimum") -// blobRetentionEpoch returns the spec deffault MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUEST +// blobRetentionEpoch returns the spec default MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUEST // or a user-specified flag overriding this value. If a user-specified override is // smaller than the spec default, an error will be returned. 
func blobRetentionEpoch(cliCtx *cli.Context) (primitives.Epoch, error) { diff --git a/cmd/beacon-chain/usage.go b/cmd/beacon-chain/usage.go index e8f4841c0702..edccf796810e 100644 --- a/cmd/beacon-chain/usage.go +++ b/cmd/beacon-chain/usage.go @@ -62,7 +62,6 @@ var appHelpFlagGroups = []flagGroup{ cmd.TracingEndpointFlag, cmd.TraceSampleFractionFlag, cmd.MonitoringHostFlag, - cmd.BackupWebhookOutputDir, flags.MonitoringPortFlag, cmd.DisableMonitoringFlag, cmd.MaxGoroutines, @@ -185,6 +184,12 @@ var appHelpFlagGroups = []flagGroup{ flags.InteropNumValidatorsFlag, }, }, + { + Name: "deprecated", + Flags: []cli.Flag{ + cmd.BackupWebhookOutputDir, + }, + }, } func init() { diff --git a/cmd/defaults.go b/cmd/defaults.go index 406c9060d93d..246ec8b1df3f 100644 --- a/cmd/defaults.go +++ b/cmd/defaults.go @@ -31,11 +31,12 @@ func DefaultDataDir() string { // Try to place the data folder in the user's home dir home := file.HomeDir() if home != "" { - if runtime.GOOS == "darwin" { + switch runtime.GOOS { + case "darwin": return filepath.Join(home, "Library", "Eth2") - } else if runtime.GOOS == "windows" { + case "windows": return filepath.Join(home, "AppData", "Local", "Eth2") - } else { + default: return filepath.Join(home, ".eth2") } } diff --git a/cmd/password_reader.go b/cmd/password_reader.go index 1c2162ad1875..5e68ef39d85b 100644 --- a/cmd/password_reader.go +++ b/cmd/password_reader.go @@ -16,7 +16,7 @@ type StdInPasswordReader struct { } // ReadPassword reads a password from stdin. 
-func (_ StdInPasswordReader) ReadPassword() (string, error) { +func (StdInPasswordReader) ReadPassword() (string, error) { pwd, err := terminal.ReadPassword(int(os.Stdin.Fd())) return string(pwd), err } diff --git a/cmd/prysmctl/BUILD.bazel b/cmd/prysmctl/BUILD.bazel index e1c3f62a1583..fdff6175755b 100644 --- a/cmd/prysmctl/BUILD.bazel +++ b/cmd/prysmctl/BUILD.bazel @@ -33,7 +33,7 @@ prysm_image_upload( entrypoint = ["/prysmctl"], repository = "gcr.io/prysmaticlabs/prysm/cmd/prysmctl", symlinks = { - # Backwards compatiability for images that depended on the old filepath. + # Backwards compatibility for images that depended on the old filepath. "/app/cmd/prysmctl/prysmctl": "/prysmctl", }, tags = ["manual"], diff --git a/cmd/validator/BUILD.bazel b/cmd/validator/BUILD.bazel index 809cc2ac86dd..770c7e57a8a9 100644 --- a/cmd/validator/BUILD.bazel +++ b/cmd/validator/BUILD.bazel @@ -5,7 +5,6 @@ load("//tools:prysm_image.bzl", "prysm_image_upload") go_library( name = "go_default_library", srcs = [ - "log.go", "main.go", "usage.go", ], @@ -30,6 +29,7 @@ go_library( "//runtime/version:go_default_library", "//validator/node:go_default_library", "@com_github_joonix_log//:go_default_library", + "@com_github_pkg_errors//:go_default_library", "@com_github_sirupsen_logrus//:go_default_library", "@com_github_urfave_cli_v2//:go_default_library", ], @@ -62,7 +62,7 @@ prysm_image_upload( entrypoint = ["/validator"], repository = "gcr.io/prysmaticlabs/prysm/validator", symlinks = { - # Backwards compatiability for images that depended on the old filepath. + # Backwards compatibility for images that depended on the old filepath. 
"/app/cmd/validator/validator": "/validator", }, tags = ["manual"], diff --git a/cmd/validator/log.go b/cmd/validator/log.go deleted file mode 100644 index f295010594fe..000000000000 --- a/cmd/validator/log.go +++ /dev/null @@ -1,5 +0,0 @@ -package main - -import "github.com/sirupsen/logrus" - -var log = logrus.WithField("prefix", "main") diff --git a/cmd/validator/main.go b/cmd/validator/main.go index 884d7803581d..150425dd117d 100644 --- a/cmd/validator/main.go +++ b/cmd/validator/main.go @@ -10,6 +10,7 @@ import ( runtimeDebug "runtime/debug" joonix "github.com/joonix/log" + "github.com/pkg/errors" "github.com/prysmaticlabs/prysm/v4/cmd" accountcommands "github.com/prysmaticlabs/prysm/v4/cmd/validator/accounts" dbcommands "github.com/prysmaticlabs/prysm/v4/cmd/validator/db" @@ -31,8 +32,10 @@ import ( "github.com/urfave/cli/v2" ) +var log = logrus.WithField("prefix", "main") + func startNode(ctx *cli.Context) error { - // verify if ToS accepted + // Verify if ToS is accepted. if err := tos.VerifyTosAcceptedOrPrompt(ctx); err != nil { return err } @@ -141,6 +144,8 @@ func main() { return err } + logFileName := ctx.String(cmd.LogFileName.Name) + format := ctx.String(cmd.LogFormat.Name) switch format { case "text": @@ -148,8 +153,8 @@ func main() { formatter.TimestampFormat = "2006-01-02 15:04:05" formatter.FullTimestamp = true // If persistent log files are written - we disable the log messages coloring because - // the colors are ANSI codes and seen as Gibberish in the log files. - formatter.DisableColors = ctx.String(cmd.LogFileName.Name) != "" + // the colors are ANSI codes and seen as gibberish in the log files. 
+ formatter.DisableColors = logFileName != "" logrus.SetFormatter(formatter) case "fluentd": f := joonix.NewFormatter() @@ -167,7 +172,6 @@ func main() { return fmt.Errorf("unknown log format %s", format) } - logFileName := ctx.String(cmd.LogFileName.Name) if logFileName != "" { if err := logs.ConfigurePersistentLogging(logFileName); err != nil { log.WithError(err).Error("Failed to configuring logging to disk.") @@ -182,8 +186,9 @@ func main() { } if err := debug.Setup(ctx); err != nil { - return err + return errors.Wrap(err, "failed to setup debug") } + return cmd.ValidateNoArgs(ctx) }, After: func(ctx *cli.Context) error { diff --git a/config/features/README.md b/config/features/README.md index e4881155d4b9..6ed52ab9e5d2 100644 --- a/config/features/README.md +++ b/config/features/README.md @@ -29,7 +29,7 @@ Examples of when not to use a feature flag: Once it has been decided that you should use a feature flag. Follow these steps to safely releasing your feature. In general, try to create a single PR for each step of this process. -1. Add your feature flag to shared/featureconfig/flags.go, use the flag to toggle a boolean in the +1. Add your feature flag to `shared/featureconfig/flags.go`, use the flag to toggle a boolean in the feature config in shared/featureconfig/config.go. It is a good idea to use the `enable` prefix for your flag since you're going to invert the flag in a later step. i.e you will use `disable` prefix later. For example, `--enable-my-feature`. Additionally, [create a feature flag tracking issue](https://github.com/prysmaticlabs/prysm/issues/new?template=feature_flag.md) @@ -48,7 +48,7 @@ func someExistingMethod(ctx context.Context) error { 3. Add the flag to the end to end tests. This set of flags can also be found in shared/featureconfig/flags.go. 4. Test the functionality locally and safely in production. Once you have enough confidence that your new function works and is safe to release then move onto the next step. -5. 
Move your existing flag to the deprecated section of shared/featureconfig/flags.go. It is +5. Move your existing flag to the deprecated section of `shared/featureconfig/flags.go`. It is important NOT to delete your existing flag outright. Deleting a flag can be extremely frustrating to users as it may break their existing workflow! Marking a flag as deprecated gives users time to adjust their start scripts and workflow. Add another feature flag to represent the inverse of your diff --git a/config/features/config.go b/config/features/config.go index b7acb5cb28b9..1ff43273f351 100644 --- a/config/features/config.go +++ b/config/features/config.go @@ -23,10 +23,11 @@ import ( "sync" "time" - "github.com/prysmaticlabs/prysm/v4/cmd" - "github.com/prysmaticlabs/prysm/v4/config/params" "github.com/sirupsen/logrus" "github.com/urfave/cli/v2" + + "github.com/prysmaticlabs/prysm/v4/cmd" + "github.com/prysmaticlabs/prysm/v4/config/params" ) var log = logrus.WithField("prefix", "flags") @@ -40,6 +41,7 @@ type Flags struct { EnableExperimentalState bool // EnableExperimentalState turns on the latest and greatest (but potentially unstable) changes to the beacon state. WriteSSZStateTransitions bool // WriteSSZStateTransitions to tmp directory. EnablePeerScorer bool // EnablePeerScorer enables experimental peer scoring in p2p. + EnableLightClient bool // EnableLightClient enables light client APIs. WriteWalletPasswordOnWebOnboarding bool // WriteWalletPasswordOnWebOnboarding writes the password to disk after Prysm web signup. EnableDoppelGanger bool // EnableDoppelGanger enables doppelganger protection on startup for the validator. 
EnableHistoricalSpaceRepresentation bool // EnableHistoricalSpaceRepresentation enables the saving of registry validators in separate buckets to save space @@ -64,13 +66,10 @@ type Flags struct { DisableStakinContractCheck bool // Disables check for deposit contract when proposing blocks EnableVerboseSigVerification bool // EnableVerboseSigVerification specifies whether to verify individual signature if batch verification fails - EnableOptionalEngineMethods bool // EnableOptionalEngineMethods specifies whether to activate capella specific engine methods EnableEIP4881 bool // EnableEIP4881 specifies whether to use the deposit tree from EIP4881 PrepareAllPayloads bool // PrepareAllPayloads informs the engine to prepare a block on every slot. - AggregateParallel bool // AggregateParallel aggregates attestations in parallel. - // KeystoreImportDebounceInterval specifies the time duration the validator waits to reload new keys if they have // changed on disk. This feature is for advanced use cases only. 
KeystoreImportDebounceInterval time.Duration @@ -228,20 +227,10 @@ func ConfigureBeaconChain(ctx *cli.Context) error { logEnabled(enableVerboseSigVerification) cfg.EnableVerboseSigVerification = true } - cfg.EnableOptionalEngineMethods = true - if ctx.IsSet(disableOptionalEngineMethods.Name) { - logEnabled(disableOptionalEngineMethods) - cfg.EnableOptionalEngineMethods = false - } if ctx.IsSet(prepareAllPayloads.Name) { logEnabled(prepareAllPayloads) cfg.PrepareAllPayloads = true } - cfg.AggregateParallel = true - if ctx.IsSet(disableAggregateParallel.Name) { - logEnabled(disableAggregateParallel) - cfg.AggregateParallel = false - } if ctx.IsSet(disableResourceManager.Name) { logEnabled(disableResourceManager) cfg.DisableResourceManager = true @@ -250,6 +239,10 @@ func ConfigureBeaconChain(ctx *cli.Context) error { logEnabled(EnableEIP4881) cfg.EnableEIP4881 = true } + if ctx.IsSet(EnableLightClient.Name) { + logEnabled(EnableLightClient) + cfg.EnableLightClient = true + } cfg.AggregateIntervals = [3]time.Duration{aggregateFirstInterval.Value, aggregateSecondInterval.Value, aggregateThirdInterval.Value} Init(cfg) return nil diff --git a/config/features/deprecated_flags.go b/config/features/deprecated_flags.go index c8d4725d9742..6e7a9db99003 100644 --- a/config/features/deprecated_flags.go +++ b/config/features/deprecated_flags.go @@ -58,6 +58,16 @@ var ( Usage: deprecatedUsage, Hidden: true, } + deprecatedDisableOptionalEngineMethods = &cli.BoolFlag{ + Name: "disable-optional-engine-methods", + Usage: deprecatedUsage, + Hidden: true, + } + deprecatedDisableAggregateParallel = &cli.BoolFlag{ + Name: "disable-aggregate-parallel", + Usage: deprecatedUsage, + Hidden: true, + } ) // Deprecated flags for both the beacon node and validator client. 
@@ -72,6 +82,8 @@ var deprecatedFlags = []cli.Flag{ deprecatedEnableOptionalEngineMethods, deprecatedDisableBuildBlockParallel, deprecatedDisableReorgLateBlocks, + deprecatedDisableOptionalEngineMethods, + deprecatedDisableAggregateParallel, } // deprecatedBeaconFlags contains flags that are still used by other components diff --git a/config/features/flags.go b/config/features/flags.go index 83e5db08b4e4..0327a973cddb 100644 --- a/config/features/flags.go +++ b/config/features/flags.go @@ -132,10 +132,6 @@ var ( Name: "enable-verbose-sig-verification", Usage: "Enables identifying invalid signatures if batch verification fails when processing block.", } - disableOptionalEngineMethods = &cli.BoolFlag{ - Name: "disable-optional-engine-methods", - Usage: "Disables the optional engine methods.", - } prepareAllPayloads = &cli.BoolFlag{ Name: "prepare-all-payloads", Usage: "Informs the engine to prepare all local payloads. Useful for relayers and builders.", @@ -144,6 +140,10 @@ var ( Name: "enable-eip-4881", Usage: "Enables the deposit tree specified in EIP-4881.", } + EnableLightClient = &cli.BoolFlag{ + Name: "enable-lightclient", + Usage: "Enables the light client support in the beacon node", + } disableResourceManager = &cli.BoolFlag{ Name: "disable-resource-manager", Usage: "Disables running the libp2p resource manager.", @@ -154,11 +154,6 @@ var ( Name: "disable-registration-cache", Usage: "Temporary flag for disabling the validator registration cache instead of using the DB. Note: registrations do not clear on restart while using the DB.", } - - disableAggregateParallel = &cli.BoolFlag{ - Name: "disable-aggregate-parallel", - Usage: "Disables parallel aggregation of attestations.", - } ) // devModeFlags holds list of flags that are set when development mode is on. 
@@ -206,7 +201,6 @@ var BeaconChainFlags = append(deprecatedBeaconFlags, append(deprecatedFlags, []c enableStartupOptimistic, enableFullSSZDataLogging, enableVerboseSigVerification, - disableOptionalEngineMethods, prepareAllPayloads, aggregateFirstInterval, aggregateSecondInterval, @@ -214,7 +208,7 @@ var BeaconChainFlags = append(deprecatedBeaconFlags, append(deprecatedFlags, []c EnableEIP4881, disableResourceManager, DisableRegistrationCache, - disableAggregateParallel, + EnableLightClient, }...)...) // E2EBeaconChainFlags contains a list of the beacon chain feature flags to be tested in E2E. diff --git a/config/params/mainnet_config.go b/config/params/mainnet_config.go index 1633d95bce63..2ebc35508965 100644 --- a/config/params/mainnet_config.go +++ b/config/params/mainnet_config.go @@ -34,7 +34,7 @@ var mainnetNetworkConfig = &NetworkConfig{ ContractDeploymentBlock: 11184524, // Note: contract was deployed in block 11052984 but no transactions were sent until 11184524. BootstrapNodes: []string{ // Teku team's bootnode - "enr:-KG4QMOEswP62yzDjSwWS4YEjtTZ5PO6r65CPqYBkgTTkrpaedQ8uEUo1uMALtJIvb2w_WWEVmg5yt1UAuK1ftxUU7QDhGV0aDKQu6TalgMAAAD__________4JpZIJ2NIJpcIQEnfA2iXNlY3AyNTZrMaEDfol8oLr6XJ7FsdAYE7lpJhKMls4G_v6qQOGKJUWGb_uDdGNwgiMog3VkcIIjKA", + "enr:-KG4QNTx85fjxABbSq_Rta9wy56nQ1fHK0PewJbGjLm1M4bMGx5-3Qq4ZX2-iFJ0pys_O90sVXNNOxp2E7afBsGsBrgDhGV0aDKQu6TalgMAAAD__________4JpZIJ2NIJpcIQEnfA2iXNlY3AyNTZrMaECGXWQ-rQ2KZKRH1aOW4IlPDBkY4XDphxg9pxKytFCkayDdGNwgiMog3VkcIIjKA", "enr:-KG4QF4B5WrlFcRhUU6dZETwY5ZzAXnA0vGC__L1Kdw602nDZwXSTs5RFXFIFUnbQJmhNGVU6OIX7KVrCSTODsz1tK4DhGV0aDKQu6TalgMAAAD__________4JpZIJ2NIJpcIQExNYEiXNlY3AyNTZrMaECQmM9vp7KhaXhI-nqL_R0ovULLCFSFTa9CPPSdb1zPX6DdGNwgiMog3VkcIIjKA", // Prylab team's bootnodes "enr:-Ku4QImhMc1z8yCiNJ1TyUxdcfNucje3BGwEHzodEZUan8PherEo4sF7pPHPSIB1NNuSg5fZy7qFsjmUKs2ea1Whi0EBh2F0dG5ldHOIAAAAAAAAAACEZXRoMpD1pf1CAAAAAP__________gmlkgnY0gmlwhBLf22SJc2VjcDI1NmsxoQOVphkDqal4QzPMksc5wnpuC3gvSC8AfbFOnZY_On34wIN1ZHCCIyg", diff --git 
a/consensus-types/blocks/roblock.go b/consensus-types/blocks/roblock.go index 9b7c21023970..77e43e00ae8d 100644 --- a/consensus-types/blocks/roblock.go +++ b/consensus-types/blocks/roblock.go @@ -74,14 +74,18 @@ func (s ROBlockSlice) Len() int { return len(s) } -type BlockWithVerifiedBlobs struct { +// BlockWithROBlobs is a wrapper that collects the block and blob values together. +// This is helpful because these values are collated from separate RPC requests. +type BlockWithROBlobs struct { Block ROBlock Blobs []ROBlob } -type BlockWithVerifiedBlobsSlice []BlockWithVerifiedBlobs +// BlockWithROBlobsSlice gives convenient access to getting a slice of just the ROBlocks, +// and defines sorting helpers. +type BlockWithROBlobsSlice []BlockWithROBlobs -func (s BlockWithVerifiedBlobsSlice) ROBlocks() []ROBlock { +func (s BlockWithROBlobsSlice) ROBlocks() []ROBlock { r := make([]ROBlock, len(s)) for i := range s { r[i] = s[i].Block @@ -92,7 +96,7 @@ func (s BlockWithVerifiedBlobsSlice) ROBlocks() []ROBlock { // Less reports whether the element with index i must sort before the element with index j. // ROBlocks are ordered first by their slot, // with a lexicographic sort of roots breaking ties for slots with duplicate blocks. -func (s BlockWithVerifiedBlobsSlice) Less(i, j int) bool { +func (s BlockWithROBlobsSlice) Less(i, j int) bool { si, sj := s[i].Block.Block().Slot(), s[j].Block.Block().Slot() // lower slot wins @@ -106,11 +110,11 @@ func (s BlockWithVerifiedBlobsSlice) Less(i, j int) bool { } // Swap swaps the elements with indexes i and j. -func (s BlockWithVerifiedBlobsSlice) Swap(i, j int) { +func (s BlockWithROBlobsSlice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } // Len is the number of elements in the collection. 
-func (s BlockWithVerifiedBlobsSlice) Len() int { +func (s BlockWithROBlobsSlice) Len() int { return len(s) } diff --git a/container/multi-value-slice/multi_value_slice.go b/container/multi-value-slice/multi_value_slice.go index b56144dea3b7..81d8009fb692 100644 --- a/container/multi-value-slice/multi_value_slice.go +++ b/container/multi-value-slice/multi_value_slice.go @@ -96,6 +96,10 @@ import ( "github.com/pkg/errors" ) +// Amount of references beyond which a multivalue object is considered +// fragmented. +const fragmentationLimit = 50000 + // Id is an object identifier. type Id = uint64 @@ -394,6 +398,88 @@ func (s *Slice[V]) Detach(obj Identifiable) { delete(s.cachedLengths, obj.Id()) } +// MultiValueStatistics generates the multi-value stats object for the respective +// multivalue slice. +func (s *Slice[V]) MultiValueStatistics() MultiValueStatistics { + s.lock.RLock() + defer s.lock.RUnlock() + + stats := MultiValueStatistics{} + stats.TotalIndividualElements = len(s.individualItems) + totalIndRefs := 0 + + for _, v := range s.individualItems { + for _, ival := range v.Values { + totalIndRefs += len(ival.ids) + } + } + + stats.TotalAppendedElements = len(s.appendedItems) + totalAppRefs := 0 + + for _, v := range s.appendedItems { + for _, ival := range v.Values { + totalAppRefs += len(ival.ids) + } + } + stats.TotalIndividualElemReferences = totalIndRefs + stats.TotalAppendedElemReferences = totalAppRefs + + return stats +} + +// IsFragmented checks if our mutlivalue object is fragmented (individual references held). +// If the number of references is higher than our threshold we return true. +func (s *Slice[V]) IsFragmented() bool { + stats := s.MultiValueStatistics() + return stats.TotalIndividualElemReferences+stats.TotalAppendedElemReferences >= fragmentationLimit +} + +// Reset builds a new multivalue object with respect to the +// provided object's id. The base slice will be based on this +// particular id. 
+func (s *Slice[V]) Reset(obj Identifiable) *Slice[V] { + s.lock.RLock() + defer s.lock.RUnlock() + + l, ok := s.cachedLengths[obj.Id()] + if !ok { + l = len(s.sharedItems) + } + + items := make([]V, l) + copy(items, s.sharedItems) + for i, ind := range s.individualItems { + for _, v := range ind.Values { + _, found := containsId(v.ids, obj.Id()) + if found { + items[i] = v.val + break + } + } + } + + index := len(s.sharedItems) + for _, app := range s.appendedItems { + found := true + for _, v := range app.Values { + _, found = containsId(v.ids, obj.Id()) + if found { + items[index] = v.val + index++ + break + } + } + if !found { + break + } + } + + reset := &Slice[V]{} + reset.Init(items) + return reset +} + func (s *Slice[V]) fillOriginalItems(obj Identifiable, items *[]V) { for i, item := range s.sharedItems { ind, ok := s.individualItems[uint64(i)] @@ -538,3 +624,11 @@ func BuildEmptyCompositeSlice[V comparable](values []V) MultiValueSliceComposite MultiValueSlice: EmptyMVSlice[V]{fullSlice: values}, } } + +// MultiValueStatistics represents the internal properties of a multivalue slice. 
+type MultiValueStatistics struct { + TotalIndividualElements int + TotalAppendedElements int + TotalIndividualElemReferences int + TotalAppendedElemReferences int +} diff --git a/container/multi-value-slice/multi_value_slice_test.go b/container/multi-value-slice/multi_value_slice_test.go index 0d19db6bfbd7..b5ecbefabb93 100644 --- a/container/multi-value-slice/multi_value_slice_test.go +++ b/container/multi-value-slice/multi_value_slice_test.go @@ -326,6 +326,156 @@ func TestDetach(t *testing.T) { assert.Equal(t, false, ok) } +func TestReset(t *testing.T) { + s := setup() + obj := &testObject{id: 2} + + reset := s.Reset(obj) + + assert.Equal(t, 8, len(reset.sharedItems)) + assert.Equal(t, 123, reset.sharedItems[0]) + assert.Equal(t, 2, reset.sharedItems[1]) + assert.Equal(t, 3, reset.sharedItems[2]) + assert.Equal(t, 123, reset.sharedItems[3]) + assert.Equal(t, 2, reset.sharedItems[4]) + assert.Equal(t, 2, reset.sharedItems[5]) + assert.Equal(t, 3, reset.sharedItems[6]) + assert.Equal(t, 2, reset.sharedItems[7]) + assert.Equal(t, 0, len(reset.individualItems)) + assert.Equal(t, 0, len(reset.appendedItems)) +} + +func TestFragmentation_IndividualReferences(t *testing.T) { + s := &Slice[int]{} + s.Init([]int{123, 123, 123, 123, 123}) + s.individualItems[1] = &MultiValueItem[int]{ + Values: []*Value[int]{ + { + val: 1, + ids: []uint64{1}, + }, + { + val: 2, + ids: []uint64{2}, + }, + }, + } + s.individualItems[2] = &MultiValueItem[int]{ + Values: []*Value[int]{ + { + val: 3, + ids: []uint64{1, 2}, + }, + }, + } + + numOfRefs := fragmentationLimit / 2 + for i := 3; i < numOfRefs; i++ { + obj := &testObject{id: uint64(i)} + s.Copy(&testObject{id: 1}, obj) + } + + assert.Equal(t, false, s.IsFragmented()) + + // Add more references to hit fragmentation limit. Id 1 + // has 2 references above. 
+ for i := numOfRefs; i < numOfRefs+3; i++ { + obj := &testObject{id: uint64(i)} + s.Copy(&testObject{id: 1}, obj) + } + assert.Equal(t, true, s.IsFragmented()) +} + +func TestFragmentation_AppendedReferences(t *testing.T) { + s := &Slice[int]{} + s.Init([]int{123, 123, 123, 123, 123}) + s.appendedItems = []*MultiValueItem[int]{ + { + Values: []*Value[int]{ + { + val: 1, + ids: []uint64{1}, + }, + { + val: 2, + ids: []uint64{2}, + }, + }, + }, + { + Values: []*Value[int]{ + { + val: 3, + ids: []uint64{1, 2}, + }, + }, + }, + } + s.cachedLengths[1] = 7 + s.cachedLengths[2] = 8 + + numOfRefs := fragmentationLimit / 2 + for i := 3; i < numOfRefs; i++ { + obj := &testObject{id: uint64(i)} + s.Copy(&testObject{id: 1}, obj) + } + + assert.Equal(t, false, s.IsFragmented()) + + // Add more references to hit fragmentation limit. Id 1 + // has 2 references above. + for i := numOfRefs; i < numOfRefs+3; i++ { + obj := &testObject{id: uint64(i)} + s.Copy(&testObject{id: 1}, obj) + } + assert.Equal(t, true, s.IsFragmented()) +} + +func TestFragmentation_IndividualAndAppendedReferences(t *testing.T) { + s := &Slice[int]{} + s.Init([]int{123, 123, 123, 123, 123}) + s.individualItems[2] = &MultiValueItem[int]{ + Values: []*Value[int]{ + { + val: 3, + ids: []uint64{1, 2}, + }, + }, + } + s.appendedItems = []*MultiValueItem[int]{ + { + Values: []*Value[int]{ + { + val: 1, + ids: []uint64{1}, + }, + { + val: 2, + ids: []uint64{2}, + }, + }, + }, + } + s.cachedLengths[1] = 7 + s.cachedLengths[2] = 8 + + numOfRefs := fragmentationLimit / 2 + for i := 3; i < numOfRefs; i++ { + obj := &testObject{id: uint64(i)} + s.Copy(&testObject{id: 1}, obj) + } + + assert.Equal(t, false, s.IsFragmented()) + + // Add more references to hit fragmentation limit. Id 1 + // has 2 references above. + for i := numOfRefs; i < numOfRefs+3; i++ { + obj := &testObject{id: uint64(i)} + s.Copy(&testObject{id: 1}, obj) + } + assert.Equal(t, true, s.IsFragmented()) +} + // Share the slice between 2 objects. 
// Index 0: Shared value // Index 1: Different individual value diff --git a/container/trie/sparse_merkle.go b/container/trie/sparse_merkle.go index 0f2dc4cfce90..6c34bf3f1a7d 100644 --- a/container/trie/sparse_merkle.go +++ b/container/trie/sparse_merkle.go @@ -250,7 +250,7 @@ func (m *SparseMerkleTrie) Copy() *SparseMerkleTrie { // NumOfItems returns the num of items stored in // the sparse merkle trie. We handle a special case -// where if there is only one item stored and it is a +// where if there is only one item stored and it is an // empty 32-byte root. func (m *SparseMerkleTrie) NumOfItems() int { var zeroBytes [32]byte diff --git a/deps.bzl b/deps.bzl index 6cdd174e693d..3a4baf24aa8c 100644 --- a/deps.bzl +++ b/deps.bzl @@ -2990,8 +2990,8 @@ def prysm_deps(): go_repository( name = "com_github_multiformats_go_multiaddr", importpath = "github.com/multiformats/go-multiaddr", - sum = "h1:1QlibTFkoXJuDjjYsMHhE73TnzJQl8FSWatk/0gxGzE=", - version = "v0.12.0", + sum = "h1:vm+BA/WZA8QZDp1pF1FWhi5CT3g1tbi5GJmqpb6wnlk=", + version = "v0.12.1", ) go_repository( name = "com_github_multiformats_go_multiaddr_dns", @@ -3617,8 +3617,8 @@ def prysm_deps(): "gazelle:exclude tools.go", ], importpath = "github.com/quic-go/quic-go", - sum = "h1:o3YB6t2SR+HU/pgwF29kJ6g4jJIJEwEZ8CKia1h1TKg=", - version = "v0.39.3", + sum = "h1:PelfiuG7wXEffUT2yceiqz5V6Pc0TA5ruOd1LcmFc1s=", + version = "v0.39.4", ) go_repository( name = "com_github_quic_go_webtransport_go", diff --git a/go.mod b/go.mod index ecd24422fb02..bb3001c8be49 100644 --- a/go.mod +++ b/go.mod @@ -53,7 +53,7 @@ require ( github.com/minio/highwayhash v1.0.2 github.com/minio/sha256-simd v1.0.1 github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 - github.com/multiformats/go-multiaddr v0.12.0 + github.com/multiformats/go-multiaddr v0.12.1 github.com/onsi/ginkgo v1.16.5 github.com/onsi/gomega v1.27.10 github.com/patrickmn/go-cache v2.1.0+incompatible @@ -213,7 +213,7 @@ require ( github.com/prometheus/procfs v0.9.0 // 
indirect github.com/quic-go/qpack v0.4.0 // indirect github.com/quic-go/qtls-go1-20 v0.3.4 // indirect - github.com/quic-go/quic-go v0.39.3 // indirect + github.com/quic-go/quic-go v0.39.4 // indirect github.com/quic-go/webtransport-go v0.6.0 // indirect github.com/raulk/go-watchdog v1.3.0 // indirect github.com/rivo/uniseg v0.4.4 // indirect diff --git a/go.sum b/go.sum index 012209a62279..2f41d320af46 100644 --- a/go.sum +++ b/go.sum @@ -922,8 +922,8 @@ github.com/multiformats/go-base36 v0.2.0 h1:lFsAbNOGeKtuKozrtBsAkSVhv1p9D0/qedU9 github.com/multiformats/go-base36 v0.2.0/go.mod h1:qvnKE++v+2MWCfePClUEjE78Z7P2a1UV0xHgWc0hkp4= github.com/multiformats/go-multiaddr v0.1.1/go.mod h1:aMKBKNEYmzmDmxfX88/vz+J5IU55txyt0p4aiWVohjo= github.com/multiformats/go-multiaddr v0.2.0/go.mod h1:0nO36NvPpyV4QzvTLi/lafl2y95ncPj0vFwVF6k6wJ4= -github.com/multiformats/go-multiaddr v0.12.0 h1:1QlibTFkoXJuDjjYsMHhE73TnzJQl8FSWatk/0gxGzE= -github.com/multiformats/go-multiaddr v0.12.0/go.mod h1:WmZXgObOQOYp9r3cslLlppkrz1FYSHmE834dfz/lWu8= +github.com/multiformats/go-multiaddr v0.12.1 h1:vm+BA/WZA8QZDp1pF1FWhi5CT3g1tbi5GJmqpb6wnlk= +github.com/multiformats/go-multiaddr v0.12.1/go.mod h1:7mPkiBMmLeFipt+nNSq9pHZUeJSt8lHBgH6yhj0YQzE= github.com/multiformats/go-multiaddr-dns v0.3.1 h1:QgQgR+LQVt3NPTjbrLLpsaT2ufAA2y0Mkk+QRVJbW3A= github.com/multiformats/go-multiaddr-dns v0.3.1/go.mod h1:G/245BRQ6FJGmryJCrOuTdB37AMA5AMOVuO6NY3JwTk= github.com/multiformats/go-multiaddr-fmt v0.1.0 h1:WLEFClPycPkp4fnIzoFoV9FVd49/eQsuaL3/CWe167E= @@ -1117,8 +1117,8 @@ github.com/quic-go/qpack v0.4.0 h1:Cr9BXA1sQS2SmDUWjSofMPNKmvF6IiIfDRmgU0w1ZCo= github.com/quic-go/qpack v0.4.0/go.mod h1:UZVnYIfi5GRk+zI9UMaCPsmZ2xKJP7XBUvVyT1Knj9A= github.com/quic-go/qtls-go1-20 v0.3.4 h1:MfFAPULvst4yoMgY9QmtpYmfij/em7O8UUi+bNVm7Cg= github.com/quic-go/qtls-go1-20 v0.3.4/go.mod h1:X9Nh97ZL80Z+bX/gUXMbipO6OxdiDi58b/fMC9mAL+k= -github.com/quic-go/quic-go v0.39.3 h1:o3YB6t2SR+HU/pgwF29kJ6g4jJIJEwEZ8CKia1h1TKg= -github.com/quic-go/quic-go 
v0.39.3/go.mod h1:T09QsDQWjLiQ74ZmacDfqZmhY/NLnw5BC40MANNNZ1Q= +github.com/quic-go/quic-go v0.39.4 h1:PelfiuG7wXEffUT2yceiqz5V6Pc0TA5ruOd1LcmFc1s= +github.com/quic-go/quic-go v0.39.4/go.mod h1:T09QsDQWjLiQ74ZmacDfqZmhY/NLnw5BC40MANNNZ1Q= github.com/quic-go/webtransport-go v0.6.0 h1:CvNsKqc4W2HljHJnoT+rMmbRJybShZ0YPFDD3NxaZLY= github.com/quic-go/webtransport-go v0.6.0/go.mod h1:9KjU4AEBqEQidGHNDkZrb8CAa1abRaosM2yGOyiikEc= github.com/raulk/go-watchdog v1.3.0 h1:oUmdlHxdkXRJlwfG0O9omj8ukerm8MEQavSiDTEtBsk= diff --git a/nogo_config.json b/nogo_config.json index 851039a02b10..c6bc6fe85d55 100644 --- a/nogo_config.json +++ b/nogo_config.json @@ -190,5 +190,23 @@ "external/.*": "Third party code", "rules_go_work-.*": "Third party code" } + }, + "reflectvaluecompare": { + "exclude_files": { + "external/.*": "Third party code", + "rules_go_work-.*": "Third party code" + } + }, + "unusedwrite": { + "exclude_files": { + "external/.*": "Third party code", + "rules_go_work-.*": "Third party code" + } + }, + "stringintconv": { + "exclude_files": { + "external/.*": "Third party code", + "rules_go_work-.*": "Third party code" + } } } diff --git a/proto/prysm/v1alpha1/BUILD.bazel b/proto/prysm/v1alpha1/BUILD.bazel index 754d0428baea..e9a969bb67dd 100644 --- a/proto/prysm/v1alpha1/BUILD.bazel +++ b/proto/prysm/v1alpha1/BUILD.bazel @@ -127,10 +127,8 @@ ssz_gen_marshal( "BuilderBid", "BuilderBidCapella", "BuilderBidDeneb", - "DeprecatedBlobSidecar", "BlobSidecar", "BlobSidecars", - "SignedBlobSidecar", "BlobIdentifier", ], ) diff --git a/proto/prysm/v1alpha1/blobs.pb.go b/proto/prysm/v1alpha1/blobs.pb.go index 0c7c4b339d20..0d8e36937601 100755 --- a/proto/prysm/v1alpha1/blobs.pb.go +++ b/proto/prysm/v1alpha1/blobs.pb.go @@ -23,211 +23,6 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) -type DeprecatedBlobSidecars struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Sidecars []*DeprecatedBlobSidecar 
`protobuf:"bytes,1,rep,name=sidecars,proto3" json:"sidecars,omitempty"` -} - -func (x *DeprecatedBlobSidecars) Reset() { - *x = DeprecatedBlobSidecars{} - if protoimpl.UnsafeEnabled { - mi := &file_proto_prysm_v1alpha1_blobs_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *DeprecatedBlobSidecars) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*DeprecatedBlobSidecars) ProtoMessage() {} - -func (x *DeprecatedBlobSidecars) ProtoReflect() protoreflect.Message { - mi := &file_proto_prysm_v1alpha1_blobs_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use DeprecatedBlobSidecars.ProtoReflect.Descriptor instead. -func (*DeprecatedBlobSidecars) Descriptor() ([]byte, []int) { - return file_proto_prysm_v1alpha1_blobs_proto_rawDescGZIP(), []int{0} -} - -func (x *DeprecatedBlobSidecars) GetSidecars() []*DeprecatedBlobSidecar { - if x != nil { - return x.Sidecars - } - return nil -} - -type DeprecatedBlobSidecar struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - BlockRoot []byte `protobuf:"bytes,1,opt,name=block_root,json=blockRoot,proto3" json:"block_root,omitempty" ssz-size:"32"` - Index uint64 `protobuf:"varint,2,opt,name=index,proto3" json:"index,omitempty"` - Slot github_com_prysmaticlabs_prysm_v4_consensus_types_primitives.Slot `protobuf:"varint,3,opt,name=slot,proto3" json:"slot,omitempty" cast-type:"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives.Slot"` - BlockParentRoot []byte `protobuf:"bytes,4,opt,name=block_parent_root,json=blockParentRoot,proto3" json:"block_parent_root,omitempty" ssz-size:"32"` - ProposerIndex github_com_prysmaticlabs_prysm_v4_consensus_types_primitives.ValidatorIndex 
`protobuf:"varint,5,opt,name=proposer_index,json=proposerIndex,proto3" json:"proposer_index,omitempty" cast-type:"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives.ValidatorIndex"` - Blob []byte `protobuf:"bytes,6,opt,name=blob,proto3" json:"blob,omitempty" ssz-size:"131072"` - KzgCommitment []byte `protobuf:"bytes,7,opt,name=kzg_commitment,json=kzgCommitment,proto3" json:"kzg_commitment,omitempty" ssz-size:"48"` - KzgProof []byte `protobuf:"bytes,8,opt,name=kzg_proof,json=kzgProof,proto3" json:"kzg_proof,omitempty" ssz-size:"48"` -} - -func (x *DeprecatedBlobSidecar) Reset() { - *x = DeprecatedBlobSidecar{} - if protoimpl.UnsafeEnabled { - mi := &file_proto_prysm_v1alpha1_blobs_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *DeprecatedBlobSidecar) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*DeprecatedBlobSidecar) ProtoMessage() {} - -func (x *DeprecatedBlobSidecar) ProtoReflect() protoreflect.Message { - mi := &file_proto_prysm_v1alpha1_blobs_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use DeprecatedBlobSidecar.ProtoReflect.Descriptor instead. 
-func (*DeprecatedBlobSidecar) Descriptor() ([]byte, []int) { - return file_proto_prysm_v1alpha1_blobs_proto_rawDescGZIP(), []int{1} -} - -func (x *DeprecatedBlobSidecar) GetBlockRoot() []byte { - if x != nil { - return x.BlockRoot - } - return nil -} - -func (x *DeprecatedBlobSidecar) GetIndex() uint64 { - if x != nil { - return x.Index - } - return 0 -} - -func (x *DeprecatedBlobSidecar) GetSlot() github_com_prysmaticlabs_prysm_v4_consensus_types_primitives.Slot { - if x != nil { - return x.Slot - } - return github_com_prysmaticlabs_prysm_v4_consensus_types_primitives.Slot(0) -} - -func (x *DeprecatedBlobSidecar) GetBlockParentRoot() []byte { - if x != nil { - return x.BlockParentRoot - } - return nil -} - -func (x *DeprecatedBlobSidecar) GetProposerIndex() github_com_prysmaticlabs_prysm_v4_consensus_types_primitives.ValidatorIndex { - if x != nil { - return x.ProposerIndex - } - return github_com_prysmaticlabs_prysm_v4_consensus_types_primitives.ValidatorIndex(0) -} - -func (x *DeprecatedBlobSidecar) GetBlob() []byte { - if x != nil { - return x.Blob - } - return nil -} - -func (x *DeprecatedBlobSidecar) GetKzgCommitment() []byte { - if x != nil { - return x.KzgCommitment - } - return nil -} - -func (x *DeprecatedBlobSidecar) GetKzgProof() []byte { - if x != nil { - return x.KzgProof - } - return nil -} - -type SignedBlobSidecar struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Message *DeprecatedBlobSidecar `protobuf:"bytes,1,opt,name=message,proto3" json:"message,omitempty"` - Signature []byte `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty" ssz-size:"96"` -} - -func (x *SignedBlobSidecar) Reset() { - *x = SignedBlobSidecar{} - if protoimpl.UnsafeEnabled { - mi := &file_proto_prysm_v1alpha1_blobs_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *SignedBlobSidecar) String() string { - return 
protoimpl.X.MessageStringOf(x) -} - -func (*SignedBlobSidecar) ProtoMessage() {} - -func (x *SignedBlobSidecar) ProtoReflect() protoreflect.Message { - mi := &file_proto_prysm_v1alpha1_blobs_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use SignedBlobSidecar.ProtoReflect.Descriptor instead. -func (*SignedBlobSidecar) Descriptor() ([]byte, []int) { - return file_proto_prysm_v1alpha1_blobs_proto_rawDescGZIP(), []int{2} -} - -func (x *SignedBlobSidecar) GetMessage() *DeprecatedBlobSidecar { - if x != nil { - return x.Message - } - return nil -} - -func (x *SignedBlobSidecar) GetSignature() []byte { - if x != nil { - return x.Signature - } - return nil -} - type BlindedBlobSidecars struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -239,7 +34,7 @@ type BlindedBlobSidecars struct { func (x *BlindedBlobSidecars) Reset() { *x = BlindedBlobSidecars{} if protoimpl.UnsafeEnabled { - mi := &file_proto_prysm_v1alpha1_blobs_proto_msgTypes[3] + mi := &file_proto_prysm_v1alpha1_blobs_proto_msgTypes[0] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -252,7 +47,7 @@ func (x *BlindedBlobSidecars) String() string { func (*BlindedBlobSidecars) ProtoMessage() {} func (x *BlindedBlobSidecars) ProtoReflect() protoreflect.Message { - mi := &file_proto_prysm_v1alpha1_blobs_proto_msgTypes[3] + mi := &file_proto_prysm_v1alpha1_blobs_proto_msgTypes[0] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -265,7 +60,7 @@ func (x *BlindedBlobSidecars) ProtoReflect() protoreflect.Message { // Deprecated: Use BlindedBlobSidecars.ProtoReflect.Descriptor instead. 
func (*BlindedBlobSidecars) Descriptor() ([]byte, []int) { - return file_proto_prysm_v1alpha1_blobs_proto_rawDescGZIP(), []int{3} + return file_proto_prysm_v1alpha1_blobs_proto_rawDescGZIP(), []int{0} } func (x *BlindedBlobSidecars) GetSidecars() []*BlindedBlobSidecar { @@ -293,7 +88,7 @@ type BlindedBlobSidecar struct { func (x *BlindedBlobSidecar) Reset() { *x = BlindedBlobSidecar{} if protoimpl.UnsafeEnabled { - mi := &file_proto_prysm_v1alpha1_blobs_proto_msgTypes[4] + mi := &file_proto_prysm_v1alpha1_blobs_proto_msgTypes[1] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -306,7 +101,7 @@ func (x *BlindedBlobSidecar) String() string { func (*BlindedBlobSidecar) ProtoMessage() {} func (x *BlindedBlobSidecar) ProtoReflect() protoreflect.Message { - mi := &file_proto_prysm_v1alpha1_blobs_proto_msgTypes[4] + mi := &file_proto_prysm_v1alpha1_blobs_proto_msgTypes[1] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -319,7 +114,7 @@ func (x *BlindedBlobSidecar) ProtoReflect() protoreflect.Message { // Deprecated: Use BlindedBlobSidecar.ProtoReflect.Descriptor instead. 
func (*BlindedBlobSidecar) Descriptor() ([]byte, []int) { - return file_proto_prysm_v1alpha1_blobs_proto_rawDescGZIP(), []int{4} + return file_proto_prysm_v1alpha1_blobs_proto_rawDescGZIP(), []int{1} } func (x *BlindedBlobSidecar) GetBlockRoot() []byte { @@ -390,7 +185,7 @@ type SignedBlindedBlobSidecar struct { func (x *SignedBlindedBlobSidecar) Reset() { *x = SignedBlindedBlobSidecar{} if protoimpl.UnsafeEnabled { - mi := &file_proto_prysm_v1alpha1_blobs_proto_msgTypes[5] + mi := &file_proto_prysm_v1alpha1_blobs_proto_msgTypes[2] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -403,7 +198,7 @@ func (x *SignedBlindedBlobSidecar) String() string { func (*SignedBlindedBlobSidecar) ProtoMessage() {} func (x *SignedBlindedBlobSidecar) ProtoReflect() protoreflect.Message { - mi := &file_proto_prysm_v1alpha1_blobs_proto_msgTypes[5] + mi := &file_proto_prysm_v1alpha1_blobs_proto_msgTypes[2] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -416,7 +211,7 @@ func (x *SignedBlindedBlobSidecar) ProtoReflect() protoreflect.Message { // Deprecated: Use SignedBlindedBlobSidecar.ProtoReflect.Descriptor instead. 
func (*SignedBlindedBlobSidecar) Descriptor() ([]byte, []int) { - return file_proto_prysm_v1alpha1_blobs_proto_rawDescGZIP(), []int{5} + return file_proto_prysm_v1alpha1_blobs_proto_rawDescGZIP(), []int{2} } func (x *SignedBlindedBlobSidecar) GetMessage() *BlindedBlobSidecar { @@ -445,7 +240,7 @@ type BlobIdentifier struct { func (x *BlobIdentifier) Reset() { *x = BlobIdentifier{} if protoimpl.UnsafeEnabled { - mi := &file_proto_prysm_v1alpha1_blobs_proto_msgTypes[6] + mi := &file_proto_prysm_v1alpha1_blobs_proto_msgTypes[3] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -458,7 +253,7 @@ func (x *BlobIdentifier) String() string { func (*BlobIdentifier) ProtoMessage() {} func (x *BlobIdentifier) ProtoReflect() protoreflect.Message { - mi := &file_proto_prysm_v1alpha1_blobs_proto_msgTypes[6] + mi := &file_proto_prysm_v1alpha1_blobs_proto_msgTypes[3] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -471,7 +266,7 @@ func (x *BlobIdentifier) ProtoReflect() protoreflect.Message { // Deprecated: Use BlobIdentifier.ProtoReflect.Descriptor instead. 
func (*BlobIdentifier) Descriptor() ([]byte, []int) { - return file_proto_prysm_v1alpha1_blobs_proto_rawDescGZIP(), []int{6} + return file_proto_prysm_v1alpha1_blobs_proto_rawDescGZIP(), []int{3} } func (x *BlobIdentifier) GetBlockRoot() []byte { @@ -496,109 +291,66 @@ var file_proto_prysm_v1alpha1_blobs_proto_rawDesc = []byte{ 0x74, 0x6f, 0x12, 0x15, 0x65, 0x74, 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, 0x2e, 0x65, 0x74, 0x68, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x1a, 0x1b, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x65, 0x74, 0x68, 0x2f, 0x65, 0x78, 0x74, 0x2f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x62, 0x0a, 0x16, 0x44, 0x65, 0x70, 0x72, 0x65, 0x63, - 0x61, 0x74, 0x65, 0x64, 0x42, 0x6c, 0x6f, 0x62, 0x53, 0x69, 0x64, 0x65, 0x63, 0x61, 0x72, 0x73, - 0x12, 0x48, 0x0a, 0x08, 0x73, 0x69, 0x64, 0x65, 0x63, 0x61, 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x65, 0x74, 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, 0x2e, 0x65, 0x74, - 0x68, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x44, 0x65, 0x70, 0x72, 0x65, - 0x63, 0x61, 0x74, 0x65, 0x64, 0x42, 0x6c, 0x6f, 0x62, 0x53, 0x69, 0x64, 0x65, 0x63, 0x61, 0x72, - 0x52, 0x08, 0x73, 0x69, 0x64, 0x65, 0x63, 0x61, 0x72, 0x73, 0x22, 0xcf, 0x03, 0x0a, 0x15, 0x44, - 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x42, 0x6c, 0x6f, 0x62, 0x53, 0x69, 0x64, - 0x65, 0x63, 0x61, 0x72, 0x12, 0x25, 0x0a, 0x0a, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x5f, 0x72, 0x6f, - 0x6f, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x42, 0x06, 0x8a, 0xb5, 0x18, 0x02, 0x33, 0x32, - 0x52, 0x09, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x6f, 0x6f, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x69, - 0x6e, 0x64, 0x65, 0x78, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x69, 0x6e, 0x64, 0x65, - 0x78, 0x12, 0x59, 0x0a, 0x04, 0x73, 0x6c, 0x6f, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x42, - 0x45, 0x82, 0xb5, 0x18, 0x41, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, - 0x70, 
0x72, 0x79, 0x73, 0x6d, 0x61, 0x74, 0x69, 0x63, 0x6c, 0x61, 0x62, 0x73, 0x2f, 0x70, 0x72, - 0x79, 0x73, 0x6d, 0x2f, 0x76, 0x34, 0x2f, 0x63, 0x6f, 0x6e, 0x73, 0x65, 0x6e, 0x73, 0x75, 0x73, - 0x2d, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x70, 0x72, 0x69, 0x6d, 0x69, 0x74, 0x69, 0x76, 0x65, - 0x73, 0x2e, 0x53, 0x6c, 0x6f, 0x74, 0x52, 0x04, 0x73, 0x6c, 0x6f, 0x74, 0x12, 0x32, 0x0a, 0x11, - 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x5f, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x72, 0x6f, 0x6f, - 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x42, 0x06, 0x8a, 0xb5, 0x18, 0x02, 0x33, 0x32, 0x52, - 0x0f, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x50, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x52, 0x6f, 0x6f, 0x74, - 0x12, 0x76, 0x0a, 0x0e, 0x70, 0x72, 0x6f, 0x70, 0x6f, 0x73, 0x65, 0x72, 0x5f, 0x69, 0x6e, 0x64, - 0x65, 0x78, 0x18, 0x05, 0x20, 0x01, 0x28, 0x04, 0x42, 0x4f, 0x82, 0xb5, 0x18, 0x4b, 0x67, 0x69, - 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x70, 0x72, 0x79, 0x73, 0x6d, 0x61, 0x74, - 0x69, 0x63, 0x6c, 0x61, 0x62, 0x73, 0x2f, 0x70, 0x72, 0x79, 0x73, 0x6d, 0x2f, 0x76, 0x34, 0x2f, - 0x63, 0x6f, 0x6e, 0x73, 0x65, 0x6e, 0x73, 0x75, 0x73, 0x2d, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, - 0x70, 0x72, 0x69, 0x6d, 0x69, 0x74, 0x69, 0x76, 0x65, 0x73, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, - 0x61, 0x74, 0x6f, 0x72, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x52, 0x0d, 0x70, 0x72, 0x6f, 0x70, 0x6f, - 0x73, 0x65, 0x72, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x12, 0x1e, 0x0a, 0x04, 0x62, 0x6c, 0x6f, 0x62, - 0x18, 0x06, 0x20, 0x01, 0x28, 0x0c, 0x42, 0x0a, 0x8a, 0xb5, 0x18, 0x06, 0x31, 0x33, 0x31, 0x30, - 0x37, 0x32, 0x52, 0x04, 0x62, 0x6c, 0x6f, 0x62, 0x12, 0x2d, 0x0a, 0x0e, 0x6b, 0x7a, 0x67, 0x5f, - 0x63, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x6d, 0x65, 0x6e, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0c, - 0x42, 0x06, 0x8a, 0xb5, 0x18, 0x02, 0x34, 0x38, 0x52, 0x0d, 0x6b, 0x7a, 0x67, 0x43, 0x6f, 0x6d, - 0x6d, 0x69, 0x74, 0x6d, 0x65, 0x6e, 0x74, 0x12, 0x23, 0x0a, 0x09, 0x6b, 0x7a, 0x67, 0x5f, 0x70, - 0x72, 0x6f, 0x6f, 0x66, 0x18, 0x08, 0x20, 
0x01, 0x28, 0x0c, 0x42, 0x06, 0x8a, 0xb5, 0x18, 0x02, - 0x34, 0x38, 0x52, 0x08, 0x6b, 0x7a, 0x67, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x22, 0x81, 0x01, 0x0a, - 0x11, 0x53, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x42, 0x6c, 0x6f, 0x62, 0x53, 0x69, 0x64, 0x65, 0x63, - 0x61, 0x72, 0x12, 0x46, 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x65, 0x74, 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, 0x2e, 0x65, - 0x74, 0x68, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x44, 0x65, 0x70, 0x72, - 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x42, 0x6c, 0x6f, 0x62, 0x53, 0x69, 0x64, 0x65, 0x63, 0x61, - 0x72, 0x52, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x24, 0x0a, 0x09, 0x73, 0x69, - 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x42, 0x06, 0x8a, - 0xb5, 0x18, 0x02, 0x39, 0x36, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, - 0x22, 0x63, 0x0a, 0x13, 0x42, 0x6c, 0x69, 0x6e, 0x64, 0x65, 0x64, 0x42, 0x6c, 0x6f, 0x62, 0x53, - 0x69, 0x64, 0x65, 0x63, 0x61, 0x72, 0x73, 0x12, 0x4c, 0x0a, 0x08, 0x73, 0x69, 0x64, 0x65, 0x63, - 0x61, 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x65, 0x74, 0x68, 0x65, - 0x72, 0x65, 0x75, 0x6d, 0x2e, 0x65, 0x74, 0x68, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, - 0x31, 0x2e, 0x42, 0x6c, 0x69, 0x6e, 0x64, 0x65, 0x64, 0x42, 0x6c, 0x6f, 0x62, 0x53, 0x69, 0x64, - 0x65, 0x63, 0x61, 0x72, 0x42, 0x05, 0x92, 0xb5, 0x18, 0x01, 0x36, 0x52, 0x08, 0x73, 0x69, 0x64, - 0x65, 0x63, 0x61, 0x72, 0x73, 0x22, 0xd1, 0x03, 0x0a, 0x12, 0x42, 0x6c, 0x69, 0x6e, 0x64, 0x65, - 0x64, 0x42, 0x6c, 0x6f, 0x62, 0x53, 0x69, 0x64, 0x65, 0x63, 0x61, 0x72, 0x12, 0x25, 0x0a, 0x0a, - 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x5f, 0x72, 0x6f, 0x6f, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, - 0x42, 0x06, 0x8a, 0xb5, 0x18, 0x02, 0x33, 0x32, 0x52, 0x09, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x52, - 0x6f, 0x6f, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x18, 
0x02, 0x20, 0x01, - 0x28, 0x04, 0x52, 0x05, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x12, 0x59, 0x0a, 0x04, 0x73, 0x6c, 0x6f, - 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x42, 0x45, 0x82, 0xb5, 0x18, 0x41, 0x67, 0x69, 0x74, - 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x70, 0x72, 0x79, 0x73, 0x6d, 0x61, 0x74, 0x69, - 0x63, 0x6c, 0x61, 0x62, 0x73, 0x2f, 0x70, 0x72, 0x79, 0x73, 0x6d, 0x2f, 0x76, 0x34, 0x2f, 0x63, - 0x6f, 0x6e, 0x73, 0x65, 0x6e, 0x73, 0x75, 0x73, 0x2d, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x70, - 0x72, 0x69, 0x6d, 0x69, 0x74, 0x69, 0x76, 0x65, 0x73, 0x2e, 0x53, 0x6c, 0x6f, 0x74, 0x52, 0x04, - 0x73, 0x6c, 0x6f, 0x74, 0x12, 0x32, 0x0a, 0x11, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x5f, 0x70, 0x61, - 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x72, 0x6f, 0x6f, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x42, - 0x06, 0x8a, 0xb5, 0x18, 0x02, 0x33, 0x32, 0x52, 0x0f, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x50, 0x61, - 0x72, 0x65, 0x6e, 0x74, 0x52, 0x6f, 0x6f, 0x74, 0x12, 0x76, 0x0a, 0x0e, 0x70, 0x72, 0x6f, 0x70, - 0x6f, 0x73, 0x65, 0x72, 0x5f, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x05, 0x20, 0x01, 0x28, 0x04, - 0x42, 0x4f, 0x82, 0xb5, 0x18, 0x4b, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, - 0x2f, 0x70, 0x72, 0x79, 0x73, 0x6d, 0x61, 0x74, 0x69, 0x63, 0x6c, 0x61, 0x62, 0x73, 0x2f, 0x70, - 0x72, 0x79, 0x73, 0x6d, 0x2f, 0x76, 0x34, 0x2f, 0x63, 0x6f, 0x6e, 0x73, 0x65, 0x6e, 0x73, 0x75, - 0x73, 0x2d, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x70, 0x72, 0x69, 0x6d, 0x69, 0x74, 0x69, 0x76, - 0x65, 0x73, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x6f, 0x72, 0x49, 0x6e, 0x64, 0x65, - 0x78, 0x52, 0x0d, 0x70, 0x72, 0x6f, 0x70, 0x6f, 0x73, 0x65, 0x72, 0x49, 0x6e, 0x64, 0x65, 0x78, - 0x12, 0x23, 0x0a, 0x09, 0x62, 0x6c, 0x6f, 0x62, 0x5f, 0x72, 0x6f, 0x6f, 0x74, 0x18, 0x06, 0x20, - 0x01, 0x28, 0x0c, 0x42, 0x06, 0x8a, 0xb5, 0x18, 0x02, 0x33, 0x32, 0x52, 0x08, 0x62, 0x6c, 0x6f, - 0x62, 0x52, 0x6f, 0x6f, 0x74, 0x12, 0x2d, 0x0a, 0x0e, 0x6b, 0x7a, 0x67, 0x5f, 0x63, 0x6f, 0x6d, - 0x6d, 0x69, 0x74, 
0x6d, 0x65, 0x6e, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0c, 0x42, 0x06, 0x8a, - 0xb5, 0x18, 0x02, 0x34, 0x38, 0x52, 0x0d, 0x6b, 0x7a, 0x67, 0x43, 0x6f, 0x6d, 0x6d, 0x69, 0x74, - 0x6d, 0x65, 0x6e, 0x74, 0x12, 0x23, 0x0a, 0x09, 0x6b, 0x7a, 0x67, 0x5f, 0x70, 0x72, 0x6f, 0x6f, - 0x66, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0c, 0x42, 0x06, 0x8a, 0xb5, 0x18, 0x02, 0x34, 0x38, 0x52, - 0x08, 0x6b, 0x7a, 0x67, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x22, 0x85, 0x01, 0x0a, 0x18, 0x53, 0x69, - 0x67, 0x6e, 0x65, 0x64, 0x42, 0x6c, 0x69, 0x6e, 0x64, 0x65, 0x64, 0x42, 0x6c, 0x6f, 0x62, 0x53, - 0x69, 0x64, 0x65, 0x63, 0x61, 0x72, 0x12, 0x43, 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, - 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x65, 0x74, 0x68, 0x65, 0x72, 0x65, - 0x75, 0x6d, 0x2e, 0x65, 0x74, 0x68, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x63, 0x0a, 0x13, 0x42, 0x6c, 0x69, 0x6e, 0x64, 0x65, + 0x64, 0x42, 0x6c, 0x6f, 0x62, 0x53, 0x69, 0x64, 0x65, 0x63, 0x61, 0x72, 0x73, 0x12, 0x4c, 0x0a, + 0x08, 0x73, 0x69, 0x64, 0x65, 0x63, 0x61, 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x29, 0x2e, 0x65, 0x74, 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, 0x2e, 0x65, 0x74, 0x68, 0x2e, 0x76, + 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x42, 0x6c, 0x69, 0x6e, 0x64, 0x65, 0x64, 0x42, + 0x6c, 0x6f, 0x62, 0x53, 0x69, 0x64, 0x65, 0x63, 0x61, 0x72, 0x42, 0x05, 0x92, 0xb5, 0x18, 0x01, + 0x36, 0x52, 0x08, 0x73, 0x69, 0x64, 0x65, 0x63, 0x61, 0x72, 0x73, 0x22, 0xd1, 0x03, 0x0a, 0x12, 0x42, 0x6c, 0x69, 0x6e, 0x64, 0x65, 0x64, 0x42, 0x6c, 0x6f, 0x62, 0x53, 0x69, 0x64, 0x65, 0x63, - 0x61, 0x72, 0x52, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x24, 0x0a, 0x09, 0x73, - 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x42, 0x06, - 0x8a, 0xb5, 0x18, 0x02, 0x39, 0x36, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, - 0x65, 0x22, 0x4d, 0x0a, 0x0e, 0x42, 0x6c, 0x6f, 0x62, 0x49, 
0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, - 0x69, 0x65, 0x72, 0x12, 0x25, 0x0a, 0x0a, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x5f, 0x72, 0x6f, 0x6f, - 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x42, 0x06, 0x8a, 0xb5, 0x18, 0x02, 0x33, 0x32, 0x52, - 0x09, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x6f, 0x6f, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x69, 0x6e, - 0x64, 0x65, 0x78, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x69, 0x6e, 0x64, 0x65, 0x78, - 0x42, 0x95, 0x01, 0x0a, 0x19, 0x6f, 0x72, 0x67, 0x2e, 0x65, 0x74, 0x68, 0x65, 0x72, 0x65, 0x75, - 0x6d, 0x2e, 0x65, 0x74, 0x68, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x42, 0x0a, - 0x42, 0x6c, 0x6f, 0x62, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x3a, 0x67, 0x69, - 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x70, 0x72, 0x79, 0x73, 0x6d, 0x61, 0x74, - 0x69, 0x63, 0x6c, 0x61, 0x62, 0x73, 0x2f, 0x70, 0x72, 0x79, 0x73, 0x6d, 0x2f, 0x76, 0x34, 0x2f, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x70, 0x72, 0x79, 0x73, 0x6d, 0x2f, 0x76, 0x31, 0x61, 0x6c, - 0x70, 0x68, 0x61, 0x31, 0x3b, 0x65, 0x74, 0x68, 0xaa, 0x02, 0x15, 0x45, 0x74, 0x68, 0x65, 0x72, - 0x65, 0x75, 0x6d, 0x2e, 0x45, 0x74, 0x68, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, - 0xca, 0x02, 0x15, 0x45, 0x74, 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, 0x5c, 0x45, 0x74, 0x68, 0x5c, - 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x61, 0x72, 0x12, 0x25, 0x0a, 0x0a, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x5f, 0x72, 0x6f, 0x6f, 0x74, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x42, 0x06, 0x8a, 0xb5, 0x18, 0x02, 0x33, 0x32, 0x52, 0x09, + 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x6f, 0x6f, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x69, 0x6e, 0x64, + 0x65, 0x78, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x12, + 0x59, 0x0a, 0x04, 0x73, 0x6c, 0x6f, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x42, 0x45, 0x82, + 0xb5, 0x18, 0x41, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x70, 0x72, + 
0x79, 0x73, 0x6d, 0x61, 0x74, 0x69, 0x63, 0x6c, 0x61, 0x62, 0x73, 0x2f, 0x70, 0x72, 0x79, 0x73, + 0x6d, 0x2f, 0x76, 0x34, 0x2f, 0x63, 0x6f, 0x6e, 0x73, 0x65, 0x6e, 0x73, 0x75, 0x73, 0x2d, 0x74, + 0x79, 0x70, 0x65, 0x73, 0x2f, 0x70, 0x72, 0x69, 0x6d, 0x69, 0x74, 0x69, 0x76, 0x65, 0x73, 0x2e, + 0x53, 0x6c, 0x6f, 0x74, 0x52, 0x04, 0x73, 0x6c, 0x6f, 0x74, 0x12, 0x32, 0x0a, 0x11, 0x62, 0x6c, + 0x6f, 0x63, 0x6b, 0x5f, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x72, 0x6f, 0x6f, 0x74, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x0c, 0x42, 0x06, 0x8a, 0xb5, 0x18, 0x02, 0x33, 0x32, 0x52, 0x0f, 0x62, + 0x6c, 0x6f, 0x63, 0x6b, 0x50, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x52, 0x6f, 0x6f, 0x74, 0x12, 0x76, + 0x0a, 0x0e, 0x70, 0x72, 0x6f, 0x70, 0x6f, 0x73, 0x65, 0x72, 0x5f, 0x69, 0x6e, 0x64, 0x65, 0x78, + 0x18, 0x05, 0x20, 0x01, 0x28, 0x04, 0x42, 0x4f, 0x82, 0xb5, 0x18, 0x4b, 0x67, 0x69, 0x74, 0x68, + 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x70, 0x72, 0x79, 0x73, 0x6d, 0x61, 0x74, 0x69, 0x63, + 0x6c, 0x61, 0x62, 0x73, 0x2f, 0x70, 0x72, 0x79, 0x73, 0x6d, 0x2f, 0x76, 0x34, 0x2f, 0x63, 0x6f, + 0x6e, 0x73, 0x65, 0x6e, 0x73, 0x75, 0x73, 0x2d, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x70, 0x72, + 0x69, 0x6d, 0x69, 0x74, 0x69, 0x76, 0x65, 0x73, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, + 0x6f, 0x72, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x52, 0x0d, 0x70, 0x72, 0x6f, 0x70, 0x6f, 0x73, 0x65, + 0x72, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x12, 0x23, 0x0a, 0x09, 0x62, 0x6c, 0x6f, 0x62, 0x5f, 0x72, + 0x6f, 0x6f, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0c, 0x42, 0x06, 0x8a, 0xb5, 0x18, 0x02, 0x33, + 0x32, 0x52, 0x08, 0x62, 0x6c, 0x6f, 0x62, 0x52, 0x6f, 0x6f, 0x74, 0x12, 0x2d, 0x0a, 0x0e, 0x6b, + 0x7a, 0x67, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x6d, 0x65, 0x6e, 0x74, 0x18, 0x07, 0x20, + 0x01, 0x28, 0x0c, 0x42, 0x06, 0x8a, 0xb5, 0x18, 0x02, 0x34, 0x38, 0x52, 0x0d, 0x6b, 0x7a, 0x67, + 0x43, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x6d, 0x65, 0x6e, 0x74, 0x12, 0x23, 0x0a, 0x09, 0x6b, 0x7a, + 0x67, 0x5f, 0x70, 0x72, 0x6f, 0x6f, 
0x66, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0c, 0x42, 0x06, 0x8a, + 0xb5, 0x18, 0x02, 0x34, 0x38, 0x52, 0x08, 0x6b, 0x7a, 0x67, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x22, + 0x85, 0x01, 0x0a, 0x18, 0x53, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x42, 0x6c, 0x69, 0x6e, 0x64, 0x65, + 0x64, 0x42, 0x6c, 0x6f, 0x62, 0x53, 0x69, 0x64, 0x65, 0x63, 0x61, 0x72, 0x12, 0x43, 0x0a, 0x07, + 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, + 0x65, 0x74, 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, 0x2e, 0x65, 0x74, 0x68, 0x2e, 0x76, 0x31, 0x61, + 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x42, 0x6c, 0x69, 0x6e, 0x64, 0x65, 0x64, 0x42, 0x6c, 0x6f, + 0x62, 0x53, 0x69, 0x64, 0x65, 0x63, 0x61, 0x72, 0x52, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, + 0x65, 0x12, 0x24, 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0c, 0x42, 0x06, 0x8a, 0xb5, 0x18, 0x02, 0x39, 0x36, 0x52, 0x09, 0x73, 0x69, + 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x22, 0x4d, 0x0a, 0x0e, 0x42, 0x6c, 0x6f, 0x62, 0x49, + 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x12, 0x25, 0x0a, 0x0a, 0x62, 0x6c, 0x6f, + 0x63, 0x6b, 0x5f, 0x72, 0x6f, 0x6f, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x42, 0x06, 0x8a, + 0xb5, 0x18, 0x02, 0x33, 0x32, 0x52, 0x09, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x6f, 0x6f, 0x74, + 0x12, 0x14, 0x0a, 0x05, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, + 0x05, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x42, 0x95, 0x01, 0x0a, 0x19, 0x6f, 0x72, 0x67, 0x2e, 0x65, + 0x74, 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, 0x2e, 0x65, 0x74, 0x68, 0x2e, 0x76, 0x31, 0x61, 0x6c, + 0x70, 0x68, 0x61, 0x31, 0x42, 0x0a, 0x42, 0x6c, 0x6f, 0x62, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, + 0x50, 0x01, 0x5a, 0x3a, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x70, + 0x72, 0x79, 0x73, 0x6d, 0x61, 0x74, 0x69, 0x63, 0x6c, 0x61, 0x62, 0x73, 0x2f, 0x70, 0x72, 0x79, + 0x73, 0x6d, 0x2f, 0x76, 0x34, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 
0x70, 0x72, 0x79, 0x73, + 0x6d, 0x2f, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x3b, 0x65, 0x74, 0x68, 0xaa, 0x02, + 0x15, 0x45, 0x74, 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, 0x2e, 0x45, 0x74, 0x68, 0x2e, 0x76, 0x31, + 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0xca, 0x02, 0x15, 0x45, 0x74, 0x68, 0x65, 0x72, 0x65, 0x75, + 0x6d, 0x5c, 0x45, 0x74, 0x68, 0x5c, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x62, 0x06, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -613,26 +365,21 @@ func file_proto_prysm_v1alpha1_blobs_proto_rawDescGZIP() []byte { return file_proto_prysm_v1alpha1_blobs_proto_rawDescData } -var file_proto_prysm_v1alpha1_blobs_proto_msgTypes = make([]protoimpl.MessageInfo, 7) +var file_proto_prysm_v1alpha1_blobs_proto_msgTypes = make([]protoimpl.MessageInfo, 4) var file_proto_prysm_v1alpha1_blobs_proto_goTypes = []interface{}{ - (*DeprecatedBlobSidecars)(nil), // 0: ethereum.eth.v1alpha1.DeprecatedBlobSidecars - (*DeprecatedBlobSidecar)(nil), // 1: ethereum.eth.v1alpha1.DeprecatedBlobSidecar - (*SignedBlobSidecar)(nil), // 2: ethereum.eth.v1alpha1.SignedBlobSidecar - (*BlindedBlobSidecars)(nil), // 3: ethereum.eth.v1alpha1.BlindedBlobSidecars - (*BlindedBlobSidecar)(nil), // 4: ethereum.eth.v1alpha1.BlindedBlobSidecar - (*SignedBlindedBlobSidecar)(nil), // 5: ethereum.eth.v1alpha1.SignedBlindedBlobSidecar - (*BlobIdentifier)(nil), // 6: ethereum.eth.v1alpha1.BlobIdentifier + (*BlindedBlobSidecars)(nil), // 0: ethereum.eth.v1alpha1.BlindedBlobSidecars + (*BlindedBlobSidecar)(nil), // 1: ethereum.eth.v1alpha1.BlindedBlobSidecar + (*SignedBlindedBlobSidecar)(nil), // 2: ethereum.eth.v1alpha1.SignedBlindedBlobSidecar + (*BlobIdentifier)(nil), // 3: ethereum.eth.v1alpha1.BlobIdentifier } var file_proto_prysm_v1alpha1_blobs_proto_depIdxs = []int32{ - 1, // 0: ethereum.eth.v1alpha1.DeprecatedBlobSidecars.sidecars:type_name -> ethereum.eth.v1alpha1.DeprecatedBlobSidecar - 1, // 1: ethereum.eth.v1alpha1.SignedBlobSidecar.message:type_name -> 
ethereum.eth.v1alpha1.DeprecatedBlobSidecar - 4, // 2: ethereum.eth.v1alpha1.BlindedBlobSidecars.sidecars:type_name -> ethereum.eth.v1alpha1.BlindedBlobSidecar - 4, // 3: ethereum.eth.v1alpha1.SignedBlindedBlobSidecar.message:type_name -> ethereum.eth.v1alpha1.BlindedBlobSidecar - 4, // [4:4] is the sub-list for method output_type - 4, // [4:4] is the sub-list for method input_type - 4, // [4:4] is the sub-list for extension type_name - 4, // [4:4] is the sub-list for extension extendee - 0, // [0:4] is the sub-list for field type_name + 1, // 0: ethereum.eth.v1alpha1.BlindedBlobSidecars.sidecars:type_name -> ethereum.eth.v1alpha1.BlindedBlobSidecar + 1, // 1: ethereum.eth.v1alpha1.SignedBlindedBlobSidecar.message:type_name -> ethereum.eth.v1alpha1.BlindedBlobSidecar + 2, // [2:2] is the sub-list for method output_type + 2, // [2:2] is the sub-list for method input_type + 2, // [2:2] is the sub-list for extension type_name + 2, // [2:2] is the sub-list for extension extendee + 0, // [0:2] is the sub-list for field type_name } func init() { file_proto_prysm_v1alpha1_blobs_proto_init() } @@ -642,42 +389,6 @@ func file_proto_prysm_v1alpha1_blobs_proto_init() { } if !protoimpl.UnsafeEnabled { file_proto_prysm_v1alpha1_blobs_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*DeprecatedBlobSidecars); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_proto_prysm_v1alpha1_blobs_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*DeprecatedBlobSidecar); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_proto_prysm_v1alpha1_blobs_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SignedBlobSidecar); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return 
&v.unknownFields - default: - return nil - } - } - file_proto_prysm_v1alpha1_blobs_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*BlindedBlobSidecars); i { case 0: return &v.state @@ -689,7 +400,7 @@ func file_proto_prysm_v1alpha1_blobs_proto_init() { return nil } } - file_proto_prysm_v1alpha1_blobs_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + file_proto_prysm_v1alpha1_blobs_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*BlindedBlobSidecar); i { case 0: return &v.state @@ -701,7 +412,7 @@ func file_proto_prysm_v1alpha1_blobs_proto_init() { return nil } } - file_proto_prysm_v1alpha1_blobs_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + file_proto_prysm_v1alpha1_blobs_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*SignedBlindedBlobSidecar); i { case 0: return &v.state @@ -713,7 +424,7 @@ func file_proto_prysm_v1alpha1_blobs_proto_init() { return nil } } - file_proto_prysm_v1alpha1_blobs_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + file_proto_prysm_v1alpha1_blobs_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*BlobIdentifier); i { case 0: return &v.state @@ -732,7 +443,7 @@ func file_proto_prysm_v1alpha1_blobs_proto_init() { GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_proto_prysm_v1alpha1_blobs_proto_rawDesc, NumEnums: 0, - NumMessages: 7, + NumMessages: 4, NumExtensions: 0, NumServices: 0, }, diff --git a/proto/prysm/v1alpha1/blobs.proto b/proto/prysm/v1alpha1/blobs.proto index e198f636d3af..014dbd3b7345 100644 --- a/proto/prysm/v1alpha1/blobs.proto +++ b/proto/prysm/v1alpha1/blobs.proto @@ -24,26 +24,6 @@ option java_outer_classname = "BlobsProto"; option java_package = "org.ethereum.eth.v1alpha1"; option php_namespace = "Ethereum\\Eth\\v1alpha1"; -message DeprecatedBlobSidecars { - repeated DeprecatedBlobSidecar 
sidecars = 1; -} - -message DeprecatedBlobSidecar { - bytes block_root = 1 [(ethereum.eth.ext.ssz_size) = "32"]; - uint64 index = 2; - uint64 slot = 3 [(ethereum.eth.ext.cast_type) = "github.com/prysmaticlabs/prysm/v4/consensus-types/primitives.Slot"]; - bytes block_parent_root = 4 [(ethereum.eth.ext.ssz_size) = "32"]; - uint64 proposer_index = 5 [(ethereum.eth.ext.cast_type) = "github.com/prysmaticlabs/prysm/v4/consensus-types/primitives.ValidatorIndex"]; - bytes blob = 6 [(ethereum.eth.ext.ssz_size) = "blob.size"]; - bytes kzg_commitment = 7 [(ethereum.eth.ext.ssz_size) = "48"]; - bytes kzg_proof = 8 [(ethereum.eth.ext.ssz_size) = "48"]; -} - -message SignedBlobSidecar { - DeprecatedBlobSidecar message = 1; - bytes signature = 2 [(ethereum.eth.ext.ssz_size) = "96"]; -} - message BlindedBlobSidecars { repeated BlindedBlobSidecar sidecars = 1 [(ethereum.eth.ext.ssz_max) = "max_blobs_per_block.size"]; } diff --git a/proto/prysm/v1alpha1/cloners.go b/proto/prysm/v1alpha1/cloners.go index 491bf1fed735..ab726975e0ad 100644 --- a/proto/prysm/v1alpha1/cloners.go +++ b/proto/prysm/v1alpha1/cloners.go @@ -623,8 +623,8 @@ func CopyExecutionPayloadHeader(payload *enginev1.ExecutionPayloadHeader) *engin GasLimit: payload.GasLimit, GasUsed: payload.GasUsed, Timestamp: payload.Timestamp, - BaseFeePerGas: bytesutil.SafeCopyBytes(payload.BaseFeePerGas), ExtraData: bytesutil.SafeCopyBytes(payload.ExtraData), + BaseFeePerGas: bytesutil.SafeCopyBytes(payload.BaseFeePerGas), BlockHash: bytesutil.SafeCopyBytes(payload.BlockHash), TransactionsRoot: bytesutil.SafeCopyBytes(payload.TransactionsRoot), } @@ -646,8 +646,8 @@ func CopyExecutionPayloadHeaderCapella(payload *enginev1.ExecutionPayloadHeaderC GasLimit: payload.GasLimit, GasUsed: payload.GasUsed, Timestamp: payload.Timestamp, - BaseFeePerGas: bytesutil.SafeCopyBytes(payload.BaseFeePerGas), ExtraData: bytesutil.SafeCopyBytes(payload.ExtraData), + BaseFeePerGas: bytesutil.SafeCopyBytes(payload.BaseFeePerGas), BlockHash: 
bytesutil.SafeCopyBytes(payload.BlockHash), TransactionsRoot: bytesutil.SafeCopyBytes(payload.TransactionsRoot), WithdrawalsRoot: bytesutil.SafeCopyBytes(payload.WithdrawalsRoot), @@ -812,13 +812,13 @@ func CopyExecutionPayloadHeaderDeneb(payload *enginev1.ExecutionPayloadHeaderDen GasLimit: payload.GasLimit, GasUsed: payload.GasUsed, Timestamp: payload.Timestamp, - BlobGasUsed: payload.BlobGasUsed, - ExcessBlobGas: payload.ExcessBlobGas, - BaseFeePerGas: bytesutil.SafeCopyBytes(payload.BaseFeePerGas), ExtraData: bytesutil.SafeCopyBytes(payload.ExtraData), + BaseFeePerGas: bytesutil.SafeCopyBytes(payload.BaseFeePerGas), BlockHash: bytesutil.SafeCopyBytes(payload.BlockHash), TransactionsRoot: bytesutil.SafeCopyBytes(payload.TransactionsRoot), WithdrawalsRoot: bytesutil.SafeCopyBytes(payload.WithdrawalsRoot), + BlobGasUsed: payload.BlobGasUsed, + ExcessBlobGas: payload.ExcessBlobGas, } } @@ -838,13 +838,13 @@ func CopyExecutionPayloadDeneb(payload *enginev1.ExecutionPayloadDeneb) *enginev GasLimit: payload.GasLimit, GasUsed: payload.GasUsed, Timestamp: payload.Timestamp, - BlobGasUsed: payload.BlobGasUsed, - ExcessBlobGas: payload.ExcessBlobGas, ExtraData: bytesutil.SafeCopyBytes(payload.ExtraData), BaseFeePerGas: bytesutil.SafeCopyBytes(payload.BaseFeePerGas), BlockHash: bytesutil.SafeCopyBytes(payload.BlockHash), Transactions: bytesutil.SafeCopy2dBytes(payload.Transactions), Withdrawals: CopyWithdrawalSlice(payload.Withdrawals), + BlobGasUsed: payload.BlobGasUsed, + ExcessBlobGas: payload.ExcessBlobGas, } } diff --git a/proto/prysm/v1alpha1/generated.ssz.go b/proto/prysm/v1alpha1/generated.ssz.go index e29e484895b2..3f286e1c98c5 100644 --- a/proto/prysm/v1alpha1/generated.ssz.go +++ b/proto/prysm/v1alpha1/generated.ssz.go @@ -1,5 +1,5 @@ // Code generated by fastssz. DO NOT EDIT. 
-// Hash: 3f430aea0c076f79689535373e221ae26a7f8d581d9ab214f15d4fd3a761a82c +// Hash: 28837d1c80ab7592d4877ebbd9881adf756b7da77c5614263c8442c2b3dea5c2 package eth import ( @@ -15047,266 +15047,6 @@ func (h *HistoricalSummary) HashTreeRootWith(hh *ssz.Hasher) (err error) { return } -// MarshalSSZ ssz marshals the DeprecatedBlobSidecar object -func (d *DeprecatedBlobSidecar) MarshalSSZ() ([]byte, error) { - return ssz.MarshalSSZ(d) -} - -// MarshalSSZTo ssz marshals the DeprecatedBlobSidecar object to a target array -func (d *DeprecatedBlobSidecar) MarshalSSZTo(buf []byte) (dst []byte, err error) { - dst = buf - - // Field (0) 'BlockRoot' - if size := len(d.BlockRoot); size != 32 { - err = ssz.ErrBytesLengthFn("--.BlockRoot", size, 32) - return - } - dst = append(dst, d.BlockRoot...) - - // Field (1) 'Index' - dst = ssz.MarshalUint64(dst, d.Index) - - // Field (2) 'Slot' - dst = ssz.MarshalUint64(dst, uint64(d.Slot)) - - // Field (3) 'BlockParentRoot' - if size := len(d.BlockParentRoot); size != 32 { - err = ssz.ErrBytesLengthFn("--.BlockParentRoot", size, 32) - return - } - dst = append(dst, d.BlockParentRoot...) - - // Field (4) 'ProposerIndex' - dst = ssz.MarshalUint64(dst, uint64(d.ProposerIndex)) - - // Field (5) 'Blob' - if size := len(d.Blob); size != 131072 { - err = ssz.ErrBytesLengthFn("--.Blob", size, 131072) - return - } - dst = append(dst, d.Blob...) - - // Field (6) 'KzgCommitment' - if size := len(d.KzgCommitment); size != 48 { - err = ssz.ErrBytesLengthFn("--.KzgCommitment", size, 48) - return - } - dst = append(dst, d.KzgCommitment...) - - // Field (7) 'KzgProof' - if size := len(d.KzgProof); size != 48 { - err = ssz.ErrBytesLengthFn("--.KzgProof", size, 48) - return - } - dst = append(dst, d.KzgProof...) 
- - return -} - -// UnmarshalSSZ ssz unmarshals the DeprecatedBlobSidecar object -func (d *DeprecatedBlobSidecar) UnmarshalSSZ(buf []byte) error { - var err error - size := uint64(len(buf)) - if size != 131256 { - return ssz.ErrSize - } - - // Field (0) 'BlockRoot' - if cap(d.BlockRoot) == 0 { - d.BlockRoot = make([]byte, 0, len(buf[0:32])) - } - d.BlockRoot = append(d.BlockRoot, buf[0:32]...) - - // Field (1) 'Index' - d.Index = ssz.UnmarshallUint64(buf[32:40]) - - // Field (2) 'Slot' - d.Slot = github_com_prysmaticlabs_prysm_v4_consensus_types_primitives.Slot(ssz.UnmarshallUint64(buf[40:48])) - - // Field (3) 'BlockParentRoot' - if cap(d.BlockParentRoot) == 0 { - d.BlockParentRoot = make([]byte, 0, len(buf[48:80])) - } - d.BlockParentRoot = append(d.BlockParentRoot, buf[48:80]...) - - // Field (4) 'ProposerIndex' - d.ProposerIndex = github_com_prysmaticlabs_prysm_v4_consensus_types_primitives.ValidatorIndex(ssz.UnmarshallUint64(buf[80:88])) - - // Field (5) 'Blob' - if cap(d.Blob) == 0 { - d.Blob = make([]byte, 0, len(buf[88:131160])) - } - d.Blob = append(d.Blob, buf[88:131160]...) - - // Field (6) 'KzgCommitment' - if cap(d.KzgCommitment) == 0 { - d.KzgCommitment = make([]byte, 0, len(buf[131160:131208])) - } - d.KzgCommitment = append(d.KzgCommitment, buf[131160:131208]...) - - // Field (7) 'KzgProof' - if cap(d.KzgProof) == 0 { - d.KzgProof = make([]byte, 0, len(buf[131208:131256])) - } - d.KzgProof = append(d.KzgProof, buf[131208:131256]...) 
- - return err -} - -// SizeSSZ returns the ssz encoded size in bytes for the DeprecatedBlobSidecar object -func (d *DeprecatedBlobSidecar) SizeSSZ() (size int) { - size = 131256 - return -} - -// HashTreeRoot ssz hashes the DeprecatedBlobSidecar object -func (d *DeprecatedBlobSidecar) HashTreeRoot() ([32]byte, error) { - return ssz.HashWithDefaultHasher(d) -} - -// HashTreeRootWith ssz hashes the DeprecatedBlobSidecar object with a hasher -func (d *DeprecatedBlobSidecar) HashTreeRootWith(hh *ssz.Hasher) (err error) { - indx := hh.Index() - - // Field (0) 'BlockRoot' - if size := len(d.BlockRoot); size != 32 { - err = ssz.ErrBytesLengthFn("--.BlockRoot", size, 32) - return - } - hh.PutBytes(d.BlockRoot) - - // Field (1) 'Index' - hh.PutUint64(d.Index) - - // Field (2) 'Slot' - hh.PutUint64(uint64(d.Slot)) - - // Field (3) 'BlockParentRoot' - if size := len(d.BlockParentRoot); size != 32 { - err = ssz.ErrBytesLengthFn("--.BlockParentRoot", size, 32) - return - } - hh.PutBytes(d.BlockParentRoot) - - // Field (4) 'ProposerIndex' - hh.PutUint64(uint64(d.ProposerIndex)) - - // Field (5) 'Blob' - if size := len(d.Blob); size != 131072 { - err = ssz.ErrBytesLengthFn("--.Blob", size, 131072) - return - } - hh.PutBytes(d.Blob) - - // Field (6) 'KzgCommitment' - if size := len(d.KzgCommitment); size != 48 { - err = ssz.ErrBytesLengthFn("--.KzgCommitment", size, 48) - return - } - hh.PutBytes(d.KzgCommitment) - - // Field (7) 'KzgProof' - if size := len(d.KzgProof); size != 48 { - err = ssz.ErrBytesLengthFn("--.KzgProof", size, 48) - return - } - hh.PutBytes(d.KzgProof) - - if ssz.EnableVectorizedHTR { - hh.MerkleizeVectorizedHTR(indx) - } else { - hh.Merkleize(indx) - } - return -} - -// MarshalSSZ ssz marshals the SignedBlobSidecar object -func (s *SignedBlobSidecar) MarshalSSZ() ([]byte, error) { - return ssz.MarshalSSZ(s) -} - -// MarshalSSZTo ssz marshals the SignedBlobSidecar object to a target array -func (s *SignedBlobSidecar) MarshalSSZTo(buf []byte) (dst []byte, err 
error) { - dst = buf - - // Field (0) 'Message' - if s.Message == nil { - s.Message = new(DeprecatedBlobSidecar) - } - if dst, err = s.Message.MarshalSSZTo(dst); err != nil { - return - } - - // Field (1) 'Signature' - if size := len(s.Signature); size != 96 { - err = ssz.ErrBytesLengthFn("--.Signature", size, 96) - return - } - dst = append(dst, s.Signature...) - - return -} - -// UnmarshalSSZ ssz unmarshals the SignedBlobSidecar object -func (s *SignedBlobSidecar) UnmarshalSSZ(buf []byte) error { - var err error - size := uint64(len(buf)) - if size != 131352 { - return ssz.ErrSize - } - - // Field (0) 'Message' - if s.Message == nil { - s.Message = new(DeprecatedBlobSidecar) - } - if err = s.Message.UnmarshalSSZ(buf[0:131256]); err != nil { - return err - } - - // Field (1) 'Signature' - if cap(s.Signature) == 0 { - s.Signature = make([]byte, 0, len(buf[131256:131352])) - } - s.Signature = append(s.Signature, buf[131256:131352]...) - - return err -} - -// SizeSSZ returns the ssz encoded size in bytes for the SignedBlobSidecar object -func (s *SignedBlobSidecar) SizeSSZ() (size int) { - size = 131352 - return -} - -// HashTreeRoot ssz hashes the SignedBlobSidecar object -func (s *SignedBlobSidecar) HashTreeRoot() ([32]byte, error) { - return ssz.HashWithDefaultHasher(s) -} - -// HashTreeRootWith ssz hashes the SignedBlobSidecar object with a hasher -func (s *SignedBlobSidecar) HashTreeRootWith(hh *ssz.Hasher) (err error) { - indx := hh.Index() - - // Field (0) 'Message' - if err = s.Message.HashTreeRootWith(hh); err != nil { - return - } - - // Field (1) 'Signature' - if size := len(s.Signature); size != 96 { - err = ssz.ErrBytesLengthFn("--.Signature", size, 96) - return - } - hh.PutBytes(s.Signature) - - if ssz.EnableVectorizedHTR { - hh.MerkleizeVectorizedHTR(indx) - } else { - hh.Merkleize(indx) - } - return -} - // MarshalSSZ ssz marshals the BlobIdentifier object func (b *BlobIdentifier) MarshalSSZ() ([]byte, error) { return ssz.MarshalSSZ(b) diff --git 
a/runtime/interop/premine-state.go b/runtime/interop/premine-state.go index b2babe0ca068..fa98a95cbb7c 100644 --- a/runtime/interop/premine-state.go +++ b/runtime/interop/premine-state.go @@ -462,6 +462,7 @@ func (s *PremineGenesisConfig) setLatestBlockHeader(g state.BeaconState) error { ReceiptsRoot: make([]byte, 32), LogsBloom: make([]byte, 256), PrevRandao: make([]byte, 32), + ExtraData: make([]byte, 0), BaseFeePerGas: make([]byte, 32), BlockHash: make([]byte, 32), Transactions: make([][]byte, 0), @@ -486,6 +487,7 @@ func (s *PremineGenesisConfig) setLatestBlockHeader(g state.BeaconState) error { ReceiptsRoot: make([]byte, 32), LogsBloom: make([]byte, 256), PrevRandao: make([]byte, 32), + ExtraData: make([]byte, 0), BaseFeePerGas: make([]byte, 32), BlockHash: make([]byte, 32), Transactions: make([][]byte, 0), @@ -512,12 +514,11 @@ func (s *PremineGenesisConfig) setLatestBlockHeader(g state.BeaconState) error { ReceiptsRoot: make([]byte, 32), LogsBloom: make([]byte, 256), PrevRandao: make([]byte, 32), + ExtraData: make([]byte, 0), BaseFeePerGas: make([]byte, 32), BlockHash: make([]byte, 32), Transactions: make([][]byte, 0), Withdrawals: make([]*enginev1.Withdrawal, 0), - ExcessBlobGas: 0, - BlobGasUsed: 0, }, BlsToExecutionChanges: make([]*ethpb.SignedBLSToExecutionChange, 0), BlobKzgCommitments: make([][]byte, 0), @@ -623,8 +624,8 @@ func (s *PremineGenesisConfig) setExecutionPayload(g state.BeaconState) error { BlockHash: gb.Hash().Bytes(), Transactions: make([][]byte, 0), Withdrawals: make([]*enginev1.Withdrawal, 0), - ExcessBlobGas: 0, - BlobGasUsed: 0, + ExcessBlobGas: *gb.ExcessBlobGas(), + BlobGasUsed: *gb.BlobGasUsed(), } wep, err := blocks.WrappedExecutionPayloadDeneb(payload, 0) if err != nil { diff --git a/runtime/logging/blob.go b/runtime/logging/blob.go index 848bf6146dd2..728c79beb6f2 100644 --- a/runtime/logging/blob.go +++ b/runtime/logging/blob.go @@ -19,3 +19,14 @@ func BlobFields(blob blocks.ROBlob) logrus.Fields { "index": blob.Index, } } + 
+// BlockFieldsFromBlob extracts the set of fields from a given BlobSidecar which are shared by the block and +// all other sidecars for the block. +func BlockFieldsFromBlob(blob blocks.ROBlob) logrus.Fields { + return logrus.Fields{ + "slot": blob.Slot(), + "proposer_index": blob.ProposerIndex(), + "block_root": fmt.Sprintf("%#x", blob.BlockRoot()), + "parent_root": fmt.Sprintf("%#x", blob.ParentRoot()), + } +} diff --git a/runtime/prereqs/prereq.go b/runtime/prereqs/prereq.go index cd3b7742e1b2..288bd75e0318 100644 --- a/runtime/prereqs/prereq.go +++ b/runtime/prereqs/prereq.go @@ -103,7 +103,12 @@ func WarnIfPlatformNotSupported(ctx context.Context) { return } if !supported { - log.Warn("This platform is not supported. The following platforms are supported: Linux/AMD64," + - " Linux/ARM64, Mac OS X/AMD64 (10.14+ only), and Windows/AMD64") + log.Warn(`This platform is not supported. The following platforms are supported: + - Linux/AMD64 + - Linux/ARM64 + - Mac OS X/AMD64 (from 10.14+) + - Mac OS X/ARM64 (from 12.5+) + - Windows/AMD64`, + ) } } diff --git a/runtime/service_registry.go b/runtime/service_registry.go index 29ba85611380..d5bbab034ff3 100644 --- a/runtime/service_registry.go +++ b/runtime/service_registry.go @@ -31,7 +31,7 @@ type ServiceRegistry struct { serviceTypes []reflect.Type // keep an ordered slice of registered service types. } -// NewServiceRegistry starts a registry instance for convenience +// NewServiceRegistry starts a registry instance for convenience. func NewServiceRegistry() *ServiceRegistry { return &ServiceRegistry{ services: make(map[reflect.Type]Service), diff --git a/runtime/tos/tos.go b/runtime/tos/tos.go index 40090e49c5cb..de95428ab2e5 100644 --- a/runtime/tos/tos.go +++ b/runtime/tos/tos.go @@ -35,11 +35,13 @@ var ( log = logrus.WithField("prefix", "tos") ) -// VerifyTosAcceptedOrPrompt check if Tos was accepted before or asks to accept. +// VerifyTosAcceptedOrPrompt checks if Tos was accepted before or asks to accept. 
func VerifyTosAcceptedOrPrompt(ctx *cli.Context) error { - if file.Exists(filepath.Join(ctx.String(cmd.DataDirFlag.Name), acceptTosFilename)) { + tosFilePath := filepath.Join(ctx.String(cmd.DataDirFlag.Name), acceptTosFilename) + if file.Exists(tosFilePath) { return nil } + if ctx.Bool(cmd.AcceptTosFlag.Name) { saveTosAccepted(ctx) return nil @@ -49,6 +51,7 @@ func VerifyTosAcceptedOrPrompt(ctx *cli.Context) error { if err != nil { return errors.New(acceptTosPromptErrText) } + if !strings.EqualFold(input, "accept") { return errors.New("you have to accept Terms and Conditions in order to continue") } diff --git a/testing/endtoend/endtoend_setup_test.go b/testing/endtoend/endtoend_setup_test.go index c25110424a0a..a2910536e954 100644 --- a/testing/endtoend/endtoend_setup_test.go +++ b/testing/endtoend/endtoend_setup_test.go @@ -77,7 +77,7 @@ func e2eMinimal(t *testing.T, cfg *params.BeaconChainConfig, cfgo ...types.E2ECo TestFeature: true, TestDeposits: true, UsePrysmShValidator: false, - UsePprof: !longRunning, + UsePprof: true, TracingSinkEndpoint: tracingEndpoint, Evaluators: evals, EvalInterceptor: defaultInterceptor, @@ -153,7 +153,7 @@ func e2eMainnet(t *testing.T, usePrysmSh, useMultiClient bool, cfg *params.Beaco TestDeposits: true, UseFixedPeerIDs: true, UsePrysmShValidator: usePrysmSh, - UsePprof: !longRunning, + UsePprof: true, TracingSinkEndpoint: tracingEndpoint, Evaluators: evals, EvalInterceptor: defaultInterceptor, diff --git a/testing/spectest/general/deneb/kzg/BUILD.bazel b/testing/spectest/general/deneb/kzg/BUILD.bazel index b513dd3944e9..90b3f91dde8e 100644 --- a/testing/spectest/general/deneb/kzg/BUILD.bazel +++ b/testing/spectest/general/deneb/kzg/BUILD.bazel @@ -10,6 +10,7 @@ go_test( tags = ["spectest"], deps = [ "//beacon-chain/blockchain/kzg:go_default_library", + "//consensus-types/blocks:go_default_library", "//proto/prysm/v1alpha1:go_default_library", "//testing/require:go_default_library", "//testing/spectest/utils:go_default_library", 
diff --git a/testing/spectest/general/deneb/kzg/verify_blob_kzg_proof_batch_test.go b/testing/spectest/general/deneb/kzg/verify_blob_kzg_proof_batch_test.go index 938768da6999..2c9b5283b71f 100644 --- a/testing/spectest/general/deneb/kzg/verify_blob_kzg_proof_batch_test.go +++ b/testing/spectest/general/deneb/kzg/verify_blob_kzg_proof_batch_test.go @@ -7,6 +7,7 @@ import ( "github.com/ghodss/yaml" kzgPrysm "github.com/prysmaticlabs/prysm/v4/beacon-chain/blockchain/kzg" + "github.com/prysmaticlabs/prysm/v4/consensus-types/blocks" ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1" "github.com/prysmaticlabs/prysm/v4/testing/require" "github.com/prysmaticlabs/prysm/v4/testing/spectest/utils" @@ -36,37 +37,40 @@ func TestVerifyBlobKZGProofBatch(t *testing.T) { require.NoError(t, err) test := &KZGTestData{} require.NoError(t, yaml.Unmarshal(file, test)) - var sidecars []*ethpb.DeprecatedBlobSidecar + var sidecars []blocks.ROBlob blobs := test.Input.Blobs - commitments := test.Input.Commitments proofs := test.Input.Proofs + kzgs := test.Input.Commitments if len(proofs) != len(blobs) { require.Equal(t, false, test.Output) return } - var kzgs [][]byte - // Need separate loops to test length checks in - // `IsDataAvailable` + if len(kzgs) != len(blobs) { + require.Equal(t, false, test.Output) + return + } + for i, blob := range blobs { blobBytes, err := hex.DecodeString(blob[2:]) require.NoError(t, err) proofBytes, err := hex.DecodeString(proofs[i][2:]) require.NoError(t, err) - sidecar := ðpb.DeprecatedBlobSidecar{ - Blob: blobBytes, - KzgProof: proofBytes, + kzgBytes, err := hex.DecodeString(kzgs[i][2:]) + require.NoError(t, err) + sidecar := ðpb.BlobSidecar{ + Blob: blobBytes, + KzgProof: proofBytes, + KzgCommitment: kzgBytes, } - sidecars = append(sidecars, sidecar) - } - for _, commitment := range commitments { - commitmentBytes, err := hex.DecodeString(commitment[2:]) + sidecar.SignedBlockHeader = 
util.HydrateSignedBeaconHeader(ðpb.SignedBeaconBlockHeader{}) + sc, err := blocks.NewROBlob(sidecar) require.NoError(t, err) - kzgs = append(kzgs, commitmentBytes) + sidecars = append(sidecars, sc) } if test.Output { - require.NoError(t, kzgPrysm.IsDataAvailable(kzgs, sidecars)) + require.NoError(t, kzgPrysm.Verify(sidecars...)) } else { - require.NotNil(t, kzgPrysm.IsDataAvailable(kzgs, sidecars)) + require.NotNil(t, kzgPrysm.Verify(sidecars...)) } }) } diff --git a/testing/spectest/shared/common/forkchoice/builder.go b/testing/spectest/shared/common/forkchoice/builder.go index f8243707cfdf..848df5b115e4 100644 --- a/testing/spectest/shared/common/forkchoice/builder.go +++ b/testing/spectest/shared/common/forkchoice/builder.go @@ -90,7 +90,7 @@ func (bb *Builder) InvalidBlock(t testing.TB, b interfaces.ReadOnlySignedBeaconB r := bb.block(t, b) ctx, cancel := context.WithTimeout(context.TODO(), time.Duration(params.BeaconConfig().SecondsPerSlot)*time.Second) defer cancel() - require.Equal(t, true, bb.service.ReceiveBlock(ctx, b, r) != nil) + require.Equal(t, true, bb.service.ReceiveBlock(ctx, b, r, nil) != nil) } // ValidBlock receives the valid block and notifies forkchoice. @@ -98,7 +98,7 @@ func (bb *Builder) ValidBlock(t testing.TB, b interfaces.ReadOnlySignedBeaconBlo r := bb.block(t, b) ctx, cancel := context.WithTimeout(context.TODO(), time.Duration(params.BeaconConfig().SecondsPerSlot)*time.Second) defer cancel() - require.NoError(t, bb.service.ReceiveBlock(ctx, b, r)) + require.NoError(t, bb.service.ReceiveBlock(ctx, b, r, nil)) } // PoWBlock receives the block and notifies a mocked execution engine. 
diff --git a/testing/spectest/shared/deneb/ssz_static/ssz_static.go b/testing/spectest/shared/deneb/ssz_static/ssz_static.go index 91a0000f9cdf..0a30b7983a3a 100644 --- a/testing/spectest/shared/deneb/ssz_static/ssz_static.go +++ b/testing/spectest/shared/deneb/ssz_static/ssz_static.go @@ -131,8 +131,6 @@ func UnmarshalledSSZ(t *testing.T, serializedBytes []byte, folderName string) (i obj = ðpb.BlobIdentifier{} case "BlobSidecar": obj = ðpb.BlobSidecar{} - case "SignedBlobSidecar": - obj = ðpb.SignedBlobSidecar{} case "PowBlock": obj = ðpb.PowBlock{} case "Withdrawal": diff --git a/testing/util/bellatrix_state.go b/testing/util/bellatrix_state.go index c0fbecf48eca..0dc8669cd566 100644 --- a/testing/util/bellatrix_state.go +++ b/testing/util/bellatrix_state.go @@ -207,8 +207,10 @@ func buildGenesisBeaconStateBellatrix(genesisTime uint64, preState state.BeaconS ReceiptsRoot: make([]byte, 32), LogsBloom: make([]byte, 256), PrevRandao: make([]byte, 32), + ExtraData: make([]byte, 0), BaseFeePerGas: make([]byte, 32), BlockHash: make([]byte, 32), + Transactions: make([][]byte, 0), }, }).HashTreeRoot() if err != nil { @@ -241,6 +243,7 @@ func buildGenesisBeaconStateBellatrix(genesisTime uint64, preState state.BeaconS ReceiptsRoot: make([]byte, 32), LogsBloom: make([]byte, 256), PrevRandao: make([]byte, 32), + ExtraData: make([]byte, 0), BaseFeePerGas: make([]byte, 32), BlockHash: make([]byte, 32), TransactionsRoot: make([]byte, 32), diff --git a/testing/util/block.go b/testing/util/block.go index b179194233e1..951b380bc4a4 100644 --- a/testing/util/block.go +++ b/testing/util/block.go @@ -653,9 +653,10 @@ func HydrateV2BellatrixBeaconBlockBody(b *v2.BeaconBlockBodyBellatrix) *v2.Beaco ReceiptsRoot: make([]byte, fieldparams.RootLength), LogsBloom: make([]byte, 256), PrevRandao: make([]byte, fieldparams.RootLength), - ExtraData: make([]byte, fieldparams.RootLength), + ExtraData: make([]byte, 0), BaseFeePerGas: make([]byte, fieldparams.RootLength), BlockHash: make([]byte, 
fieldparams.RootLength), + Transactions: make([][]byte, 0), } } return b @@ -772,10 +773,10 @@ func HydrateBeaconBlockBodyBellatrix(b *ethpb.BeaconBlockBodyBellatrix) *ethpb.B ReceiptsRoot: make([]byte, fieldparams.RootLength), LogsBloom: make([]byte, 256), PrevRandao: make([]byte, fieldparams.RootLength), + ExtraData: make([]byte, 0), BaseFeePerGas: make([]byte, fieldparams.RootLength), BlockHash: make([]byte, fieldparams.RootLength), Transactions: make([][]byte, 0), - ExtraData: make([]byte, 0), } } return b @@ -839,10 +840,10 @@ func HydrateBlindedBeaconBlockBodyBellatrix(b *ethpb.BlindedBeaconBlockBodyBella ReceiptsRoot: make([]byte, fieldparams.RootLength), LogsBloom: make([]byte, 256), PrevRandao: make([]byte, 32), + ExtraData: make([]byte, 0), BaseFeePerGas: make([]byte, 32), BlockHash: make([]byte, 32), TransactionsRoot: make([]byte, fieldparams.RootLength), - ExtraData: make([]byte, 0), } } return b @@ -906,6 +907,7 @@ func HydrateV2BlindedBeaconBlockBodyBellatrix(b *v2.BlindedBeaconBlockBodyBellat ReceiptsRoot: make([]byte, fieldparams.RootLength), LogsBloom: make([]byte, 256), PrevRandao: make([]byte, 32), + ExtraData: make([]byte, 0), BaseFeePerGas: make([]byte, 32), BlockHash: make([]byte, 32), TransactionsRoot: make([]byte, fieldparams.RootLength), @@ -972,10 +974,11 @@ func HydrateBeaconBlockBodyCapella(b *ethpb.BeaconBlockBodyCapella) *ethpb.Beaco ReceiptsRoot: make([]byte, fieldparams.RootLength), LogsBloom: make([]byte, 256), PrevRandao: make([]byte, fieldparams.RootLength), + ExtraData: make([]byte, 0), BaseFeePerGas: make([]byte, fieldparams.RootLength), BlockHash: make([]byte, fieldparams.RootLength), Transactions: make([][]byte, 0), - ExtraData: make([]byte, 0), + Withdrawals: make([]*enginev1.Withdrawal, 0), } } return b @@ -1039,10 +1042,10 @@ func HydrateBlindedBeaconBlockBodyCapella(b *ethpb.BlindedBeaconBlockBodyCapella ReceiptsRoot: make([]byte, fieldparams.RootLength), LogsBloom: make([]byte, 256), PrevRandao: make([]byte, 32), + 
ExtraData: make([]byte, 0), BaseFeePerGas: make([]byte, 32), BlockHash: make([]byte, 32), TransactionsRoot: make([]byte, fieldparams.RootLength), - ExtraData: make([]byte, 0), WithdrawalsRoot: make([]byte, fieldparams.RootLength), } } @@ -1107,6 +1110,7 @@ func HydrateV2BlindedBeaconBlockBodyCapella(b *v2.BlindedBeaconBlockBodyCapella) ReceiptsRoot: make([]byte, fieldparams.RootLength), LogsBloom: make([]byte, 256), PrevRandao: make([]byte, 32), + ExtraData: make([]byte, 0), BaseFeePerGas: make([]byte, 32), BlockHash: make([]byte, 32), TransactionsRoot: make([]byte, fieldparams.RootLength), @@ -1217,10 +1221,11 @@ func HydrateBeaconBlockBodyDeneb(b *ethpb.BeaconBlockBodyDeneb) *ethpb.BeaconBlo ReceiptsRoot: make([]byte, fieldparams.RootLength), LogsBloom: make([]byte, 256), PrevRandao: make([]byte, fieldparams.RootLength), + ExtraData: make([]byte, 0), BaseFeePerGas: make([]byte, fieldparams.RootLength), BlockHash: make([]byte, fieldparams.RootLength), Transactions: make([][]byte, 0), - ExtraData: make([]byte, 0), + Withdrawals: make([]*enginev1.Withdrawal, 0), } } return b @@ -1258,10 +1263,11 @@ func HydrateV2BeaconBlockBodyDeneb(b *v2.BeaconBlockBodyDeneb) *v2.BeaconBlockBo ReceiptsRoot: make([]byte, fieldparams.RootLength), LogsBloom: make([]byte, 256), PrevRandao: make([]byte, fieldparams.RootLength), + ExtraData: make([]byte, 0), BaseFeePerGas: make([]byte, fieldparams.RootLength), BlockHash: make([]byte, fieldparams.RootLength), Transactions: make([][]byte, 0), - ExtraData: make([]byte, 0), + Withdrawals: make([]*enginev1.Withdrawal, 0), } } return b @@ -1351,10 +1357,10 @@ func HydrateBlindedBeaconBlockBodyDeneb(b *ethpb.BlindedBeaconBlockBodyDeneb) *e ReceiptsRoot: make([]byte, fieldparams.RootLength), LogsBloom: make([]byte, 256), PrevRandao: make([]byte, 32), + ExtraData: make([]byte, 0), BaseFeePerGas: make([]byte, 32), BlockHash: make([]byte, 32), TransactionsRoot: make([]byte, fieldparams.RootLength), - ExtraData: make([]byte, 0), WithdrawalsRoot: 
make([]byte, fieldparams.RootLength), } } @@ -1393,10 +1399,10 @@ func HydrateV2BlindedBeaconBlockBodyDeneb(b *v2.BlindedBeaconBlockBodyDeneb) *v2 ReceiptsRoot: make([]byte, fieldparams.RootLength), LogsBloom: make([]byte, 256), PrevRandao: make([]byte, 32), + ExtraData: make([]byte, 0), BaseFeePerGas: make([]byte, 32), BlockHash: make([]byte, 32), TransactionsRoot: make([]byte, fieldparams.RootLength), - ExtraData: make([]byte, 0), WithdrawalsRoot: make([]byte, fieldparams.RootLength), } } diff --git a/testing/util/capella_state.go b/testing/util/capella_state.go index 2a15c9d31250..a46e4cf50fff 100644 --- a/testing/util/capella_state.go +++ b/testing/util/capella_state.go @@ -200,8 +200,11 @@ func buildGenesisBeaconStateCapella(genesisTime uint64, preState state.BeaconSta ReceiptsRoot: make([]byte, 32), LogsBloom: make([]byte, 256), PrevRandao: make([]byte, 32), + ExtraData: make([]byte, 0), BaseFeePerGas: make([]byte, 32), BlockHash: make([]byte, 32), + Transactions: make([][]byte, 0), + Withdrawals: make([]*enginev1.Withdrawal, 0), }, }).HashTreeRoot() if err != nil { @@ -240,6 +243,7 @@ func buildGenesisBeaconStateCapella(genesisTime uint64, preState state.BeaconSta ReceiptsRoot: make([]byte, 32), LogsBloom: make([]byte, 256), PrevRandao: make([]byte, 32), + ExtraData: make([]byte, 0), BaseFeePerGas: make([]byte, 32), BlockHash: make([]byte, 32), TransactionsRoot: make([]byte, 32), diff --git a/testing/util/deneb.go b/testing/util/deneb.go index 9a9d915c23b4..97dc4e76dc9c 100644 --- a/testing/util/deneb.go +++ b/testing/util/deneb.go @@ -87,10 +87,11 @@ func GenerateTestDenebBlockWithSidecar(t *testing.T, parent [32]byte, slot primi Timestamp: 0, ExtraData: make([]byte, 0), BaseFeePerGas: bytesutil.PadTo([]byte("baseFeePerGas"), fieldparams.RootLength), - ExcessBlobGas: 0, - BlobGasUsed: 0, BlockHash: blockHash[:], Transactions: encodedBinaryTxs, + Withdrawals: make([]*enginev1.Withdrawal, 0), + BlobGasUsed: 0, + ExcessBlobGas: 0, } block := 
NewBeaconBlockDeneb() block.Block.Body.ExecutionPayload = payload @@ -188,7 +189,7 @@ func ExtendBlocksPlusBlobs(t *testing.T, blks []blocks.ROBlock, size int) ([]blo return blks, blobs } -// HackDenebForkEpoch is helpful for tests that need to set up cases where the deneb fork has passed. +// HackDenebMaxuint is helpful for tests that need to set up cases where the deneb fork has passed. // We have unit tests that assert our config matches the upstream config, where the next fork is always // set to MaxUint64 until the fork epoch is formally set. This creates an issue for tests that want to // work with slots that are defined to be after deneb because converting the max epoch to a slot leads diff --git a/testing/util/deneb_state.go b/testing/util/deneb_state.go index 24197275d1c8..4bee14125e08 100644 --- a/testing/util/deneb_state.go +++ b/testing/util/deneb_state.go @@ -200,8 +200,11 @@ func buildGenesisBeaconStateDeneb(genesisTime uint64, preState state.BeaconState ReceiptsRoot: make([]byte, 32), LogsBloom: make([]byte, 256), PrevRandao: make([]byte, 32), + ExtraData: make([]byte, 0), BaseFeePerGas: make([]byte, 32), BlockHash: make([]byte, 32), + Transactions: make([][]byte, 0), + Withdrawals: make([]*enginev1.Withdrawal, 0), }, }).HashTreeRoot() if err != nil { @@ -240,6 +243,7 @@ func buildGenesisBeaconStateDeneb(genesisTime uint64, preState state.BeaconState ReceiptsRoot: make([]byte, 32), LogsBloom: make([]byte, 256), PrevRandao: make([]byte, 32), + ExtraData: make([]byte, 0), BaseFeePerGas: make([]byte, 32), BlockHash: make([]byte, 32), TransactionsRoot: make([]byte, 32), diff --git a/testing/validator-mock/validator_client_mock.go b/testing/validator-mock/validator_client_mock.go index 82afc94b2df9..071b039d110a 100644 --- a/testing/validator-mock/validator_client_mock.go +++ b/testing/validator-mock/validator_client_mock.go @@ -1,7 +1,7 @@ // Code generated by MockGen. DO NOT EDIT. 
// Source: github.com/prysmaticlabs/prysm/v4/validator/client/iface (interfaces: ValidatorClient) -// Package mock is a generated GoMock package. +// Package validator_mock is a generated GoMock package. package validator_mock import ( @@ -247,20 +247,6 @@ func (mr *MockValidatorClientMockRecorder) ProposeExit(arg0, arg1 interface{}) * return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ProposeExit", reflect.TypeOf((*MockValidatorClient)(nil).ProposeExit), arg0, arg1) } -// StartEventStream mocks base method. -func (m *MockValidatorClient) StartEventStream(arg0 context.Context) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "StartEventStream", arg0) - ret0, _ := ret[0].(error) - return ret0 -} - -// StartEventStream indicates an expected call of StartEventStream. -func (mr *MockValidatorClientMockRecorder) StartEventStream(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StartEventStream", reflect.TypeOf((*MockValidatorClient)(nil).StartEventStream), arg0) -} - // StreamSlots mocks base method. func (m *MockValidatorClient) StreamSlots(arg0 context.Context, arg1 *eth.StreamSlotsRequest) (eth.BeaconNodeValidator_StreamSlotsClient, error) { m.ctrl.T.Helper() diff --git a/time/slots/slottime.go b/time/slots/slottime.go index 1e2f5cf28f25..587398f855f7 100644 --- a/time/slots/slottime.go +++ b/time/slots/slottime.go @@ -267,7 +267,7 @@ func TimeIntoSlot(genesisTime uint64) time.Duration { // WithinVotingWindow returns whether the current time is within the voting window // (eg. 4 seconds on mainnet) of the current slot. 
-func WithinVotingWindow(genesisTime uint64) bool { +func WithinVotingWindow(genesisTime uint64, slot primitives.Slot) bool { votingWindow := params.BeaconConfig().SecondsPerSlot / params.BeaconConfig().IntervalsPerSlot - return TimeIntoSlot(genesisTime) < time.Duration(votingWindow)*time.Second + return time.Since(StartTime(genesisTime, slot)) < time.Duration(votingWindow)*time.Second } diff --git a/time/slots/slottime_test.go b/time/slots/slottime_test.go index 42bc79466520..04ed844d8ce8 100644 --- a/time/slots/slottime_test.go +++ b/time/slots/slottime_test.go @@ -603,7 +603,7 @@ func TestTimeIntoSlot(t *testing.T) { func TestWithinVotingWindow(t *testing.T) { genesisTime := uint64(time.Now().Add(-37 * time.Second).Unix()) - require.Equal(t, true, WithinVotingWindow(genesisTime)) + require.Equal(t, true, WithinVotingWindow(genesisTime, 3)) genesisTime = uint64(time.Now().Add(-40 * time.Second).Unix()) - require.Equal(t, false, WithinVotingWindow(genesisTime)) + require.Equal(t, false, WithinVotingWindow(genesisTime, 3)) } diff --git a/tools/bootnode/bootnode.go b/tools/bootnode/bootnode.go index e03850955d28..bc2df9070093 100644 --- a/tools/bootnode/bootnode.go +++ b/tools/bootnode/bootnode.go @@ -185,7 +185,7 @@ func (h *handler) httpHandler(w http.ResponseWriter, _ *http.Request) { write(w, []byte("Node ID: "+n.ID().String()+"\n")) write(w, []byte("IP: "+n.IP().String()+"\n")) write(w, []byte(fmt.Sprintf("UDP Port: %d", n.UDP())+"\n")) - write(w, []byte(fmt.Sprintf("TCP Port: %d", n.UDP())+"\n\n")) + write(w, []byte(fmt.Sprintf("TCP Port: %d", n.TCP())+"\n\n")) } } diff --git a/tools/go/def.bzl b/tools/go/def.bzl index ccf0eb1f8480..f901844317db 100644 --- a/tools/go/def.bzl +++ b/tools/go/def.bzl @@ -8,13 +8,13 @@ def _go_test_transition_impl(settings, attr): if attr.eth_network == "minimal": settings["//proto:network"] = "minimal" - settings["@io_bazel_rules_go//go/config:tags"] += ["minimal"] + settings["@io_bazel_rules_go//go/config:tags"] = ["minimal"] + 
settings["@io_bazel_rules_go//go/config:tags"] elif attr.eth_network == "mainnet": # Default / optional settings["//proto:network"] = "mainnet" - settings["@io_bazel_rules_go//go/config:tags"] += ["mainnet"] + settings["@io_bazel_rules_go//go/config:tags"] = ["mainnet"] + settings["@io_bazel_rules_go//go/config:tags"] if attr.gotags: - settings["@io_bazel_rules_go//go/config:tags"] += attr.gotags + settings["@io_bazel_rules_go//go/config:tags"] = attr.gotags + settings["@io_bazel_rules_go//go/config:tags"] if str(settings["//command_line_option:compilation_mode"]) == "dbg": settings["@io_bazel_rules_go//go/config:debug"] = True @@ -42,9 +42,6 @@ def _go_test_transition_rule(**kwargs): attrs = dict(kwargs["attrs"]) attrs.update({ "eth_network": attr.string(values = ["mainnet", "minimal"]), - "_whitelist_function_transition": attr.label( - default = "@bazel_tools//tools/whitelists/function_transition_whitelist", - ), }) kwargs["attrs"] = attrs kwargs["cfg"] = go_test_transition diff --git a/validator/accounts/cli_manager.go b/validator/accounts/cli_manager.go index de07b4f686e5..c3a6b2e370db 100644 --- a/validator/accounts/cli_manager.go +++ b/validator/accounts/cli_manager.go @@ -3,6 +3,7 @@ package accounts import ( "context" "io" + "net/http" "os" "time" @@ -10,6 +11,7 @@ import ( grpcutil "github.com/prysmaticlabs/prysm/v4/api/grpc" "github.com/prysmaticlabs/prysm/v4/crypto/bls" "github.com/prysmaticlabs/prysm/v4/validator/accounts/wallet" + beaconApi "github.com/prysmaticlabs/prysm/v4/validator/client/beacon-api" iface "github.com/prysmaticlabs/prysm/v4/validator/client/iface" nodeClientFactory "github.com/prysmaticlabs/prysm/v4/validator/client/node-client-factory" validatorClientFactory "github.com/prysmaticlabs/prysm/v4/validator/client/validator-client-factory" @@ -80,14 +82,18 @@ func (acm *CLIManager) prepareBeaconClients(ctx context.Context) (*iface.Validat if err != nil { return nil, nil, errors.Wrapf(err, "could not dial endpoint %s", 
acm.beaconRPCProvider) } - conn := validatorHelpers.NewNodeConnection( grpcConn, acm.beaconApiEndpoint, acm.beaconApiTimeout, ) - validatorClient := validatorClientFactory.NewValidatorClient(conn) - nodeClient := nodeClientFactory.NewNodeClient(conn) + restHandler := &beaconApi.BeaconApiJsonRestHandler{ + HttpClient: http.Client{Timeout: acm.beaconApiTimeout}, + Host: acm.beaconApiEndpoint, + } + validatorClient := validatorClientFactory.NewValidatorClient(conn, restHandler) + nodeClient := nodeClientFactory.NewNodeClient(conn, restHandler) + return &validatorClient, &nodeClient, nil } diff --git a/validator/accounts/iface/wallet.go b/validator/accounts/iface/wallet.go index 9eb8299fa5b6..b5afbc90aa25 100644 --- a/validator/accounts/iface/wallet.go +++ b/validator/accounts/iface/wallet.go @@ -23,7 +23,7 @@ type Wallet interface { // Read methods for important wallet and accounts-related files. ReadFileAtPath(ctx context.Context, filePath string, fileName string) ([]byte, error) // Write methods to persist important wallet and accounts-related files to disk. - WriteFileAtPath(ctx context.Context, pathName string, fileName string, data []byte) error + WriteFileAtPath(ctx context.Context, pathName string, fileName string, data []byte) (bool, error) // Method for initializing a new keymanager. 
InitializeKeymanager(ctx context.Context, cfg InitKeymanagerConfig) (keymanager.IKeymanager, error) } diff --git a/validator/accounts/testing/mock.go b/validator/accounts/testing/mock.go index ba024e5c6849..f43ed5130e89 100644 --- a/validator/accounts/testing/mock.go +++ b/validator/accounts/testing/mock.go @@ -55,19 +55,19 @@ func (w *Wallet) Password() string { } // WriteFileAtPath -- -func (w *Wallet) WriteFileAtPath(_ context.Context, pathName, fileName string, data []byte) error { +func (w *Wallet) WriteFileAtPath(_ context.Context, pathName, fileName string, data []byte) (bool, error) { w.lock.Lock() defer w.lock.Unlock() if w.HasWriteFileError { // reset the flag to not contaminate other tests w.HasWriteFileError = false - return errors.New("could not write keystore file for accounts") + return false, errors.New("could not write keystore file for accounts") } if w.Files[pathName] == nil { w.Files[pathName] = make(map[string][]byte) } w.Files[pathName][fileName] = data - return nil + return true, nil } // ReadFileAtPath -- @@ -212,7 +212,3 @@ func (m *Validator) SetProposerSettings(_ context.Context, settings *validatorse m.proposerSettings = settings return nil } - -func (_ *Validator) StartEventStream(_ context.Context) error { - panic("implement me") -} diff --git a/validator/accounts/wallet/wallet.go b/validator/accounts/wallet/wallet.go index acbc07a1e998..fc97d577f29d 100644 --- a/validator/accounts/wallet/wallet.go +++ b/validator/accounts/wallet/wallet.go @@ -366,26 +366,27 @@ func (w *Wallet) InitializeKeymanager(ctx context.Context, cfg iface.InitKeymana } // WriteFileAtPath within the wallet directory given the desired path, filename, and raw data. 
-func (w *Wallet) WriteFileAtPath(_ context.Context, filePath, fileName string, data []byte) error { +func (w *Wallet) WriteFileAtPath(_ context.Context, filePath, fileName string, data []byte) (bool /* existed previously */, error) { accountPath := filepath.Join(w.accountsPath, filePath) hasDir, err := file.HasDir(accountPath) if err != nil { - return err + return false, err } if !hasDir { if err := file.MkdirAll(accountPath); err != nil { - return errors.Wrapf(err, "could not create path: %s", accountPath) + return false, errors.Wrapf(err, "could not create path: %s", accountPath) } } fullPath := filepath.Join(accountPath, fileName) + existedPreviously := file.Exists(fullPath) if err := file.WriteFile(fullPath, data); err != nil { - return errors.Wrapf(err, "could not write %s", filePath) + return false, errors.Wrapf(err, "could not write %s", filePath) } log.WithFields(logrus.Fields{ "path": fullPath, "fileName": fileName, }).Debug("Wrote new file at path") - return nil + return existedPreviously, nil } // ReadFileAtPath within the wallet directory given the desired path and filename. 
diff --git a/validator/accounts/wallet_create.go b/validator/accounts/wallet_create.go index b1b081bf4f2b..1e2852863228 100644 --- a/validator/accounts/wallet_create.go +++ b/validator/accounts/wallet_create.go @@ -32,7 +32,8 @@ func (acm *CLIManager) WalletCreate(ctx context.Context) (*wallet.Wallet, error) if err != nil { return nil, err } - if err = w.WriteFileAtPath(ctx, local.AccountsPath, local.AccountsKeystoreFileName, encodedAccounts); err != nil { + _, err = w.WriteFileAtPath(ctx, local.AccountsPath, local.AccountsKeystoreFileName, encodedAccounts) + if err != nil { return nil, err } log.WithField("--wallet-dir", acm.walletDir).Info( diff --git a/validator/client/BUILD.bazel b/validator/client/BUILD.bazel index 223a3f16a99f..268d0afd603c 100644 --- a/validator/client/BUILD.bazel +++ b/validator/client/BUILD.bazel @@ -32,6 +32,7 @@ go_library( "//beacon-chain/core/altair:go_default_library", "//beacon-chain/core/signing:go_default_library", "//cache/lru:go_default_library", + "//cmd:go_default_library", "//config/features:go_default_library", "//config/fieldparams:go_default_library", "//config/params:go_default_library", diff --git a/validator/client/beacon-api/BUILD.bazel b/validator/client/beacon-api/BUILD.bazel index fad6d0ea3bb0..07746d098eff 100644 --- a/validator/client/beacon-api/BUILD.bazel +++ b/validator/client/beacon-api/BUILD.bazel @@ -15,7 +15,6 @@ go_library( "domain_data.go", "doppelganger.go", "duties.go", - "event_handler.go", "genesis.go", "get_beacon_block.go", "index.go", @@ -44,7 +43,6 @@ go_library( "//beacon-chain/core/signing:go_default_library", "//beacon-chain/rpc/eth/beacon:go_default_library", "//beacon-chain/rpc/eth/config:go_default_library", - "//beacon-chain/rpc/eth/events:go_default_library", "//beacon-chain/rpc/eth/node:go_default_library", "//beacon-chain/rpc/eth/shared:go_default_library", "//beacon-chain/rpc/eth/validator:go_default_library", @@ -85,7 +83,6 @@ go_test( "domain_data_test.go", "doppelganger_test.go", 
"duties_test.go", - "event_handler_test.go", "genesis_test.go", "get_beacon_block_test.go", "index_test.go", @@ -142,7 +139,6 @@ go_test( "@com_github_golang_mock//gomock:go_default_library", "@com_github_golang_protobuf//ptypes/empty", "@com_github_pkg_errors//:go_default_library", - "@com_github_sirupsen_logrus//hooks/test:go_default_library", "@org_golang_google_protobuf//types/known/emptypb:go_default_library", "@org_golang_google_protobuf//types/known/timestamppb:go_default_library", ], diff --git a/validator/client/beacon-api/beacon_api_beacon_chain_client.go b/validator/client/beacon-api/beacon_api_beacon_chain_client.go index 08805224deba..1ae598785793 100644 --- a/validator/client/beacon-api/beacon_api_beacon_chain_client.go +++ b/validator/client/beacon-api/beacon_api_beacon_chain_client.go @@ -4,10 +4,8 @@ import ( "bytes" "context" "encoding/json" - "net/http" "reflect" "strconv" - "time" "github.com/ethereum/go-ethereum/common/hexutil" "github.com/golang/protobuf/ptypes/empty" @@ -357,12 +355,7 @@ func (c beaconApiBeaconChainClient) GetValidatorParticipation(ctx context.Contex panic("beaconApiBeaconChainClient.GetValidatorParticipation is not implemented. 
To use a fallback client, pass a fallback client as the last argument of NewBeaconApiBeaconChainClientWithFallback.") } -func NewBeaconApiBeaconChainClientWithFallback(host string, timeout time.Duration, fallbackClient iface.BeaconChainClient) iface.BeaconChainClient { - jsonRestHandler := beaconApiJsonRestHandler{ - httpClient: http.Client{Timeout: timeout}, - host: host, - } - +func NewBeaconApiBeaconChainClientWithFallback(jsonRestHandler JsonRestHandler, fallbackClient iface.BeaconChainClient) iface.BeaconChainClient { return &beaconApiBeaconChainClient{ jsonRestHandler: jsonRestHandler, fallbackClient: fallbackClient, diff --git a/validator/client/beacon-api/beacon_api_node_client.go b/validator/client/beacon-api/beacon_api_node_client.go index adc3ff6bf766..d66f26ca19db 100644 --- a/validator/client/beacon-api/beacon_api_node_client.go +++ b/validator/client/beacon-api/beacon_api_node_client.go @@ -2,9 +2,7 @@ package beacon_api import ( "context" - "net/http" "strconv" - "time" "github.com/ethereum/go-ethereum/common/hexutil" "github.com/golang/protobuf/ptypes/empty" @@ -100,12 +98,7 @@ func (c *beaconApiNodeClient) ListPeers(ctx context.Context, in *empty.Empty) (* panic("beaconApiNodeClient.ListPeers is not implemented. 
To use a fallback client, pass a fallback client as the last argument of NewBeaconApiNodeClientWithFallback.") } -func NewNodeClientWithFallback(host string, timeout time.Duration, fallbackClient iface.NodeClient) iface.NodeClient { - jsonRestHandler := beaconApiJsonRestHandler{ - httpClient: http.Client{Timeout: timeout}, - host: host, - } - +func NewNodeClientWithFallback(jsonRestHandler JsonRestHandler, fallbackClient iface.NodeClient) iface.NodeClient { return &beaconApiNodeClient{ jsonRestHandler: jsonRestHandler, fallbackClient: fallbackClient, diff --git a/validator/client/beacon-api/beacon_api_validator_client.go b/validator/client/beacon-api/beacon_api_validator_client.go index d0c761866b9a..fd9aaa13bd6f 100644 --- a/validator/client/beacon-api/beacon_api_validator_client.go +++ b/validator/client/beacon-api/beacon_api_validator_client.go @@ -2,7 +2,6 @@ package beacon_api import ( "context" - "net/http" "time" "github.com/ethereum/go-ethereum/common/hexutil" @@ -14,38 +13,17 @@ import ( "github.com/prysmaticlabs/prysm/v4/validator/client/iface" ) -type ValidatorClientOpt func(*beaconApiValidatorClient) - -func WithEventHandler(h *EventHandler) ValidatorClientOpt { - return func(c *beaconApiValidatorClient) { - c.eventHandler = h - } -} - -func WithEventErrorChannel(ch chan error) ValidatorClientOpt { - return func(c *beaconApiValidatorClient) { - c.eventErrCh = ch - } -} - type beaconApiValidatorClient struct { genesisProvider GenesisProvider dutiesProvider dutiesProvider stateValidatorsProvider StateValidatorsProvider jsonRestHandler JsonRestHandler - eventHandler *EventHandler - eventErrCh chan error beaconBlockConverter BeaconBlockConverter prysmBeaconChainCLient iface.PrysmBeaconChainClient } -func NewBeaconApiValidatorClient(host string, timeout time.Duration, opts ...ValidatorClientOpt) iface.ValidatorClient { - jsonRestHandler := beaconApiJsonRestHandler{ - httpClient: http.Client{Timeout: timeout}, - host: host, - } - - c := 
&beaconApiValidatorClient{ +func NewBeaconApiValidatorClient(jsonRestHandler JsonRestHandler) iface.ValidatorClient { + return &beaconApiValidatorClient{ genesisProvider: beaconApiGenesisProvider{jsonRestHandler: jsonRestHandler}, dutiesProvider: beaconApiDutiesProvider{jsonRestHandler: jsonRestHandler}, stateValidatorsProvider: beaconApiStateValidatorsProvider{jsonRestHandler: jsonRestHandler}, @@ -56,10 +34,6 @@ func NewBeaconApiValidatorClient(host string, timeout time.Duration, opts ...Val jsonRestHandler: jsonRestHandler, }, } - for _, o := range opts { - o(c) - } - return c } func (c *beaconApiValidatorClient) GetDuties(ctx context.Context, in *ethpb.DutiesRequest) (*ethpb.DutiesResponse, error) { @@ -175,15 +149,3 @@ func (c *beaconApiValidatorClient) WaitForActivation(ctx context.Context, in *et func (c *beaconApiValidatorClient) WaitForChainStart(ctx context.Context, _ *empty.Empty) (*ethpb.ChainStartResponse, error) { return c.waitForChainStart(ctx) } - -func (c *beaconApiValidatorClient) StartEventStream(ctx context.Context) error { - if c.eventHandler != nil { - if c.eventErrCh == nil { - return errors.New("event handler cannot be initialized without an event error channel") - } - if err := c.eventHandler.get(ctx, []string{"head"}, c.eventErrCh); err != nil { - return errors.Wrapf(err, "event handler stopped working") - } - } - return nil -} diff --git a/validator/client/beacon-api/event_handler.go b/validator/client/beacon-api/event_handler.go deleted file mode 100644 index 85efa1286559..000000000000 --- a/validator/client/beacon-api/event_handler.go +++ /dev/null @@ -1,126 +0,0 @@ -package beacon_api - -import ( - "context" - "net/http" - "strings" - "sync" - - "github.com/pkg/errors" - "github.com/prysmaticlabs/prysm/v4/api" -) - -// Currently set to the first power of 2 bigger than the size of the `head` event -// which is 446 bytes -const eventByteLimit = 512 - -// EventHandler is responsible for subscribing to the Beacon API events endpoint -// 
and dispatching received events to subscribers. -type EventHandler struct { - httpClient *http.Client - host string - subs []eventSub - sync.Mutex -} - -type eventSub struct { - name string - ch chan<- event -} - -type event struct { - eventType string - data string -} - -// NewEventHandler returns a new handler. -func NewEventHandler(httpClient *http.Client, host string) *EventHandler { - return &EventHandler{ - httpClient: httpClient, - host: host, - subs: make([]eventSub, 0), - } -} - -func (h *EventHandler) subscribe(sub eventSub) { - h.Lock() - h.subs = append(h.subs, sub) - h.Unlock() -} - -func (h *EventHandler) get(ctx context.Context, topics []string, eventErrCh chan<- error) error { - if len(topics) == 0 { - return errors.New("no topics provided") - } - - allTopics := strings.Join(topics, ",") - log.Info("Starting listening to Beacon API events on topics " + allTopics) - url := h.host + "/eth/v1/events?topics=" + allTopics - req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil) - if err != nil { - return errors.Wrap(err, "failed to create HTTP request") - } - - req.Header.Set("Accept", api.EventStreamMediaType) - req.Header.Set("Connection", "keep-alive") - - resp, err := h.httpClient.Do(req) - if err != nil { - return errors.Wrap(err, "failed to perform HTTP request") - } - - go func() { - defer func() { - if closeErr := resp.Body.Close(); closeErr != nil { - log.WithError(closeErr).Error("Failed to close events response body") - } - }() - - // We signal an EOF error in a special way. When we get this error while reading the response body, - // there might still be an event received in the body that we should handle. 
- eof := false - for { - if ctx.Err() != nil { - eventErrCh <- ctx.Err() - return - } - - rawData := make([]byte, eventByteLimit) - _, err = resp.Body.Read(rawData) - if err != nil { - if strings.Contains(err.Error(), "EOF") { - log.Error("Received EOF while reading events response body") - eof = true - } else { - eventErrCh <- err - return - } - } - - e := strings.Split(string(rawData), "\n") - // We expect that the event format will contain event type and data separated with a newline - if len(e) < 2 { - // We reached EOF and there is no event to send - if eof { - return - } - continue - } - - for _, sub := range h.subs { - select { - case sub.ch <- event{eventType: e[0], data: e[1]}: - // Event sent successfully. - default: - log.Warn("Subscriber '" + sub.name + "' not ready to receive events") - } - } - // We reached EOF and sent the last event - if eof { - return - } - } - }() - - return nil -} diff --git a/validator/client/beacon-api/event_handler_test.go b/validator/client/beacon-api/event_handler_test.go deleted file mode 100644 index 3de171cd4d33..000000000000 --- a/validator/client/beacon-api/event_handler_test.go +++ /dev/null @@ -1,55 +0,0 @@ -package beacon_api - -import ( - "context" - "fmt" - "net/http" - "net/http/httptest" - "testing" - "time" - - "github.com/prysmaticlabs/prysm/v4/testing/assert" - "github.com/prysmaticlabs/prysm/v4/testing/require" - logtest "github.com/sirupsen/logrus/hooks/test" -) - -func TestEventHandler(t *testing.T) { - logHook := logtest.NewGlobal() - - mux := http.NewServeMux() - mux.HandleFunc("/eth/v1/events", func(w http.ResponseWriter, r *http.Request) { - flusher, ok := w.(http.Flusher) - require.Equal(t, true, ok) - _, err := fmt.Fprint(w, "head\ndata\n\n") - require.NoError(t, err) - flusher.Flush() - }) - server := httptest.NewServer(mux) - defer server.Close() - - handler := NewEventHandler(http.DefaultClient, server.URL) - ch1 := make(chan event, 1) - sub1 := eventSub{ch: ch1} - ch2 := make(chan event, 1) - sub2 
:= eventSub{ch: ch2} - ch3 := make(chan event, 1) - sub3 := eventSub{name: "sub3", ch: ch3} - // fill up the channel so that it can't receive more events - ch3 <- event{} - handler.subscribe(sub1) - handler.subscribe(sub2) - handler.subscribe(sub3) - - require.NoError(t, handler.get(context.Background(), []string{"head"}, make(chan error))) - // make sure the goroutine inside handler.get is invoked - time.Sleep(500 * time.Millisecond) - - e := <-ch1 - assert.Equal(t, "head", e.eventType) - assert.Equal(t, "data", e.data) - e = <-ch2 - assert.Equal(t, "head", e.eventType) - assert.Equal(t, "data", e.data) - - assert.LogsContain(t, logHook, "Subscriber 'sub3' not ready to receive events") -} diff --git a/validator/client/beacon-api/genesis.go b/validator/client/beacon-api/genesis.go index 5059da4ca74f..dcc819a12747 100644 --- a/validator/client/beacon-api/genesis.go +++ b/validator/client/beacon-api/genesis.go @@ -45,10 +45,6 @@ func (c beaconApiValidatorClient) waitForChainStart(ctx context.Context) (*ethpb return nil, errors.Wrapf(err, "failed to parse genesis time: %s", genesis.GenesisTime) } - chainStartResponse := ðpb.ChainStartResponse{} - chainStartResponse.Started = true - chainStartResponse.GenesisTime = genesisTime - if !validRoot(genesis.GenesisValidatorsRoot) { return nil, errors.Errorf("invalid genesis validators root: %s", genesis.GenesisValidatorsRoot) } @@ -57,7 +53,12 @@ func (c beaconApiValidatorClient) waitForChainStart(ctx context.Context) (*ethpb if err != nil { return nil, errors.Wrap(err, "failed to decode genesis validators root") } - chainStartResponse.GenesisValidatorsRoot = genesisValidatorRoot + + chainStartResponse := ðpb.ChainStartResponse{ + Started: true, + GenesisTime: genesisTime, + GenesisValidatorsRoot: genesisValidatorRoot, + } return chainStartResponse, nil } diff --git a/validator/client/beacon-api/json_rest_handler.go b/validator/client/beacon-api/json_rest_handler.go index 9a7c30e3e312..9271a84826df 100644 --- 
a/validator/client/beacon-api/json_rest_handler.go +++ b/validator/client/beacon-api/json_rest_handler.go @@ -13,29 +13,29 @@ import ( ) type JsonRestHandler interface { - Get(ctx context.Context, query string, resp interface{}) error + Get(ctx context.Context, endpoint string, resp interface{}) error Post(ctx context.Context, endpoint string, headers map[string]string, data *bytes.Buffer, resp interface{}) error } -type beaconApiJsonRestHandler struct { - httpClient http.Client - host string +type BeaconApiJsonRestHandler struct { + HttpClient http.Client + Host string } // Get sends a GET request and decodes the response body as a JSON object into the passed in object. // If an HTTP error is returned, the body is decoded as a DefaultJsonError JSON object and returned as the first return value. -func (c beaconApiJsonRestHandler) Get(ctx context.Context, endpoint string, resp interface{}) error { +func (c BeaconApiJsonRestHandler) Get(ctx context.Context, endpoint string, resp interface{}) error { if resp == nil { return errors.New("resp is nil") } - url := c.host + endpoint + url := c.Host + endpoint req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil) if err != nil { return errors.Wrapf(err, "failed to create request for endpoint %s", url) } - httpResp, err := c.httpClient.Do(req) + httpResp, err := c.HttpClient.Do(req) if err != nil { return errors.Wrapf(err, "failed to perform request for endpoint %s", url) } @@ -50,7 +50,7 @@ func (c beaconApiJsonRestHandler) Get(ctx context.Context, endpoint string, resp // Post sends a POST request and decodes the response body as a JSON object into the passed in object. // If an HTTP error is returned, the body is decoded as a DefaultJsonError JSON object and returned as the first return value. 
-func (c beaconApiJsonRestHandler) Post( +func (c BeaconApiJsonRestHandler) Post( ctx context.Context, apiEndpoint string, headers map[string]string, @@ -61,7 +61,7 @@ func (c beaconApiJsonRestHandler) Post( return errors.New("data is nil") } - url := c.host + apiEndpoint + url := c.Host + apiEndpoint req, err := http.NewRequestWithContext(ctx, http.MethodPost, url, data) if err != nil { return errors.Wrapf(err, "failed to create request for endpoint %s", url) @@ -72,7 +72,7 @@ func (c beaconApiJsonRestHandler) Post( } req.Header.Set("Content-Type", api.JsonMediaType) - httpResp, err := c.httpClient.Do(req) + httpResp, err := c.HttpClient.Do(req) if err != nil { return errors.Wrapf(err, "failed to perform request for endpoint %s", url) } diff --git a/validator/client/beacon-api/json_rest_handler_test.go b/validator/client/beacon-api/json_rest_handler_test.go index b9652a6d6f90..03ba8d9e3088 100644 --- a/validator/client/beacon-api/json_rest_handler_test.go +++ b/validator/client/beacon-api/json_rest_handler_test.go @@ -40,9 +40,9 @@ func TestGet(t *testing.T) { server := httptest.NewServer(mux) defer server.Close() - jsonRestHandler := beaconApiJsonRestHandler{ - httpClient: http.Client{Timeout: time.Second * 5}, - host: server.URL, + jsonRestHandler := BeaconApiJsonRestHandler{ + HttpClient: http.Client{Timeout: time.Second * 5}, + Host: server.URL, } resp := &beacon.GetGenesisResponse{} require.NoError(t, jsonRestHandler.Get(ctx, endpoint+"?arg1=abc&arg2=def", resp)) @@ -86,9 +86,9 @@ func TestPost(t *testing.T) { server := httptest.NewServer(mux) defer server.Close() - jsonRestHandler := beaconApiJsonRestHandler{ - httpClient: http.Client{Timeout: time.Second * 5}, - host: server.URL, + jsonRestHandler := BeaconApiJsonRestHandler{ + HttpClient: http.Client{Timeout: time.Second * 5}, + Host: server.URL, } resp := &beacon.GetGenesisResponse{} require.NoError(t, jsonRestHandler.Post(ctx, endpoint, headers, bytes.NewBuffer(dataBytes), resp)) diff --git 
a/validator/client/beacon-api/propose_beacon_block.go b/validator/client/beacon-api/propose_beacon_block.go index 536adb510061..fd48d72ca9e1 100644 --- a/validator/client/beacon-api/propose_beacon_block.go +++ b/validator/client/beacon-api/propose_beacon_block.go @@ -223,19 +223,19 @@ func marshallBeaconBlockBellatrix(block *ethpb.SignedBeaconBlockBellatrix) ([]by SyncCommitteeSignature: hexutil.Encode(block.Block.Body.SyncAggregate.SyncCommitteeSignature), }, ExecutionPayload: &shared.ExecutionPayload{ - BaseFeePerGas: bytesutil.LittleEndianBytesToBigInt(block.Block.Body.ExecutionPayload.BaseFeePerGas).String(), - BlockHash: hexutil.Encode(block.Block.Body.ExecutionPayload.BlockHash), - BlockNumber: uint64ToString(block.Block.Body.ExecutionPayload.BlockNumber), - ExtraData: hexutil.Encode(block.Block.Body.ExecutionPayload.ExtraData), + ParentHash: hexutil.Encode(block.Block.Body.ExecutionPayload.ParentHash), FeeRecipient: hexutil.Encode(block.Block.Body.ExecutionPayload.FeeRecipient), - GasLimit: uint64ToString(block.Block.Body.ExecutionPayload.GasLimit), - GasUsed: uint64ToString(block.Block.Body.ExecutionPayload.GasUsed), + StateRoot: hexutil.Encode(block.Block.Body.ExecutionPayload.StateRoot), + ReceiptsRoot: hexutil.Encode(block.Block.Body.ExecutionPayload.ReceiptsRoot), LogsBloom: hexutil.Encode(block.Block.Body.ExecutionPayload.LogsBloom), - ParentHash: hexutil.Encode(block.Block.Body.ExecutionPayload.ParentHash), PrevRandao: hexutil.Encode(block.Block.Body.ExecutionPayload.PrevRandao), - ReceiptsRoot: hexutil.Encode(block.Block.Body.ExecutionPayload.ReceiptsRoot), - StateRoot: hexutil.Encode(block.Block.Body.ExecutionPayload.StateRoot), + BlockNumber: uint64ToString(block.Block.Body.ExecutionPayload.BlockNumber), + GasLimit: uint64ToString(block.Block.Body.ExecutionPayload.GasLimit), + GasUsed: uint64ToString(block.Block.Body.ExecutionPayload.GasUsed), Timestamp: uint64ToString(block.Block.Body.ExecutionPayload.Timestamp), + ExtraData: 
hexutil.Encode(block.Block.Body.ExecutionPayload.ExtraData), + BaseFeePerGas: bytesutil.LittleEndianBytesToBigInt(block.Block.Body.ExecutionPayload.BaseFeePerGas).String(), + BlockHash: hexutil.Encode(block.Block.Body.ExecutionPayload.BlockHash), Transactions: jsonifyTransactions(block.Block.Body.ExecutionPayload.Transactions), }, }, @@ -267,19 +267,19 @@ func marshallBeaconBlockBlindedBellatrix(block *ethpb.SignedBlindedBeaconBlockBe SyncCommitteeSignature: hexutil.Encode(block.Block.Body.SyncAggregate.SyncCommitteeSignature), }, ExecutionPayloadHeader: &shared.ExecutionPayloadHeader{ - BaseFeePerGas: bytesutil.LittleEndianBytesToBigInt(block.Block.Body.ExecutionPayloadHeader.BaseFeePerGas).String(), - BlockHash: hexutil.Encode(block.Block.Body.ExecutionPayloadHeader.BlockHash), - BlockNumber: uint64ToString(block.Block.Body.ExecutionPayloadHeader.BlockNumber), - ExtraData: hexutil.Encode(block.Block.Body.ExecutionPayloadHeader.ExtraData), + ParentHash: hexutil.Encode(block.Block.Body.ExecutionPayloadHeader.ParentHash), FeeRecipient: hexutil.Encode(block.Block.Body.ExecutionPayloadHeader.FeeRecipient), - GasLimit: uint64ToString(block.Block.Body.ExecutionPayloadHeader.GasLimit), - GasUsed: uint64ToString(block.Block.Body.ExecutionPayloadHeader.GasUsed), + StateRoot: hexutil.Encode(block.Block.Body.ExecutionPayloadHeader.StateRoot), + ReceiptsRoot: hexutil.Encode(block.Block.Body.ExecutionPayloadHeader.ReceiptsRoot), LogsBloom: hexutil.Encode(block.Block.Body.ExecutionPayloadHeader.LogsBloom), - ParentHash: hexutil.Encode(block.Block.Body.ExecutionPayloadHeader.ParentHash), PrevRandao: hexutil.Encode(block.Block.Body.ExecutionPayloadHeader.PrevRandao), - ReceiptsRoot: hexutil.Encode(block.Block.Body.ExecutionPayloadHeader.ReceiptsRoot), - StateRoot: hexutil.Encode(block.Block.Body.ExecutionPayloadHeader.StateRoot), + BlockNumber: uint64ToString(block.Block.Body.ExecutionPayloadHeader.BlockNumber), + GasLimit: 
uint64ToString(block.Block.Body.ExecutionPayloadHeader.GasLimit), + GasUsed: uint64ToString(block.Block.Body.ExecutionPayloadHeader.GasUsed), Timestamp: uint64ToString(block.Block.Body.ExecutionPayloadHeader.Timestamp), + ExtraData: hexutil.Encode(block.Block.Body.ExecutionPayloadHeader.ExtraData), + BaseFeePerGas: bytesutil.LittleEndianBytesToBigInt(block.Block.Body.ExecutionPayloadHeader.BaseFeePerGas).String(), + BlockHash: hexutil.Encode(block.Block.Body.ExecutionPayloadHeader.BlockHash), TransactionsRoot: hexutil.Encode(block.Block.Body.ExecutionPayloadHeader.TransactionsRoot), }, }, @@ -311,19 +311,19 @@ func marshallBeaconBlockCapella(block *ethpb.SignedBeaconBlockCapella) ([]byte, SyncCommitteeSignature: hexutil.Encode(block.Block.Body.SyncAggregate.SyncCommitteeSignature), }, ExecutionPayload: &shared.ExecutionPayloadCapella{ - BaseFeePerGas: bytesutil.LittleEndianBytesToBigInt(block.Block.Body.ExecutionPayload.BaseFeePerGas).String(), - BlockHash: hexutil.Encode(block.Block.Body.ExecutionPayload.BlockHash), - BlockNumber: uint64ToString(block.Block.Body.ExecutionPayload.BlockNumber), - ExtraData: hexutil.Encode(block.Block.Body.ExecutionPayload.ExtraData), + ParentHash: hexutil.Encode(block.Block.Body.ExecutionPayload.ParentHash), FeeRecipient: hexutil.Encode(block.Block.Body.ExecutionPayload.FeeRecipient), - GasLimit: uint64ToString(block.Block.Body.ExecutionPayload.GasLimit), - GasUsed: uint64ToString(block.Block.Body.ExecutionPayload.GasUsed), + StateRoot: hexutil.Encode(block.Block.Body.ExecutionPayload.StateRoot), + ReceiptsRoot: hexutil.Encode(block.Block.Body.ExecutionPayload.ReceiptsRoot), LogsBloom: hexutil.Encode(block.Block.Body.ExecutionPayload.LogsBloom), - ParentHash: hexutil.Encode(block.Block.Body.ExecutionPayload.ParentHash), PrevRandao: hexutil.Encode(block.Block.Body.ExecutionPayload.PrevRandao), - ReceiptsRoot: hexutil.Encode(block.Block.Body.ExecutionPayload.ReceiptsRoot), - StateRoot: 
hexutil.Encode(block.Block.Body.ExecutionPayload.StateRoot), + BlockNumber: uint64ToString(block.Block.Body.ExecutionPayload.BlockNumber), + GasLimit: uint64ToString(block.Block.Body.ExecutionPayload.GasLimit), + GasUsed: uint64ToString(block.Block.Body.ExecutionPayload.GasUsed), Timestamp: uint64ToString(block.Block.Body.ExecutionPayload.Timestamp), + ExtraData: hexutil.Encode(block.Block.Body.ExecutionPayload.ExtraData), + BaseFeePerGas: bytesutil.LittleEndianBytesToBigInt(block.Block.Body.ExecutionPayload.BaseFeePerGas).String(), + BlockHash: hexutil.Encode(block.Block.Body.ExecutionPayload.BlockHash), Transactions: jsonifyTransactions(block.Block.Body.ExecutionPayload.Transactions), Withdrawals: jsonifyWithdrawals(block.Block.Body.ExecutionPayload.Withdrawals), }, @@ -357,19 +357,19 @@ func marshallBeaconBlockBlindedCapella(block *ethpb.SignedBlindedBeaconBlockCape SyncCommitteeSignature: hexutil.Encode(block.Block.Body.SyncAggregate.SyncCommitteeSignature), }, ExecutionPayloadHeader: &shared.ExecutionPayloadHeaderCapella{ - BaseFeePerGas: bytesutil.LittleEndianBytesToBigInt(block.Block.Body.ExecutionPayloadHeader.BaseFeePerGas).String(), - BlockHash: hexutil.Encode(block.Block.Body.ExecutionPayloadHeader.BlockHash), - BlockNumber: uint64ToString(block.Block.Body.ExecutionPayloadHeader.BlockNumber), - ExtraData: hexutil.Encode(block.Block.Body.ExecutionPayloadHeader.ExtraData), + ParentHash: hexutil.Encode(block.Block.Body.ExecutionPayloadHeader.ParentHash), FeeRecipient: hexutil.Encode(block.Block.Body.ExecutionPayloadHeader.FeeRecipient), - GasLimit: uint64ToString(block.Block.Body.ExecutionPayloadHeader.GasLimit), - GasUsed: uint64ToString(block.Block.Body.ExecutionPayloadHeader.GasUsed), + StateRoot: hexutil.Encode(block.Block.Body.ExecutionPayloadHeader.StateRoot), + ReceiptsRoot: hexutil.Encode(block.Block.Body.ExecutionPayloadHeader.ReceiptsRoot), LogsBloom: hexutil.Encode(block.Block.Body.ExecutionPayloadHeader.LogsBloom), - ParentHash: 
hexutil.Encode(block.Block.Body.ExecutionPayloadHeader.ParentHash), PrevRandao: hexutil.Encode(block.Block.Body.ExecutionPayloadHeader.PrevRandao), - ReceiptsRoot: hexutil.Encode(block.Block.Body.ExecutionPayloadHeader.ReceiptsRoot), - StateRoot: hexutil.Encode(block.Block.Body.ExecutionPayloadHeader.StateRoot), + BlockNumber: uint64ToString(block.Block.Body.ExecutionPayloadHeader.BlockNumber), + GasLimit: uint64ToString(block.Block.Body.ExecutionPayloadHeader.GasLimit), + GasUsed: uint64ToString(block.Block.Body.ExecutionPayloadHeader.GasUsed), Timestamp: uint64ToString(block.Block.Body.ExecutionPayloadHeader.Timestamp), + ExtraData: hexutil.Encode(block.Block.Body.ExecutionPayloadHeader.ExtraData), + BaseFeePerGas: bytesutil.LittleEndianBytesToBigInt(block.Block.Body.ExecutionPayloadHeader.BaseFeePerGas).String(), + BlockHash: hexutil.Encode(block.Block.Body.ExecutionPayloadHeader.BlockHash), TransactionsRoot: hexutil.Encode(block.Block.Body.ExecutionPayloadHeader.TransactionsRoot), WithdrawalsRoot: hexutil.Encode(block.Block.Body.ExecutionPayloadHeader.WithdrawalsRoot), }, diff --git a/validator/client/beacon-api/prysm_beacon_chain_client.go b/validator/client/beacon-api/prysm_beacon_chain_client.go index 6a9876ac7042..c8227674677b 100644 --- a/validator/client/beacon-api/prysm_beacon_chain_client.go +++ b/validator/client/beacon-api/prysm_beacon_chain_client.go @@ -3,11 +3,9 @@ package beacon_api import ( "context" "fmt" - "net/http" neturl "net/url" "strconv" "strings" - "time" "github.com/pkg/errors" "github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/prysm/validator" @@ -16,12 +14,7 @@ import ( ) // NewPrysmBeaconChainClient returns implementation of iface.PrysmBeaconChainClient. 
-func NewPrysmBeaconChainClient(host string, timeout time.Duration, nodeClient iface.NodeClient) iface.PrysmBeaconChainClient { - jsonRestHandler := beaconApiJsonRestHandler{ - httpClient: http.Client{Timeout: timeout}, - host: host, - } - +func NewPrysmBeaconChainClient(jsonRestHandler JsonRestHandler, nodeClient iface.NodeClient) iface.PrysmBeaconChainClient { return prysmBeaconChainClient{ jsonRestHandler: jsonRestHandler, nodeClient: nodeClient, diff --git a/validator/client/beacon-api/stream_blocks.go b/validator/client/beacon-api/stream_blocks.go index 2d6d453aec3a..d68b7e030af2 100644 --- a/validator/client/beacon-api/stream_blocks.go +++ b/validator/client/beacon-api/stream_blocks.go @@ -4,12 +4,10 @@ import ( "bytes" "context" "encoding/json" - "strconv" "time" "github.com/ethereum/go-ethereum/common/hexutil" "github.com/pkg/errors" - "github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/eth/events" "github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/eth/shared" "github.com/prysmaticlabs/prysm/v4/consensus-types/primitives" ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1" @@ -28,8 +26,8 @@ type streamSlotsClient struct { ctx context.Context beaconApiClient beaconApiValidatorClient streamSlotsRequest *ethpb.StreamSlotsRequest + prevBlockSlot primitives.Slot pingDelay time.Duration - ch chan event } type streamBlocksAltairClient struct { @@ -48,14 +46,11 @@ type headSignedBeaconBlockResult struct { } func (c beaconApiValidatorClient) streamSlots(ctx context.Context, in *ethpb.StreamSlotsRequest, pingDelay time.Duration) ethpb.BeaconNodeValidator_StreamSlotsClient { - ch := make(chan event, 1) - c.eventHandler.subscribe(eventSub{name: "stream slots", ch: ch}) return &streamSlotsClient{ ctx: ctx, beaconApiClient: c, streamSlotsRequest: in, pingDelay: pingDelay, - ch: ch, } } @@ -69,27 +64,28 @@ func (c beaconApiValidatorClient) streamBlocks(ctx context.Context, in *ethpb.St } func (c *streamSlotsClient) Recv() (*ethpb.StreamSlotsResponse, error) { 
- for { + result, err := c.beaconApiClient.getHeadSignedBeaconBlock(c.ctx) + if err != nil { + return nil, errors.Wrap(err, "failed to get latest signed block") + } + + // We keep querying the beacon chain for the latest block until we receive a new slot + for (c.streamSlotsRequest.VerifiedOnly && result.executionOptimistic) || c.prevBlockSlot == result.slot { select { - case rawEvent := <-c.ch: - if rawEvent.eventType != events.HeadTopic { - continue - } - e := &events.HeadEvent{} - if err := json.Unmarshal([]byte(rawEvent.data), e); err != nil { - return nil, errors.Wrap(err, "failed to unmarshal head event into JSON") - } - uintSlot, err := strconv.ParseUint(e.Slot, 10, 64) + case <-time.After(c.pingDelay): + result, err = c.beaconApiClient.getHeadSignedBeaconBlock(c.ctx) if err != nil { - return nil, errors.Wrap(err, "failed to parse slot") + return nil, errors.Wrap(err, "failed to get latest signed block") } - return &ethpb.StreamSlotsResponse{ - Slot: primitives.Slot(uintSlot), - }, nil case <-c.ctx.Done(): return nil, errors.New("context canceled") } } + + c.prevBlockSlot = result.slot + return &ethpb.StreamSlotsResponse{ + Slot: result.slot, + }, nil } func (c *streamBlocksAltairClient) Recv() (*ethpb.StreamBlocksResponse, error) { diff --git a/validator/client/beacon-chain-client-factory/beacon_chain_client_factory.go b/validator/client/beacon-chain-client-factory/beacon_chain_client_factory.go index 0cc5d888584e..3b15003085e2 100644 --- a/validator/client/beacon-chain-client-factory/beacon_chain_client_factory.go +++ b/validator/client/beacon-chain-client-factory/beacon_chain_client_factory.go @@ -9,30 +9,18 @@ import ( validatorHelpers "github.com/prysmaticlabs/prysm/v4/validator/helpers" ) -func NewBeaconChainClient(validatorConn validatorHelpers.NodeConnection) iface.BeaconChainClient { +func NewBeaconChainClient(validatorConn validatorHelpers.NodeConnection, jsonRestHandler beaconApi.JsonRestHandler) iface.BeaconChainClient { grpcClient = 
grpcApi.NewGrpcBeaconChainClient(validatorConn.GetGrpcClientConn()) - featureFlags := features.Get() - - if featureFlags.EnableBeaconRESTApi { - return beaconApi.NewBeaconApiBeaconChainClientWithFallback( - validatorConn.GetBeaconApiUrl(), - validatorConn.GetBeaconApiTimeout(), - grpcClient, - ) + if features.Get().EnableBeaconRESTApi { + return beaconApi.NewBeaconApiBeaconChainClientWithFallback(jsonRestHandler, grpcClient) } else { return grpcClient } } -func NewPrysmBeaconClient(validatorConn validatorHelpers.NodeConnection) iface.PrysmBeaconChainClient { - featureFlags := features.Get() - - if featureFlags.EnableBeaconRESTApi { - return beaconApi.NewPrysmBeaconChainClient( - validatorConn.GetBeaconApiUrl(), - validatorConn.GetBeaconApiTimeout(), - nodeClientFactory.NewNodeClient(validatorConn), - ) +func NewPrysmBeaconClient(validatorConn validatorHelpers.NodeConnection, jsonRestHandler beaconApi.JsonRestHandler) iface.PrysmBeaconChainClient { + if features.Get().EnableBeaconRESTApi { + return beaconApi.NewPrysmBeaconChainClient(jsonRestHandler, nodeClientFactory.NewNodeClient(validatorConn, jsonRestHandler)) } else { return grpcApi.NewGrpcPrysmBeaconChainClient(validatorConn.GetGrpcClientConn()) } diff --git a/validator/client/grpc-api/grpc_validator_client.go b/validator/client/grpc-api/grpc_validator_client.go index cfbe34455cc5..aa1c079130c6 100644 --- a/validator/client/grpc-api/grpc_validator_client.go +++ b/validator/client/grpc-api/grpc_validator_client.go @@ -141,8 +141,3 @@ func (c *grpcValidatorClient) AggregatedSigAndAggregationBits( func NewGrpcValidatorClient(cc grpc.ClientConnInterface) iface.ValidatorClient { return &grpcValidatorClient{ethpb.NewBeaconNodeValidatorClient(cc)} } - -// StartEventStream doesn't do anything in gRPC client -func (c *grpcValidatorClient) StartEventStream(context.Context) error { - return nil -} diff --git a/validator/client/iface/validator.go b/validator/client/iface/validator.go index 589086f5e5b7..a43fb97f6606 
100644 --- a/validator/client/iface/validator.go +++ b/validator/client/iface/validator.go @@ -64,7 +64,6 @@ type Validator interface { SignValidatorRegistrationRequest(ctx context.Context, signer SigningFunc, newValidatorRegistration *ethpb.ValidatorRegistrationV1) (*ethpb.SignedValidatorRegistrationV1, error) ProposerSettings() *validatorserviceconfig.ProposerSettings SetProposerSettings(context.Context, *validatorserviceconfig.ProposerSettings) error - StartEventStream(ctx context.Context) error } // SigningFunc interface defines a type for the a function that signs a message diff --git a/validator/client/iface/validator_client.go b/validator/client/iface/validator_client.go index c29f47d1587c..e83e5d1faa4f 100644 --- a/validator/client/iface/validator_client.go +++ b/validator/client/iface/validator_client.go @@ -34,5 +34,4 @@ type ValidatorClient interface { SubmitSignedContributionAndProof(ctx context.Context, in *ethpb.SignedContributionAndProof) (*empty.Empty, error) StreamSlots(ctx context.Context, in *ethpb.StreamSlotsRequest) (ethpb.BeaconNodeValidator_StreamSlotsClient, error) SubmitValidatorRegistrations(ctx context.Context, in *ethpb.SignedValidatorRegistrationsV1) (*empty.Empty, error) - StartEventStream(ctx context.Context) error } diff --git a/validator/client/key_reload.go b/validator/client/key_reload.go index cbfc67a74eed..bb8add0fde3f 100644 --- a/validator/client/key_reload.go +++ b/validator/client/key_reload.go @@ -48,10 +48,5 @@ func (v *validator) HandleKeyReload(ctx context.Context, currentKeys [][fieldpar valCount = int64(valCounts[0].Count) } - anyActive = v.checkAndLogValidatorStatus(statuses, valCount) - if anyActive { - logActiveValidatorStatus(statuses) - } - - return anyActive, nil + return v.checkAndLogValidatorStatus(statuses, valCount), nil } diff --git a/validator/client/node-client-factory/node_client_factory.go b/validator/client/node-client-factory/node_client_factory.go index 90f60c8c9528..65a6158bbe5b 100644 --- 
a/validator/client/node-client-factory/node_client_factory.go +++ b/validator/client/node-client-factory/node_client_factory.go @@ -8,12 +8,10 @@ import ( validatorHelpers "github.com/prysmaticlabs/prysm/v4/validator/helpers" ) -func NewNodeClient(validatorConn validatorHelpers.NodeConnection) iface.NodeClient { +func NewNodeClient(validatorConn validatorHelpers.NodeConnection, jsonRestHandler beaconApi.JsonRestHandler) iface.NodeClient { grpcClient := grpcApi.NewNodeClient(validatorConn.GetGrpcClientConn()) - featureFlags := features.Get() - - if featureFlags.EnableBeaconRESTApi { - return beaconApi.NewNodeClientWithFallback(validatorConn.GetBeaconApiUrl(), validatorConn.GetBeaconApiTimeout(), grpcClient) + if features.Get().EnableBeaconRESTApi { + return beaconApi.NewNodeClientWithFallback(jsonRestHandler, grpcClient) } else { return grpcClient } diff --git a/validator/client/runner.go b/validator/client/runner.go index a85ff92ab598..16d71717ac13 100644 --- a/validator/client/runner.go +++ b/validator/client/runner.go @@ -18,7 +18,7 @@ import ( "google.golang.org/grpc/status" ) -// time to wait before trying to reconnect with beacon node. +// Time to wait before trying to reconnect with beacon node. var backOffPeriod = 10 * time.Second // Run the main validator routine. 
This routine exits if the context is @@ -149,8 +149,13 @@ func initializeValidatorAndGetHeadSlot(ctx context.Context, v iface.Validator) ( ticker := time.NewTicker(backOffPeriod) defer ticker.Stop() - var headSlot primitives.Slot firstTime := true + + var ( + headSlot primitives.Slot + err error + ) + for { if !firstTime { if ctx.Err() != nil { @@ -158,55 +163,54 @@ func initializeValidatorAndGetHeadSlot(ctx context.Context, v iface.Validator) ( return headSlot, errors.New("context canceled") } <-ticker.C - } else { - firstTime = false - } - err := v.WaitForChainStart(ctx) - if isConnectionError(err) { - log.WithError(err).Warn("Could not determine if beacon chain started") - continue } - if err != nil { + + firstTime = false + + if err := v.WaitForChainStart(ctx); err != nil { + if isConnectionError(err) { + log.WithError(err).Warn("Could not determine if beacon chain started") + continue + } + log.WithError(err).Fatal("Could not determine if beacon chain started") } - err = v.WaitForKeymanagerInitialization(ctx) - if err != nil { + if err := v.WaitForKeymanagerInitialization(ctx); err != nil { // log.Fatal will prevent defer from being called v.Done() log.WithError(err).Fatal("Wallet is not ready") } - err = v.WaitForSync(ctx) - if isConnectionError(err) { - log.WithError(err).Warn("Could not determine if beacon chain started") - continue - } - if err != nil { + if err := v.WaitForSync(ctx); err != nil { + if isConnectionError(err) { + log.WithError(err).Warn("Could not determine if beacon chain started") + continue + } + log.WithError(err).Fatal("Could not determine if beacon node synced") } - err = v.WaitForActivation(ctx, nil /* accountsChangedChan */) - if err != nil { + + if err := v.WaitForActivation(ctx, nil /* accountsChangedChan */); err != nil { log.WithError(err).Fatal("Could not wait for validator activation") } - if err = v.StartEventStream(ctx); err != nil { - log.WithError(err).Fatal("Could not start API event stream") - } headSlot, err = 
v.CanonicalHeadSlot(ctx) if isConnectionError(err) { log.WithError(err).Warn("Could not get current canonical head slot") continue } + if err != nil { log.WithError(err).Fatal("Could not get current canonical head slot") } - err = v.CheckDoppelGanger(ctx) - if isConnectionError(err) { - log.WithError(err).Warn("Could not wait for checking doppelganger") - continue - } - if err != nil { + + if err := v.CheckDoppelGanger(ctx); err != nil { + if isConnectionError(err) { + log.WithError(err).Warn("Could not wait for checking doppelganger") + continue + } + log.WithError(err).Fatal("Could not succeed with doppelganger check") } break diff --git a/validator/client/service.go b/validator/client/service.go index 56de84631486..a499dfca0f2e 100644 --- a/validator/client/service.go +++ b/validator/client/service.go @@ -191,24 +191,16 @@ func (v *ValidatorService) Start() { return } - evHandler := beaconApi.NewEventHandler(http.DefaultClient, v.conn.GetBeaconApiUrl()) - evErrCh := make(chan error) - opts := []beaconApi.ValidatorClientOpt{beaconApi.WithEventHandler(evHandler), beaconApi.WithEventErrorChannel(evErrCh)} - validatorClient := validatorClientFactory.NewValidatorClient(v.conn, opts...) 
- go func() { - e := <-evErrCh - log.WithError(e).Error("Event streaming failed") - v.cancel() - }() - - beaconClient := beaconChainClientFactory.NewBeaconChainClient(v.conn) - prysmBeaconClient := beaconChainClientFactory.NewPrysmBeaconClient(v.conn) + restHandler := &beaconApi.BeaconApiJsonRestHandler{ + HttpClient: http.Client{Timeout: v.conn.GetBeaconApiTimeout()}, + Host: v.conn.GetBeaconApiUrl(), + } valStruct := &validator{ db: v.db, - validatorClient: validatorClient, - beaconClient: beaconClient, - node: nodeClientFactory.NewNodeClient(v.conn), + validatorClient: validatorClientFactory.NewValidatorClient(v.conn, restHandler), + beaconClient: beaconChainClientFactory.NewBeaconChainClient(v.conn, restHandler), + node: nodeClientFactory.NewNodeClient(v.conn, restHandler), graffiti: v.graffiti, logValidatorBalances: v.logValidatorBalances, emitAccountMetrics: v.emitAccountMetrics, @@ -232,7 +224,7 @@ func (v *ValidatorService) Start() { Web3SignerConfig: v.Web3SignerConfig, proposerSettings: v.proposerSettings, walletInitializedChannel: make(chan *wallet.Wallet, 1), - prysmBeaconClient: prysmBeaconClient, + prysmBeaconClient: beaconChainClientFactory.NewPrysmBeaconClient(v.conn, restHandler), validatorsRegBatchSize: v.validatorsRegBatchSize, } diff --git a/validator/client/testutil/mock_validator.go b/validator/client/testutil/mock_validator.go index 72bb9272aa58..b7956d04ea08 100644 --- a/validator/client/testutil/mock_validator.go +++ b/validator/client/testutil/mock_validator.go @@ -174,18 +174,18 @@ func (fv *FakeValidator) ProposeBlock(_ context.Context, slot primitives.Slot, _ } // SubmitAggregateAndProof for mocking. -func (*FakeValidator) SubmitAggregateAndProof(_ context.Context, _ primitives.Slot, _ [fieldparams.BLSPubkeyLength]byte) { +func (_ *FakeValidator) SubmitAggregateAndProof(_ context.Context, _ primitives.Slot, _ [fieldparams.BLSPubkeyLength]byte) { } // SubmitSyncCommitteeMessage for mocking. 
-func (*FakeValidator) SubmitSyncCommitteeMessage(_ context.Context, _ primitives.Slot, _ [fieldparams.BLSPubkeyLength]byte) { +func (_ *FakeValidator) SubmitSyncCommitteeMessage(_ context.Context, _ primitives.Slot, _ [fieldparams.BLSPubkeyLength]byte) { } // LogAttestationsSubmitted for mocking. -func (*FakeValidator) LogAttestationsSubmitted() {} +func (_ *FakeValidator) LogAttestationsSubmitted() {} // UpdateDomainDataCaches for mocking. -func (*FakeValidator) UpdateDomainDataCaches(context.Context, primitives.Slot) {} +func (_ *FakeValidator) UpdateDomainDataCaches(context.Context, primitives.Slot) {} // BalancesByPubkeys for mocking. func (fv *FakeValidator) BalancesByPubkeys(_ context.Context) map[[fieldparams.BLSPubkeyLength]byte]uint64 { @@ -213,7 +213,7 @@ func (fv *FakeValidator) Keymanager() (keymanager.IKeymanager, error) { } // CheckDoppelGanger for mocking -func (*FakeValidator) CheckDoppelGanger(_ context.Context) error { +func (_ *FakeValidator) CheckDoppelGanger(_ context.Context) error { return nil } @@ -237,7 +237,7 @@ func (fv *FakeValidator) HandleKeyReload(_ context.Context, newKeys [][fieldpara } // SubmitSignedContributionAndProof for mocking -func (*FakeValidator) SubmitSignedContributionAndProof(_ context.Context, _ primitives.Slot, _ [fieldparams.BLSPubkeyLength]byte) { +func (_ *FakeValidator) SubmitSignedContributionAndProof(_ context.Context, _ primitives.Slot, _ [fieldparams.BLSPubkeyLength]byte) { } // HasProposerSettings for mocking @@ -266,26 +266,22 @@ func (fv *FakeValidator) PushProposerSettings(ctx context.Context, km keymanager } // SetPubKeyToValidatorIndexMap for mocking -func (*FakeValidator) SetPubKeyToValidatorIndexMap(_ context.Context, _ keymanager.IKeymanager) error { +func (_ *FakeValidator) SetPubKeyToValidatorIndexMap(_ context.Context, _ keymanager.IKeymanager) error { return nil } // SignValidatorRegistrationRequest for mocking -func (*FakeValidator) SignValidatorRegistrationRequest(_ context.Context, _ 
iface.SigningFunc, _ *ethpb.ValidatorRegistrationV1) (*ethpb.SignedValidatorRegistrationV1, error) { +func (_ *FakeValidator) SignValidatorRegistrationRequest(_ context.Context, _ iface.SigningFunc, _ *ethpb.ValidatorRegistrationV1) (*ethpb.SignedValidatorRegistrationV1, error) { return nil, nil } // ProposerSettings for mocking -func (fv *FakeValidator) ProposerSettings() *validatorserviceconfig.ProposerSettings { - return fv.proposerSettings +func (f *FakeValidator) ProposerSettings() *validatorserviceconfig.ProposerSettings { + return f.proposerSettings } // SetProposerSettings for mocking -func (fv *FakeValidator) SetProposerSettings(_ context.Context, settings *validatorserviceconfig.ProposerSettings) error { - fv.proposerSettings = settings - return nil -} - -func (fv *FakeValidator) StartEventStream(_ context.Context) error { +func (f *FakeValidator) SetProposerSettings(_ context.Context, settings *validatorserviceconfig.ProposerSettings) error { + f.proposerSettings = settings return nil } diff --git a/validator/client/validator-client-factory/validator_client_factory.go b/validator/client/validator-client-factory/validator_client_factory.go index edbf51ee0983..a3e71b74d222 100644 --- a/validator/client/validator-client-factory/validator_client_factory.go +++ b/validator/client/validator-client-factory/validator_client_factory.go @@ -8,11 +8,12 @@ import ( validatorHelpers "github.com/prysmaticlabs/prysm/v4/validator/helpers" ) -func NewValidatorClient(validatorConn validatorHelpers.NodeConnection, opt ...beaconApi.ValidatorClientOpt) iface.ValidatorClient { - featureFlags := features.Get() - - if featureFlags.EnableBeaconRESTApi { - return beaconApi.NewBeaconApiValidatorClient(validatorConn.GetBeaconApiUrl(), validatorConn.GetBeaconApiTimeout(), opt...) 
+func NewValidatorClient( + validatorConn validatorHelpers.NodeConnection, + jsonRestHandler beaconApi.JsonRestHandler, +) iface.ValidatorClient { + if features.Get().EnableBeaconRESTApi { + return beaconApi.NewBeaconApiValidatorClient(jsonRestHandler) } else { return grpcApi.NewGrpcValidatorClient(validatorConn.GetGrpcClientConn()) } diff --git a/validator/client/validator.go b/validator/client/validator.go index cd8f9e5fe540..bcf28905a12a 100644 --- a/validator/client/validator.go +++ b/validator/client/validator.go @@ -22,6 +22,7 @@ import ( "github.com/pkg/errors" "github.com/prysmaticlabs/prysm/v4/async/event" "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/altair" + "github.com/prysmaticlabs/prysm/v4/cmd" "github.com/prysmaticlabs/prysm/v4/config/features" fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams" "github.com/prysmaticlabs/prysm/v4/config/params" @@ -53,14 +54,13 @@ import ( // keyFetchPeriod is the frequency that we try to refetch validating keys // in case no keys were fetched previously. var ( - keyRefetchPeriod = 30 * time.Second ErrBuilderValidatorRegistration = errors.New("Builder API validator registration unsuccessful") ErrValidatorsAllExited = errors.New("All validators are exited, no more work to perform...") ) var ( msgCouldNotFetchKeys = "could not fetch validating keys" - msgNoKeysFetched = "No validating keys fetched. Trying again" + msgNoKeysFetched = "No validating keys fetched. Waiting for keys..." ) type validator struct { @@ -238,50 +238,65 @@ func recheckValidatingKeysBucket(ctx context.Context, valDB vdb.Database, km key func (v *validator) WaitForChainStart(ctx context.Context) error { ctx, span := trace.StartSpan(ctx, "validator.WaitForChainStart") defer span.End() + // First, check if the beacon chain has started. 
log.Info("Syncing with beacon node to align on chain genesis info") + chainStartRes, err := v.validatorClient.WaitForChainStart(ctx, &emptypb.Empty{}) - if err != io.EOF { - if ctx.Err() == context.Canceled { - return errors.Wrap(ctx.Err(), "context has been canceled so shutting down the loop") - } - if err != nil { - return errors.Wrap( - iface.ErrConnectionIssue, - errors.Wrap(err, "could not receive ChainStart from stream").Error(), - ) - } - v.genesisTime = chainStartRes.GenesisTime - curGenValRoot, err := v.db.GenesisValidatorsRoot(ctx) - if err != nil { - return errors.Wrap(err, "could not get current genesis validators root") - } - if len(curGenValRoot) == 0 { - if err := v.db.SaveGenesisValidatorsRoot(ctx, chainStartRes.GenesisValidatorsRoot); err != nil { - return errors.Wrap(err, "could not save genesis validators root") - } - } else { - if !bytes.Equal(curGenValRoot, chainStartRes.GenesisValidatorsRoot) { - log.Errorf("The genesis validators root received from the beacon node does not match what is in " + - "your validator database. This could indicate that this is a database meant for another network. If " + - "you were previously running this validator database on another network, please run --clear-db to " + - "clear the database. 
If not, please file an issue at https://github.com/prysmaticlabs/prysm/issues") - return fmt.Errorf( - "genesis validators root from beacon node (%#x) does not match root saved in validator db (%#x)", - chainStartRes.GenesisValidatorsRoot, - curGenValRoot, - ) - } - } - } else { + if err == io.EOF { return iface.ErrConnectionIssue } + if ctx.Err() == context.Canceled { + return errors.Wrap(ctx.Err(), "context has been canceled so shutting down the loop") + } + + if err != nil { + return errors.Wrap( + iface.ErrConnectionIssue, + errors.Wrap(err, "could not receive ChainStart from stream").Error(), + ) + } + + v.genesisTime = chainStartRes.GenesisTime + + curGenValRoot, err := v.db.GenesisValidatorsRoot(ctx) + if err != nil { + return errors.Wrap(err, "could not get current genesis validators root") + } + + if len(curGenValRoot) == 0 { + if err := v.db.SaveGenesisValidatorsRoot(ctx, chainStartRes.GenesisValidatorsRoot); err != nil { + return errors.Wrap(err, "could not save genesis validators root") + } + + v.setTicker() + return nil + } + + if !bytes.Equal(curGenValRoot, chainStartRes.GenesisValidatorsRoot) { + log.Errorf(`The genesis validators root received from the beacon node does not match what is in + your validator database. This could indicate that this is a database meant for another network. If + you were previously running this validator database on another network, please run --%s to + clear the database. If not, please file an issue at https://github.com/prysmaticlabs/prysm/issues`, + cmd.ClearDB.Name, + ) + return fmt.Errorf( + "genesis validators root from beacon node (%#x) does not match root saved in validator db (%#x)", + chainStartRes.GenesisValidatorsRoot, + curGenValRoot, + ) + } + + v.setTicker() + return nil +} + +func (v *validator) setTicker() { // Once the ChainStart log is received, we update the genesis time of the validator client // and begin a slot ticker used to track the current slot the beacon node is in. 
v.ticker = slots.NewSlotTicker(time.Unix(int64(v.genesisTime), 0), params.BeaconConfig().SecondsPerSlot) log.WithField("genesisTime", time.Unix(int64(v.genesisTime), 0)).Info("Beacon chain started") - return nil } // WaitForSync checks whether the beacon node has sync to the latest head. @@ -333,7 +348,7 @@ func (v *validator) ReceiveSlots(ctx context.Context, connectionErrorChannel cha } res, err := stream.Recv() if err != nil { - log.WithError(err).Error("Could not receive slots from beacon node: " + iface.ErrConnectionIssue.Error()) + log.WithError(err).Error("Could not receive slots from beacon node, " + iface.ErrConnectionIssue.Error()) connectionErrorChannel <- errors.Wrap(iface.ErrConnectionIssue, err.Error()) return } @@ -387,6 +402,10 @@ func (v *validator) checkAndLogValidatorStatus(statuses []*validatorStatus, acti } case ethpb.ValidatorStatus_ACTIVE, ethpb.ValidatorStatus_EXITING: validatorActivated = true + log.WithFields(logrus.Fields{ + "publicKey": fmt.Sprintf("%#x", bytesutil.Trunc(status.publicKey)), + "index": status.index, + }).Info("Validator activated") case ethpb.ValidatorStatus_EXITED: log.Info("Validator exited") case ethpb.ValidatorStatus_INVALID: @@ -400,18 +419,6 @@ func (v *validator) checkAndLogValidatorStatus(statuses []*validatorStatus, acti return validatorActivated } -func logActiveValidatorStatus(statuses []*validatorStatus) { - for _, s := range statuses { - if s.status.Status != ethpb.ValidatorStatus_ACTIVE { - continue - } - log.WithFields(logrus.Fields{ - "publicKey": fmt.Sprintf("%#x", bytesutil.Trunc(s.publicKey)), - "index": s.index, - }).Info("Validator activated") - } -} - // CanonicalHeadSlot returns the slot of canonical block currently found in the // beacon chain via RPC. 
func (v *validator) CanonicalHeadSlot(ctx context.Context) (primitives.Slot, error) { @@ -582,6 +589,11 @@ func (v *validator) UpdateDuties(ctx context.Context, slot primitives.Slot) erro return err } + v.dutiesLock.Lock() + v.duties = resp + v.logDuties(slot, v.duties.CurrentEpochDuties, v.duties.NextEpochDuties) + v.dutiesLock.Unlock() + allExitedCounter := 0 for i := range resp.CurrentEpochDuties { if resp.CurrentEpochDuties[i].Status == ethpb.ValidatorStatus_EXITED { @@ -592,11 +604,6 @@ func (v *validator) UpdateDuties(ctx context.Context, slot primitives.Slot) erro return ErrValidatorsAllExited } - v.dutiesLock.Lock() - v.duties = resp - v.logDuties(slot, v.duties.CurrentEpochDuties, v.duties.NextEpochDuties) - v.dutiesLock.Unlock() - // Non-blocking call for beacon node to start subscriptions for aggregators. // Make sure to copy metadata into a new context md, exists := metadata.FromOutgoingContext(ctx) @@ -1035,10 +1042,6 @@ func (v *validator) PushProposerSettings(ctx context.Context, km keymanager.IKey return nil } -func (v *validator) StartEventStream(ctx context.Context) error { - return v.validatorClient.StartEventStream(ctx) -} - func (v *validator) filterAndCacheActiveKeys(ctx context.Context, pubkeys [][fieldparams.BLSPubkeyLength]byte, slot primitives.Slot) ([][fieldparams.BLSPubkeyLength]byte, error) { filteredKeys := make([][fieldparams.BLSPubkeyLength]byte, 0) statusRequestKeys := make([][]byte, 0) diff --git a/validator/client/validator_test.go b/validator/client/validator_test.go index 8ea977848006..55aea89d7c3c 100644 --- a/validator/client/validator_test.go +++ b/validator/client/validator_test.go @@ -388,43 +388,6 @@ func TestWaitMultipleActivation_LogsActivationEpochOK(t *testing.T) { require.LogsContain(t, hook, "Validator activated") } -func TestWaitActivation_NotAllValidatorsActivatedOK(t *testing.T) { - ctrl := gomock.NewController(t) - defer ctrl.Finish() - validatorClient := validatormock.NewMockValidatorClient(ctrl) - beaconClient 
:= validatormock.NewMockBeaconChainClient(ctrl) - prysmBeaconClient := validatormock.NewMockPrysmBeaconChainClient(ctrl) - - kp := randKeypair(t) - v := validator{ - validatorClient: validatorClient, - keyManager: newMockKeymanager(t, kp), - beaconClient: beaconClient, - prysmBeaconClient: prysmBeaconClient, - } - resp := generateMockStatusResponse([][]byte{kp.pub[:]}) - resp.Statuses[0].Status.Status = ethpb.ValidatorStatus_ACTIVE - clientStream := mock2.NewMockBeaconNodeValidator_WaitForActivationClient(ctrl) - validatorClient.EXPECT().WaitForActivation( - gomock.Any(), - gomock.Any(), - ).Return(clientStream, nil) - prysmBeaconClient.EXPECT().GetValidatorCount( - gomock.Any(), - "head", - []validatorType.Status{validatorType.Active}, - ).Return([]iface.ValidatorCount{}, nil).Times(2) - clientStream.EXPECT().Recv().Return( - ðpb.ValidatorActivationResponse{}, - nil, - ) - clientStream.EXPECT().Recv().Return( - resp, - nil, - ) - assert.NoError(t, v.WaitForActivation(context.Background(), nil), "Could not wait for activation") -} - func TestWaitSync_ContextCanceled(t *testing.T) { ctrl := gomock.NewController(t) defer ctrl.Finish() diff --git a/validator/client/wait_for_activation.go b/validator/client/wait_for_activation.go index c2c3d10fab34..61bbd6810c1b 100644 --- a/validator/client/wait_for_activation.go +++ b/validator/client/wait_for_activation.go @@ -5,17 +5,14 @@ import ( "io" "time" - validator2 "github.com/prysmaticlabs/prysm/v4/consensus-types/validator" - "github.com/prysmaticlabs/prysm/v4/validator/client/iface" - "github.com/pkg/errors" fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams" - "github.com/prysmaticlabs/prysm/v4/config/params" + validator2 "github.com/prysmaticlabs/prysm/v4/consensus-types/validator" "github.com/prysmaticlabs/prysm/v4/encoding/bytesutil" "github.com/prysmaticlabs/prysm/v4/math" "github.com/prysmaticlabs/prysm/v4/monitoring/tracing" ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1" - 
"github.com/prysmaticlabs/prysm/v4/time/slots" + "github.com/prysmaticlabs/prysm/v4/validator/client/iface" "go.opencensus.io/trace" ) @@ -33,18 +30,18 @@ func (v *validator) WaitForActivation(ctx context.Context, accountsChangedChan c if err != nil { return err } + // subscribe to the channel if it's the first time sub := km.SubscribeAccountChanges(accountsChangedChan) defer func() { sub.Unsubscribe() close(accountsChangedChan) }() } - return v.internalWaitForActivation(ctx, accountsChangedChan) } // internalWaitForActivation performs the following: -// 1) While the key manager is empty, poll the key manager until some validator keys exist. +// 1) While the key manager is empty, subscribe to keymanager changes until some validator keys exist. // 2) Open a server side stream for activation events against the given keys. // 3) In another go routine, the key manager is monitored for updates and emits an update event on // the accountsChangedChan. When an event signal is received, restart the internalWaitForActivation routine. 
@@ -53,39 +50,26 @@ func (v *validator) WaitForActivation(ctx context.Context, accountsChangedChan c func (v *validator) internalWaitForActivation(ctx context.Context, accountsChangedChan <-chan [][fieldparams.BLSPubkeyLength]byte) error { ctx, span := trace.StartSpan(ctx, "validator.WaitForActivation") defer span.End() - validatingKeys, err := v.keyManager.FetchValidatingPublicKeys(ctx) if err != nil { - return errors.Wrap(err, "could not fetch validating keys") + return errors.Wrap(err, msgCouldNotFetchKeys) } + // if there are no validating keys, wait for some if len(validatingKeys) == 0 { log.Warn(msgNoKeysFetched) - - ticker := time.NewTicker(keyRefetchPeriod) - defer ticker.Stop() - for { - select { - case <-ticker.C: - validatingKeys, err = v.keyManager.FetchValidatingPublicKeys(ctx) - if err != nil { - return errors.Wrap(err, msgCouldNotFetchKeys) - } - if len(validatingKeys) == 0 { - log.Warn(msgNoKeysFetched) - continue - } - case <-ctx.Done(): - log.Debug("Context closed, exiting fetching validating keys") - return ctx.Err() - } - break + select { + case <-ctx.Done(): + log.Debug("Context closed, exiting fetching validating keys") + return ctx.Err() + case <-accountsChangedChan: + // if the accounts changed try it again + return v.internalWaitForActivation(ctx, accountsChangedChan) } } - req := ðpb.ValidatorActivationRequest{ + stream, err := v.validatorClient.WaitForActivation(ctx, ðpb.ValidatorActivationRequest{ PublicKeys: bytesutil.FromBytes48Array(validatingKeys), - } - stream, err := v.validatorClient.WaitForActivation(ctx, req) + }) if err != nil { tracing.AnnotateError(span, err) attempts := streamAttempts(ctx) @@ -96,22 +80,17 @@ func (v *validator) internalWaitForActivation(ctx context.Context, accountsChang return v.internalWaitForActivation(incrementRetries(ctx), accountsChangedChan) } - if err = v.handleAccountsChanged(ctx, accountsChangedChan, &stream, span); err != nil { - return err - } - - v.ticker = 
slots.NewSlotTicker(time.Unix(int64(v.genesisTime), 0), params.BeaconConfig().SecondsPerSlot) - return nil -} - -func (v *validator) handleAccountsChanged(ctx context.Context, accountsChangedChan <-chan [][fieldparams.BLSPubkeyLength]byte, stream *ethpb.BeaconNodeValidator_WaitForActivationClient, span *trace.Span) error { - for { + someAreActive := false + for !someAreActive { select { + case <-ctx.Done(): + log.Debug("Context closed, exiting fetching validating keys") + return ctx.Err() case <-accountsChangedChan: // Accounts (keys) changed, restart the process. return v.internalWaitForActivation(ctx, accountsChangedChan) default: - res, err := (*stream).Recv() + res, err := (stream).Recv() // retrieve from stream one loop at a time // If the stream is closed, we stop the loop. if errors.Is(err, io.EOF) { break @@ -150,15 +129,10 @@ func (v *validator) handleAccountsChanged(ctx context.Context, accountsChangedCh valCount = int64(valCounts[0].Count) } - valActivated := v.checkAndLogValidatorStatus(statuses, valCount) - if valActivated { - logActiveValidatorStatus(statuses) - } else { - continue - } + someAreActive = v.checkAndLogValidatorStatus(statuses, valCount) } - break } + return nil } diff --git a/validator/client/wait_for_activation_test.go b/validator/client/wait_for_activation_test.go index e28aa2451481..25c8ca39096f 100644 --- a/validator/client/wait_for_activation_test.go +++ b/validator/client/wait_for_activation_test.go @@ -7,6 +7,7 @@ import ( "testing" "time" + "github.com/prysmaticlabs/prysm/v4/config/params" validatorType "github.com/prysmaticlabs/prysm/v4/consensus-types/validator" "github.com/prysmaticlabs/prysm/v4/validator/client/iface" @@ -39,7 +40,7 @@ func TestWaitActivation_ContextCanceled(t *testing.T) { beaconClient: beaconClient, } clientStream := mock.NewMockBeaconNodeValidator_WaitForActivationClient(ctrl) - + ctx, cancel := context.WithCancel(context.Background()) validatorClient.EXPECT().WaitForActivation( gomock.Any(), 
ðpb.ValidatorActivationRequest{ @@ -49,9 +50,7 @@ func TestWaitActivation_ContextCanceled(t *testing.T) { clientStream.EXPECT().Recv().Return( ðpb.ValidatorActivationResponse{}, nil, - ) - ctx, cancel := context.WithCancel(context.Background()) - cancel() + ).Do(func() { cancel() }) assert.ErrorContains(t, cancelledCtx, v.WaitForActivation(ctx, nil)) } @@ -193,12 +192,11 @@ func TestWaitForActivation_Exiting(t *testing.T) { } func TestWaitForActivation_RefetchKeys(t *testing.T) { - originalPeriod := keyRefetchPeriod - defer func() { - keyRefetchPeriod = originalPeriod - }() - keyRefetchPeriod = 1 * time.Second - + params.SetupTestConfigCleanup(t) + cfg := params.MainnetConfig().Copy() + cfg.ConfigName = "test" + cfg.SecondsPerSlot = 1 + params.OverrideBeaconConfig(cfg) hook := logTest.NewGlobal() ctrl := gomock.NewController(t) defer ctrl.Finish() @@ -207,8 +205,7 @@ func TestWaitForActivation_RefetchKeys(t *testing.T) { prysmBeaconClient := validatormock.NewMockPrysmBeaconChainClient(ctrl) kp := randKeypair(t) - km := newMockKeymanager(t, kp) - km.fetchNoKeys = true + km := newMockKeymanager(t) v := validator{ validatorClient: validatorClient, @@ -233,7 +230,19 @@ func TestWaitForActivation_RefetchKeys(t *testing.T) { clientStream.EXPECT().Recv().Return( resp, nil) - assert.NoError(t, v.internalWaitForActivation(context.Background(), make(chan [][fieldparams.BLSPubkeyLength]byte)), "Could not wait for activation") + accountChan := make(chan [][fieldparams.BLSPubkeyLength]byte) + sub := km.SubscribeAccountChanges(accountChan) + defer func() { + sub.Unsubscribe() + close(accountChan) + }() + // update the accounts after a delay + go func() { + time.Sleep(2 * time.Second) + require.NoError(t, km.add(kp)) + km.SimulateAccountChanges([][48]byte{kp.pub}) + }() + assert.NoError(t, v.internalWaitForActivation(context.Background(), accountChan), "Could not wait for activation") assert.LogsContain(t, hook, msgNoKeysFetched) assert.LogsContain(t, hook, "Validator activated") 
} @@ -265,7 +274,11 @@ func TestWaitForActivation_AccountsChanged(t *testing.T) { ðpb.ValidatorActivationRequest{ PublicKeys: [][]byte{inactive.pub[:]}, }, - ).Return(inactiveClientStream, nil) + ).DoAndReturn(func(ctx context.Context, in *ethpb.ValidatorActivationRequest) (*mock.MockBeaconNodeValidator_WaitForActivationClient, error) { + //delay a bit so that other key can be added + time.Sleep(time.Second * 2) + return inactiveClientStream, nil + }) prysmBeaconClient.EXPECT().GetValidatorCount( gomock.Any(), "head", @@ -353,7 +366,11 @@ func TestWaitForActivation_AccountsChanged(t *testing.T) { ðpb.ValidatorActivationRequest{ PublicKeys: [][]byte{inactivePubKey[:]}, }, - ).Return(inactiveClientStream, nil) + ).DoAndReturn(func(ctx context.Context, in *ethpb.ValidatorActivationRequest) (*mock.MockBeaconNodeValidator_WaitForActivationClient, error) { + //delay a bit so that other key can be added + time.Sleep(time.Second * 2) + return inactiveClientStream, nil + }) prysmBeaconClient.EXPECT().GetValidatorCount( gomock.Any(), "head", @@ -393,3 +410,40 @@ func TestWaitForActivation_AccountsChanged(t *testing.T) { assert.LogsContain(t, hook, "Validator activated") }) } + +func TestWaitActivation_NotAllValidatorsActivatedOK(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + validatorClient := validatormock.NewMockValidatorClient(ctrl) + beaconClient := validatormock.NewMockBeaconChainClient(ctrl) + prysmBeaconClient := validatormock.NewMockPrysmBeaconChainClient(ctrl) + + kp := randKeypair(t) + v := validator{ + validatorClient: validatorClient, + keyManager: newMockKeymanager(t, kp), + beaconClient: beaconClient, + prysmBeaconClient: prysmBeaconClient, + } + resp := generateMockStatusResponse([][]byte{kp.pub[:]}) + resp.Statuses[0].Status.Status = ethpb.ValidatorStatus_ACTIVE + clientStream := mock.NewMockBeaconNodeValidator_WaitForActivationClient(ctrl) + validatorClient.EXPECT().WaitForActivation( + gomock.Any(), + gomock.Any(), + 
).Return(clientStream, nil) + prysmBeaconClient.EXPECT().GetValidatorCount( + gomock.Any(), + "head", + []validatorType.Status{validatorType.Active}, + ).Return([]iface.ValidatorCount{}, nil).Times(2) + clientStream.EXPECT().Recv().Return( + ðpb.ValidatorActivationResponse{}, + nil, + ) + clientStream.EXPECT().Recv().Return( + resp, + nil, + ) + assert.NoError(t, v.WaitForActivation(context.Background(), nil), "Could not wait for activation") +} diff --git a/validator/keymanager/local/keymanager.go b/validator/keymanager/local/keymanager.go index 3b4a2c933f98..4cae2ed8f299 100644 --- a/validator/keymanager/local/keymanager.go +++ b/validator/keymanager/local/keymanager.go @@ -4,6 +4,7 @@ import ( "context" "encoding/json" "fmt" + "path/filepath" "strings" "sync" @@ -282,18 +283,29 @@ func (km *Keymanager) SaveStoreAndReInitialize(ctx context.Context, store *accou if err != nil { return err } - if err := km.wallet.WriteFileAtPath(ctx, AccountsPath, AccountsKeystoreFileName, encodedAccounts); err != nil { + + existedPreviously, err := km.wallet.WriteFileAtPath(ctx, AccountsPath, AccountsKeystoreFileName, encodedAccounts) + if err != nil { return err } - // Reinitialize account store and cache - // This will update the in-memory information instead of reading from the file itself for safety concerns - km.accountsStore = store - err = km.initializeKeysCachesFromKeystore() - if err != nil { - return errors.Wrap(err, "failed to initialize keys caches") + if existedPreviously { + // Reinitialize account store and cache + // This will update the in-memory information instead of reading from the file itself for safety concerns + km.accountsStore = store + err = km.initializeKeysCachesFromKeystore() + if err != nil { + return errors.Wrap(err, "failed to initialize keys caches") + } + + return nil } - return err + + // manually reload the account from the keystore the first time + km.reloadAccountsFromKeystoreFile(filepath.Join(km.wallet.AccountsDir(), AccountsPath, 
AccountsKeystoreFileName)) + // listen to account changes of the new file + go km.listenForAccountChanges(ctx) + return nil } // CreateAccountsKeystoreRepresentation is a pure function that takes an accountStore and wallet password and returns the encrypted formatted json version for local writing. diff --git a/validator/keymanager/local/refresh.go b/validator/keymanager/local/refresh.go index 472d42831dd8..eeaa9e766e4a 100644 --- a/validator/keymanager/local/refresh.go +++ b/validator/keymanager/local/refresh.go @@ -26,6 +26,7 @@ func (km *Keymanager) listenForAccountChanges(ctx context.Context) { debounceFileChangesInterval := features.Get().KeystoreImportDebounceInterval accountsFilePath := filepath.Join(km.wallet.AccountsDir(), AccountsPath, AccountsKeystoreFileName) if !file.Exists(accountsFilePath) { + log.Warnf("Starting without accounts located in wallet at %s", accountsFilePath) return } watcher, err := fsnotify.NewWatcher() @@ -56,27 +57,7 @@ func (km *Keymanager) listenForAccountChanges(ctx context.Context) { log.Errorf("Type %T is not a valid file system event", event) return } - fileBytes, err := os.ReadFile(ev.Name) - if err != nil { - log.WithError(err).Errorf("Could not read file at path: %s", ev.Name) - return - } - if fileBytes == nil { - log.WithError(err).Errorf("Loaded in an empty file: %s", ev.Name) - return - } - accountsKeystore := &AccountsKeystoreRepresentation{} - if err := json.Unmarshal(fileBytes, accountsKeystore); err != nil { - log.WithError( - err, - ).Errorf("Could not read valid, EIP-2335 keystore json file at path: %s", ev.Name) - return - } - if err := km.reloadAccountsFromKeystore(accountsKeystore); err != nil { - log.WithError( - err, - ).Error("Could not replace the accounts store from keystore file") - } + km.reloadAccountsFromKeystoreFile(ev.Name) }) for { select { @@ -92,6 +73,34 @@ func (km *Keymanager) listenForAccountChanges(ctx context.Context) { } } +func (km *Keymanager) 
reloadAccountsFromKeystoreFile(accountsFilePath string) { + if km.wallet == nil { + log.Error("Could not reload accounts because wallet was undefined") + return + } + fileBytes, err := os.ReadFile(filepath.Clean(accountsFilePath)) + if err != nil { + log.WithError(err).Errorf("Could not read file at path: %s", accountsFilePath) + return + } + if fileBytes == nil { + log.WithError(err).Errorf("Loaded in an empty file: %s", accountsFilePath) + return + } + accountsKeystore := &AccountsKeystoreRepresentation{} + if err := json.Unmarshal(fileBytes, accountsKeystore); err != nil { + log.WithError( + err, + ).Errorf("Could not read valid, EIP-2335 keystore json file at path: %s", accountsFilePath) + return + } + if err := km.reloadAccountsFromKeystore(accountsKeystore); err != nil { + log.WithError( + err, + ).Error("Could not replace the accounts store from keystore file") + } +} + // Replaces the accounts store struct in the local keymanager with // the contents of a keystore file by decrypting it with the accounts password. 
func (km *Keymanager) reloadAccountsFromKeystore(keystore *AccountsKeystoreRepresentation) error { @@ -107,6 +116,7 @@ func (km *Keymanager) reloadAccountsFromKeystore(keystore *AccountsKeystoreRepre if len(newAccountsStore.PublicKeys) != len(newAccountsStore.PrivateKeys) { return errors.New("number of public and private keys in keystore do not match") } + pubKeys := make([][fieldparams.BLSPubkeyLength]byte, len(newAccountsStore.PublicKeys)) for i := 0; i < len(newAccountsStore.PrivateKeys); i++ { privKey, err := bls.SecretKeyFromBytes(newAccountsStore.PrivateKeys[i]) diff --git a/validator/node/BUILD.bazel b/validator/node/BUILD.bazel index 5c4bd43994b5..90baedd49f56 100644 --- a/validator/node/BUILD.bazel +++ b/validator/node/BUILD.bazel @@ -7,16 +7,20 @@ go_test( data = glob(["testdata/**"]), embed = [":go_default_library"], deps = [ + "//cmd:go_default_library", "//cmd/validator/flags:go_default_library", "//config/fieldparams:go_default_library", "//config/params:go_default_library", "//config/validator/service:go_default_library", "//consensus-types/validator:go_default_library", "//encoding/bytesutil:go_default_library", + "//io/file:go_default_library", "//testing/assert:go_default_library", "//testing/require:go_default_library", "//validator/accounts:go_default_library", + "//validator/accounts/wallet:go_default_library", "//validator/db/iface:go_default_library", + "//validator/db/kv:go_default_library", "//validator/db/testing:go_default_library", "//validator/keymanager:go_default_library", "//validator/keymanager/remote-web3signer:go_default_library", @@ -39,7 +43,6 @@ go_library( "//validator:__subpackages__", ], deps = [ - "//api:go_default_library", "//api/gateway:go_default_library", "//api/server:go_default_library", "//async/event:go_default_library", diff --git a/validator/node/node.go b/validator/node/node.go index a12c9ae44a02..111e68dcdddc 100644 --- a/validator/node/node.go +++ b/validator/node/node.go @@ -26,7 +26,6 @@ import ( gwruntime 
"github.com/grpc-ecosystem/grpc-gateway/v2/runtime" "github.com/pkg/errors" fastssz "github.com/prysmaticlabs/fastssz" - "github.com/prysmaticlabs/prysm/v4/api" "github.com/prysmaticlabs/prysm/v4/api/gateway" "github.com/prysmaticlabs/prysm/v4/api/server" "github.com/prysmaticlabs/prysm/v4/async/event" @@ -219,7 +218,7 @@ func (c *ValidatorClient) getLegacyDatabaseLocation( dataFile string, walletDir string, ) (string, string) { - if isInteropNumValidatorsSet || dataDir != cmd.DefaultDataDir() || file.Exists(dataFile) { + if isInteropNumValidatorsSet || dataDir != cmd.DefaultDataDir() || file.Exists(dataFile) || c.wallet == nil { return dataDir, dataFile } @@ -233,8 +232,15 @@ func (c *ValidatorClient) getLegacyDatabaseLocation( legacyDataFile := filepath.Join(legacyDataDir, kv.ProtectionDbFileName) if file.Exists(legacyDataFile) { - log.Warningf("Database not found in `--datadir` (%s) but found in `--wallet-dir` (%s).", dataFile, legacyDataFile) - log.Warningf("Please move the database from (%s) to (%s).", legacyDataFile, dataFile) + log.Infof(`Database not found in the --datadir directory (%s) + but found in the --wallet-dir directory (%s), + which was the legacy default. + The next time you run the validator client without a database, + it will be created into the --datadir directory (%s). 
+ To silence this message, you can move the database from (%s) + to (%s).`, + dataDir, legacyDataDir, dataDir, legacyDataFile, dataFile) + dataDir = legacyDataDir dataFile = legacyDataFile } @@ -424,16 +430,22 @@ func (c *ValidatorClient) registerPrometheusService(cliCtx *cli.Context) error { } func (c *ValidatorClient) registerValidatorService(cliCtx *cli.Context) error { - endpoint := c.cliCtx.String(flags.BeaconRPCProviderFlag.Name) - dataDir := c.cliCtx.String(cmd.DataDirFlag.Name) - logValidatorBalances := !c.cliCtx.Bool(flags.DisablePenaltyRewardLogFlag.Name) - emitAccountMetrics := !c.cliCtx.Bool(flags.DisableAccountMetricsFlag.Name) - cert := c.cliCtx.String(flags.CertFlag.Name) - graffiti := c.cliCtx.String(flags.GraffitiFlag.Name) - maxCallRecvMsgSize := c.cliCtx.Int(cmd.GrpcMaxCallRecvMsgSizeFlag.Name) - grpcRetries := c.cliCtx.Uint(flags.GrpcRetriesFlag.Name) - grpcRetryDelay := c.cliCtx.Duration(flags.GrpcRetryDelayFlag.Name) - var interopKeysConfig *local.InteropKeymanagerConfig + var ( + endpoint string = c.cliCtx.String(flags.BeaconRPCProviderFlag.Name) + dataDir string = c.cliCtx.String(cmd.DataDirFlag.Name) + logValidatorBalances bool = !c.cliCtx.Bool(flags.DisablePenaltyRewardLogFlag.Name) + emitAccountMetrics bool = !c.cliCtx.Bool(flags.DisableAccountMetricsFlag.Name) + cert string = c.cliCtx.String(flags.CertFlag.Name) + graffiti string = c.cliCtx.String(flags.GraffitiFlag.Name) + maxCallRecvMsgSize int = c.cliCtx.Int(cmd.GrpcMaxCallRecvMsgSizeFlag.Name) + grpcRetries uint = c.cliCtx.Uint(flags.GrpcRetriesFlag.Name) + grpcRetryDelay time.Duration = c.cliCtx.Duration(flags.GrpcRetryDelayFlag.Name) + + interopKeysConfig *local.InteropKeymanagerConfig + err error + ) + + // Configure interop. 
if c.cliCtx.IsSet(flags.InteropNumValidators.Name) { interopKeysConfig = &local.InteropKeymanagerConfig{ Offset: cliCtx.Uint64(flags.InteropStartIndex.Name), @@ -441,27 +453,28 @@ func (c *ValidatorClient) registerValidatorService(cliCtx *cli.Context) error { } } - gStruct := &g.Graffiti{} - var err error + // Configure graffiti. + graffitiStruct := &g.Graffiti{} if c.cliCtx.IsSet(flags.GraffitiFileFlag.Name) { - n := c.cliCtx.String(flags.GraffitiFileFlag.Name) - gStruct, err = g.ParseGraffitiFile(n) + graffitiFilePath := c.cliCtx.String(flags.GraffitiFileFlag.Name) + + graffitiStruct, err = g.ParseGraffitiFile(graffitiFilePath) if err != nil { log.WithError(err).Warn("Could not parse graffiti file") } } - wsc, err := Web3SignerConfig(c.cliCtx) + web3signerConfig, err := Web3SignerConfig(c.cliCtx) if err != nil { return err } - bpc, err := proposerSettings(c.cliCtx, c.db) + proposerSettings, err := proposerSettings(c.cliCtx, c.db) if err != nil { return err } - v, err := client.NewValidatorService(c.cliCtx.Context, &client.Config{ + validatorService, err := client.NewValidatorService(c.cliCtx.Context, &client.Config{ Endpoint: endpoint, DataDir: dataDir, LogValidatorBalances: logValidatorBalances, @@ -477,9 +490,9 @@ func (c *ValidatorClient) registerValidatorService(cliCtx *cli.Context) error { InteropKeysConfig: interopKeysConfig, Wallet: c.wallet, WalletInitializedFeed: c.walletInitialized, - GraffitiStruct: gStruct, - Web3SignerConfig: wsc, - ProposerSettings: bpc, + GraffitiStruct: graffitiStruct, + Web3SignerConfig: web3signerConfig, + ProposerSettings: proposerSettings, BeaconApiTimeout: time.Second * 30, BeaconApiEndpoint: c.cliCtx.String(flags.BeaconRESTApiProviderFlag.Name), ValidatorsRegBatchSize: c.cliCtx.Int(flags.ValidatorsRegistrationBatchSizeFlag.Name), @@ -488,7 +501,7 @@ func (c *ValidatorClient) registerValidatorService(cliCtx *cli.Context) error { return errors.Wrap(err, "could not initialize validator service") } - return 
c.services.RegisterService(v) + return c.services.RegisterService(validatorService) } func Web3SignerConfig(cliCtx *cli.Context) (*remoteweb3signer.SetupConfig, error) { @@ -852,7 +865,7 @@ func (c *ValidatorClient) registerRPCGatewayService(router *mux.Router) error { }, }), gwruntime.WithMarshalerOption( - api.EventStreamMediaType, &gwruntime.EventSourceJSONPb{}, // TODO: remove this + "text/event-stream", &gwruntime.EventSourceJSONPb{}, // TODO: remove this ), gwruntime.WithForwardResponseOption(gateway.HttpResponseModifier), ) diff --git a/validator/node/node_test.go b/validator/node/node_test.go index 750313d4a159..067ebd97109b 100644 --- a/validator/node/node_test.go +++ b/validator/node/node_test.go @@ -7,21 +7,26 @@ import ( "net/http" "net/http/httptest" "os" + "path" "path/filepath" "testing" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/prysmaticlabs/prysm/v4/cmd" "github.com/prysmaticlabs/prysm/v4/cmd/validator/flags" fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams" "github.com/prysmaticlabs/prysm/v4/config/params" validatorserviceconfig "github.com/prysmaticlabs/prysm/v4/config/validator/service" "github.com/prysmaticlabs/prysm/v4/consensus-types/validator" "github.com/prysmaticlabs/prysm/v4/encoding/bytesutil" + "github.com/prysmaticlabs/prysm/v4/io/file" "github.com/prysmaticlabs/prysm/v4/testing/assert" "github.com/prysmaticlabs/prysm/v4/testing/require" "github.com/prysmaticlabs/prysm/v4/validator/accounts" + "github.com/prysmaticlabs/prysm/v4/validator/accounts/wallet" "github.com/prysmaticlabs/prysm/v4/validator/db/iface" + "github.com/prysmaticlabs/prysm/v4/validator/db/kv" dbTest "github.com/prysmaticlabs/prysm/v4/validator/db/testing" "github.com/prysmaticlabs/prysm/v4/validator/keymanager" remoteweb3signer "github.com/prysmaticlabs/prysm/v4/validator/keymanager/remote-web3signer" @@ -67,6 +72,138 @@ func TestNode_Builds(t *testing.T) { require.NoError(t, err) } +func 
TestGetLegacyDatabaseLocation(t *testing.T) { + dataDir := t.TempDir() + dataFile := path.Join(dataDir, "dataFile") + nonExistingDataFile := path.Join(dataDir, "nonExistingDataFile") + _, err := os.Create(dataFile) + require.NoError(t, err, "Failed to create data file") + + walletDir := t.TempDir() + derivedDir := path.Join(walletDir, "derived") + err = file.MkdirAll(derivedDir) + require.NoError(t, err, "Failed to create derived dir") + + derivedDbFile := path.Join(derivedDir, kv.ProtectionDbFileName) + _, err = os.Create(derivedDbFile) + require.NoError(t, err, "Failed to create derived db file") + + dbFile := path.Join(walletDir, kv.ProtectionDbFileName) + _, err = os.Create(dbFile) + require.NoError(t, err, "Failed to create db file") + + nonExistingWalletDir := t.TempDir() + + testCases := []struct { + name string + isInteropNumValidatorsSet bool + isWeb3SignerURLFlagSet bool + dataDir string + dataFile string + walletDir string + validatorClient *ValidatorClient + wallet *wallet.Wallet + expectedDataDir string + expectedDataFile string + }{ + { + name: "interop num validators set", + isInteropNumValidatorsSet: true, + dataDir: dataDir, + dataFile: dataFile, + expectedDataDir: dataDir, + expectedDataFile: dataFile, + }, + { + name: "dataDir differs from default", + dataDir: dataDir, + dataFile: dataFile, + expectedDataDir: dataDir, + expectedDataFile: dataFile, + }, + { + name: "dataFile exists", + dataDir: cmd.DefaultDataDir(), + dataFile: dataFile, + expectedDataDir: cmd.DefaultDataDir(), + expectedDataFile: dataFile, + }, + { + name: "wallet is nil", + dataDir: cmd.DefaultDataDir(), + dataFile: nonExistingDataFile, + expectedDataDir: cmd.DefaultDataDir(), + expectedDataFile: nonExistingDataFile, + }, + { + name: "web3signer url is not set and legacy data file does not exist", + dataDir: cmd.DefaultDataDir(), + dataFile: nonExistingDataFile, + wallet: wallet.New(&wallet.Config{ + WalletDir: nonExistingWalletDir, + KeymanagerKind: keymanager.Derived, + }), + 
expectedDataDir: cmd.DefaultDataDir(), + expectedDataFile: nonExistingDataFile, + }, + { + name: "web3signer url is not set and legacy data file does exist", + dataDir: cmd.DefaultDataDir(), + dataFile: nonExistingDataFile, + wallet: wallet.New(&wallet.Config{ + WalletDir: walletDir, + KeymanagerKind: keymanager.Derived, + }), + expectedDataDir: path.Join(walletDir, "derived"), + expectedDataFile: path.Join(walletDir, "derived", kv.ProtectionDbFileName), + }, + { + name: "web3signer url is set and legacy data file does not exist", + isWeb3SignerURLFlagSet: true, + dataDir: cmd.DefaultDataDir(), + dataFile: nonExistingDataFile, + walletDir: nonExistingWalletDir, + wallet: wallet.New(&wallet.Config{ + WalletDir: walletDir, + KeymanagerKind: keymanager.Derived, + }), + expectedDataDir: cmd.DefaultDataDir(), + expectedDataFile: nonExistingDataFile, + }, + { + name: "web3signer url is set and legacy data file does exist", + isWeb3SignerURLFlagSet: true, + dataDir: cmd.DefaultDataDir(), + dataFile: nonExistingDataFile, + walletDir: walletDir, + wallet: wallet.New(&wallet.Config{ + WalletDir: walletDir, + KeymanagerKind: keymanager.Derived, + }), + expectedDataDir: walletDir, + expectedDataFile: path.Join(walletDir, kv.ProtectionDbFileName), + }, + } + + for _, tt := range testCases { + t.Run(tt.name, func(t *testing.T) { + validatorClient := &ValidatorClient{wallet: tt.wallet} + actualDataDir, actualDataFile := validatorClient.getLegacyDatabaseLocation( + tt.isInteropNumValidatorsSet, + tt.isWeb3SignerURLFlagSet, + tt.dataDir, + tt.dataFile, + tt.walletDir, + ) + + assert.Equal(t, tt.expectedDataDir, actualDataDir, "data dir should be equal") + assert.Equal(t, tt.expectedDataFile, actualDataFile, "data file should be equal") + }) + + } + +} + // TestClearDB tests clearing the database func TestClearDB(t *testing.T) { hook := logtest.NewGlobal() diff --git a/validator/rpc/BUILD.bazel b/validator/rpc/BUILD.bazel index adaa8eff80e1..45b2f120c766 100644 --- 
a/validator/rpc/BUILD.bazel +++ b/validator/rpc/BUILD.bazel @@ -49,6 +49,7 @@ go_library( "//validator/accounts/petnames:go_default_library", "//validator/accounts/wallet:go_default_library", "//validator/client:go_default_library", + "//validator/client/beacon-api:go_default_library", "//validator/client/beacon-chain-client-factory:go_default_library", "//validator/client/iface:go_default_library", "//validator/client/node-client-factory:go_default_library", diff --git a/validator/rpc/auth_token.go b/validator/rpc/auth_token.go index 390dad19389c..e35b42d41e30 100644 --- a/validator/rpc/auth_token.go +++ b/validator/rpc/auth_token.go @@ -149,7 +149,16 @@ func saveAuthToken(walletDirPath string, jwtKey []byte, token string) error { if _, err := bytesBuf.WriteString("\n"); err != nil { return err } - return file.WriteFile(hashFilePath, bytesBuf.Bytes()) + + if err := file.MkdirAll(walletDirPath); err != nil { + return errors.Wrapf(err, "could not create directory %s", walletDirPath) + } + + if err := file.WriteFile(hashFilePath, bytesBuf.Bytes()); err != nil { + return errors.Wrapf(err, "could not write to file %s", hashFilePath) + } + + return nil } func readAuthTokenFile(r io.Reader) (secret []byte, token string, err error) { diff --git a/validator/rpc/beacon.go b/validator/rpc/beacon.go index d79bb44e0e55..dd177d00fc7e 100644 --- a/validator/rpc/beacon.go +++ b/validator/rpc/beacon.go @@ -1,6 +1,8 @@ package rpc import ( + "net/http" + middleware "github.com/grpc-ecosystem/go-grpc-middleware" grpcretry "github.com/grpc-ecosystem/go-grpc-middleware/retry" grpcopentracing "github.com/grpc-ecosystem/go-grpc-middleware/tracing/opentracing" @@ -9,6 +11,7 @@ import ( grpcutil "github.com/prysmaticlabs/prysm/v4/api/grpc" ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1" "github.com/prysmaticlabs/prysm/v4/validator/client" + beaconApi "github.com/prysmaticlabs/prysm/v4/validator/client/beacon-api" beaconChainClientFactory 
"github.com/prysmaticlabs/prysm/v4/validator/client/beacon-chain-client-factory" nodeClientFactory "github.com/prysmaticlabs/prysm/v4/validator/client/node-client-factory" validatorClientFactory "github.com/prysmaticlabs/prysm/v4/validator/client/validator-client-factory" @@ -51,8 +54,13 @@ func (s *Server) registerBeaconClient() error { s.beaconApiTimeout, ) - s.beaconChainClient = beaconChainClientFactory.NewBeaconChainClient(conn) - s.beaconNodeClient = nodeClientFactory.NewNodeClient(conn) - s.beaconNodeValidatorClient = validatorClientFactory.NewValidatorClient(conn) + restHandler := &beaconApi.BeaconApiJsonRestHandler{ + HttpClient: http.Client{Timeout: s.beaconApiTimeout}, + Host: s.beaconApiEndpoint, + } + s.beaconChainClient = beaconChainClientFactory.NewBeaconChainClient(conn, restHandler) + s.beaconNodeClient = nodeClientFactory.NewNodeClient(conn, restHandler) + s.beaconNodeValidatorClient = validatorClientFactory.NewValidatorClient(conn, restHandler) + return nil } diff --git a/validator/rpc/handlers_health.go b/validator/rpc/handlers_health.go index d845b6de3a02..4ab87bc87d05 100644 --- a/validator/rpc/handlers_health.go +++ b/validator/rpc/handlers_health.go @@ -5,7 +5,6 @@ import ( "fmt" "net/http" - "github.com/prysmaticlabs/prysm/v4/api" "github.com/prysmaticlabs/prysm/v4/network/httputil" pb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1" "github.com/prysmaticlabs/prysm/v4/runtime/version" @@ -40,7 +39,7 @@ func (s *Server) StreamBeaconLogs(w http.ResponseWriter, r *http.Request) { ctx, span := trace.StartSpan(r.Context(), "validator.web.health.StreamBeaconLogs") defer span.End() // Set up SSE response headers - w.Header().Set("Content-Type", api.EventStreamMediaType) + w.Header().Set("Content-Type", "text/event-stream") w.Header().Set("Cache-Control", "no-cache") w.Header().Set("Connection", "keep-alive") @@ -109,7 +108,7 @@ func (s *Server) StreamValidatorLogs(w http.ResponseWriter, r *http.Request) { close(ch) }() // Set up SSE 
response headers - w.Header().Set("Content-Type", api.EventStreamMediaType) + w.Header().Set("Content-Type", "text/event-stream") w.Header().Set("Cache-Control", "no-cache") w.Header().Set("Connection", "keep-alive") diff --git a/validator/rpc/handlers_health_test.go b/validator/rpc/handlers_health_test.go index f3dc20b4bde5..7be1938bc55b 100644 --- a/validator/rpc/handlers_health_test.go +++ b/validator/rpc/handlers_health_test.go @@ -11,7 +11,6 @@ import ( "github.com/golang/mock/gomock" "github.com/golang/protobuf/ptypes/empty" - "github.com/prysmaticlabs/prysm/v4/api" "github.com/prysmaticlabs/prysm/v4/io/logs/mock" eth "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1" pb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1" @@ -93,7 +92,7 @@ func TestStreamBeaconLogs(t *testing.T) { } ct, ok := resp.Header["Content-Type"] require.Equal(t, ok, true) - require.Equal(t, ct[0], api.EventStreamMediaType) + require.Equal(t, ct[0], "text/event-stream") cn, ok := resp.Header["Connection"] require.Equal(t, ok, true) require.Equal(t, cn[0], "keep-alive") @@ -144,7 +143,7 @@ func TestStreamValidatorLogs(t *testing.T) { } ct, ok := resp.Header["Content-Type"] require.Equal(t, ok, true) - require.Equal(t, ct[0], api.EventStreamMediaType) + require.Equal(t, ct[0], "text/event-stream") cn, ok := resp.Header["Connection"] require.Equal(t, ok, true) require.Equal(t, cn[0], "keep-alive") diff --git a/validator/rpc/handlers_keymanager.go b/validator/rpc/handlers_keymanager.go index 1977962f5526..89094d35efa2 100644 --- a/validator/rpc/handlers_keymanager.go +++ b/validator/rpc/handlers_keymanager.go @@ -10,7 +10,6 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/hexutil" - "github.com/gorilla/mux" "github.com/pkg/errors" "github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/eth/shared" fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams" @@ -705,14 +704,8 @@ func (s *Server) GetGasLimit(w http.ResponseWriter, r 
*http.Request) { return } - rawPubkey := mux.Vars(r)["pubkey"] - if rawPubkey == "" { - httputil.HandleError(w, "pubkey is required in URL params", http.StatusBadRequest) - return - } - - pubkey, valid := shared.ValidateHex(w, "pubkey", rawPubkey, fieldparams.BLSPubkeyLength) - if !valid { + rawPubkey, pubkey, ok := shared.HexFromRoute(w, r, "pubkey", fieldparams.BLSPubkeyLength) + if !ok { return }