diff --git a/.github/workflows/bench.yml b/.github/workflows/bench.yml index ada29474be7..fbe5bd1bf59 100644 --- a/.github/workflows/bench.yml +++ b/.github/workflows/bench.yml @@ -41,7 +41,7 @@ jobs: - name: Setup go uses: actions/setup-go@v3 with: - go-version: "1.19" + go-version: "1.20" cache: true - name: Build relic diff --git a/.github/workflows/cd.yml b/.github/workflows/cd.yml index eb28e840078..9079fb06a98 100644 --- a/.github/workflows/cd.yml +++ b/.github/workflows/cd.yml @@ -14,7 +14,7 @@ jobs: - name: Setup Go uses: actions/setup-go@v2 with: - go-version: '1.19' + go-version: "1.20" - name: Checkout repo uses: actions/checkout@v2 - name: Build relic diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index bc0a7b5ebec..26d14496fb5 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -16,7 +16,7 @@ on: - 'v[0-9]+.[0-9]+' env: - GO_VERSION: 1.19 + GO_VERSION: "1.20" concurrency: group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.run_id }} @@ -47,7 +47,7 @@ jobs: uses: golangci/golangci-lint-action@v3 with: # Required: the version of golangci-lint is required and must be specified without patch version: we always use the latest patch version. - version: v1.49 + version: v1.51 args: -v --build-tags relic working-directory: ${{ matrix.dir }} # https://github.com/golangci/golangci-lint-action/issues/244 @@ -66,8 +66,8 @@ jobs: cache: true - name: Run tidy run: make tidy - - name: Emulator no relic check - run: make emulator-norelic-check + - name: code sanity check + run: make code-sanity-check shell-check: name: ShellCheck diff --git a/.github/workflows/flaky-test-debug.yml b/.github/workflows/flaky-test-debug.yml index 3a5b47e2c2f..8058a656f29 100644 --- a/.github/workflows/flaky-test-debug.yml +++ b/.github/workflows/flaky-test-debug.yml @@ -5,7 +5,7 @@ on: branches: - '**/*flaky-test-debug*' env: - GO_VERSION: 1.19 + GO_VERSION: "1.20" #concurrency: # group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.run_id }} @@ -36,7 +36,7 @@ jobs: uses: golangci/golangci-lint-action@v3 with: # Required: the version of golangci-lint is required and must be specified without patch version: we always use the latest patch version. 
- version: v1.49 + version: v1.51 args: -v --build-tags relic working-directory: ${{ matrix.dir }} # https://github.com/golangci/golangci-lint-action/issues/244 diff --git a/.github/workflows/test-monitor-flaky.yml b/.github/workflows/test-monitor-flaky.yml index fcf215b734e..442d71c3e07 100644 --- a/.github/workflows/test-monitor-flaky.yml +++ b/.github/workflows/test-monitor-flaky.yml @@ -13,7 +13,7 @@ on: env: BIGQUERY_DATASET: production_src_flow_test_metrics BIGQUERY_TABLE: test_results - GO_VERSION: 1.19 + GO_VERSION: "1.20" concurrency: group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.run_id }} diff --git a/.github/workflows/test-monitor-regular-skipped.yml b/.github/workflows/test-monitor-regular-skipped.yml index 74736a00431..d9f696ab87c 100644 --- a/.github/workflows/test-monitor-regular-skipped.yml +++ b/.github/workflows/test-monitor-regular-skipped.yml @@ -15,7 +15,7 @@ env: BIGQUERY_DATASET: production_src_flow_test_metrics BIGQUERY_TABLE: skipped_tests BIGQUERY_TABLE2: test_results - GO_VERSION: 1.19 + GO_VERSION: "1.20" concurrency: group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.run_id }} diff --git a/.github/workflows/tools.yml b/.github/workflows/tools.yml index 2e297adb6ff..9f228f215ba 100644 --- a/.github/workflows/tools.yml +++ b/.github/workflows/tools.yml @@ -25,7 +25,7 @@ jobs: - name: Setup Go uses: actions/setup-go@v2 with: - go-version: '1.19' + go-version: "1.20" - name: Set up Google Cloud SDK uses: google-github-actions/setup-gcloud@v1 with: diff --git a/Makefile b/Makefile index 14625ddf649..81dd2ce3ce4 100644 --- a/Makefile +++ b/Makefile @@ -19,7 +19,7 @@ ifeq (${IMAGE_TAG},) IMAGE_TAG := ${SHORT_COMMIT} endif -IMAGE_TAG_NO_NETGO := $(IMAGE_TAG)-without_netgo +IMAGE_TAG_NO_NETGO := $(IMAGE_TAG)-without-netgo # Name of the cover profile COVER_PROFILE := coverage.txt @@ -87,6 +87,23 @@ emulator-norelic-check: # test the fvm package compiles with Relic library disabled (required for the emulator build) cd ./fvm && go test ./... -run=NoTestHasThisPrefix +.SILENT: go-math-rand-check +go-math-rand-check: + # check that the insecure math/rand Go package isn't used by production code. + # `exclude` should only specify non production code (test, bench..). + # If this check fails, try updating your code by using: + # - "crypto/rand" or "flow-go/utils/rand" for non-deterministic randomness + # - "flow-go/crypto/random" for deterministic randomness + grep --include=\*.go \ + --exclude=*test* --exclude=*helper* --exclude=*example* --exclude=*fixture* --exclude=*benchmark* --exclude=*profiler* \ + --exclude-dir=*test* --exclude-dir=*helper* --exclude-dir=*example* --exclude-dir=*fixture* --exclude-dir=*benchmark* --exclude-dir=*profiler* -rnw '"math/rand"'; \ + if [ $$? 
-ne 1 ]; then \ + echo "[Error] Go production code should not use math/rand package"; exit 1; \ + fi + +.PHONY: code-sanity-check +code-sanity-check: go-math-rand-check emulator-norelic-check + .PHONY: fuzz-fvm fuzz-fvm: # run fuzz tests in the fvm package @@ -167,7 +184,7 @@ generate-mocks: install-mock-generators rm -rf ./fvm/environment/mock mockery --name '.*' --dir=fvm/environment --case=underscore --output="./fvm/environment/mock" --outpkg="mock" mockery --name '.*' --dir=ledger --case=underscore --output="./ledger/mock" --outpkg="mock" - mockery --name 'ViolationsConsumer' --dir=network/slashing --case=underscore --output="./network/mocknetwork" --outpkg="mocknetwork" + mockery --name 'ViolationsConsumer' --dir=network --case=underscore --output="./network/mocknetwork" --outpkg="mocknetwork" mockery --name '.*' --dir=network/p2p/ --case=underscore --output="./network/p2p/mock" --outpkg="mockp2p" mockery --name '.*' --dir=network/alsp --case=underscore --output="./network/alsp/mock" --outpkg="mockalsp" mockery --name 'Vertex' --dir="./module/forest" --case=underscore --output="./module/forest/mock" --outpkg="mock" diff --git a/access/handler.go b/access/handler.go index 404bfa81318..11e47dd3521 100644 --- a/access/handler.go +++ b/access/handler.go @@ -3,17 +3,17 @@ package access import ( "context" - "github.com/onflow/flow/protobuf/go/flow/access" - "github.com/onflow/flow/protobuf/go/flow/entities" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" - "google.golang.org/protobuf/types/known/timestamppb" "github.com/onflow/flow-go/consensus/hotstuff" "github.com/onflow/flow-go/consensus/hotstuff/signature" "github.com/onflow/flow-go/engine/common/rpc/convert" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/module" + + "github.com/onflow/flow/protobuf/go/flow/access" + "github.com/onflow/flow/protobuf/go/flow/entities" ) type Handler struct { @@ -516,7 +516,7 @@ func (h *Handler) GetEventsForHeightRange( return nil, err } - resultEvents, err := blockEventsToMessages(results) + resultEvents, err := convert.BlockEventsToMessages(results) if err != nil { return nil, err } @@ -548,7 +548,7 @@ func (h *Handler) GetEventsForBlockIDs( return nil, err } - resultEvents, err := blockEventsToMessages(results) + resultEvents, err := convert.BlockEventsToMessages(results) if err != nil { return nil, err } @@ -590,6 +590,27 @@ func (h *Handler) GetExecutionResultForBlockID(ctx context.Context, req *access. return executionResultToMessages(result, metadata) } +// GetExecutionResultByID returns the execution result for the given ID. 
+func (h *Handler) GetExecutionResultByID(ctx context.Context, req *access.GetExecutionResultByIDRequest) (*access.ExecutionResultByIDResponse, error) { + metadata := h.buildMetadataResponse() + + blockID := convert.MessageToIdentifier(req.GetId()) + + result, err := h.api.GetExecutionResultByID(ctx, blockID) + if err != nil { + return nil, err + } + + execResult, err := convert.ExecutionResultToMessage(result) + if err != nil { + return nil, err + } + return &access.ExecutionResultByIDResponse{ + ExecutionResult: execResult, + Metadata: metadata, + }, nil +} + func (h *Handler) blockResponse(block *flow.Block, fullResponse bool, status flow.BlockStatus) (*access.BlockResponse, error) { metadata := h.buildMetadataResponse() @@ -659,34 +680,6 @@ func executionResultToMessages(er *flow.ExecutionResult, metadata *entities.Meta }, nil } -func blockEventsToMessages(blocks []flow.BlockEvents) ([]*access.EventsResponse_Result, error) { - results := make([]*access.EventsResponse_Result, len(blocks)) - - for i, block := range blocks { - event, err := blockEventsToMessage(block) - if err != nil { - return nil, err - } - results[i] = event - } - - return results, nil -} - -func blockEventsToMessage(block flow.BlockEvents) (*access.EventsResponse_Result, error) { - eventMessages := make([]*entities.Event, len(block.Events)) - for i, event := range block.Events { - eventMessages[i] = convert.EventToMessage(event) - } - timestamp := timestamppb.New(block.BlockTimestamp) - return &access.EventsResponse_Result{ - BlockId: block.BlockID[:], - BlockHeight: block.BlockHeight, - BlockTimestamp: timestamp, - Events: eventMessages, - }, nil -} - // WithBlockSignerDecoder configures the Handler to decode signer indices // via the provided hotstuff.BlockSignerDecoder func WithBlockSignerDecoder(signerIndicesDecoder hotstuff.BlockSignerDecoder) func(*Handler) { diff --git a/cmd/Dockerfile b/cmd/Dockerfile index fc4bcf7badb..d9d7800546c 100644 --- a/cmd/Dockerfile +++ b/cmd/Dockerfile @@ -3,7 +3,7 @@ #################################### ## (1) Setup the build environment -FROM golang:1.19-bullseye AS build-setup +FROM golang:1.20-bullseye AS build-setup RUN apt-get update RUN apt-get -y install cmake zip @@ -71,7 +71,7 @@ RUN --mount=type=ssh \ RUN chmod a+x /app/app ## (4) Add the statically linked debug binary to a distroless image configured for debugging -FROM golang:1.19-bullseye as debug +FROM golang:1.20-bullseye as debug RUN go install github.com/go-delve/delve/cmd/dlv@latest diff --git a/cmd/access/node_builder/access_node_builder.go b/cmd/access/node_builder/access_node_builder.go index bf7a52047b4..0607870cf53 100644 --- a/cmd/access/node_builder/access_node_builder.go +++ b/cmd/access/node_builder/access_node_builder.go @@ -37,6 +37,7 @@ import ( "github.com/onflow/flow-go/crypto" "github.com/onflow/flow-go/engine/access/ingestion" pingeng "github.com/onflow/flow-go/engine/access/ping" + "github.com/onflow/flow-go/engine/access/rest/routes" "github.com/onflow/flow-go/engine/access/rpc" "github.com/onflow/flow-go/engine/access/rpc/backend" "github.com/onflow/flow-go/engine/access/state_stream" @@ -51,6 +52,7 @@ import ( "github.com/onflow/flow-go/module/executiondatasync/execution_data" execdatacache "github.com/onflow/flow-go/module/executiondatasync/execution_data/cache" finalizer "github.com/onflow/flow-go/module/finalizer/consensus" + "github.com/onflow/flow-go/module/grpcserver" "github.com/onflow/flow-go/module/id" "github.com/onflow/flow-go/module/mempool/herocache" 
"github.com/onflow/flow-go/module/mempool/stdmap" @@ -77,7 +79,6 @@ import ( "github.com/onflow/flow-go/network/p2p/translator" "github.com/onflow/flow-go/network/p2p/unicast/protocols" relaynet "github.com/onflow/flow-go/network/relay" - "github.com/onflow/flow-go/network/slashing" "github.com/onflow/flow-go/network/topology" "github.com/onflow/flow-go/network/validator" "github.com/onflow/flow-go/state/protocol" @@ -146,20 +147,22 @@ func DefaultAccessNodeConfig() *AccessNodeConfig { collectionGRPCPort: 9000, executionGRPCPort: 9000, rpcConf: rpc.Config{ - UnsecureGRPCListenAddr: "0.0.0.0:9000", - SecureGRPCListenAddr: "0.0.0.0:9001", - HTTPListenAddr: "0.0.0.0:8000", - RESTListenAddr: "", - CollectionAddr: "", - HistoricalAccessAddrs: "", - CollectionClientTimeout: 3 * time.Second, - ExecutionClientTimeout: 3 * time.Second, - ConnectionPoolSize: backend.DefaultConnectionPoolSize, - MaxHeightRange: backend.DefaultMaxHeightRange, - PreferredExecutionNodeIDs: nil, - FixedExecutionNodeIDs: nil, - ArchiveAddressList: nil, - MaxMsgSize: grpcutils.DefaultMaxMsgSize, + UnsecureGRPCListenAddr: "0.0.0.0:9000", + SecureGRPCListenAddr: "0.0.0.0:9001", + HTTPListenAddr: "0.0.0.0:8000", + RESTListenAddr: "", + CollectionAddr: "", + HistoricalAccessAddrs: "", + BackendConfig: backend.Config{ + CollectionClientTimeout: 3 * time.Second, + ExecutionClientTimeout: 3 * time.Second, + ConnectionPoolSize: backend.DefaultConnectionPoolSize, + MaxHeightRange: backend.DefaultMaxHeightRange, + PreferredExecutionNodeIDs: nil, + FixedExecutionNodeIDs: nil, + ArchiveAddressList: nil, + }, + MaxMsgSize: grpcutils.DefaultMaxMsgSize, }, stateStreamConf: state_stream.Config{ MaxExecutionDataMsgSize: grpcutils.DefaultMaxMsgSize, @@ -217,6 +220,7 @@ type FlowAccessNodeBuilder struct { CollectionsToMarkExecuted *stdmap.Times BlocksToMarkExecuted *stdmap.Times TransactionMetrics *metrics.TransactionCollector + RestMetrics *metrics.RestCollector AccessMetrics module.AccessMetrics PingMetrics module.PingMetrics Committee hotstuff.DynamicCommittee @@ -240,6 +244,11 @@ type FlowAccessNodeBuilder struct { FollowerEng *followereng.ComplianceEngine SyncEng *synceng.Engine StateStreamEng *state_stream.Engine + + // grpc servers + secureGrpcServer *grpcserver.GrpcServer + unsecureGrpcServer *grpcserver.GrpcServer + stateStreamGrpcServer *grpcserver.GrpcServer } func (builder *FlowAccessNodeBuilder) buildFollowerState() *FlowAccessNodeBuilder { @@ -612,8 +621,7 @@ func (builder *FlowAccessNodeBuilder) BuildExecutionDataRequester() *FlowAccessN node.RootChainID, builder.executionDataConfig.InitialBlockHeight, highestAvailableHeight, - builder.apiRatelimits, - builder.apiBurstlimits, + builder.stateStreamGrpcServer, ) if err != nil { return nil, fmt.Errorf("could not create state stream engine: %w", err) @@ -661,15 +669,15 @@ func (builder *FlowAccessNodeBuilder) extraFlags() { flags.StringVar(&builder.rpcConf.RESTListenAddr, "rest-addr", defaultConfig.rpcConf.RESTListenAddr, "the address the REST server listens on (if empty the REST server will not be started)") flags.StringVarP(&builder.rpcConf.CollectionAddr, "static-collection-ingress-addr", "", defaultConfig.rpcConf.CollectionAddr, "the address (of the collection node) to send transactions to") flags.StringVarP(&builder.ExecutionNodeAddress, "script-addr", "s", defaultConfig.ExecutionNodeAddress, "the address (of the execution node) forward the script to") - flags.StringSliceVar(&builder.rpcConf.ArchiveAddressList, "archive-address-list", defaultConfig.rpcConf.ArchiveAddressList, 
"the list of address of the archive node to forward the script queries to") + flags.StringSliceVar(&builder.rpcConf.BackendConfig.ArchiveAddressList, "archive-address-list", defaultConfig.rpcConf.BackendConfig.ArchiveAddressList, "the list of address of the archive node to forward the script queries to") flags.StringVarP(&builder.rpcConf.HistoricalAccessAddrs, "historical-access-addr", "", defaultConfig.rpcConf.HistoricalAccessAddrs, "comma separated rpc addresses for historical access nodes") - flags.DurationVar(&builder.rpcConf.CollectionClientTimeout, "collection-client-timeout", defaultConfig.rpcConf.CollectionClientTimeout, "grpc client timeout for a collection node") - flags.DurationVar(&builder.rpcConf.ExecutionClientTimeout, "execution-client-timeout", defaultConfig.rpcConf.ExecutionClientTimeout, "grpc client timeout for an execution node") - flags.UintVar(&builder.rpcConf.ConnectionPoolSize, "connection-pool-size", defaultConfig.rpcConf.ConnectionPoolSize, "maximum number of connections allowed in the connection pool, size of 0 disables the connection pooling, and anything less than the default size will be overridden to use the default size") + flags.DurationVar(&builder.rpcConf.BackendConfig.CollectionClientTimeout, "collection-client-timeout", defaultConfig.rpcConf.BackendConfig.CollectionClientTimeout, "grpc client timeout for a collection node") + flags.DurationVar(&builder.rpcConf.BackendConfig.ExecutionClientTimeout, "execution-client-timeout", defaultConfig.rpcConf.BackendConfig.ExecutionClientTimeout, "grpc client timeout for an execution node") + flags.UintVar(&builder.rpcConf.BackendConfig.ConnectionPoolSize, "connection-pool-size", defaultConfig.rpcConf.BackendConfig.ConnectionPoolSize, "maximum number of connections allowed in the connection pool, size of 0 disables the connection pooling, and anything less than the default size will be overridden to use the default size") flags.UintVar(&builder.rpcConf.MaxMsgSize, "rpc-max-message-size", grpcutils.DefaultMaxMsgSize, "the maximum message size in bytes for messages sent or received over grpc") - flags.UintVar(&builder.rpcConf.MaxHeightRange, "rpc-max-height-range", defaultConfig.rpcConf.MaxHeightRange, "maximum size for height range requests") - flags.StringSliceVar(&builder.rpcConf.PreferredExecutionNodeIDs, "preferred-execution-node-ids", defaultConfig.rpcConf.PreferredExecutionNodeIDs, "comma separated list of execution nodes ids to choose from when making an upstream call e.g. b4a4dbdcd443d...,fb386a6a... etc.") - flags.StringSliceVar(&builder.rpcConf.FixedExecutionNodeIDs, "fixed-execution-node-ids", defaultConfig.rpcConf.FixedExecutionNodeIDs, "comma separated list of execution nodes ids to choose from when making an upstream call if no matching preferred execution id is found e.g. b4a4dbdcd443d...,fb386a6a... etc.") + flags.UintVar(&builder.rpcConf.BackendConfig.MaxHeightRange, "rpc-max-height-range", defaultConfig.rpcConf.BackendConfig.MaxHeightRange, "maximum size for height range requests") + flags.StringSliceVar(&builder.rpcConf.BackendConfig.PreferredExecutionNodeIDs, "preferred-execution-node-ids", defaultConfig.rpcConf.BackendConfig.PreferredExecutionNodeIDs, "comma separated list of execution nodes ids to choose from when making an upstream call e.g. b4a4dbdcd443d...,fb386a6a... 
etc.") + flags.StringSliceVar(&builder.rpcConf.BackendConfig.FixedExecutionNodeIDs, "fixed-execution-node-ids", defaultConfig.rpcConf.BackendConfig.FixedExecutionNodeIDs, "comma separated list of execution nodes ids to choose from when making an upstream call if no matching preferred execution id is found e.g. b4a4dbdcd443d...,fb386a6a... etc.") flags.BoolVar(&builder.logTxTimeToFinalized, "log-tx-time-to-finalized", defaultConfig.logTxTimeToFinalized, "log transaction time to finalized") flags.BoolVar(&builder.logTxTimeToExecuted, "log-tx-time-to-executed", defaultConfig.logTxTimeToExecuted, "log transaction time to executed") flags.BoolVar(&builder.logTxTimeToFinalizedExecuted, "log-tx-time-to-finalized-executed", defaultConfig.logTxTimeToFinalizedExecuted, "log transaction time to finalized and executed") @@ -909,7 +917,7 @@ func (builder *FlowAccessNodeBuilder) Build() (cmd.Node, error) { builder.rpcConf.CollectionAddr, grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(int(builder.rpcConf.MaxMsgSize))), grpc.WithTransportCredentials(insecure.NewCredentials()), - backend.WithClientUnaryInterceptor(builder.rpcConf.CollectionClientTimeout)) + backend.WithClientUnaryInterceptor(builder.rpcConf.BackendConfig.CollectionClientTimeout)) if err != nil { return err } @@ -965,10 +973,19 @@ func (builder *FlowAccessNodeBuilder) Build() (cmd.Node, error) { ) return nil }). + Module("rest metrics", func(node *cmd.NodeConfig) error { + m, err := metrics.NewRestCollector(routes.URLToRoute, node.MetricsRegisterer) + if err != nil { + return err + } + builder.RestMetrics = m + return nil + }). Module("access metrics", func(node *cmd.NodeConfig) error { builder.AccessMetrics = metrics.NewAccessCollector( metrics.WithTransactionMetrics(builder.TransactionMetrics), metrics.WithBackendScriptsMetrics(builder.TransactionMetrics), + metrics.WithRestMetrics(builder.RestMetrics), ) return nil }). @@ -986,11 +1003,64 @@ func (builder *FlowAccessNodeBuilder) Build() (cmd.Node, error) { builder.rpcConf.TransportCredentials = credentials.NewTLS(tlsConfig) return nil }). - Component("RPC engine", func(node *cmd.NodeConfig) (module.ReadyDoneAware, error) { - engineBuilder, err := rpc.NewBuilder( + Module("creating grpc servers", func(node *cmd.NodeConfig) error { + builder.secureGrpcServer = grpcserver.NewGrpcServerBuilder( node.Logger, + builder.rpcConf.SecureGRPCListenAddr, + builder.rpcConf.MaxMsgSize, + builder.rpcMetricsEnabled, + builder.apiRatelimits, + builder.apiBurstlimits, + grpcserver.WithTransportCredentials(builder.rpcConf.TransportCredentials)).Build() + + builder.stateStreamGrpcServer = grpcserver.NewGrpcServerBuilder( + node.Logger, + builder.stateStreamConf.ListenAddr, + builder.stateStreamConf.MaxExecutionDataMsgSize, + builder.rpcMetricsEnabled, + builder.apiRatelimits, + builder.apiBurstlimits, + grpcserver.WithStreamInterceptor()).Build() + + if builder.rpcConf.UnsecureGRPCListenAddr != builder.stateStreamConf.ListenAddr { + builder.unsecureGrpcServer = grpcserver.NewGrpcServerBuilder(node.Logger, + builder.rpcConf.UnsecureGRPCListenAddr, + builder.rpcConf.MaxMsgSize, + builder.rpcMetricsEnabled, + builder.apiRatelimits, + builder.apiBurstlimits).Build() + } else { + builder.unsecureGrpcServer = builder.stateStreamGrpcServer + } + + return nil + }). 
+ Component("RPC engine", func(node *cmd.NodeConfig) (module.ReadyDoneAware, error) { + config := builder.rpcConf + backendConfig := config.BackendConfig + accessMetrics := builder.AccessMetrics + + backendCache, cacheSize, err := backend.NewCache(node.Logger, + accessMetrics, + backendConfig.ConnectionPoolSize) + if err != nil { + return nil, fmt.Errorf("could not initialize backend cache: %w", err) + } + + connFactory := &backend.ConnectionFactoryImpl{ + CollectionGRPCPort: builder.collectionGRPCPort, + ExecutionGRPCPort: builder.executionGRPCPort, + CollectionNodeGRPCTimeout: backendConfig.CollectionClientTimeout, + ExecutionNodeGRPCTimeout: backendConfig.ExecutionClientTimeout, + ConnectionsCache: backendCache, + CacheSize: cacheSize, + MaxMsgSize: config.MaxMsgSize, + AccessMetrics: accessMetrics, + Log: node.Logger, + } + + backend := backend.New( node.State, - builder.rpcConf, builder.CollectionRPC, builder.HistoricalAccessRPCs, node.Storage.Blocks, @@ -1001,13 +1071,27 @@ func (builder *FlowAccessNodeBuilder) Build() (cmd.Node, error) { node.Storage.Results, node.RootChainID, builder.AccessMetrics, - builder.collectionGRPCPort, - builder.executionGRPCPort, + connFactory, builder.retryEnabled, + backendConfig.MaxHeightRange, + backendConfig.PreferredExecutionNodeIDs, + backendConfig.FixedExecutionNodeIDs, + node.Logger, + backend.DefaultSnapshotHistoryLimit, + backendConfig.ArchiveAddressList) + + engineBuilder, err := rpc.NewBuilder( + node.Logger, + node.State, + config, + node.RootChainID, + builder.AccessMetrics, builder.rpcMetricsEnabled, - builder.apiRatelimits, - builder.apiBurstlimits, builder.Me, + backend, + backend, + builder.secureGrpcServer, + builder.unsecureGrpcServer, ) if err != nil { return nil, err @@ -1097,6 +1181,20 @@ func (builder *FlowAccessNodeBuilder) Build() (cmd.Node, error) { builder.BuildExecutionDataRequester() } + builder.Component("secure grpc server", func(node *cmd.NodeConfig) (module.ReadyDoneAware, error) { + return builder.secureGrpcServer, nil + }) + + builder.Component("state stream unsecure grpc server", func(node *cmd.NodeConfig) (module.ReadyDoneAware, error) { + return builder.stateStreamGrpcServer, nil + }) + + if builder.rpcConf.UnsecureGRPCListenAddr != builder.stateStreamConf.ListenAddr { + builder.Component("unsecure grpc server", func(node *cmd.NodeConfig) (module.ReadyDoneAware, error) { + return builder.unsecureGrpcServer, nil + }) + } + builder.Component("ping engine", func(node *cmd.NodeConfig) (module.ReadyDoneAware, error) { ping, err := pingeng.New( node.Logger, @@ -1192,11 +1290,15 @@ func (builder *FlowAccessNodeBuilder) initPublicLibp2pNode(networkKey crypto.Pri return nil, fmt.Errorf("could not create connection manager: %w", err) } - meshTracer := tracer.NewGossipSubMeshTracer( - builder.Logger, - networkMetrics, - builder.IdentityProvider, - builder.FlowConfig.NetworkConfig.GossipSubConfig.LocalMeshLogInterval) + meshTracerCfg := &tracer.GossipSubMeshTracerConfig{ + Logger: builder.Logger, + Metrics: networkMetrics, + IDProvider: builder.IdentityProvider, + LoggerInterval: builder.FlowConfig.NetworkConfig.GossipSubConfig.LocalMeshLogInterval, + RpcSentTrackerCacheCollector: metrics.GossipSubRPCSentTrackerMetricFactory(builder.HeroCacheMetricsFactory(), network.PublicNetwork), + RpcSentTrackerCacheSize: builder.FlowConfig.NetworkConfig.GossipSubConfig.RPCSentTrackerCacheSize, + } + meshTracer := tracer.NewGossipSubMeshTracer(meshTracerCfg) libp2pNode, err := p2pbuilder.NewNodeBuilder( builder.Logger, @@ -1261,15 +1363,14 
@@ func (builder *FlowAccessNodeBuilder) initMiddleware(nodeID flow.Identifier, ) network.Middleware { logger := builder.Logger.With().Bool("staked", false).Logger() mw := middleware.NewMiddleware(&middleware.Config{ - Logger: logger, - Libp2pNode: libp2pNode, - FlowId: nodeID, - BitSwapMetrics: builder.Metrics.Bitswap, - RootBlockID: builder.SporkID, - UnicastMessageTimeout: middleware.DefaultUnicastTimeout, - IdTranslator: builder.IDTranslator, - Codec: builder.CodecFactory(), - SlashingViolationsConsumer: slashing.NewSlashingViolationsConsumer(logger, networkMetrics), + Logger: logger, + Libp2pNode: libp2pNode, + FlowId: nodeID, + BitSwapMetrics: builder.Metrics.Bitswap, + RootBlockID: builder.SporkID, + UnicastMessageTimeout: middleware.DefaultUnicastTimeout, + IdTranslator: builder.IDTranslator, + Codec: builder.CodecFactory(), }, middleware.WithMessageValidators(validators...), // use default identifier provider ) diff --git a/cmd/bootstrap/cmd/clusters.go b/cmd/bootstrap/cmd/clusters.go index 3165e7b0934..441f573f429 100644 --- a/cmd/bootstrap/cmd/clusters.go +++ b/cmd/bootstrap/cmd/clusters.go @@ -10,7 +10,6 @@ import ( "github.com/onflow/flow-go/model/flow/assignment" "github.com/onflow/flow-go/model/flow/factory" "github.com/onflow/flow-go/model/flow/filter" - "github.com/onflow/flow-go/utils/rand" ) // Construct random cluster assignment with internal and partner nodes. @@ -39,12 +38,11 @@ func constructClusterAssignment(partnerNodes, internalNodes []model.NodeInfo) (f } // shuffle both collector lists based on a non-deterministic algorithm - var err error - err = rand.Shuffle(uint(len(partners)), func(i, j uint) { partners[i], partners[j] = partners[j], partners[i] }) + partners, err := partners.Shuffle() if err != nil { log.Fatal().Err(err).Msg("could not shuffle partners") } - err = rand.Shuffle(uint(len(internals)), func(i, j uint) { internals[i], internals[j] = internals[j], internals[i] }) + internals, err = internals.Shuffle() if err != nil { log.Fatal().Err(err).Msg("could not shuffle internals") } diff --git a/cmd/observer/node_builder/observer_builder.go b/cmd/observer/node_builder/observer_builder.go index be518249714..8fe2a0f8d91 100644 --- a/cmd/observer/node_builder/observer_builder.go +++ b/cmd/observer/node_builder/observer_builder.go @@ -28,6 +28,8 @@ import ( recovery "github.com/onflow/flow-go/consensus/recovery/protocol" "github.com/onflow/flow-go/crypto" "github.com/onflow/flow-go/engine/access/apiproxy" + restapiproxy "github.com/onflow/flow-go/engine/access/rest/apiproxy" + "github.com/onflow/flow-go/engine/access/rest/routes" "github.com/onflow/flow-go/engine/access/rpc" "github.com/onflow/flow-go/engine/access/rpc/backend" "github.com/onflow/flow-go/engine/common/follower" @@ -39,6 +41,7 @@ import ( "github.com/onflow/flow-go/module" "github.com/onflow/flow-go/module/chainsync" finalizer "github.com/onflow/flow-go/module/finalizer/consensus" + "github.com/onflow/flow-go/module/grpcserver" "github.com/onflow/flow-go/module/id" "github.com/onflow/flow-go/module/local" "github.com/onflow/flow-go/module/metrics" @@ -62,7 +65,6 @@ import ( "github.com/onflow/flow-go/network/p2p/translator" "github.com/onflow/flow-go/network/p2p/unicast/protocols" "github.com/onflow/flow-go/network/p2p/utils" - "github.com/onflow/flow-go/network/slashing" "github.com/onflow/flow-go/network/validator" stateprotocol "github.com/onflow/flow-go/state/protocol" badgerState "github.com/onflow/flow-go/state/protocol/badger" @@ -110,19 +112,22 @@ type ObserverServiceConfig struct { 
func DefaultObserverServiceConfig() *ObserverServiceConfig { return &ObserverServiceConfig{ rpcConf: rpc.Config{ - UnsecureGRPCListenAddr: "0.0.0.0:9000", - SecureGRPCListenAddr: "0.0.0.0:9001", - HTTPListenAddr: "0.0.0.0:8000", - RESTListenAddr: "", - CollectionAddr: "", - HistoricalAccessAddrs: "", - CollectionClientTimeout: 3 * time.Second, - ExecutionClientTimeout: 3 * time.Second, - MaxHeightRange: backend.DefaultMaxHeightRange, - PreferredExecutionNodeIDs: nil, - FixedExecutionNodeIDs: nil, - ArchiveAddressList: nil, - MaxMsgSize: grpcutils.DefaultMaxMsgSize, + UnsecureGRPCListenAddr: "0.0.0.0:9000", + SecureGRPCListenAddr: "0.0.0.0:9001", + HTTPListenAddr: "0.0.0.0:8000", + RESTListenAddr: "", + CollectionAddr: "", + HistoricalAccessAddrs: "", + BackendConfig: backend.Config{ + CollectionClientTimeout: 3 * time.Second, + ExecutionClientTimeout: 3 * time.Second, + ConnectionPoolSize: backend.DefaultConnectionPoolSize, + MaxHeightRange: backend.DefaultMaxHeightRange, + PreferredExecutionNodeIDs: nil, + FixedExecutionNodeIDs: nil, + ArchiveAddressList: nil, + }, + MaxMsgSize: grpcutils.DefaultMaxMsgSize, }, rpcMetricsEnabled: false, apiRatelimits: nil, @@ -163,6 +168,12 @@ type ObserverServiceBuilder struct { // Public network peerID peer.ID + + RestMetrics *metrics.RestCollector + AccessMetrics module.AccessMetrics + // grpc servers + secureGrpcServer *grpcserver.GrpcServer + unsecureGrpcServer *grpcserver.GrpcServer } // deriveBootstrapPeerIdentities derives the Flow Identity of the bootstrap peers from the parameters. @@ -447,7 +458,8 @@ func (builder *ObserverServiceBuilder) extraFlags() { flags.StringVarP(&builder.rpcConf.HTTPListenAddr, "http-addr", "h", defaultConfig.rpcConf.HTTPListenAddr, "the address the http proxy server listens on") flags.StringVar(&builder.rpcConf.RESTListenAddr, "rest-addr", defaultConfig.rpcConf.RESTListenAddr, "the address the REST server listens on (if empty the REST server will not be started)") flags.UintVar(&builder.rpcConf.MaxMsgSize, "rpc-max-message-size", defaultConfig.rpcConf.MaxMsgSize, "the maximum message size in bytes for messages sent or received over grpc") - flags.UintVar(&builder.rpcConf.MaxHeightRange, "rpc-max-height-range", defaultConfig.rpcConf.MaxHeightRange, "maximum size for height range requests") + flags.UintVar(&builder.rpcConf.BackendConfig.ConnectionPoolSize, "connection-pool-size", defaultConfig.rpcConf.BackendConfig.ConnectionPoolSize, "maximum number of connections allowed in the connection pool, size of 0 disables the connection pooling, and anything less than the default size will be overridden to use the default size") + flags.UintVar(&builder.rpcConf.BackendConfig.MaxHeightRange, "rpc-max-height-range", defaultConfig.rpcConf.BackendConfig.MaxHeightRange, "maximum size for height range requests") flags.StringToIntVar(&builder.apiRatelimits, "api-rate-limits", defaultConfig.apiRatelimits, "per second rate limits for Access API methods e.g. Ping=300,GetTransaction=500 etc.") flags.StringToIntVar(&builder.apiBurstlimits, "api-burst-limits", defaultConfig.apiBurstlimits, "burst limits for Access API methods e.g. 
Ping=100,GetTransaction=100 etc.") flags.StringVar(&builder.observerNetworkingKeyPath, "observer-networking-key-path", defaultConfig.observerNetworkingKeyPath, "path to the networking key for observer") @@ -703,11 +715,15 @@ func (builder *ObserverServiceBuilder) initPublicLibp2pNode(networkKey crypto.Pr pis = append(pis, pi) } - meshTracer := tracer.NewGossipSubMeshTracer( - builder.Logger, - builder.Metrics.Network, - builder.IdentityProvider, - builder.FlowConfig.NetworkConfig.GossipSubConfig.LocalMeshLogInterval) + meshTracerCfg := &tracer.GossipSubMeshTracerConfig{ + Logger: builder.Logger, + Metrics: builder.Metrics.Network, + IDProvider: builder.IdentityProvider, + LoggerInterval: builder.FlowConfig.NetworkConfig.GossipSubConfig.LocalMeshLogInterval, + RpcSentTrackerCacheCollector: metrics.GossipSubRPCSentTrackerMetricFactory(builder.HeroCacheMetricsFactory(), network.PublicNetwork), + RpcSentTrackerCacheSize: builder.FlowConfig.NetworkConfig.GossipSubConfig.RPCSentTrackerCacheSize, + } + meshTracer := tracer.NewGossipSubMeshTracer(meshTracerCfg) node, err := p2pbuilder.NewNodeBuilder( builder.Logger, @@ -841,11 +857,64 @@ func (builder *ObserverServiceBuilder) enqueueConnectWithStakedAN() { } func (builder *ObserverServiceBuilder) enqueueRPCServer() { + builder.Module("creating grpc servers", func(node *cmd.NodeConfig) error { + builder.secureGrpcServer = grpcserver.NewGrpcServerBuilder(node.Logger, + builder.rpcConf.SecureGRPCListenAddr, + builder.rpcConf.MaxMsgSize, + builder.rpcMetricsEnabled, + builder.apiRatelimits, + builder.apiBurstlimits, + grpcserver.WithTransportCredentials(builder.rpcConf.TransportCredentials)).Build() + + builder.unsecureGrpcServer = grpcserver.NewGrpcServerBuilder(node.Logger, + builder.rpcConf.UnsecureGRPCListenAddr, + builder.rpcConf.MaxMsgSize, + builder.rpcMetricsEnabled, + builder.apiRatelimits, + builder.apiBurstlimits).Build() + + return nil + }) + builder.Module("rest metrics", func(node *cmd.NodeConfig) error { + m, err := metrics.NewRestCollector(routes.URLToRoute, node.MetricsRegisterer) + if err != nil { + return err + } + builder.RestMetrics = m + return nil + }) + builder.Module("access metrics", func(node *cmd.NodeConfig) error { + builder.AccessMetrics = metrics.NewAccessCollector( + metrics.WithRestMetrics(builder.RestMetrics), + ) + return nil + }) builder.Component("RPC engine", func(node *cmd.NodeConfig) (module.ReadyDoneAware, error) { - engineBuilder, err := rpc.NewBuilder( - node.Logger, + accessMetrics := builder.AccessMetrics + config := builder.rpcConf + backendConfig := config.BackendConfig + + backendCache, cacheSize, err := backend.NewCache(node.Logger, + accessMetrics, + config.BackendConfig.ConnectionPoolSize) + if err != nil { + return nil, fmt.Errorf("could not initialize backend cache: %w", err) + } + + connFactory := &backend.ConnectionFactoryImpl{ + CollectionGRPCPort: 0, + ExecutionGRPCPort: 0, + CollectionNodeGRPCTimeout: backendConfig.CollectionClientTimeout, + ExecutionNodeGRPCTimeout: backendConfig.ExecutionClientTimeout, + ConnectionsCache: backendCache, + CacheSize: cacheSize, + MaxMsgSize: config.MaxMsgSize, + AccessMetrics: accessMetrics, + Log: node.Logger, + } + + accessBackend := backend.New( node.State, - builder.rpcConf, nil, nil, node.Storage.Blocks, @@ -855,28 +924,55 @@ func (builder *ObserverServiceBuilder) enqueueRPCServer() { node.Storage.Receipts, node.Storage.Results, node.RootChainID, - metrics.NewNoopCollector(), - 0, - 0, + accessMetrics, + connFactory, false, + backendConfig.MaxHeightRange, 
+ backendConfig.PreferredExecutionNodeIDs, + backendConfig.FixedExecutionNodeIDs, + node.Logger, + backend.DefaultSnapshotHistoryLimit, + backendConfig.ArchiveAddressList) + + observerCollector := metrics.NewObserverCollector() + restHandler, err := restapiproxy.NewRestProxyHandler( + accessBackend, + builder.upstreamIdentities, + builder.apiTimeout, + config.MaxMsgSize, + builder.Logger, + observerCollector, + node.RootChainID.Chain()) + if err != nil { + return nil, err + } + + engineBuilder, err := rpc.NewBuilder( + node.Logger, + node.State, + config, + node.RootChainID, + accessMetrics, builder.rpcMetricsEnabled, - builder.apiRatelimits, - builder.apiBurstlimits, builder.Me, + accessBackend, + restHandler, + builder.secureGrpcServer, + builder.unsecureGrpcServer, ) if err != nil { return nil, err } // upstream access node forwarder - forwarder, err := apiproxy.NewFlowAccessAPIForwarder(builder.upstreamIdentities, builder.apiTimeout, builder.rpcConf.MaxMsgSize) + forwarder, err := apiproxy.NewFlowAccessAPIForwarder(builder.upstreamIdentities, builder.apiTimeout, config.MaxMsgSize) if err != nil { return nil, err } - proxy := &apiproxy.FlowAccessAPIRouter{ + rpcHandler := &apiproxy.FlowAccessAPIRouter{ Logger: builder.Logger, - Metrics: metrics.NewObserverCollector(), + Metrics: observerCollector, Upstream: forwarder, Observer: protocol.NewHandler(protocol.New( node.State, @@ -888,7 +984,7 @@ func (builder *ObserverServiceBuilder) enqueueRPCServer() { // build the rpc engine builder.RpcEng, err = engineBuilder. - WithNewHandler(proxy). + WithRpcHandler(rpcHandler). WithLegacy(). Build() if err != nil { @@ -897,6 +993,16 @@ func (builder *ObserverServiceBuilder) enqueueRPCServer() { builder.FollowerDistributor.AddOnBlockFinalizedConsumer(builder.RpcEng.OnFinalizedBlock) return builder.RpcEng, nil }) + + // build secure grpc server + builder.Component("secure grpc server", func(node *cmd.NodeConfig) (module.ReadyDoneAware, error) { + return builder.secureGrpcServer, nil + }) + + // build unsecure grpc server + builder.Component("unsecure grpc server", func(node *cmd.NodeConfig) (module.ReadyDoneAware, error) { + return builder.unsecureGrpcServer, nil + }) } // initMiddleware creates the network.Middleware implementation with the libp2p factory function, metrics, peer update @@ -905,17 +1011,15 @@ func (builder *ObserverServiceBuilder) initMiddleware(nodeID flow.Identifier, libp2pNode p2p.LibP2PNode, validators ...network.MessageValidator, ) network.Middleware { - slashingViolationsConsumer := slashing.NewSlashingViolationsConsumer(builder.Logger, builder.Metrics.Network) mw := middleware.NewMiddleware(&middleware.Config{ - Logger: builder.Logger, - Libp2pNode: libp2pNode, - FlowId: nodeID, - BitSwapMetrics: builder.Metrics.Bitswap, - RootBlockID: builder.SporkID, - UnicastMessageTimeout: middleware.DefaultUnicastTimeout, - IdTranslator: builder.IDTranslator, - Codec: builder.CodecFactory(), - SlashingViolationsConsumer: slashingViolationsConsumer, + Logger: builder.Logger, + Libp2pNode: libp2pNode, + FlowId: nodeID, + BitSwapMetrics: builder.Metrics.Bitswap, + RootBlockID: builder.SporkID, + UnicastMessageTimeout: middleware.DefaultUnicastTimeout, + IdTranslator: builder.IDTranslator, + Codec: builder.CodecFactory(), }, middleware.WithMessageValidators(validators...), // use default identifier provider ) diff --git a/cmd/scaffold.go b/cmd/scaffold.go index 32114adb25e..e6043fd1801 100644 --- a/cmd/scaffold.go +++ b/cmd/scaffold.go @@ -5,7 +5,6 @@ import ( "crypto/x509" "errors" "fmt" - 
"math/rand" "os" "runtime" "strings" @@ -60,7 +59,6 @@ import ( "github.com/onflow/flow-go/network/p2p/unicast/protocols" "github.com/onflow/flow-go/network/p2p/unicast/ratelimit" "github.com/onflow/flow-go/network/p2p/utils/ratelimiter" - "github.com/onflow/flow-go/network/slashing" "github.com/onflow/flow-go/network/topology" "github.com/onflow/flow-go/state/protocol" badgerState "github.com/onflow/flow-go/state/protocol/badger" @@ -437,17 +435,15 @@ func (fnb *FlowNodeBuilder) InitFlowNetworkWithConduitFactory( mwOpts = append(mwOpts, middleware.WithPeerManagerFilters(peerManagerFilters)) } - slashingViolationsConsumer := slashing.NewSlashingViolationsConsumer(fnb.Logger, fnb.Metrics.Network) mw := middleware.NewMiddleware(&middleware.Config{ - Logger: fnb.Logger, - Libp2pNode: fnb.LibP2PNode, - FlowId: fnb.Me.NodeID(), - BitSwapMetrics: fnb.Metrics.Bitswap, - RootBlockID: fnb.SporkID, - UnicastMessageTimeout: fnb.BaseConfig.FlowConfig.NetworkConfig.UnicastMessageTimeout, - IdTranslator: fnb.IDTranslator, - Codec: fnb.CodecFactory(), - SlashingViolationsConsumer: slashingViolationsConsumer, + Logger: fnb.Logger, + Libp2pNode: fnb.LibP2PNode, + FlowId: fnb.Me.NodeID(), + BitSwapMetrics: fnb.Metrics.Bitswap, + RootBlockID: fnb.SporkID, + UnicastMessageTimeout: fnb.FlowConfig.NetworkConfig.UnicastMessageTimeout, + IdTranslator: fnb.IDTranslator, + Codec: fnb.CodecFactory(), }, mwOpts...) @@ -1776,10 +1772,6 @@ func (fnb *FlowNodeBuilder) Build() (Node, error) { } func (fnb *FlowNodeBuilder) onStart() error { - - // seed random generator - rand.Seed(time.Now().UnixNano()) - // init nodeinfo by reading the private bootstrap file if not already set if fnb.NodeID == flow.ZeroID { if err := fnb.initNodeInfo(); err != nil { diff --git a/cmd/testclient/go.mod b/cmd/testclient/go.mod index 0a02e69ad42..dbe66a78fb5 100644 --- a/cmd/testclient/go.mod +++ b/cmd/testclient/go.mod @@ -1,6 +1,6 @@ module github.com/onflow/flow-go/cmd/testclient -go 1.19 +go 1.20 require ( github.com/onflow/flow-go-sdk v0.4.1 diff --git a/cmd/util/cmd/execution-state-extract/export_report.json b/cmd/util/cmd/execution-state-extract/export_report.json index 2cbadb698d0..4c8484e4396 100644 --- a/cmd/util/cmd/execution-state-extract/export_report.json +++ b/cmd/util/cmd/execution-state-extract/export_report.json @@ -1,6 +1,6 @@ { "EpochCounter": 0, - "PreviousStateCommitment": "18eb0e8beef7ce851e552ecd29c813fde0a9e6f0c5614d7615642076602a48cf", - "CurrentStateCommitment": "18eb0e8beef7ce851e552ecd29c813fde0a9e6f0c5614d7615642076602a48cf", + "PreviousStateCommitment": "1c9f9d343cb8d4610e0b2c1eb74d6ea2f2f8aef2d666281dc22870e3efaa607b", + "CurrentStateCommitment": "1c9f9d343cb8d4610e0b2c1eb74d6ea2f2f8aef2d666281dc22870e3efaa607b", "ReportSucceeded": true } \ No newline at end of file diff --git a/config/default-config.yml b/config/default-config.yml index 371fc4c385c..9834694b0e2 100644 --- a/config/default-config.yml +++ b/config/default-config.yml @@ -63,6 +63,9 @@ network-config: # The default interval at which the gossipsub score tracer logs the peer scores. This is used for debugging and forensics purposes. # Note that we purposefully choose this logging interval high enough to avoid spamming the logs. gossipsub-score-tracer-interval: 1m + # The default RPC sent tracker cache size. The RPC sent tracker is used to track RPC control messages sent from the local node. + # Note: this cache size must be large enough to keep a history of sent messages in a reasonable time window of past history. 
+ gossipsub-rpc-sent-tracker-cache-size: 1_000_000 # Peer scoring is the default value for enabling peer scoring gossipsub-peer-scoring-enabled: true # Gossipsub rpc inspectors configs diff --git a/consensus/hotstuff/signature/block_signer_decoder_test.go b/consensus/hotstuff/signature/block_signer_decoder_test.go index 78efb3005eb..c065e315add 100644 --- a/consensus/hotstuff/signature/block_signer_decoder_test.go +++ b/consensus/hotstuff/signature/block_signer_decoder_test.go @@ -140,7 +140,8 @@ func (s *blockSignerDecoderSuite) Test_EpochTransition() { blockView := s.block.Header.View parentView := s.block.Header.ParentView epoch1Committee := s.allConsensus - epoch2Committee := s.allConsensus.SamplePct(.8) + epoch2Committee, err := s.allConsensus.SamplePct(.8) + require.NoError(s.T(), err) *s.committee = *hotstuff.NewDynamicCommittee(s.T()) s.committee.On("IdentitiesByEpoch", parentView).Return(epoch1Committee, nil).Maybe() diff --git a/consensus/integration/network_test.go b/consensus/integration/network_test.go index dfa71c53066..1a3eb31d58b 100644 --- a/consensus/integration/network_test.go +++ b/consensus/integration/network_test.go @@ -160,7 +160,11 @@ func (n *Network) publish(event interface{}, channel channels.Channel, targetIDs // Engines attached to the same channel on other nodes. The targeted nodes are selected based on the selector. // In this test helper implementation, multicast uses submit method under the hood. func (n *Network) multicast(event interface{}, channel channels.Channel, num uint, targetIDs ...flow.Identifier) error { - targetIDs = flow.Sample(num, targetIDs...) + var err error + targetIDs, err = flow.Sample(num, targetIDs...) + if err != nil { + return fmt.Errorf("sampling failed: %w", err) + } return n.submit(event, channel, targetIDs...) 
} diff --git a/crypto/Dockerfile b/crypto/Dockerfile index 37a0b373171..d75e9543de4 100644 --- a/crypto/Dockerfile +++ b/crypto/Dockerfile @@ -1,6 +1,6 @@ # gcr.io/dl-flow/golang-cmake -FROM golang:1.19-buster +FROM golang:1.20-buster RUN apt-get update RUN apt-get -y install cmake zip RUN go install github.com/axw/gocov/gocov@latest diff --git a/crypto/go.mod b/crypto/go.mod index c7fe54f9ff5..9895e1c35db 100644 --- a/crypto/go.mod +++ b/crypto/go.mod @@ -1,6 +1,6 @@ module github.com/onflow/flow-go/crypto -go 1.19 +go 1.20 require ( github.com/btcsuite/btcd/btcec/v2 v2.2.1 diff --git a/engine/Readme.md b/engine/Readme.md index 8faebe0b332..cd082cdf557 100644 --- a/engine/Readme.md +++ b/engine/Readme.md @@ -1,5 +1,4 @@ # Notifier - The Notifier implements the following state machine ![Notifier State Machine](/docs/NotifierStateMachine.png) diff --git a/engine/access/apiproxy/access_api_proxy.go b/engine/access/apiproxy/access_api_proxy.go index d72ec5bb5e2..f5898686fc6 100644 --- a/engine/access/apiproxy/access_api_proxy.go +++ b/engine/access/apiproxy/access_api_proxy.go @@ -2,26 +2,17 @@ package apiproxy import ( "context" - "fmt" - "sync" "time" - "google.golang.org/grpc/connectivity" - "google.golang.org/grpc/credentials/insecure" - - "google.golang.org/grpc" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/credentials" "google.golang.org/grpc/status" "github.com/onflow/flow/protobuf/go/flow/access" "github.com/rs/zerolog" - "github.com/onflow/flow-go/engine/access/rpc/backend" + "github.com/onflow/flow-go/engine/common/grpc/forwarder" "github.com/onflow/flow-go/engine/protocol" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/module/metrics" - "github.com/onflow/flow-go/utils/grpcutils" ) // FlowAccessAPIRouter is a structure that represents the routing proxy algorithm. @@ -51,88 +42,6 @@ func (h *FlowAccessAPIRouter) log(handler, rpc string, err error) { logger.Info().Msg("request succeeded") } -// reconnectingClient returns an active client, or -// creates one, if the last one is not ready anymore. 
-func (h *FlowAccessAPIForwarder) reconnectingClient(i int) error { - timeout := h.timeout - - if h.connections[i] == nil || h.connections[i].GetState() != connectivity.Ready { - identity := h.ids[i] - var connection *grpc.ClientConn - var err error - if identity.NetworkPubKey == nil { - connection, err = grpc.Dial( - identity.Address, - grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(int(h.maxMsgSize))), - grpc.WithTransportCredentials(insecure.NewCredentials()), - backend.WithClientUnaryInterceptor(timeout)) - if err != nil { - return err - } - } else { - tlsConfig, err := grpcutils.DefaultClientTLSConfig(identity.NetworkPubKey) - if err != nil { - return fmt.Errorf("failed to get default TLS client config using public flow networking key %s %w", identity.NetworkPubKey.String(), err) - } - - connection, err = grpc.Dial( - identity.Address, - grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(int(h.maxMsgSize))), - grpc.WithTransportCredentials(credentials.NewTLS(tlsConfig)), - backend.WithClientUnaryInterceptor(timeout)) - if err != nil { - return fmt.Errorf("cannot connect to %s %w", identity.Address, err) - } - } - connection.Connect() - time.Sleep(1 * time.Second) - state := connection.GetState() - if state != connectivity.Ready && state != connectivity.Connecting { - return fmt.Errorf("%v", state) - } - h.connections[i] = connection - h.upstream[i] = access.NewAccessAPIClient(connection) - } - - return nil -} - -// faultTolerantClient implements an upstream connection that reconnects on errors -// a reasonable amount of time. -func (h *FlowAccessAPIForwarder) faultTolerantClient() (access.AccessAPIClient, error) { - if h.upstream == nil || len(h.upstream) == 0 { - return nil, status.Errorf(codes.Unimplemented, "method not implemented") - } - - // Reasoning: A retry count of three gives an acceptable 5% failure ratio from a 37% failure ratio. - // A bigger number is problematic due to the DNS resolve and connection times, - // plus the need to log and debug each individual connection failure. - // - // This reasoning eliminates the need of making this parameter configurable. - // The logic works rolling over a single connection as well making clean code. - const retryMax = 3 - - h.lock.Lock() - defer h.lock.Unlock() - - var err error - for i := 0; i < retryMax; i++ { - h.roundRobin++ - h.roundRobin = h.roundRobin % len(h.upstream) - err = h.reconnectingClient(h.roundRobin) - if err != nil { - continue - } - state := h.connections[h.roundRobin].GetState() - if state != connectivity.Ready && state != connectivity.Connecting { - continue - } - return h.upstream[h.roundRobin], nil - } - - return nil, status.Errorf(codes.Unavailable, err.Error()) -} - // Ping pings the service. It is special in the sense that it responds successful, // only if all underlying services are ready. 
func (h *FlowAccessAPIRouter) Ping(context context.Context, req *access.PingRequest) (*access.PingResponse, error) { @@ -290,54 +199,33 @@ func (h *FlowAccessAPIRouter) GetExecutionResultForBlockID(context context.Conte return res, err } +func (h *FlowAccessAPIRouter) GetExecutionResultByID(context context.Context, req *access.GetExecutionResultByIDRequest) (*access.ExecutionResultByIDResponse, error) { + res, err := h.Upstream.GetExecutionResultByID(context, req) + h.log("upstream", "GetExecutionResultByID", err) + return res, err +} + // FlowAccessAPIForwarder forwards all requests to a set of upstream access nodes or observers type FlowAccessAPIForwarder struct { - lock sync.Mutex - roundRobin int - ids flow.IdentityList - upstream []access.AccessAPIClient - connections []*grpc.ClientConn - timeout time.Duration - maxMsgSize uint + *forwarder.Forwarder } func NewFlowAccessAPIForwarder(identities flow.IdentityList, timeout time.Duration, maxMsgSize uint) (*FlowAccessAPIForwarder, error) { - forwarder := &FlowAccessAPIForwarder{maxMsgSize: maxMsgSize} - err := forwarder.setFlowAccessAPI(identities, timeout) - return forwarder, err -} - -// setFlowAccessAPI sets a backend access API that forwards some requests to an upstream node. -// It is used by Observer services, Blockchain Data Service, etc. -// Make sure that this is just for observation and not a staked participant in the flow network. -// This means that observers see a copy of the data but there is no interaction to ensure integrity from the root block. -func (ret *FlowAccessAPIForwarder) setFlowAccessAPI(accessNodeAddressAndPort flow.IdentityList, timeout time.Duration) error { - ret.timeout = timeout - ret.ids = accessNodeAddressAndPort - ret.upstream = make([]access.AccessAPIClient, accessNodeAddressAndPort.Count()) - ret.connections = make([]*grpc.ClientConn, accessNodeAddressAndPort.Count()) - for i, identity := range accessNodeAddressAndPort { - // Store the faultTolerantClient setup parameters such as address, public, key and timeout, so that - // we can refresh the API on connection loss - ret.ids[i] = identity - - // We fail on any single error on startup, so that - // we identify bootstrapping errors early - err := ret.reconnectingClient(i) - if err != nil { - return err - } + forwarder, err := forwarder.NewForwarder(identities, timeout, maxMsgSize) + if err != nil { + return nil, err } - ret.roundRobin = 0 - return nil + return &FlowAccessAPIForwarder{ + Forwarder: forwarder, + }, nil } // Ping pings the service. It is special in the sense that it responds successful, // only if all underlying services are ready. 
func (h *FlowAccessAPIForwarder) Ping(context context.Context, req *access.PingRequest) (*access.PingResponse, error) { // This is a passthrough request - upstream, err := h.faultTolerantClient() + upstream, err := h.FaultTolerantClient() if err != nil { return nil, err } @@ -346,7 +234,7 @@ func (h *FlowAccessAPIForwarder) Ping(context context.Context, req *access.PingR func (h *FlowAccessAPIForwarder) GetNodeVersionInfo(context context.Context, req *access.GetNodeVersionInfoRequest) (*access.GetNodeVersionInfoResponse, error) { // This is a passthrough request - upstream, err := h.faultTolerantClient() + upstream, err := h.FaultTolerantClient() if err != nil { return nil, err } @@ -355,7 +243,7 @@ func (h *FlowAccessAPIForwarder) GetNodeVersionInfo(context context.Context, req func (h *FlowAccessAPIForwarder) GetLatestBlockHeader(context context.Context, req *access.GetLatestBlockHeaderRequest) (*access.BlockHeaderResponse, error) { // This is a passthrough request - upstream, err := h.faultTolerantClient() + upstream, err := h.FaultTolerantClient() if err != nil { return nil, err } @@ -364,7 +252,7 @@ func (h *FlowAccessAPIForwarder) GetLatestBlockHeader(context context.Context, r func (h *FlowAccessAPIForwarder) GetBlockHeaderByID(context context.Context, req *access.GetBlockHeaderByIDRequest) (*access.BlockHeaderResponse, error) { // This is a passthrough request - upstream, err := h.faultTolerantClient() + upstream, err := h.FaultTolerantClient() if err != nil { return nil, err } @@ -373,7 +261,7 @@ func (h *FlowAccessAPIForwarder) GetBlockHeaderByID(context context.Context, req func (h *FlowAccessAPIForwarder) GetBlockHeaderByHeight(context context.Context, req *access.GetBlockHeaderByHeightRequest) (*access.BlockHeaderResponse, error) { // This is a passthrough request - upstream, err := h.faultTolerantClient() + upstream, err := h.FaultTolerantClient() if err != nil { return nil, err } @@ -382,7 +270,7 @@ func (h *FlowAccessAPIForwarder) GetBlockHeaderByHeight(context context.Context, func (h *FlowAccessAPIForwarder) GetLatestBlock(context context.Context, req *access.GetLatestBlockRequest) (*access.BlockResponse, error) { // This is a passthrough request - upstream, err := h.faultTolerantClient() + upstream, err := h.FaultTolerantClient() if err != nil { return nil, err } @@ -391,7 +279,7 @@ func (h *FlowAccessAPIForwarder) GetLatestBlock(context context.Context, req *ac func (h *FlowAccessAPIForwarder) GetBlockByID(context context.Context, req *access.GetBlockByIDRequest) (*access.BlockResponse, error) { // This is a passthrough request - upstream, err := h.faultTolerantClient() + upstream, err := h.FaultTolerantClient() if err != nil { return nil, err } @@ -400,7 +288,7 @@ func (h *FlowAccessAPIForwarder) GetBlockByID(context context.Context, req *acce func (h *FlowAccessAPIForwarder) GetBlockByHeight(context context.Context, req *access.GetBlockByHeightRequest) (*access.BlockResponse, error) { // This is a passthrough request - upstream, err := h.faultTolerantClient() + upstream, err := h.FaultTolerantClient() if err != nil { return nil, err } @@ -409,7 +297,7 @@ func (h *FlowAccessAPIForwarder) GetBlockByHeight(context context.Context, req * func (h *FlowAccessAPIForwarder) GetCollectionByID(context context.Context, req *access.GetCollectionByIDRequest) (*access.CollectionResponse, error) { // This is a passthrough request - upstream, err := h.faultTolerantClient() + upstream, err := h.FaultTolerantClient() if err != nil { return nil, err } @@ -418,7 +306,7 @@ func (h 
*FlowAccessAPIForwarder) GetCollectionByID(context context.Context, req func (h *FlowAccessAPIForwarder) SendTransaction(context context.Context, req *access.SendTransactionRequest) (*access.SendTransactionResponse, error) { // This is a passthrough request - upstream, err := h.faultTolerantClient() + upstream, err := h.FaultTolerantClient() if err != nil { return nil, err } @@ -427,7 +315,7 @@ func (h *FlowAccessAPIForwarder) SendTransaction(context context.Context, req *a func (h *FlowAccessAPIForwarder) GetTransaction(context context.Context, req *access.GetTransactionRequest) (*access.TransactionResponse, error) { // This is a passthrough request - upstream, err := h.faultTolerantClient() + upstream, err := h.FaultTolerantClient() if err != nil { return nil, err } @@ -436,7 +324,7 @@ func (h *FlowAccessAPIForwarder) GetTransaction(context context.Context, req *ac func (h *FlowAccessAPIForwarder) GetTransactionResult(context context.Context, req *access.GetTransactionRequest) (*access.TransactionResultResponse, error) { // This is a passthrough request - upstream, err := h.faultTolerantClient() + upstream, err := h.FaultTolerantClient() if err != nil { return nil, err } @@ -445,7 +333,7 @@ func (h *FlowAccessAPIForwarder) GetTransactionResult(context context.Context, r func (h *FlowAccessAPIForwarder) GetTransactionResultByIndex(context context.Context, req *access.GetTransactionByIndexRequest) (*access.TransactionResultResponse, error) { // This is a passthrough request - upstream, err := h.faultTolerantClient() + upstream, err := h.FaultTolerantClient() if err != nil { return nil, err } @@ -454,7 +342,7 @@ func (h *FlowAccessAPIForwarder) GetTransactionResultByIndex(context context.Con func (h *FlowAccessAPIForwarder) GetTransactionResultsByBlockID(context context.Context, req *access.GetTransactionsByBlockIDRequest) (*access.TransactionResultsResponse, error) { // This is a passthrough request - upstream, err := h.faultTolerantClient() + upstream, err := h.FaultTolerantClient() if err != nil { return nil, err } @@ -462,7 +350,7 @@ func (h *FlowAccessAPIForwarder) GetTransactionResultsByBlockID(context context. 
} func (h *FlowAccessAPIForwarder) GetTransactionsByBlockID(context context.Context, req *access.GetTransactionsByBlockIDRequest) (*access.TransactionsResponse, error) { - upstream, err := h.faultTolerantClient() + upstream, err := h.FaultTolerantClient() if err != nil { return nil, err } @@ -471,7 +359,7 @@ func (h *FlowAccessAPIForwarder) GetTransactionsByBlockID(context context.Contex func (h *FlowAccessAPIForwarder) GetAccount(context context.Context, req *access.GetAccountRequest) (*access.GetAccountResponse, error) { // This is a passthrough request - upstream, err := h.faultTolerantClient() + upstream, err := h.FaultTolerantClient() if err != nil { return nil, err } @@ -480,7 +368,7 @@ func (h *FlowAccessAPIForwarder) GetAccount(context context.Context, req *access func (h *FlowAccessAPIForwarder) GetAccountAtLatestBlock(context context.Context, req *access.GetAccountAtLatestBlockRequest) (*access.AccountResponse, error) { // This is a passthrough request - upstream, err := h.faultTolerantClient() + upstream, err := h.FaultTolerantClient() if err != nil { return nil, err } @@ -489,7 +377,7 @@ func (h *FlowAccessAPIForwarder) GetAccountAtLatestBlock(context context.Context func (h *FlowAccessAPIForwarder) GetAccountAtBlockHeight(context context.Context, req *access.GetAccountAtBlockHeightRequest) (*access.AccountResponse, error) { // This is a passthrough request - upstream, err := h.faultTolerantClient() + upstream, err := h.FaultTolerantClient() if err != nil { return nil, err } @@ -498,7 +386,7 @@ func (h *FlowAccessAPIForwarder) GetAccountAtBlockHeight(context context.Context func (h *FlowAccessAPIForwarder) ExecuteScriptAtLatestBlock(context context.Context, req *access.ExecuteScriptAtLatestBlockRequest) (*access.ExecuteScriptResponse, error) { // This is a passthrough request - upstream, err := h.faultTolerantClient() + upstream, err := h.FaultTolerantClient() if err != nil { return nil, err } @@ -507,7 +395,7 @@ func (h *FlowAccessAPIForwarder) ExecuteScriptAtLatestBlock(context context.Cont func (h *FlowAccessAPIForwarder) ExecuteScriptAtBlockID(context context.Context, req *access.ExecuteScriptAtBlockIDRequest) (*access.ExecuteScriptResponse, error) { // This is a passthrough request - upstream, err := h.faultTolerantClient() + upstream, err := h.FaultTolerantClient() if err != nil { return nil, err } @@ -516,7 +404,7 @@ func (h *FlowAccessAPIForwarder) ExecuteScriptAtBlockID(context context.Context, func (h *FlowAccessAPIForwarder) ExecuteScriptAtBlockHeight(context context.Context, req *access.ExecuteScriptAtBlockHeightRequest) (*access.ExecuteScriptResponse, error) { // This is a passthrough request - upstream, err := h.faultTolerantClient() + upstream, err := h.FaultTolerantClient() if err != nil { return nil, err } @@ -525,7 +413,7 @@ func (h *FlowAccessAPIForwarder) ExecuteScriptAtBlockHeight(context context.Cont func (h *FlowAccessAPIForwarder) GetEventsForHeightRange(context context.Context, req *access.GetEventsForHeightRangeRequest) (*access.EventsResponse, error) { // This is a passthrough request - upstream, err := h.faultTolerantClient() + upstream, err := h.FaultTolerantClient() if err != nil { return nil, err } @@ -534,7 +422,7 @@ func (h *FlowAccessAPIForwarder) GetEventsForHeightRange(context context.Context func (h *FlowAccessAPIForwarder) GetEventsForBlockIDs(context context.Context, req *access.GetEventsForBlockIDsRequest) (*access.EventsResponse, error) { // This is a passthrough request - upstream, err := h.faultTolerantClient() + upstream, err := 
h.FaultTolerantClient() if err != nil { return nil, err } @@ -543,7 +431,7 @@ func (h *FlowAccessAPIForwarder) GetEventsForBlockIDs(context context.Context, r func (h *FlowAccessAPIForwarder) GetNetworkParameters(context context.Context, req *access.GetNetworkParametersRequest) (*access.GetNetworkParametersResponse, error) { // This is a passthrough request - upstream, err := h.faultTolerantClient() + upstream, err := h.FaultTolerantClient() if err != nil { return nil, err } @@ -552,7 +440,7 @@ func (h *FlowAccessAPIForwarder) GetNetworkParameters(context context.Context, r func (h *FlowAccessAPIForwarder) GetLatestProtocolStateSnapshot(context context.Context, req *access.GetLatestProtocolStateSnapshotRequest) (*access.ProtocolStateSnapshotResponse, error) { // This is a passthrough request - upstream, err := h.faultTolerantClient() + upstream, err := h.FaultTolerantClient() if err != nil { return nil, err } @@ -561,9 +449,18 @@ func (h *FlowAccessAPIForwarder) GetLatestProtocolStateSnapshot(context context. func (h *FlowAccessAPIForwarder) GetExecutionResultForBlockID(context context.Context, req *access.GetExecutionResultForBlockIDRequest) (*access.ExecutionResultForBlockIDResponse, error) { // This is a passthrough request - upstream, err := h.faultTolerantClient() + upstream, err := h.FaultTolerantClient() if err != nil { return nil, err } return upstream.GetExecutionResultForBlockID(context, req) } + +func (h *FlowAccessAPIForwarder) GetExecutionResultByID(context context.Context, req *access.GetExecutionResultByIDRequest) (*access.ExecutionResultByIDResponse, error) { + // This is a passthrough request + upstream, err := h.FaultTolerantClient() + if err != nil { + return nil, err + } + return upstream.GetExecutionResultByID(context, req) +} diff --git a/engine/access/apiproxy/access_api_proxy_test.go b/engine/access/apiproxy/access_api_proxy_test.go index 9f5a5aa74b8..d20c5ee705d 100644 --- a/engine/access/apiproxy/access_api_proxy_test.go +++ b/engine/access/apiproxy/access_api_proxy_test.go @@ -11,6 +11,7 @@ import ( "google.golang.org/grpc" grpcinsecure "google.golang.org/grpc/credentials/insecure" + "github.com/onflow/flow-go/engine/common/grpc/forwarder" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/utils/grpcutils" "github.com/onflow/flow-go/utils/unittest" @@ -137,7 +138,8 @@ func TestNewFlowCachedAccessAPIProxy(t *testing.T) { // Prepare a proxy that fails due to the second connection being idle l := flow.IdentityList{{Address: unittest.IPPort("11634")}, {Address: unittest.IPPort("11635")}} c := FlowAccessAPIForwarder{} - err = c.setFlowAccessAPI(l, time.Second) + c.Forwarder, err = forwarder.NewForwarder(l, time.Second, grpcutils.DefaultMaxMsgSize) + if err == nil { t.Fatal(fmt.Errorf("should not start with one connection ready")) } @@ -153,7 +155,7 @@ func TestNewFlowCachedAccessAPIProxy(t *testing.T) { // Prepare a proxy l = flow.IdentityList{{Address: unittest.IPPort("11634")}, {Address: unittest.IPPort("11635")}} c = FlowAccessAPIForwarder{} - err = c.setFlowAccessAPI(l, time.Second) + c.Forwarder, err = forwarder.NewForwarder(l, time.Second, grpcutils.DefaultMaxMsgSize) if err != nil { t.Fatal(err) } diff --git a/engine/access/integration_unsecure_grpc_server_test.go b/engine/access/integration_unsecure_grpc_server_test.go new file mode 100644 index 00000000000..af658b390fd --- /dev/null +++ b/engine/access/integration_unsecure_grpc_server_test.go @@ -0,0 +1,314 @@ +package access + +import ( + "context" + + "io" + "os" + "testing" + "time" + + 
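The change above swaps the forwarder's unexported faultTolerantClient for the exported FaultTolerantClient supplied by the shared engine/common/grpc/forwarder package, and the apiproxy test now builds that forwarder via forwarder.NewForwarder. For reference, a minimal sketch of the passthrough pattern; PingProxy/NewPingProxy are hypothetical names, and FaultTolerantClient is assumed to return a generated AccessAPIClient for a currently healthy upstream:

```go
package example

import (
	"context"
	"time"

	"github.com/onflow/flow-go/engine/common/grpc/forwarder"
	"github.com/onflow/flow-go/model/flow"
	"github.com/onflow/flow-go/utils/grpcutils"

	accessproto "github.com/onflow/flow/protobuf/go/flow/access"
)

// PingProxy is a hypothetical stand-in for a passthrough handler such as
// FlowAccessAPIForwarder; it embeds the shared forwarder.
type PingProxy struct {
	*forwarder.Forwarder
}

// NewPingProxy constructs the shared forwarder once and reuses it for all
// passthrough methods.
func NewPingProxy(upstream flow.IdentityList, timeout time.Duration) (*PingProxy, error) {
	fwd, err := forwarder.NewForwarder(upstream, timeout, grpcutils.DefaultMaxMsgSize)
	if err != nil {
		return nil, err
	}
	return &PingProxy{Forwarder: fwd}, nil
}

// Ping forwards the request to whichever upstream connection is currently live.
func (p *PingProxy) Ping(ctx context.Context, req *accessproto.PingRequest) (*accessproto.PingResponse, error) {
	// Assumption: FaultTolerantClient returns an accessproto.AccessAPIClient.
	client, err := p.FaultTolerantClient()
	if err != nil {
		return nil, err
	}
	return client.Ping(ctx, req)
}
```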
"github.com/rs/zerolog" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/suite" + + "google.golang.org/grpc" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/credentials/insecure" + + "github.com/onflow/flow-go/engine" + accessmock "github.com/onflow/flow-go/engine/access/mock" + "github.com/onflow/flow-go/engine/access/rpc" + "github.com/onflow/flow-go/engine/access/rpc/backend" + "github.com/onflow/flow-go/engine/access/state_stream" + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/module/blobs" + "github.com/onflow/flow-go/module/executiondatasync/execution_data" + "github.com/onflow/flow-go/module/executiondatasync/execution_data/cache" + "github.com/onflow/flow-go/module/grpcserver" + "github.com/onflow/flow-go/module/irrecoverable" + "github.com/onflow/flow-go/module/mempool/herocache" + "github.com/onflow/flow-go/module/metrics" + module "github.com/onflow/flow-go/module/mock" + "github.com/onflow/flow-go/network" + protocol "github.com/onflow/flow-go/state/protocol/mock" + "github.com/onflow/flow-go/storage" + storagemock "github.com/onflow/flow-go/storage/mock" + "github.com/onflow/flow-go/utils/grpcutils" + "github.com/onflow/flow-go/utils/unittest" + + accessproto "github.com/onflow/flow/protobuf/go/flow/access" + executiondataproto "github.com/onflow/flow/protobuf/go/flow/executiondata" +) + +// SameGRPCPortTestSuite verifies both AccessAPI and ExecutionDataAPI client continue to work when configured +// on the same port +type SameGRPCPortTestSuite struct { + suite.Suite + state *protocol.State + snapshot *protocol.Snapshot + epochQuery *protocol.EpochQuery + log zerolog.Logger + net *network.Network + request *module.Requester + collClient *accessmock.AccessAPIClient + execClient *accessmock.ExecutionAPIClient + me *module.Local + chainID flow.ChainID + metrics *metrics.NoopCollector + rpcEng *rpc.Engine + stateStreamEng *state_stream.Engine + + // storage + blocks *storagemock.Blocks + headers *storagemock.Headers + collections *storagemock.Collections + transactions *storagemock.Transactions + receipts *storagemock.ExecutionReceipts + seals *storagemock.Seals + results *storagemock.ExecutionResults + + ctx irrecoverable.SignalerContext + cancel context.CancelFunc + + // grpc servers + secureGrpcServer *grpcserver.GrpcServer + unsecureGrpcServer *grpcserver.GrpcServer + + bs blobs.Blobstore + eds execution_data.ExecutionDataStore + broadcaster *engine.Broadcaster + execDataCache *cache.ExecutionDataCache + execDataHeroCache *herocache.BlockExecutionData + + blockMap map[uint64]*flow.Block +} + +func (suite *SameGRPCPortTestSuite) SetupTest() { + suite.log = zerolog.New(os.Stdout) + suite.net = new(network.Network) + suite.state = new(protocol.State) + suite.snapshot = new(protocol.Snapshot) + + suite.epochQuery = new(protocol.EpochQuery) + suite.state.On("Sealed").Return(suite.snapshot, nil).Maybe() + suite.state.On("Final").Return(suite.snapshot, nil).Maybe() + suite.snapshot.On("Epochs").Return(suite.epochQuery).Maybe() + suite.blocks = new(storagemock.Blocks) + suite.headers = new(storagemock.Headers) + suite.transactions = new(storagemock.Transactions) + suite.collections = new(storagemock.Collections) + suite.receipts = new(storagemock.ExecutionReceipts) + suite.results = new(storagemock.ExecutionResults) + suite.seals = new(storagemock.Seals) + + suite.collClient = new(accessmock.AccessAPIClient) + suite.execClient = new(accessmock.ExecutionAPIClient) + + suite.request = 
new(module.Requester) + suite.request.On("EntityByID", mock.Anything, mock.Anything) + + suite.me = new(module.Local) + suite.eds = execution_data.NewExecutionDataStore(suite.bs, execution_data.DefaultSerializer) + + suite.broadcaster = engine.NewBroadcaster() + + suite.execDataHeroCache = herocache.NewBlockExecutionData(state_stream.DefaultCacheSize, suite.log, metrics.NewNoopCollector()) + suite.execDataCache = cache.NewExecutionDataCache(suite.eds, suite.headers, suite.seals, suite.results, suite.execDataHeroCache) + + accessIdentity := unittest.IdentityFixture(unittest.WithRole(flow.RoleAccess)) + suite.me. + On("NodeID"). + Return(accessIdentity.NodeID) + + suite.chainID = flow.Testnet + suite.metrics = metrics.NewNoopCollector() + + config := rpc.Config{ + UnsecureGRPCListenAddr: unittest.DefaultAddress, + SecureGRPCListenAddr: unittest.DefaultAddress, + HTTPListenAddr: unittest.DefaultAddress, + } + + blockCount := 5 + suite.blockMap = make(map[uint64]*flow.Block, blockCount) + // generate blockCount consecutive blocks with associated seal, result and execution data + rootBlock := unittest.BlockFixture() + parent := rootBlock.Header + suite.blockMap[rootBlock.Header.Height] = &rootBlock + + for i := 0; i < blockCount; i++ { + block := unittest.BlockWithParentFixture(parent) + suite.blockMap[block.Header.Height] = block + } + + // generate a server certificate that will be served by the GRPC server + networkingKey := unittest.NetworkingPrivKeyFixture() + x509Certificate, err := grpcutils.X509Certificate(networkingKey) + assert.NoError(suite.T(), err) + tlsConfig := grpcutils.DefaultServerTLSConfig(x509Certificate) + // set the transport credentials for the server to use + config.TransportCredentials = credentials.NewTLS(tlsConfig) + + suite.secureGrpcServer = grpcserver.NewGrpcServerBuilder(suite.log, + config.SecureGRPCListenAddr, + grpcutils.DefaultMaxMsgSize, + false, + nil, + nil, + grpcserver.WithTransportCredentials(config.TransportCredentials)).Build() + + suite.unsecureGrpcServer = grpcserver.NewGrpcServerBuilder(suite.log, + config.UnsecureGRPCListenAddr, + grpcutils.DefaultMaxMsgSize, + false, + nil, + nil).Build() + + block := unittest.BlockHeaderFixture() + suite.snapshot.On("Head").Return(block, nil) + + backend := backend.New( + suite.state, + suite.collClient, + nil, + suite.blocks, + suite.headers, + suite.collections, + suite.transactions, + nil, + nil, + suite.chainID, + suite.metrics, + nil, + false, + 0, + nil, + nil, + suite.log, + 0, + nil) + + // create rpc engine builder + rpcEngBuilder, err := rpc.NewBuilder( + suite.log, + suite.state, + config, + suite.chainID, + suite.metrics, + false, + suite.me, + backend, + backend, + suite.secureGrpcServer, + suite.unsecureGrpcServer, + ) + assert.NoError(suite.T(), err) + suite.rpcEng, err = rpcEngBuilder.WithLegacy().Build() + assert.NoError(suite.T(), err) + suite.ctx, suite.cancel = irrecoverable.NewMockSignalerContextWithCancel(suite.T(), context.Background()) + + suite.headers.On("BlockIDByHeight", mock.AnythingOfType("uint64")).Return( + func(height uint64) flow.Identifier { + if block, ok := suite.blockMap[height]; ok { + return block.Header.ID() + } + return flow.ZeroID + }, + func(height uint64) error { + if _, ok := suite.blockMap[height]; ok { + return nil + } + return storage.ErrNotFound + }, + ).Maybe() + + conf := state_stream.Config{ + ClientSendTimeout: state_stream.DefaultSendTimeout, + ClientSendBufferSize: state_stream.DefaultSendBufferSize, + } + + // create state stream engine + 
suite.stateStreamEng, err = state_stream.NewEng( + suite.log, + conf, + nil, + suite.execDataCache, + suite.state, + suite.headers, + suite.seals, + suite.results, + suite.chainID, + rootBlock.Header.Height, + rootBlock.Header.Height, + suite.unsecureGrpcServer, + ) + assert.NoError(suite.T(), err) + + suite.rpcEng.Start(suite.ctx) + suite.stateStreamEng.Start(suite.ctx) + + suite.secureGrpcServer.Start(suite.ctx) + suite.unsecureGrpcServer.Start(suite.ctx) + + // wait for the servers to startup + unittest.AssertClosesBefore(suite.T(), suite.secureGrpcServer.Ready(), 2*time.Second) + unittest.AssertClosesBefore(suite.T(), suite.unsecureGrpcServer.Ready(), 2*time.Second) + + // wait for the rpc engine to startup + unittest.AssertClosesBefore(suite.T(), suite.rpcEng.Ready(), 2*time.Second) + // wait for the state stream engine to startup + unittest.AssertClosesBefore(suite.T(), suite.stateStreamEng.Ready(), 2*time.Second) +} + +// TestEnginesOnTheSameGrpcPort verifies if both AccessAPI and ExecutionDataAPI client successfully connect and continue +// to work when configured on the same port +func (suite *SameGRPCPortTestSuite) TestEnginesOnTheSameGrpcPort() { + ctx := context.Background() + + conn, err := grpc.Dial( + suite.unsecureGrpcServer.GRPCAddress().String(), + grpc.WithTransportCredentials(insecure.NewCredentials())) + assert.NoError(suite.T(), err) + closer := io.Closer(conn) + + suite.Run("happy path - grpc access api client can connect successfully", func() { + req := &accessproto.GetNetworkParametersRequest{} + + // expect 2 upstream calls + suite.execClient.On("GetNetworkParameters", mock.Anything, mock.Anything).Return(nil, nil).Twice() + suite.collClient.On("GetNetworkParameters", mock.Anything, mock.Anything).Return(nil, nil).Twice() + + client := suite.unsecureAccessAPIClient(conn) + + _, err := client.GetNetworkParameters(ctx, req) + assert.NoError(suite.T(), err, "failed to get network") + }) + + suite.Run("happy path - grpc execution data api client can connect successfully", func() { + req := &executiondataproto.SubscribeEventsRequest{} + + client := suite.unsecureExecutionDataAPIClient(conn) + + _, err := client.SubscribeEvents(ctx, req) + assert.NoError(suite.T(), err, "failed to subscribe events") + }) + defer closer.Close() +} + +func TestSameGRPCTestSuite(t *testing.T) { + suite.Run(t, new(SameGRPCPortTestSuite)) +} + +// unsecureAccessAPIClient creates an unsecure grpc AccessAPI client +func (suite *SameGRPCPortTestSuite) unsecureAccessAPIClient(conn *grpc.ClientConn) accessproto.AccessAPIClient { + client := accessproto.NewAccessAPIClient(conn) + return client +} + +// unsecureExecutionDataAPIClient creates an unsecure ExecutionDataAPI client +func (suite *SameGRPCPortTestSuite) unsecureExecutionDataAPIClient(conn *grpc.ClientConn) executiondataproto.ExecutionDataAPIClient { + client := executiondataproto.NewExecutionDataAPIClient(conn) + return client +} diff --git a/engine/access/mock/access_api_client.go b/engine/access/mock/access_api_client.go index 234e4ffcdee..4e2b1d065c7 100644 --- a/engine/access/mock/access_api_client.go +++ b/engine/access/mock/access_api_client.go @@ -446,6 +446,39 @@ func (_m *AccessAPIClient) GetEventsForHeightRange(ctx context.Context, in *acce return r0, r1 } +// GetExecutionResultByID provides a mock function with given fields: ctx, in, opts +func (_m *AccessAPIClient) GetExecutionResultByID(ctx context.Context, in *access.GetExecutionResultByIDRequest, opts ...grpc.CallOption) (*access.ExecutionResultByIDResponse, error) { + _va 
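Because SameGRPCPortTestSuite above registers both the Access API engine and the ExecutionData (state stream) engine on the same unsecure gRPC server, a client only needs a single connection. A minimal client-side sketch, assuming the node exposes that shared unsecure port at the placeholder address "localhost:9000":

```go
package main

import (
	"context"
	"log"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"

	accessproto "github.com/onflow/flow/protobuf/go/flow/access"
	executiondataproto "github.com/onflow/flow/protobuf/go/flow/executiondata"
)

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	// Single connection to the shared unsecure gRPC port.
	conn, err := grpc.Dial("localhost:9000", grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	// Both generated clients share the same underlying connection.
	accessClient := accessproto.NewAccessAPIClient(conn)
	execDataClient := executiondataproto.NewExecutionDataAPIClient(conn)

	if _, err := accessClient.GetNetworkParameters(ctx, &accessproto.GetNetworkParametersRequest{}); err != nil {
		log.Println("access api:", err)
	}
	if _, err := execDataClient.SubscribeEvents(ctx, &executiondataproto.SubscribeEventsRequest{}); err != nil {
		log.Println("execution data api:", err)
	}
}
```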
:= make([]interface{}, len(opts)) + for _i := range opts { + _va[_i] = opts[_i] + } + var _ca []interface{} + _ca = append(_ca, ctx, in) + _ca = append(_ca, _va...) + ret := _m.Called(_ca...) + + var r0 *access.ExecutionResultByIDResponse + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, *access.GetExecutionResultByIDRequest, ...grpc.CallOption) (*access.ExecutionResultByIDResponse, error)); ok { + return rf(ctx, in, opts...) + } + if rf, ok := ret.Get(0).(func(context.Context, *access.GetExecutionResultByIDRequest, ...grpc.CallOption) *access.ExecutionResultByIDResponse); ok { + r0 = rf(ctx, in, opts...) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*access.ExecutionResultByIDResponse) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, *access.GetExecutionResultByIDRequest, ...grpc.CallOption) error); ok { + r1 = rf(ctx, in, opts...) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + // GetExecutionResultForBlockID provides a mock function with given fields: ctx, in, opts func (_m *AccessAPIClient) GetExecutionResultForBlockID(ctx context.Context, in *access.GetExecutionResultForBlockIDRequest, opts ...grpc.CallOption) (*access.ExecutionResultForBlockIDResponse, error) { _va := make([]interface{}, len(opts)) diff --git a/engine/access/mock/access_api_server.go b/engine/access/mock/access_api_server.go index 5515698eacd..1a2c3772e44 100644 --- a/engine/access/mock/access_api_server.go +++ b/engine/access/mock/access_api_server.go @@ -353,6 +353,32 @@ func (_m *AccessAPIServer) GetEventsForHeightRange(_a0 context.Context, _a1 *acc return r0, r1 } +// GetExecutionResultByID provides a mock function with given fields: _a0, _a1 +func (_m *AccessAPIServer) GetExecutionResultByID(_a0 context.Context, _a1 *access.GetExecutionResultByIDRequest) (*access.ExecutionResultByIDResponse, error) { + ret := _m.Called(_a0, _a1) + + var r0 *access.ExecutionResultByIDResponse + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, *access.GetExecutionResultByIDRequest) (*access.ExecutionResultByIDResponse, error)); ok { + return rf(_a0, _a1) + } + if rf, ok := ret.Get(0).(func(context.Context, *access.GetExecutionResultByIDRequest) *access.ExecutionResultByIDResponse); ok { + r0 = rf(_a0, _a1) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*access.ExecutionResultByIDResponse) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, *access.GetExecutionResultByIDRequest) error); ok { + r1 = rf(_a0, _a1) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + // GetExecutionResultForBlockID provides a mock function with given fields: _a0, _a1 func (_m *AccessAPIServer) GetExecutionResultForBlockID(_a0 context.Context, _a1 *access.GetExecutionResultForBlockIDRequest) (*access.ExecutionResultForBlockIDResponse, error) { ret := _m.Called(_a0, _a1) diff --git a/engine/access/rest/README.md b/engine/access/rest/README.md index fd7b970493d..d94af68c238 100644 --- a/engine/access/rest/README.md +++ b/engine/access/rest/README.md @@ -6,10 +6,14 @@ available on our [docs site](https://docs.onflow.org/http-api/). ## Packages -- `rest`: The HTTP handlers for all the request, server generator and the select filter. +- `rest`: The HTTP handlers for the server generator and the select filter, implementation of handling local requests. - `middleware`: The common [middlewares](https://github.com/gorilla/mux#middleware) that all request pass through. - `models`: The generated models using openapi generators and implementation of model builders. 
- `request`: Implementation of API requests that provide validation for input data and build request models. +- `routes`: The common HTTP handlers for all the requests, along with tests for each request handler. +- `apiproxy`: Implementation of the proxy backend handler, which serves what it can from the local backend and forwards the methods that +can't be handled locally to an upstream access node over the gRPC API. This is used by observers that don't have all the data in their +local db. ## Request lifecycle @@ -37,7 +41,7 @@ make generate-openapi ### Adding New API Endpoints -A new endpoint can be added by first implementing a new request handler, a request handle is a function in the rest +A new endpoint can be added by first implementing a new request handler; a request handler is a function in the routes package that complies with function interfaced defined as: ```go @@ -48,6 +52,7 @@ generator models.LinkGenerator, ) (interface{}, error) ``` -That handler implementation needs to be added to the `router.go` with corresponding API endpoint and method. Adding a -new API endpoint also requires for a new request builder to be implemented and added in request package. Make sure to -not forget about adding tests for each of the API handler. +That handler implementation needs to be added to `router.go` with the corresponding API endpoint and method. If the data +is not available on observers, an override of the method is needed in the backend handler `RestProxyHandler` for request +forwarding. Adding a new API endpoint also requires a new request builder to be implemented and added in the request +package. Make sure not to forget to add tests for each API handler. diff --git a/engine/access/rest/apiproxy/rest_proxy_handler.go b/engine/access/rest/apiproxy/rest_proxy_handler.go new file mode 100644 index 00000000000..01e7b56724d --- /dev/null +++ b/engine/access/rest/apiproxy/rest_proxy_handler.go @@ -0,0 +1,344 @@ +package apiproxy + +import ( + "context" + "fmt" + "time" + + "google.golang.org/grpc/status" + + "github.com/rs/zerolog" + + "github.com/onflow/flow-go/access" + "github.com/onflow/flow-go/engine/common/grpc/forwarder" + "github.com/onflow/flow-go/engine/common/rpc/convert" + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/module/metrics" + + accessproto "github.com/onflow/flow/protobuf/go/flow/access" +) + +// RestProxyHandler is a structure that represents the proxy algorithm for observer node. +// It includes the local backend and forwards the methods which can't be handled locally to an upstream using gRPC API. +type RestProxyHandler struct { + access.API + *forwarder.Forwarder + Logger zerolog.Logger + Metrics metrics.ObserverMetrics + Chain flow.Chain +} + +// NewRestProxyHandler returns a new rest proxy handler for observer node. +func NewRestProxyHandler( + api access.API, + identities flow.IdentityList, + timeout time.Duration, + maxMsgSize uint, + log zerolog.Logger, + metrics metrics.ObserverMetrics, + chain flow.Chain, +) (*RestProxyHandler, error) { + + forwarder, err := forwarder.NewForwarder( + identities, + timeout, + maxMsgSize) + if err != nil { + return nil, fmt.Errorf("could not create REST forwarder: %w", err) + } + + restProxyHandler := &RestProxyHandler{ + Logger: log, + Metrics: metrics, + Chain: chain, + } + + restProxyHandler.API = api + restProxyHandler.Forwarder = forwarder + + return restProxyHandler, nil +} + +func (r *RestProxyHandler) log(handler, rpc string, err error) { + code := status.Code(err) + r.Metrics.RecordRPC(handler, rpc, code) + + logger := r.Logger.With().

+ Str("handler", handler). + Str("rest_method", rpc). + Str("rest_code", code.String()). + Logger() + + if err != nil { + logger.Error().Err(err).Msg("request failed") + return + } + + logger.Info().Msg("request succeeded") +} + +// GetCollectionByID returns a collection by ID. +func (r *RestProxyHandler) GetCollectionByID(ctx context.Context, id flow.Identifier) (*flow.LightCollection, error) { + upstream, err := r.FaultTolerantClient() + if err != nil { + return nil, err + } + + getCollectionByIDRequest := &accessproto.GetCollectionByIDRequest{ + Id: id[:], + } + + collectionResponse, err := upstream.GetCollectionByID(ctx, getCollectionByIDRequest) + r.log("upstream", "GetCollectionByID", err) + + if err != nil { + return nil, err + } + + transactions, err := convert.MessageToLightCollection(collectionResponse.Collection) + if err != nil { + return nil, err + } + + return transactions, nil +} + +// SendTransaction sends already created transaction. +func (r *RestProxyHandler) SendTransaction(ctx context.Context, tx *flow.TransactionBody) error { + upstream, err := r.FaultTolerantClient() + if err != nil { + return err + } + + transaction := convert.TransactionToMessage(*tx) + sendTransactionRequest := &accessproto.SendTransactionRequest{ + Transaction: transaction, + } + + _, err = upstream.SendTransaction(ctx, sendTransactionRequest) + r.log("upstream", "SendTransaction", err) + + return err +} + +// GetTransaction returns transaction by ID. +func (r *RestProxyHandler) GetTransaction(ctx context.Context, id flow.Identifier) (*flow.TransactionBody, error) { + upstream, err := r.FaultTolerantClient() + if err != nil { + return nil, err + } + + getTransactionRequest := &accessproto.GetTransactionRequest{ + Id: id[:], + } + transactionResponse, err := upstream.GetTransaction(ctx, getTransactionRequest) + r.log("upstream", "GetTransaction", err) + + if err != nil { + return nil, err + } + + transactionBody, err := convert.MessageToTransaction(transactionResponse.Transaction, r.Chain) + if err != nil { + return nil, err + } + + return &transactionBody, nil +} + +// GetTransactionResult returns transaction result by the transaction ID. +func (r *RestProxyHandler) GetTransactionResult(ctx context.Context, id flow.Identifier, blockID flow.Identifier, collectionID flow.Identifier) (*access.TransactionResult, error) { + upstream, err := r.FaultTolerantClient() + if err != nil { + + return nil, err + } + + getTransactionResultRequest := &accessproto.GetTransactionRequest{ + Id: id[:], + BlockId: blockID[:], + CollectionId: collectionID[:], + } + + transactionResultResponse, err := upstream.GetTransactionResult(ctx, getTransactionResultRequest) + r.log("upstream", "GetTransactionResult", err) + + if err != nil { + return nil, err + } + + return access.MessageToTransactionResult(transactionResultResponse), nil +} + +// GetAccountAtBlockHeight returns account by account address and block height. 
+func (r *RestProxyHandler) GetAccountAtBlockHeight(ctx context.Context, address flow.Address, height uint64) (*flow.Account, error) { + upstream, err := r.FaultTolerantClient() + if err != nil { + return nil, err + } + + getAccountAtBlockHeightRequest := &accessproto.GetAccountAtBlockHeightRequest{ + Address: address.Bytes(), + BlockHeight: height, + } + + accountResponse, err := upstream.GetAccountAtBlockHeight(ctx, getAccountAtBlockHeightRequest) + r.log("upstream", "GetAccountAtBlockHeight", err) + + if err != nil { + return nil, err + } + + return convert.MessageToAccount(accountResponse.Account) +} + +// ExecuteScriptAtLatestBlock executes script at latest block. +func (r *RestProxyHandler) ExecuteScriptAtLatestBlock(ctx context.Context, script []byte, arguments [][]byte) ([]byte, error) { + upstream, err := r.FaultTolerantClient() + if err != nil { + return nil, err + } + + executeScriptAtLatestBlockRequest := &accessproto.ExecuteScriptAtLatestBlockRequest{ + Script: script, + Arguments: arguments, + } + executeScriptAtLatestBlockResponse, err := upstream.ExecuteScriptAtLatestBlock(ctx, executeScriptAtLatestBlockRequest) + r.log("upstream", "ExecuteScriptAtLatestBlock", err) + + if err != nil { + return nil, err + } + + return executeScriptAtLatestBlockResponse.Value, nil +} + +// ExecuteScriptAtBlockHeight executes script at the given block height . +func (r *RestProxyHandler) ExecuteScriptAtBlockHeight(ctx context.Context, blockHeight uint64, script []byte, arguments [][]byte) ([]byte, error) { + upstream, err := r.FaultTolerantClient() + if err != nil { + return nil, err + } + + executeScriptAtBlockHeightRequest := &accessproto.ExecuteScriptAtBlockHeightRequest{ + BlockHeight: blockHeight, + Script: script, + Arguments: arguments, + } + executeScriptAtBlockHeightResponse, err := upstream.ExecuteScriptAtBlockHeight(ctx, executeScriptAtBlockHeightRequest) + r.log("upstream", "ExecuteScriptAtBlockHeight", err) + + if err != nil { + return nil, err + } + + return executeScriptAtBlockHeightResponse.Value, nil +} + +// ExecuteScriptAtBlockID executes script at the given block id . +func (r *RestProxyHandler) ExecuteScriptAtBlockID(ctx context.Context, blockID flow.Identifier, script []byte, arguments [][]byte) ([]byte, error) { + upstream, err := r.FaultTolerantClient() + if err != nil { + return nil, err + } + + executeScriptAtBlockIDRequest := &accessproto.ExecuteScriptAtBlockIDRequest{ + BlockId: blockID[:], + Script: script, + Arguments: arguments, + } + executeScriptAtBlockIDResponse, err := upstream.ExecuteScriptAtBlockID(ctx, executeScriptAtBlockIDRequest) + r.log("upstream", "ExecuteScriptAtBlockID", err) + + if err != nil { + return nil, err + } + + return executeScriptAtBlockIDResponse.Value, nil +} + +// GetEventsForHeightRange returns events by their name in the specified blocks heights. 
+func (r *RestProxyHandler) GetEventsForHeightRange(ctx context.Context, eventType string, startHeight, endHeight uint64) ([]flow.BlockEvents, error) { + upstream, err := r.FaultTolerantClient() + if err != nil { + return nil, err + } + + getEventsForHeightRangeRequest := &accessproto.GetEventsForHeightRangeRequest{ + Type: eventType, + StartHeight: startHeight, + EndHeight: endHeight, + } + eventsResponse, err := upstream.GetEventsForHeightRange(ctx, getEventsForHeightRangeRequest) + r.log("upstream", "GetEventsForHeightRange", err) + + if err != nil { + return nil, err + } + + return convert.MessagesToBlockEvents(eventsResponse.Results), nil +} + +// GetEventsForBlockIDs returns events by their name in the specified block IDs. +func (r *RestProxyHandler) GetEventsForBlockIDs(ctx context.Context, eventType string, blockIDs []flow.Identifier) ([]flow.BlockEvents, error) { + upstream, err := r.FaultTolerantClient() + if err != nil { + return nil, err + } + + blockIds := convert.IdentifiersToMessages(blockIDs) + + getEventsForBlockIDsRequest := &accessproto.GetEventsForBlockIDsRequest{ + Type: eventType, + BlockIds: blockIds, + } + eventsResponse, err := upstream.GetEventsForBlockIDs(ctx, getEventsForBlockIDsRequest) + r.log("upstream", "GetEventsForBlockIDs", err) + + if err != nil { + return nil, err + } + + return convert.MessagesToBlockEvents(eventsResponse.Results), nil +} + +// GetExecutionResultForBlockID gets execution result by provided block ID. +func (r *RestProxyHandler) GetExecutionResultForBlockID(ctx context.Context, blockID flow.Identifier) (*flow.ExecutionResult, error) { + upstream, err := r.FaultTolerantClient() + if err != nil { + return nil, err + } + + getExecutionResultForBlockID := &accessproto.GetExecutionResultForBlockIDRequest{ + BlockId: blockID[:], + } + executionResultForBlockIDResponse, err := upstream.GetExecutionResultForBlockID(ctx, getExecutionResultForBlockID) + r.log("upstream", "GetExecutionResultForBlockID", err) + + if err != nil { + return nil, err + } + + return convert.MessageToExecutionResult(executionResultForBlockIDResponse.ExecutionResult) +} + +// GetExecutionResultByID gets execution result by its ID. 
+func (r *RestProxyHandler) GetExecutionResultByID(ctx context.Context, id flow.Identifier) (*flow.ExecutionResult, error) { + upstream, err := r.FaultTolerantClient() + if err != nil { + return nil, err + } + + executionResultByIDRequest := &accessproto.GetExecutionResultByIDRequest{ + Id: id[:], + } + + executionResultByIDResponse, err := upstream.GetExecutionResultByID(ctx, executionResultByIDRequest) + r.log("upstream", "GetExecutionResultByID", err) + + if err != nil { + return nil, err + } + + return convert.MessageToExecutionResult(executionResultByIDResponse.ExecutionResult) +} diff --git a/engine/access/rest/middleware/metrics.go b/engine/access/rest/middleware/metrics.go index 25f82bf4277..54dd5dd2c6a 100644 --- a/engine/access/rest/middleware/metrics.go +++ b/engine/access/rest/middleware/metrics.go @@ -11,19 +11,12 @@ import ( "github.com/onflow/flow-go/module" ) -func MetricsMiddleware(restCollector module.RestMetrics, urlToRoute func(string) (string, error)) mux.MiddlewareFunc { +func MetricsMiddleware(restCollector module.RestMetrics) mux.MiddlewareFunc { metricsMiddleware := middleware.New(middleware.Config{Recorder: restCollector}) return func(next http.Handler) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { - //urlToRoute transforms specific URL to generic url pattern - routeName, err := urlToRoute(req.URL.Path) - if err != nil { - // In case of an error, an empty route name filled with "unknown" - routeName = "unknown" - } - // This is a custom metric being called on every http request - restCollector.AddTotalRequests(req.Context(), req.Method, routeName) + restCollector.AddTotalRequests(req.Context(), req.Method, req.URL.Path) // Modify the writer respWriter := &responseWriter{w, http.StatusOK} diff --git a/engine/access/rest/error.go b/engine/access/rest/models/error.go similarity index 98% rename from engine/access/rest/error.go rename to engine/access/rest/models/error.go index 7403510ba55..2247b38743b 100644 --- a/engine/access/rest/error.go +++ b/engine/access/rest/models/error.go @@ -1,4 +1,4 @@ -package rest +package models import "net/http" diff --git a/engine/access/rest/accounts.go b/engine/access/rest/routes/accounts.go similarity index 94% rename from engine/access/rest/accounts.go rename to engine/access/rest/routes/accounts.go index 36371bf6c57..972c2ba68ac 100644 --- a/engine/access/rest/accounts.go +++ b/engine/access/rest/routes/accounts.go @@ -1,4 +1,4 @@ -package rest +package routes import ( "github.com/onflow/flow-go/access" @@ -10,7 +10,7 @@ import ( func GetAccount(r *request.Request, backend access.API, link models.LinkGenerator) (interface{}, error) { req, err := r.GetAccountRequest() if err != nil { - return nil, NewBadRequestError(err) + return nil, models.NewBadRequestError(err) } // in case we receive special height values 'final' and 'sealed', fetch that height and overwrite request with it diff --git a/engine/access/rest/accounts_test.go b/engine/access/rest/routes/accounts_test.go similarity index 93% rename from engine/access/rest/accounts_test.go rename to engine/access/rest/routes/accounts_test.go index 61982ff5f9c..b8bebea8e85 100644 --- a/engine/access/rest/accounts_test.go +++ b/engine/access/rest/routes/accounts_test.go @@ -1,4 +1,4 @@ -package rest +package routes import ( "fmt" @@ -14,6 +14,7 @@ import ( "github.com/onflow/flow-go/access/mock" "github.com/onflow/flow-go/engine/access/rest/middleware" "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/utils/unittest" ) @@ 
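To illustrate how the pieces of this diff fit together on an observer, here is a minimal sketch that combines NewRestProxyHandler above with rest.NewServer (shown further down). This is not the node builder's actual wiring; the listen addresses, timeout, Testnet chain, and noop metrics collector are placeholders, and the noop collector is assumed to satisfy both ObserverMetrics and RestMetrics:

```go
package main

import (
	"time"

	"github.com/rs/zerolog"

	"github.com/onflow/flow-go/access"
	"github.com/onflow/flow-go/engine/access/rest"
	restproxy "github.com/onflow/flow-go/engine/access/rest/apiproxy"
	"github.com/onflow/flow-go/model/flow"
	"github.com/onflow/flow-go/module/metrics"
	"github.com/onflow/flow-go/utils/grpcutils"
)

func buildObserverRestServer(localAPI access.API, upstream flow.IdentityList, log zerolog.Logger) error {
	// Local requests are answered by localAPI; every method RestProxyHandler
	// overrides is forwarded to the upstream access nodes over gRPC.
	proxy, err := restproxy.NewRestProxyHandler(
		localAPI,
		upstream,
		30*time.Second,               // placeholder upstream timeout
		grpcutils.DefaultMaxMsgSize,
		log,
		metrics.NewNoopCollector(),   // assumption: noop collector implements ObserverMetrics
		flow.Testnet.Chain(),         // placeholder chain
	)
	if err != nil {
		return err
	}

	// The REST server builds its router from the routes package internally.
	srv, err := rest.NewServer(proxy, "0.0.0.0:8070", log, flow.Testnet.Chain(), metrics.NewNoopCollector())
	if err != nil {
		return err
	}
	return srv.ListenAndServe()
}
```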
-33,7 +34,15 @@ func accountURL(t *testing.T, address string, height string) string { return u.String() } -func TestGetAccount(t *testing.T) { +// TestAccessGetAccount tests local getAccount request. +// +// Runs the following tests: +// 1. Get account by address at latest sealed block. +// 2. Get account by address at latest finalized block. +// 3. Get account by address at height. +// 4. Get account by address at height condensed. +// 5. Get invalid account. +func TestAccessGetAccount(t *testing.T) { backend := &mock.API{} t.Run("get by address at latest sealed block", func(t *testing.T) { @@ -165,6 +174,7 @@ func getAccountRequest(t *testing.T, account *flow.Account, height string, expan q.Add(middleware.ExpandQueryParam, fieldParam) req.URL.RawQuery = q.Encode() } + require.NoError(t, err) return req } diff --git a/engine/access/rest/blocks.go b/engine/access/rest/routes/blocks.go similarity index 91% rename from engine/access/rest/blocks.go rename to engine/access/rest/routes/blocks.go index e729f67a9bd..c26f14dd8bf 100644 --- a/engine/access/rest/blocks.go +++ b/engine/access/rest/routes/blocks.go @@ -1,4 +1,4 @@ -package rest +package routes import ( "context" @@ -18,10 +18,11 @@ import ( func GetBlocksByIDs(r *request.Request, backend access.API, link models.LinkGenerator) (interface{}, error) { req, err := r.GetBlockByIDsRequest() if err != nil { - return nil, NewBadRequestError(err) + return nil, models.NewBadRequestError(err) } blocks := make([]*models.Block, len(req.IDs)) + for i, id := range req.IDs { block, err := getBlock(forID(&id), r, backend, link) if err != nil { @@ -33,10 +34,11 @@ func GetBlocksByIDs(r *request.Request, backend access.API, link models.LinkGene return blocks, nil } +// GetBlocksByHeight gets blocks by height. func GetBlocksByHeight(r *request.Request, backend access.API, link models.LinkGenerator) (interface{}, error) { req, err := r.GetBlockRequest() if err != nil { - return nil, NewBadRequestError(err) + return nil, models.NewBadRequestError(err) } if req.FinalHeight || req.SealedHeight { @@ -72,7 +74,7 @@ func GetBlocksByHeight(r *request.Request, backend access.API, link models.LinkG req.EndHeight = latest.Header.Height // overwrite special value height with fetched if req.StartHeight > req.EndHeight { - return nil, NewBadRequestError(fmt.Errorf("start height must be less than or equal to end height")) + return nil, models.NewBadRequestError(fmt.Errorf("start height must be less than or equal to end height")) } } @@ -93,7 +95,7 @@ func GetBlocksByHeight(r *request.Request, backend access.API, link models.LinkG func GetBlockPayloadByID(r *request.Request, backend access.API, _ models.LinkGenerator) (interface{}, error) { req, err := r.GetBlockPayloadRequest() if err != nil { - return nil, NewBadRequestError(err) + return nil, models.NewBadRequestError(err) } blkProvider := NewBlockProvider(backend, forID(&req.ID)) @@ -194,7 +196,7 @@ func (blkProvider *blockProvider) getBlock(ctx context.Context) (*flow.Block, fl if blkProvider.id != nil { blk, _, err := blkProvider.backend.GetBlockByID(ctx, *blkProvider.id) if err != nil { // unfortunately backend returns internal error status if not found - return nil, flow.BlockStatusUnknown, NewNotFoundError( + return nil, flow.BlockStatusUnknown, models.NewNotFoundError( fmt.Sprintf("error looking up block with ID %s", blkProvider.id.String()), err, ) } @@ -205,14 +207,14 @@ func (blkProvider *blockProvider) getBlock(ctx context.Context) (*flow.Block, fl blk, status, err := 
blkProvider.backend.GetLatestBlock(ctx, blkProvider.sealed) if err != nil { // cannot be a 'not found' error since final and sealed block should always be found - return nil, flow.BlockStatusUnknown, NewRestError(http.StatusInternalServerError, "block lookup failed", err) + return nil, flow.BlockStatusUnknown, models.NewRestError(http.StatusInternalServerError, "block lookup failed", err) } return blk, status, nil } blk, status, err := blkProvider.backend.GetBlockByHeight(ctx, blkProvider.height) if err != nil { // unfortunately backend returns internal error status if not found - return nil, flow.BlockStatusUnknown, NewNotFoundError( + return nil, flow.BlockStatusUnknown, models.NewNotFoundError( fmt.Sprintf("error looking up block at height %d", blkProvider.height), err, ) } diff --git a/engine/access/rest/blocks_test.go b/engine/access/rest/routes/blocks_test.go similarity index 96% rename from engine/access/rest/blocks_test.go rename to engine/access/rest/routes/blocks_test.go index 7f977b06d69..3abccc9c78a 100644 --- a/engine/access/rest/blocks_test.go +++ b/engine/access/rest/routes/blocks_test.go @@ -1,4 +1,4 @@ -package rest +package routes import ( "fmt" @@ -31,12 +31,12 @@ type testVector struct { expectedResponse string } -// TestGetBlocks tests the get blocks by ID and get blocks by heights API -func TestGetBlocks(t *testing.T) { - backend := &mock.API{} - - blkCnt := 10 - blockIDs, heights, blocks, executionResults := generateMocks(backend, blkCnt) +func prepareTestVectors(t *testing.T, + blockIDs []string, + heights []string, + blocks []*flow.Block, + executionResults []*flow.ExecutionResult, + blkCnt int) []testVector { singleBlockExpandedResponse := expectedBlockResponsesExpanded(blocks[:1], executionResults[:1], true, flow.BlockStatusUnknown) singleSealedBlockExpandedResponse := expectedBlockResponsesExpanded(blocks[:1], executionResults[:1], true, flow.BlockStatusSealed) @@ -137,6 +137,16 @@ func TestGetBlocks(t *testing.T) { expectedResponse: fmt.Sprintf(`{"code":400, "message": "at most %d IDs can be requested at a time"}`, request.MaxBlockRequestHeightRange), }, } + return testVectors +} + +// TestGetBlocks tests local get blocks by ID and get blocks by heights API +func TestAccessGetBlocks(t *testing.T) { + backend := &mock.API{} + + blkCnt := 10 + blockIDs, heights, blocks, executionResults := generateMocks(backend, blkCnt) + testVectors := prepareTestVectors(t, blockIDs, heights, blocks, executionResults, blkCnt) for _, tv := range testVectors { responseRec, err := executeRequest(tv.request, backend) diff --git a/engine/access/rest/collections.go b/engine/access/rest/routes/collections.go similarity index 94% rename from engine/access/rest/collections.go rename to engine/access/rest/routes/collections.go index 807be2c0c41..47b6150f480 100644 --- a/engine/access/rest/collections.go +++ b/engine/access/rest/routes/collections.go @@ -1,4 +1,4 @@ -package rest +package routes import ( "github.com/onflow/flow-go/access" @@ -11,7 +11,7 @@ import ( func GetCollectionByID(r *request.Request, backend access.API, link models.LinkGenerator) (interface{}, error) { req, err := r.GetCollectionRequest() if err != nil { - return nil, NewBadRequestError(err) + return nil, models.NewBadRequestError(err) } collection, err := backend.GetCollectionByID(r.Context(), req.ID) diff --git a/engine/access/rest/collections_test.go b/engine/access/rest/routes/collections_test.go similarity index 99% rename from engine/access/rest/collections_test.go rename to 
engine/access/rest/routes/collections_test.go index 3981541f3a7..de05152b6d5 100644 --- a/engine/access/rest/collections_test.go +++ b/engine/access/rest/routes/collections_test.go @@ -1,4 +1,4 @@ -package rest +package routes import ( "encoding/json" diff --git a/engine/access/rest/events.go b/engine/access/rest/routes/events.go similarity index 85% rename from engine/access/rest/events.go rename to engine/access/rest/routes/events.go index 2a79939bc21..4f03624c768 100644 --- a/engine/access/rest/events.go +++ b/engine/access/rest/routes/events.go @@ -1,22 +1,21 @@ -package rest +package routes import ( "fmt" + "github.com/onflow/flow-go/access" "github.com/onflow/flow-go/engine/access/rest/models" "github.com/onflow/flow-go/engine/access/rest/request" - - "github.com/onflow/flow-go/access" ) -const blockQueryParam = "block_ids" -const eventTypeQuery = "type" +const BlockQueryParam = "block_ids" +const EventTypeQuery = "type" // GetEvents for the provided block range or list of block IDs filtered by type. func GetEvents(r *request.Request, backend access.API, _ models.LinkGenerator) (interface{}, error) { req, err := r.GetEventsRequest() if err != nil { - return nil, NewBadRequestError(err) + return nil, models.NewBadRequestError(err) } // if the request has block IDs provided then return events for block IDs @@ -41,7 +40,7 @@ func GetEvents(r *request.Request, backend access.API, _ models.LinkGenerator) ( req.EndHeight = latest.Height // special check after we resolve special height value if req.StartHeight > req.EndHeight { - return nil, NewBadRequestError(fmt.Errorf("current retrieved end height value is lower than start height")) + return nil, models.NewBadRequestError(fmt.Errorf("current retrieved end height value is lower than start height")) } } diff --git a/engine/access/rest/events_test.go b/engine/access/rest/routes/events_test.go similarity index 98% rename from engine/access/rest/events_test.go rename to engine/access/rest/routes/events_test.go index 9f0fede2c6c..47d4d89fd52 100644 --- a/engine/access/rest/events_test.go +++ b/engine/access/rest/routes/events_test.go @@ -1,24 +1,25 @@ -package rest +package routes import ( "encoding/json" "fmt" + "net/http" "net/url" "strings" "testing" "time" - "github.com/onflow/flow-go/engine/access/rest/util" - - "github.com/onflow/flow-go/access/mock" - "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/utils/unittest" - mocks "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" + "google.golang.org/grpc/codes" "google.golang.org/grpc/status" + + "github.com/onflow/flow-go/access/mock" + "github.com/onflow/flow-go/engine/access/rest/util" + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/utils/unittest" ) func TestGetEvents(t *testing.T) { @@ -136,7 +137,7 @@ func getEventReq(t *testing.T, eventType string, start string, end string, block q := u.Query() if len(blockIDs) > 0 { - q.Add(blockQueryParam, strings.Join(blockIDs, ",")) + q.Add(BlockQueryParam, strings.Join(blockIDs, ",")) } if start != "" && end != "" { @@ -144,7 +145,7 @@ func getEventReq(t *testing.T, eventType string, start string, end string, block q.Add(endHeightQueryParam, end) } - q.Add(eventTypeQuery, eventType) + q.Add(EventTypeQuery, eventType) u.RawQuery = q.Encode() diff --git a/engine/access/rest/execution_result.go b/engine/access/rest/routes/execution_result.go similarity index 89% rename from engine/access/rest/execution_result.go rename to engine/access/rest/routes/execution_result.go index 
b0583d43b0d..b999665b26b 100644 --- a/engine/access/rest/execution_result.go +++ b/engine/access/rest/routes/execution_result.go @@ -1,4 +1,4 @@ -package rest +package routes import ( "fmt" @@ -12,7 +12,7 @@ import ( func GetExecutionResultsByBlockIDs(r *request.Request, backend access.API, link models.LinkGenerator) (interface{}, error) { req, err := r.GetExecutionResultByBlockIDsRequest() if err != nil { - return nil, NewBadRequestError(err) + return nil, models.NewBadRequestError(err) } // for each block ID we retrieve execution result @@ -38,7 +38,7 @@ func GetExecutionResultsByBlockIDs(r *request.Request, backend access.API, link func GetExecutionResultByID(r *request.Request, backend access.API, link models.LinkGenerator) (interface{}, error) { req, err := r.GetExecutionResultRequest() if err != nil { - return nil, NewBadRequestError(err) + return nil, models.NewBadRequestError(err) } res, err := backend.GetExecutionResultByID(r.Context(), req.ID) @@ -48,7 +48,7 @@ func GetExecutionResultByID(r *request.Request, backend access.API, link models. if res == nil { err := fmt.Errorf("execution result with ID: %s not found", req.ID.String()) - return nil, NewNotFoundError(err.Error(), err) + return nil, models.NewNotFoundError(err.Error(), err) } var response models.ExecutionResult diff --git a/engine/access/rest/execution_result_test.go b/engine/access/rest/routes/execution_result_test.go similarity index 99% rename from engine/access/rest/execution_result_test.go rename to engine/access/rest/routes/execution_result_test.go index adb3852c668..ba74974af1a 100644 --- a/engine/access/rest/execution_result_test.go +++ b/engine/access/rest/routes/execution_result_test.go @@ -1,4 +1,4 @@ -package rest +package routes import ( "fmt" @@ -37,7 +37,6 @@ func getResultByIDReq(id string, blockIDs []string) *http.Request { } func TestGetResultByID(t *testing.T) { - t.Run("get by ID", func(t *testing.T) { backend := &mock.API{} result := unittest.ExecutionResultFixture() @@ -68,6 +67,7 @@ func TestGetResultByID(t *testing.T) { } func TestGetResultBlockID(t *testing.T) { + t.Run("get by block ID", func(t *testing.T) { backend := &mock.API{} blockID := unittest.IdentifierFixture() diff --git a/engine/access/rest/handler.go b/engine/access/rest/routes/handler.go similarity index 95% rename from engine/access/rest/handler.go rename to engine/access/rest/routes/handler.go index 028176fc9e0..e323843e50e 100644 --- a/engine/access/rest/handler.go +++ b/engine/access/rest/routes/handler.go @@ -1,4 +1,4 @@ -package rest +package routes import ( "encoding/json" @@ -6,17 +6,17 @@ import ( "fmt" "net/http" - "github.com/onflow/flow-go/engine/access/rest/models" - "github.com/onflow/flow-go/engine/access/rest/request" - "github.com/onflow/flow-go/engine/access/rest/util" - fvmErrors "github.com/onflow/flow-go/fvm/errors" - "github.com/onflow/flow-go/model/flow" - "github.com/rs/zerolog" + "google.golang.org/grpc/codes" "google.golang.org/grpc/status" "github.com/onflow/flow-go/access" + "github.com/onflow/flow-go/engine/access/rest/models" + "github.com/onflow/flow-go/engine/access/rest/request" + "github.com/onflow/flow-go/engine/access/rest/util" + fvmErrors "github.com/onflow/flow-go/fvm/errors" + "github.com/onflow/flow-go/model/flow" ) const MaxRequestSize = 2 << 20 // 2MB @@ -93,7 +93,7 @@ func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) { func (h *Handler) errorHandler(w http.ResponseWriter, err error, errorLogger zerolog.Logger) { // rest status type error should be returned with status 
and user message provided - var statusErr StatusError + var statusErr models.StatusError if errors.As(err, &statusErr) { h.errorResponse(w, statusErr.Status(), statusErr.UserMessage(), errorLogger) return @@ -124,6 +124,11 @@ func (h *Handler) errorHandler(w http.ResponseWriter, err error, errorLogger zer h.errorResponse(w, http.StatusBadRequest, msg, errorLogger) return } + if se.Code() == codes.Unavailable { + msg := fmt.Sprintf("Failed to process request: %s", se.Message()) + h.errorResponse(w, http.StatusServiceUnavailable, msg, errorLogger) + return + } } // stop going further - catch all error diff --git a/engine/access/rest/network.go b/engine/access/rest/routes/network.go similarity index 87% rename from engine/access/rest/network.go rename to engine/access/rest/routes/network.go index 6100bc765d5..82abcbb6d49 100644 --- a/engine/access/rest/network.go +++ b/engine/access/rest/routes/network.go @@ -1,4 +1,4 @@ -package rest +package routes import ( "github.com/onflow/flow-go/access" @@ -7,7 +7,7 @@ import ( ) // GetNetworkParameters returns network-wide parameters of the blockchain -func GetNetworkParameters(r *request.Request, backend access.API, link models.LinkGenerator) (interface{}, error) { +func GetNetworkParameters(r *request.Request, backend access.API, _ models.LinkGenerator) (interface{}, error) { params := backend.GetNetworkParameters(r.Context()) var response models.NetworkParameters diff --git a/engine/access/rest/network_test.go b/engine/access/rest/routes/network_test.go similarity index 98% rename from engine/access/rest/network_test.go rename to engine/access/rest/routes/network_test.go index c4ce7492476..00d0ca03944 100644 --- a/engine/access/rest/network_test.go +++ b/engine/access/rest/routes/network_test.go @@ -1,4 +1,4 @@ -package rest +package routes import ( "fmt" diff --git a/engine/access/rest/node_version_info.go b/engine/access/rest/routes/node_version_info.go similarity index 87% rename from engine/access/rest/node_version_info.go rename to engine/access/rest/routes/node_version_info.go index 899d159cf4f..31e172bba9f 100644 --- a/engine/access/rest/node_version_info.go +++ b/engine/access/rest/routes/node_version_info.go @@ -1,4 +1,4 @@ -package rest +package routes import ( "github.com/onflow/flow-go/access" @@ -7,7 +7,7 @@ import ( ) // GetNodeVersionInfo returns node version information -func GetNodeVersionInfo(r *request.Request, backend access.API, link models.LinkGenerator) (interface{}, error) { +func GetNodeVersionInfo(r *request.Request, backend access.API, _ models.LinkGenerator) (interface{}, error) { params, err := backend.GetNodeVersionInfo(r.Context()) if err != nil { return nil, err diff --git a/engine/access/rest/node_version_info_test.go b/engine/access/rest/routes/node_version_info_test.go similarity index 99% rename from engine/access/rest/node_version_info_test.go rename to engine/access/rest/routes/node_version_info_test.go index 4140089a280..25f19ae1f3c 100644 --- a/engine/access/rest/node_version_info_test.go +++ b/engine/access/rest/routes/node_version_info_test.go @@ -1,4 +1,4 @@ -package rest +package routes import ( "fmt" diff --git a/engine/access/rest/router.go b/engine/access/rest/routes/router.go similarity index 96% rename from engine/access/rest/router.go rename to engine/access/rest/routes/router.go index f51f1c65f3e..a2185e4e9a3 100644 --- a/engine/access/rest/router.go +++ b/engine/access/rest/routes/router.go @@ -1,4 +1,4 @@ -package rest +package routes import ( "fmt" @@ -16,7 +16,7 @@ import ( 
"github.com/onflow/flow-go/module" ) -func newRouter(backend access.API, logger zerolog.Logger, chain flow.Chain, restCollector module.RestMetrics) (*mux.Router, error) { +func NewRouter(backend access.API, logger zerolog.Logger, chain flow.Chain, restCollector module.RestMetrics) (*mux.Router, error) { router := mux.NewRouter().StrictSlash(true) v1SubRouter := router.PathPrefix("/v1").Subrouter() @@ -24,7 +24,7 @@ func newRouter(backend access.API, logger zerolog.Logger, chain flow.Chain, rest v1SubRouter.Use(middleware.LoggingMiddleware(logger)) v1SubRouter.Use(middleware.QueryExpandable()) v1SubRouter.Use(middleware.QuerySelect()) - v1SubRouter.Use(middleware.MetricsMiddleware(restCollector, URLToRoute)) + v1SubRouter.Use(middleware.MetricsMiddleware(restCollector)) linkGenerator := models.NewLinkGeneratorImpl(v1SubRouter) diff --git a/engine/access/rest/router_test.go b/engine/access/rest/routes/router_test.go similarity index 99% rename from engine/access/rest/router_test.go rename to engine/access/rest/routes/router_test.go index 5b6578be8a1..e3c2a2c3fdd 100644 --- a/engine/access/rest/router_test.go +++ b/engine/access/rest/routes/router_test.go @@ -1,4 +1,4 @@ -package rest +package routes import ( "testing" diff --git a/engine/access/rest/scripts.go b/engine/access/rest/routes/scripts.go similarity index 94% rename from engine/access/rest/scripts.go rename to engine/access/rest/routes/scripts.go index 8bd86bae54f..8627470ab88 100644 --- a/engine/access/rest/scripts.go +++ b/engine/access/rest/routes/scripts.go @@ -1,18 +1,17 @@ -package rest +package routes import ( + "github.com/onflow/flow-go/access" "github.com/onflow/flow-go/engine/access/rest/models" "github.com/onflow/flow-go/engine/access/rest/request" "github.com/onflow/flow-go/model/flow" - - "github.com/onflow/flow-go/access" ) // ExecuteScript handler sends the script from the request to be executed. 
func ExecuteScript(r *request.Request, backend access.API, _ models.LinkGenerator) (interface{}, error) { req, err := r.GetScriptRequest() if err != nil { - return nil, NewBadRequestError(err) + return nil, models.NewBadRequestError(err) } if req.BlockID != flow.ZeroID { diff --git a/engine/access/rest/scripts_test.go b/engine/access/rest/routes/scripts_test.go similarity index 99% rename from engine/access/rest/scripts_test.go rename to engine/access/rest/routes/scripts_test.go index 7e3271c1d81..8a6a63cc819 100644 --- a/engine/access/rest/scripts_test.go +++ b/engine/access/rest/routes/scripts_test.go @@ -1,4 +1,4 @@ -package rest +package routes import ( "bytes" diff --git a/engine/access/rest/test_helpers.go b/engine/access/rest/routes/test_helpers.go similarity index 77% rename from engine/access/rest/test_helpers.go rename to engine/access/rest/routes/test_helpers.go index 88170769c99..e512cc94434 100644 --- a/engine/access/rest/test_helpers.go +++ b/engine/access/rest/routes/test_helpers.go @@ -1,4 +1,4 @@ -package rest +package routes import ( "bytes" @@ -11,7 +11,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "github.com/onflow/flow-go/access/mock" + "github.com/onflow/flow-go/access" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/module/metrics" ) @@ -26,11 +26,11 @@ const ( heightQueryParam = "height" ) -func executeRequest(req *http.Request, backend *mock.API) (*httptest.ResponseRecorder, error) { +func executeRequest(req *http.Request, backend access.API) (*httptest.ResponseRecorder, error) { var b bytes.Buffer logger := zerolog.New(&b) - restCollector := metrics.NewNoopCollector() - router, err := newRouter(backend, logger, flow.Testnet.Chain(), restCollector) + + router, err := NewRouter(backend, logger, flow.Testnet.Chain(), metrics.NewNoopCollector()) if err != nil { return nil, err } @@ -40,14 +40,13 @@ func executeRequest(req *http.Request, backend *mock.API) (*httptest.ResponseRec return rr, nil } -func assertOKResponse(t *testing.T, req *http.Request, expectedRespBody string, backend *mock.API) { +func assertOKResponse(t *testing.T, req *http.Request, expectedRespBody string, backend access.API) { assertResponse(t, req, http.StatusOK, expectedRespBody, backend) } -func assertResponse(t *testing.T, req *http.Request, status int, expectedRespBody string, backend *mock.API) { +func assertResponse(t *testing.T, req *http.Request, status int, expectedRespBody string, backend access.API) { rr, err := executeRequest(req, backend) assert.NoError(t, err) - actualResponseBody := rr.Body.String() require.JSONEq(t, expectedRespBody, diff --git a/engine/access/rest/transactions.go b/engine/access/rest/routes/transactions.go similarity index 92% rename from engine/access/rest/transactions.go rename to engine/access/rest/routes/transactions.go index f8dfc83dedb..b77aead82b4 100644 --- a/engine/access/rest/transactions.go +++ b/engine/access/rest/routes/transactions.go @@ -1,4 +1,4 @@ -package rest +package routes import ( "github.com/onflow/flow-go/access" @@ -10,7 +10,7 @@ import ( func GetTransactionByID(r *request.Request, backend access.API, link models.LinkGenerator) (interface{}, error) { req, err := r.GetTransactionRequest() if err != nil { - return nil, NewBadRequestError(err) + return nil, models.NewBadRequestError(err) } tx, err := backend.GetTransaction(r.Context(), req.ID) @@ -36,7 +36,7 @@ func GetTransactionByID(r *request.Request, backend access.API, link models.Link func GetTransactionResultByID(r 
*request.Request, backend access.API, link models.LinkGenerator) (interface{}, error) { req, err := r.GetTransactionResultRequest() if err != nil { - return nil, NewBadRequestError(err) + return nil, models.NewBadRequestError(err) } txr, err := backend.GetTransactionResult(r.Context(), req.ID, req.BlockID, req.CollectionID) @@ -53,7 +53,7 @@ func GetTransactionResultByID(r *request.Request, backend access.API, link model func CreateTransaction(r *request.Request, backend access.API, link models.LinkGenerator) (interface{}, error) { req, err := r.CreateTransactionRequest() if err != nil { - return nil, NewBadRequestError(err) + return nil, models.NewBadRequestError(err) } err = backend.SendTransaction(r.Context(), &req.Transaction) diff --git a/engine/access/rest/transactions_test.go b/engine/access/rest/routes/transactions_test.go similarity index 89% rename from engine/access/rest/transactions_test.go rename to engine/access/rest/routes/transactions_test.go index 26710c747e5..3b02c4d5de5 100644 --- a/engine/access/rest/transactions_test.go +++ b/engine/access/rest/routes/transactions_test.go @@ -1,4 +1,4 @@ -package rest +package routes import ( "bytes" @@ -69,40 +69,7 @@ func createTransactionReq(body interface{}) *http.Request { return req } -func validCreateBody(tx flow.TransactionBody) map[string]interface{} { - tx.Arguments = [][]uint8{} // fix how fixture creates nil values - auth := make([]string, len(tx.Authorizers)) - for i, a := range tx.Authorizers { - auth[i] = a.String() - } - - return map[string]interface{}{ - "script": util.ToBase64(tx.Script), - "arguments": tx.Arguments, - "reference_block_id": tx.ReferenceBlockID.String(), - "gas_limit": fmt.Sprintf("%d", tx.GasLimit), - "payer": tx.Payer.String(), - "proposal_key": map[string]interface{}{ - "address": tx.ProposalKey.Address.String(), - "key_index": fmt.Sprintf("%d", tx.ProposalKey.KeyIndex), - "sequence_number": fmt.Sprintf("%d", tx.ProposalKey.SequenceNumber), - }, - "authorizers": auth, - "payload_signatures": []map[string]interface{}{{ - "address": tx.PayloadSignatures[0].Address.String(), - "key_index": fmt.Sprintf("%d", tx.PayloadSignatures[0].KeyIndex), - "signature": util.ToBase64(tx.PayloadSignatures[0].Signature), - }}, - "envelope_signatures": []map[string]interface{}{{ - "address": tx.EnvelopeSignatures[0].Address.String(), - "key_index": fmt.Sprintf("%d", tx.EnvelopeSignatures[0].KeyIndex), - "signature": util.ToBase64(tx.EnvelopeSignatures[0].Signature), - }}, - } -} - func TestGetTransactions(t *testing.T) { - t.Run("get by ID without results", func(t *testing.T) { backend := &mock.API{} tx := unittest.TransactionFixture() @@ -150,6 +117,7 @@ func TestGetTransactions(t *testing.T) { t.Run("Get by ID with results", func(t *testing.T) { backend := &mock.API{} + tx := unittest.TransactionFixture() txr := transactionResultFixture(tx) @@ -227,6 +195,7 @@ func TestGetTransactions(t *testing.T) { t.Run("get by ID non-existing", func(t *testing.T) { backend := &mock.API{} + tx := unittest.TransactionFixture() req := getTransactionReq(tx.ID().String(), false, "", "") @@ -278,6 +247,7 @@ func TestGetTransactionResult(t *testing.T) { t.Run("get by transaction ID", func(t *testing.T) { backend := &mock.API{} + req := getTransactionResultReq(id.String(), "", "") backend.Mock. @@ -289,6 +259,7 @@ func TestGetTransactionResult(t *testing.T) { t.Run("get by block ID", func(t *testing.T) { backend := &mock.API{} + req := getTransactionResultReq(id.String(), bid.String(), "") backend.Mock. 
@@ -300,6 +271,7 @@ func TestGetTransactionResult(t *testing.T) { t.Run("get by collection ID", func(t *testing.T) { backend := &mock.API{} + req := getTransactionResultReq(id.String(), "", cid.String()) backend.Mock. @@ -311,6 +283,7 @@ func TestGetTransactionResult(t *testing.T) { t.Run("get execution statuses", func(t *testing.T) { backend := &mock.API{} + testVectors := map[*access.TransactionResult]string{{ Status: flow.TransactionStatusExpired, ErrorMessage: "", @@ -359,6 +332,7 @@ func TestGetTransactionResult(t *testing.T) { t.Run("get by ID Invalid", func(t *testing.T) { backend := &mock.API{} + req := getTransactionResultReq("invalid", "", "") expected := `{"code":400, "message":"invalid ID format"}` @@ -367,13 +341,13 @@ func TestGetTransactionResult(t *testing.T) { } func TestCreateTransaction(t *testing.T) { + backend := &mock.API{} t.Run("create", func(t *testing.T) { - backend := &mock.API{} tx := unittest.TransactionBodyFixture() tx.PayloadSignatures = []flow.TransactionSignature{unittest.TransactionSignatureFixture()} tx.Arguments = [][]uint8{} - req := createTransactionReq(validCreateBody(tx)) + req := createTransactionReq(unittest.CreateSendTxHttpPayload(tx)) backend.Mock. On("SendTransaction", mocks.Anything, &tx). @@ -421,7 +395,6 @@ func TestCreateTransaction(t *testing.T) { }) t.Run("post invalid transaction", func(t *testing.T) { - backend := &mock.API{} tests := []struct { inputField string inputValue string @@ -441,7 +414,7 @@ func TestCreateTransaction(t *testing.T) { for _, test := range tests { tx := unittest.TransactionBodyFixture() tx.PayloadSignatures = []flow.TransactionSignature{unittest.TransactionSignatureFixture()} - testTx := validCreateBody(tx) + testTx := unittest.CreateSendTxHttpPayload(tx) testTx[test.inputField] = test.inputValue req := createTransactionReq(testTx) diff --git a/engine/access/rest/server.go b/engine/access/rest/server.go index a1aa83710d8..4a4b1be6f0e 100644 --- a/engine/access/rest/server.go +++ b/engine/access/rest/server.go @@ -8,14 +8,14 @@ import ( "github.com/rs/zerolog" "github.com/onflow/flow-go/access" + "github.com/onflow/flow-go/engine/access/rest/routes" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/module" ) // NewServer returns an HTTP server initialized with the REST API handler -func NewServer(backend access.API, listenAddress string, logger zerolog.Logger, chain flow.Chain, restCollector module.RestMetrics) (*http.Server, error) { - - router, err := newRouter(backend, logger, chain, restCollector) +func NewServer(serverAPI access.API, listenAddress string, logger zerolog.Logger, chain flow.Chain, restCollector module.RestMetrics) (*http.Server, error) { + router, err := routes.NewRouter(serverAPI, logger, chain, restCollector) if err != nil { return nil, err } diff --git a/engine/access/rest_api_test.go b/engine/access/rest_api_test.go index 5ee8f6d9730..24ecf554627 100644 --- a/engine/access/rest_api_test.go +++ b/engine/access/rest_api_test.go @@ -10,6 +10,11 @@ import ( "testing" "time" + "google.golang.org/grpc/credentials" + + "github.com/onflow/flow-go/module/grpcserver" + "github.com/onflow/flow-go/utils/grpcutils" + "github.com/antihax/optional" restclient "github.com/onflow/flow/openapi/go-client-generated" "github.com/rs/zerolog" @@ -19,9 +24,10 @@ import ( "github.com/stretchr/testify/suite" accessmock "github.com/onflow/flow-go/engine/access/mock" - "github.com/onflow/flow-go/engine/access/rest" "github.com/onflow/flow-go/engine/access/rest/request" + 
"github.com/onflow/flow-go/engine/access/rest/routes" "github.com/onflow/flow-go/engine/access/rpc" + "github.com/onflow/flow-go/engine/access/rpc/backend" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/module/irrecoverable" "github.com/onflow/flow-go/module/metrics" @@ -63,6 +69,10 @@ type RestAPITestSuite struct { ctx irrecoverable.SignalerContext cancel context.CancelFunc + + // grpc servers + secureGrpcServer *grpcserver.GrpcServer + unsecureGrpcServer *grpcserver.GrpcServer } func (suite *RestAPITestSuite) SetupTest() { @@ -118,10 +128,30 @@ func (suite *RestAPITestSuite) SetupTest() { RESTListenAddr: unittest.DefaultAddress, } - rpcEngBuilder, err := rpc.NewBuilder( - suite.log, - suite.state, - config, + // generate a server certificate that will be served by the GRPC server + networkingKey := unittest.NetworkingPrivKeyFixture() + x509Certificate, err := grpcutils.X509Certificate(networkingKey) + assert.NoError(suite.T(), err) + tlsConfig := grpcutils.DefaultServerTLSConfig(x509Certificate) + // set the transport credentials for the server to use + config.TransportCredentials = credentials.NewTLS(tlsConfig) + + suite.secureGrpcServer = grpcserver.NewGrpcServerBuilder(suite.log, + config.SecureGRPCListenAddr, + grpcutils.DefaultMaxMsgSize, + false, + nil, + nil, + grpcserver.WithTransportCredentials(config.TransportCredentials)).Build() + + suite.unsecureGrpcServer = grpcserver.NewGrpcServerBuilder(suite.log, + config.UnsecureGRPCListenAddr, + grpcutils.DefaultMaxMsgSize, + false, + nil, + nil).Build() + + backend := backend.New(suite.state, suite.collClient, nil, suite.blocks, @@ -132,26 +162,51 @@ func (suite *RestAPITestSuite) SetupTest() { suite.executionResults, suite.chainID, suite.metrics, - 0, - 0, - false, + nil, false, + 0, nil, nil, + suite.log, + 0, + nil) + + rpcEngBuilder, err := rpc.NewBuilder( + suite.log, + suite.state, + config, + suite.chainID, + suite.metrics, + false, suite.me, + backend, + backend, + suite.secureGrpcServer, + suite.unsecureGrpcServer, ) assert.NoError(suite.T(), err) suite.rpcEng, err = rpcEngBuilder.WithLegacy().Build() assert.NoError(suite.T(), err) suite.ctx, suite.cancel = irrecoverable.NewMockSignalerContextWithCancel(suite.T(), context.Background()) + suite.rpcEng.Start(suite.ctx) - // wait for the server to startup + + suite.secureGrpcServer.Start(suite.ctx) + suite.unsecureGrpcServer.Start(suite.ctx) + + // wait for the servers to startup + unittest.AssertClosesBefore(suite.T(), suite.secureGrpcServer.Ready(), 2*time.Second) + unittest.AssertClosesBefore(suite.T(), suite.unsecureGrpcServer.Ready(), 2*time.Second) + + // wait for the engine to startup unittest.AssertClosesBefore(suite.T(), suite.rpcEng.Ready(), 2*time.Second) } func (suite *RestAPITestSuite) TearDownTest() { suite.cancel() + unittest.AssertClosesBefore(suite.T(), suite.secureGrpcServer.Done(), 2*time.Second) + unittest.AssertClosesBefore(suite.T(), suite.unsecureGrpcServer.Done(), 2*time.Second) unittest.AssertClosesBefore(suite.T(), suite.rpcEng.Done(), 2*time.Second) } @@ -356,7 +411,7 @@ func (suite *RestAPITestSuite) TestRequestSizeRestriction() { ctx, cancel := context.WithTimeout(context.Background(), time.Second*5) defer cancel() // make a request of size larger than the max permitted size - requestBytes := make([]byte, rest.MaxRequestSize+1) + requestBytes := make([]byte, routes.MaxRequestSize+1) script := restclient.ScriptsBody{ Script: string(requestBytes), } @@ -383,13 +438,13 @@ func assertError(t *testing.T, resp *http.Response, err error, 
expectedCode int, func optionsForBlockByID() *restclient.BlocksApiBlocksIdGetOpts { return &restclient.BlocksApiBlocksIdGetOpts{ - Expand: optional.NewInterface([]string{rest.ExpandableFieldPayload}), + Expand: optional.NewInterface([]string{routes.ExpandableFieldPayload}), Select_: optional.NewInterface([]string{"header.id"}), } } func optionsForBlockByStartEndHeight(startHeight, endHeight uint64) *restclient.BlocksApiBlocksGetOpts { return &restclient.BlocksApiBlocksGetOpts{ - Expand: optional.NewInterface([]string{rest.ExpandableFieldPayload}), + Expand: optional.NewInterface([]string{routes.ExpandableFieldPayload}), Select_: optional.NewInterface([]string{"header.id", "header.height"}), StartHeight: optional.NewInterface(startHeight), EndHeight: optional.NewInterface(endHeight), @@ -398,7 +453,7 @@ func optionsForBlockByStartEndHeight(startHeight, endHeight uint64) *restclient. func optionsForBlockByHeights(heights []uint64) *restclient.BlocksApiBlocksGetOpts { return &restclient.BlocksApiBlocksGetOpts{ - Expand: optional.NewInterface([]string{rest.ExpandableFieldPayload}), + Expand: optional.NewInterface([]string{routes.ExpandableFieldPayload}), Select_: optional.NewInterface([]string{"header.id", "header.height"}), Height: optional.NewInterface(heights), } @@ -406,7 +461,7 @@ func optionsForBlockByHeights(heights []uint64) *restclient.BlocksApiBlocksGetOp func optionsForFinalizedBlock(finalOrSealed string) *restclient.BlocksApiBlocksGetOpts { return &restclient.BlocksApiBlocksGetOpts{ - Expand: optional.NewInterface([]string{rest.ExpandableFieldPayload}), + Expand: optional.NewInterface([]string{routes.ExpandableFieldPayload}), Select_: optional.NewInterface([]string{"header.id", "header.height"}), Height: optional.NewInterface(finalOrSealed), } diff --git a/engine/access/rpc/backend/backend.go b/engine/access/rpc/backend/backend.go index f1c5fc542e5..fbc98f5319a 100644 --- a/engine/access/rpc/backend/backend.go +++ b/engine/access/rpc/backend/backend.go @@ -11,7 +11,6 @@ import ( "google.golang.org/grpc/status" lru "github.com/hashicorp/golang-lru" - accessproto "github.com/onflow/flow/protobuf/go/flow/access" "github.com/rs/zerolog" "github.com/onflow/flow-go/access" @@ -23,6 +22,8 @@ import ( "github.com/onflow/flow-go/module" "github.com/onflow/flow-go/state/protocol" "github.com/onflow/flow-go/storage" + + accessproto "github.com/onflow/flow/protobuf/go/flow/access" ) // maxExecutionNodesCnt is the max number of execution nodes that will be contacted to complete an execution api request @@ -80,6 +81,17 @@ type Backend struct { connFactory ConnectionFactory } +// Config defines the configurable options for creating Backend +type Config struct { + ExecutionClientTimeout time.Duration // execution API GRPC client timeout + CollectionClientTimeout time.Duration // collection API GRPC client timeout + ConnectionPoolSize uint // size of the cache for storing collection and execution connections + MaxHeightRange uint // max size of height range requests + PreferredExecutionNodeIDs []string // preferred list of upstream execution node IDs + FixedExecutionNodeIDs []string // fixed list of execution node IDs to choose from if no node ID can be chosen from the PreferredExecutionNodeIDs + ArchiveAddressList []string // the archive node address list to send script executions. 
when configured, script executions will all be sent to the archive node +} + func New( state protocol.State, collectionRPC accessproto.AccessAPIClient, @@ -201,6 +213,40 @@ func New( return b } +// NewCache constructs a cache for storing connections to other nodes. +// No errors are expected during normal operations. +func NewCache( + log zerolog.Logger, + accessMetrics module.AccessMetrics, + connectionPoolSize uint, +) (*lru.Cache, uint, error) { + + var cache *lru.Cache + cacheSize := connectionPoolSize + if cacheSize > 0 { + // TODO: remove this fallback after fixing issues with evictions + // It was observed that evictions cause connection errors for in flight requests. This works around + // the issue by forcing the pool size to be greater than the number of ENs + LNs + if cacheSize < DefaultConnectionPoolSize { + log.Warn().Msg("connection pool size below threshold, setting pool size to default value ") + cacheSize = DefaultConnectionPoolSize + } + var err error + cache, err = lru.NewWithEvict(int(cacheSize), func(_, evictedValue interface{}) { + store := evictedValue.(*CachedClient) + store.Close() + log.Debug().Str("grpc_conn_evicted", store.Address).Msg("closing grpc connection evicted from pool") + if accessMetrics != nil { + accessMetrics.ConnectionFromPoolEvicted() + } + }) + if err != nil { + return nil, 0, fmt.Errorf("could not initialize connection pool cache: %w", err) + } + } + return cache, cacheSize, nil +} + func identifierList(ids []string) (flow.IdentifierList, error) { idList := make(flow.IdentifierList, len(ids)) for i, idStr := range ids { @@ -370,7 +416,10 @@ func executionNodesForBlockID( } // randomly choose up to maxExecutionNodesCnt identities - executionIdentitiesRandom := subsetENs.Sample(maxExecutionNodesCnt) + executionIdentitiesRandom, err := subsetENs.Sample(maxExecutionNodesCnt) + if err != nil { + return nil, fmt.Errorf("sampling failed: %w", err) + } if len(executionIdentitiesRandom) == 0 { return nil, fmt.Errorf("no matching execution node found for block ID %v", blockID) diff --git a/engine/access/rpc/backend/backend_transactions.go b/engine/access/rpc/backend/backend_transactions.go index ec2ec528bd3..a4f905d8b42 100644 --- a/engine/access/rpc/backend/backend_transactions.go +++ b/engine/access/rpc/backend/backend_transactions.go @@ -129,7 +129,10 @@ func (b *backendTransactions) chooseCollectionNodes(tx *flow.TransactionBody, sa } // select a random subset of collection nodes from the cluster to be tried in order - targetNodes := txCluster.Sample(sampleSize) + targetNodes, err := txCluster.Sample(sampleSize) + if err != nil { + return nil, fmt.Errorf("sampling failed: %w", err) + } // collect the addresses of all the chosen collection nodes var targetAddrs = make([]string, len(targetNodes)) diff --git a/engine/access/rpc/engine.go b/engine/access/rpc/engine.go index d4c812df997..eea6dc5d17c 100644 --- a/engine/access/rpc/engine.go +++ b/engine/access/rpc/engine.go @@ -7,47 +7,38 @@ import ( "net" "net/http" "sync" - "time" - grpc_prometheus "github.com/grpc-ecosystem/go-grpc-prometheus" - lru "github.com/hashicorp/golang-lru" - accessproto "github.com/onflow/flow/protobuf/go/flow/access" "github.com/rs/zerolog" - "google.golang.org/grpc" + "google.golang.org/grpc/credentials" + "github.com/onflow/flow-go/access" "github.com/onflow/flow-go/consensus/hotstuff/model" "github.com/onflow/flow-go/engine/access/rest" "github.com/onflow/flow-go/engine/access/rpc/backend" - "github.com/onflow/flow-go/engine/common/rpc" "github.com/onflow/flow-go/model/flow"
"github.com/onflow/flow-go/module" "github.com/onflow/flow-go/module/component" "github.com/onflow/flow-go/module/events" + "github.com/onflow/flow-go/module/grpcserver" "github.com/onflow/flow-go/module/irrecoverable" "github.com/onflow/flow-go/state/protocol" - "github.com/onflow/flow-go/storage" ) // Config defines the configurable options for the access node server // A secure GRPC server here implies a server that presents a self-signed TLS certificate and a client that authenticates // the server via a pre-shared public key type Config struct { - UnsecureGRPCListenAddr string // the non-secure GRPC server address as ip:port - SecureGRPCListenAddr string // the secure GRPC server address as ip:port - TransportCredentials credentials.TransportCredentials // the secure GRPC credentials - HTTPListenAddr string // the HTTP web proxy address as ip:port - RESTListenAddr string // the REST server address as ip:port (if empty the REST server will not be started) - CollectionAddr string // the address of the upstream collection node - HistoricalAccessAddrs string // the list of all access nodes from previous spork - MaxMsgSize uint // GRPC max message size - ExecutionClientTimeout time.Duration // execution API GRPC client timeout - CollectionClientTimeout time.Duration // collection API GRPC client timeout - ConnectionPoolSize uint // size of the cache for storing collection and execution connections - MaxHeightRange uint // max size of height range requests - PreferredExecutionNodeIDs []string // preferred list of upstream execution node IDs - FixedExecutionNodeIDs []string // fixed list of execution node IDs to choose from if no node node ID can be chosen from the PreferredExecutionNodeIDs - ArchiveAddressList []string // the archive node address list to send script executions. when configured, script executions will be all sent to the archive node + UnsecureGRPCListenAddr string // the non-secure GRPC server address as ip:port + SecureGRPCListenAddr string // the secure GRPC server address as ip:port + TransportCredentials credentials.TransportCredentials // the secure GRPC credentials + HTTPListenAddr string // the HTTP web proxy address as ip:port + RESTListenAddr string // the REST server address as ip:port (if empty the REST server will not be started) + CollectionAddr string // the address of the upstream collection node + HistoricalAccessAddrs string // the list of all access nodes from previous spork + + BackendConfig backend.Config // configurable options for creating Backend + MaxMsgSize uint // GRPC max message size } // Engine exposes the server with a simplified version of the Access API. @@ -62,137 +53,38 @@ type Engine struct { log zerolog.Logger restCollector module.RestMetrics - backend *backend.Backend // the gRPC service implementation - unsecureGrpcServer *grpc.Server // the unsecure gRPC server - secureGrpcServer *grpc.Server // the secure gRPC server + backend *backend.Backend // the gRPC service implementation + unsecureGrpcServer *grpcserver.GrpcServer // the unsecure gRPC server + secureGrpcServer *grpcserver.GrpcServer // the secure gRPC server httpServer *http.Server restServer *http.Server config Config chain flow.Chain - addrLock sync.RWMutex - unsecureGrpcAddress net.Addr - secureGrpcAddress net.Addr - restAPIAddress net.Addr + restHandler access.API + + addrLock sync.RWMutex + restAPIAddress net.Addr } +type Option func(*RPCEngineBuilder) // NewBuilder returns a new RPC engine builder. 
func NewBuilder(log zerolog.Logger, state protocol.State, config Config, - collectionRPC accessproto.AccessAPIClient, - historicalAccessNodes []accessproto.AccessAPIClient, - blocks storage.Blocks, - headers storage.Headers, - collections storage.Collections, - transactions storage.Transactions, - executionReceipts storage.ExecutionReceipts, - executionResults storage.ExecutionResults, chainID flow.ChainID, accessMetrics module.AccessMetrics, - collectionGRPCPort uint, - executionGRPCPort uint, - retryEnabled bool, rpcMetricsEnabled bool, - apiRatelimits map[string]int, // the api rate limit (max calls per second) for each of the Access API e.g. Ping->100, GetTransaction->300 - apiBurstLimits map[string]int, // the api burst limit (max calls at the same time) for each of the Access API e.g. Ping->50, GetTransaction->10 me module.Local, + backend *backend.Backend, + restHandler access.API, + secureGrpcServer *grpcserver.GrpcServer, + unsecureGrpcServer *grpcserver.GrpcServer, ) (*RPCEngineBuilder, error) { - log = log.With().Str("engine", "rpc").Logger() - // create a GRPC server to serve GRPC clients - grpcOpts := []grpc.ServerOption{ - grpc.MaxRecvMsgSize(int(config.MaxMsgSize)), - grpc.MaxSendMsgSize(int(config.MaxMsgSize)), - } - - var interceptors []grpc.UnaryServerInterceptor // ordered list of interceptors - // if rpc metrics is enabled, first create the grpc metrics interceptor - if rpcMetricsEnabled { - interceptors = append(interceptors, grpc_prometheus.UnaryServerInterceptor) - } - - if len(apiRatelimits) > 0 { - // create a rate limit interceptor - rateLimitInterceptor := rpc.NewRateLimiterInterceptor(log, apiRatelimits, apiBurstLimits).UnaryServerInterceptor - // append the rate limit interceptor to the list of interceptors - interceptors = append(interceptors, rateLimitInterceptor) - } - - // add the logging interceptor, ensure it is innermost wrapper - interceptors = append(interceptors, rpc.LoggingInterceptor(log)...) - - // create a chained unary interceptor - chainedInterceptors := grpc.ChainUnaryInterceptor(interceptors...) - grpcOpts = append(grpcOpts, chainedInterceptors) - - // create an unsecured grpc server - unsecureGrpcServer := grpc.NewServer(grpcOpts...) - - // create a secure server by using the secure grpc credentials that are passed in as part of config - grpcOpts = append(grpcOpts, grpc.Creds(config.TransportCredentials)) - secureGrpcServer := grpc.NewServer(grpcOpts...) - // wrap the unsecured server with an HTTP proxy server to serve HTTP clients - httpServer := newHTTPProxyServer(unsecureGrpcServer) - - var cache *lru.Cache - cacheSize := config.ConnectionPoolSize - if cacheSize > 0 { - // TODO: remove this fallback after fixing issues with evictions - // It was observed that evictions cause connection errors for in flight requests. 
This works around - // the issue by forcing hte pool size to be greater than the number of ENs + LNs - if cacheSize < backend.DefaultConnectionPoolSize { - log.Warn().Msg("connection pool size below threshold, setting pool size to default value ") - cacheSize = backend.DefaultConnectionPoolSize - } - var err error - cache, err = lru.NewWithEvict(int(cacheSize), func(_, evictedValue interface{}) { - store := evictedValue.(*backend.CachedClient) - store.Close() - log.Debug().Str("grpc_conn_evicted", store.Address).Msg("closing grpc connection evicted from pool") - if accessMetrics != nil { - accessMetrics.ConnectionFromPoolEvicted() - } - }) - if err != nil { - return nil, fmt.Errorf("could not initialize connection pool cache: %w", err) - } - } - - connectionFactory := &backend.ConnectionFactoryImpl{ - CollectionGRPCPort: collectionGRPCPort, - ExecutionGRPCPort: executionGRPCPort, - CollectionNodeGRPCTimeout: config.CollectionClientTimeout, - ExecutionNodeGRPCTimeout: config.ExecutionClientTimeout, - ConnectionsCache: cache, - CacheSize: cacheSize, - MaxMsgSize: config.MaxMsgSize, - AccessMetrics: accessMetrics, - Log: log, - } - - backend := backend.New(state, - collectionRPC, - historicalAccessNodes, - blocks, - headers, - collections, - transactions, - executionReceipts, - executionResults, - chainID, - accessMetrics, - connectionFactory, - retryEnabled, - config.MaxHeightRange, - config.PreferredExecutionNodeIDs, - config.FixedExecutionNodeIDs, - log, - backend.DefaultSnapshotHistoryLimit, - config.ArchiveAddressList, - ) + httpServer := newHTTPProxyServer(unsecureGrpcServer.Server) finalizedCache, finalizedCacheWorker, err := events.NewFinalizedHeaderCache(state) if err != nil { @@ -210,13 +102,20 @@ func NewBuilder(log zerolog.Logger, config: config, chain: chainID.Chain(), restCollector: accessMetrics, + restHandler: restHandler, } backendNotifierActor, backendNotifierWorker := events.NewFinalizationActor(eng.notifyBackendOnBlockFinalized) eng.backendNotifierActor = backendNotifierActor eng.Component = component.NewComponentManagerBuilder(). - AddWorker(eng.serveUnsecureGRPCWorker). - AddWorker(eng.serveSecureGRPCWorker). + AddWorker(func(ctx irrecoverable.SignalerContext, ready component.ReadyFunc) { + ready() + <-secureGrpcServer.Done() + }). + AddWorker(func(ctx irrecoverable.SignalerContext, ready component.ReadyFunc) { + ready() + <-unsecureGrpcServer.Done() + }). AddWorker(eng.serveGRPCWebProxyWorker). AddWorker(eng.serveREST). AddWorker(finalizedCacheWorker). @@ -245,8 +144,6 @@ func (e *Engine) shutdown() { // use unbounded context, rely on shutdown logic to have timeout ctx := context.Background() - e.unsecureGrpcServer.GracefulStop() - e.secureGrpcServer.GracefulStop() err := e.httpServer.Shutdown(ctx) if err != nil { e.log.Error().Err(err).Msg("error stopping http server") @@ -273,22 +170,6 @@ func (e *Engine) notifyBackendOnBlockFinalized(_ *model.Block) error { return nil } -// UnsecureGRPCAddress returns the listen address of the unsecure GRPC server. -// Guaranteed to be non-nil after Engine.Ready is closed. -func (e *Engine) UnsecureGRPCAddress() net.Addr { - e.addrLock.RLock() - defer e.addrLock.RUnlock() - return e.unsecureGrpcAddress -} - -// SecureGRPCAddress returns the listen address of the secure GRPC server. -// Guaranteed to be non-nil after Engine.Ready is closed. 
-func (e *Engine) SecureGRPCAddress() net.Addr { - e.addrLock.RLock() - defer e.addrLock.RUnlock() - return e.secureGrpcAddress -} - // RestApiAddress returns the listen address of the REST API server. // Guaranteed to be non-nil after Engine.Ready is closed. func (e *Engine) RestApiAddress() net.Addr { @@ -297,59 +178,6 @@ func (e *Engine) RestApiAddress() net.Addr { return e.restAPIAddress } -// serveUnsecureGRPCWorker is a worker routine which starts the unsecure gRPC server. -// The ready callback is called after the server address is bound and set. -func (e *Engine) serveUnsecureGRPCWorker(ctx irrecoverable.SignalerContext, ready component.ReadyFunc) { - e.log.Info().Str("grpc_address", e.config.UnsecureGRPCListenAddr).Msg("starting grpc server on address") - - l, err := net.Listen("tcp", e.config.UnsecureGRPCListenAddr) - if err != nil { - e.log.Err(err).Msg("failed to start the grpc server") - ctx.Throw(err) - return - } - - // save the actual address on which we are listening (may be different from e.config.UnsecureGRPCListenAddr if not port - // was specified) - e.addrLock.Lock() - e.unsecureGrpcAddress = l.Addr() - e.addrLock.Unlock() - e.log.Debug().Str("unsecure_grpc_address", e.unsecureGrpcAddress.String()).Msg("listening on port") - ready() - - err = e.unsecureGrpcServer.Serve(l) // blocking call - if err != nil { - e.log.Err(err).Msg("fatal error in unsecure grpc server") - ctx.Throw(err) - } -} - -// serveSecureGRPCWorker is a worker routine which starts the secure gRPC server. -// The ready callback is called after the server address is bound and set. -func (e *Engine) serveSecureGRPCWorker(ctx irrecoverable.SignalerContext, ready component.ReadyFunc) { - e.log.Info().Str("secure_grpc_address", e.config.SecureGRPCListenAddr).Msg("starting grpc server on address") - - l, err := net.Listen("tcp", e.config.SecureGRPCListenAddr) - if err != nil { - e.log.Err(err).Msg("failed to start the grpc server") - ctx.Throw(err) - return - } - - e.addrLock.Lock() - e.secureGrpcAddress = l.Addr() - e.addrLock.Unlock() - - e.log.Debug().Str("secure_grpc_address", e.secureGrpcAddress.String()).Msg("listening on port") - ready() - - err = e.secureGrpcServer.Serve(l) // blocking call - if err != nil { - e.log.Err(err).Msg("fatal error in secure grpc server") - ctx.Throw(err) - } -} - // serveGRPCWebProxyWorker is a worker routine which starts the gRPC web proxy server. 
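// A minimal wiring sketch mirroring the test setup later in this diff: the secure and
// unsecure gRPC servers are built once via grpcserver.NewGrpcServerBuilder and injected
// into NewBuilder together with the backend. The helper name and the nil rate limits are
// assumptions; the servers' Start/Ready/Done lifecycle is driven by the caller, as in the tests.
func exampleBuildRPCEngine(
	log zerolog.Logger,
	state protocol.State,
	config Config,
	chainID flow.ChainID,
	accessMetrics module.AccessMetrics,
	me module.Local,
	accessBackend *backend.Backend,
) (*Engine, error) {
	secureServer := grpcserver.NewGrpcServerBuilder(log, config.SecureGRPCListenAddr,
		config.MaxMsgSize, false, nil, nil,
		grpcserver.WithTransportCredentials(config.TransportCredentials)).Build()
	unsecureServer := grpcserver.NewGrpcServerBuilder(log, config.UnsecureGRPCListenAddr,
		config.MaxMsgSize, false, nil, nil).Build()

	builder, err := NewBuilder(log, state, config, chainID, accessMetrics,
		false, me, accessBackend, accessBackend, secureServer, unsecureServer)
	if err != nil {
		return nil, err
	}
	return builder.WithLegacy().Build()
}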
func (e *Engine) serveGRPCWebProxyWorker(ctx irrecoverable.SignalerContext, ready component.ReadyFunc) { log := e.log.With().Str("http_proxy_address", e.config.HTTPListenAddr).Logger() @@ -384,7 +212,7 @@ func (e *Engine) serveREST(ctx irrecoverable.SignalerContext, ready component.Re e.log.Info().Str("rest_api_address", e.config.RESTListenAddr).Msg("starting REST server on address") - r, err := rest.NewServer(e.backend, e.config.RESTListenAddr, e.log, e.chain, e.restCollector) + r, err := rest.NewServer(e.restHandler, e.config.RESTListenAddr, e.log, e.chain, e.restCollector) if err != nil { e.log.Err(err).Msg("failed to initialize the REST server") ctx.Throw(err) diff --git a/engine/access/rpc/engine_builder.go b/engine/access/rpc/engine_builder.go index a4694547b03..370f3d0fff4 100644 --- a/engine/access/rpc/engine_builder.go +++ b/engine/access/rpc/engine_builder.go @@ -5,13 +5,13 @@ import ( grpc_prometheus "github.com/grpc-ecosystem/go-grpc-prometheus" - accessproto "github.com/onflow/flow/protobuf/go/flow/access" - legacyaccessproto "github.com/onflow/flow/protobuf/go/flow/legacy/access" - "github.com/onflow/flow-go/access" legacyaccess "github.com/onflow/flow-go/access/legacy" "github.com/onflow/flow-go/consensus/hotstuff" "github.com/onflow/flow-go/module" + + accessproto "github.com/onflow/flow/protobuf/go/flow/access" + legacyaccessproto "github.com/onflow/flow/protobuf/go/flow/legacy/access" ) type RPCEngineBuilder struct { @@ -21,7 +21,7 @@ type RPCEngineBuilder struct { // optional parameters, only one can be set during build phase signerIndicesDecoder hotstuff.BlockSignerDecoder - handler accessproto.AccessAPIServer // Use the parent interface instead of implementation, so that we can assign it to proxy. + rpcHandler accessproto.AccessAPIServer // Use the parent interface instead of implementation, so that we can assign it to proxy. } // NewRPCEngineBuilder helps to build a new RPC engine. @@ -34,8 +34,8 @@ func NewRPCEngineBuilder(engine *Engine, me module.Local, finalizedHeaderCache m } } -func (builder *RPCEngineBuilder) Handler() accessproto.AccessAPIServer { - return builder.handler +func (builder *RPCEngineBuilder) RpcHandler() accessproto.AccessAPIServer { + return builder.rpcHandler } // WithBlockSignerDecoder specifies that signer indices in block headers should be translated @@ -51,15 +51,15 @@ func (builder *RPCEngineBuilder) WithBlockSignerDecoder(signerIndicesDecoder hot return builder } -// WithNewHandler specifies that the given `AccessAPIServer` should be used for serving API queries. +// WithRpcHandler specifies that the given `AccessAPIServer` should be used for serving API queries. // Caution: // you can inject either a `BlockSignerDecoder` (via method `WithBlockSignerDecoder`) -// or an `AccessAPIServer` (via method `WithNewHandler`); but not both. If both are +// or an `AccessAPIServer` (via method `WithRpcHandler`); but not both. If both are // specified, the builder will error during the build step. // // Returns self-reference for chaining. 
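// A short usage sketch for the renamed option; `customHandler` stands in for any
// accessproto.AccessAPIServer implementation and is hypothetical. WithRpcHandler and
// WithBlockSignerDecoder remain mutually exclusive, as enforced in Build below.
func exampleCustomHandler(builder *RPCEngineBuilder, customHandler accessproto.AccessAPIServer) (*Engine, error) {
	return builder.WithRpcHandler(customHandler).Build()
}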
-func (builder *RPCEngineBuilder) WithNewHandler(handler accessproto.AccessAPIServer) *RPCEngineBuilder { - builder.handler = handler +func (builder *RPCEngineBuilder) WithRpcHandler(handler accessproto.AccessAPIServer) *RPCEngineBuilder { + builder.rpcHandler = handler return builder } @@ -68,11 +68,11 @@ func (builder *RPCEngineBuilder) WithNewHandler(handler accessproto.AccessAPISer func (builder *RPCEngineBuilder) WithLegacy() *RPCEngineBuilder { // Register legacy gRPC handlers for backwards compatibility, to be removed at a later date legacyaccessproto.RegisterAccessAPIServer( - builder.unsecureGrpcServer, + builder.unsecureGrpcServer.Server, legacyaccess.NewHandler(builder.backend, builder.chain), ) legacyaccessproto.RegisterAccessAPIServer( - builder.secureGrpcServer, + builder.secureGrpcServer.Server, legacyaccess.NewHandler(builder.backend, builder.chain), ) return builder @@ -83,24 +83,24 @@ func (builder *RPCEngineBuilder) WithLegacy() *RPCEngineBuilder { func (builder *RPCEngineBuilder) WithMetrics() *RPCEngineBuilder { // Not interested in legacy metrics, so initialize here grpc_prometheus.EnableHandlingTimeHistogram() - grpc_prometheus.Register(builder.unsecureGrpcServer) - grpc_prometheus.Register(builder.secureGrpcServer) + grpc_prometheus.Register(builder.unsecureGrpcServer.Server) + grpc_prometheus.Register(builder.secureGrpcServer.Server) return builder } func (builder *RPCEngineBuilder) Build() (*Engine, error) { - if builder.signerIndicesDecoder != nil && builder.handler != nil { + if builder.signerIndicesDecoder != nil && builder.rpcHandler != nil { return nil, fmt.Errorf("only BlockSignerDecoder (via method `WithBlockSignerDecoder`) or AccessAPIServer (via method `WithNewHandler`) can be specified but not both") } - handler := builder.handler - if handler == nil { + rpcHandler := builder.rpcHandler + if rpcHandler == nil { if builder.signerIndicesDecoder == nil { - handler = access.NewHandler(builder.Engine.backend, builder.Engine.chain, builder.finalizedHeaderCache, builder.me) + rpcHandler = access.NewHandler(builder.Engine.backend, builder.Engine.chain, builder.finalizedHeaderCache, builder.me) } else { - handler = access.NewHandler(builder.Engine.backend, builder.Engine.chain, builder.finalizedHeaderCache, builder.me, access.WithBlockSignerDecoder(builder.signerIndicesDecoder)) + rpcHandler = access.NewHandler(builder.Engine.backend, builder.Engine.chain, builder.finalizedHeaderCache, builder.me, access.WithBlockSignerDecoder(builder.signerIndicesDecoder)) } } - accessproto.RegisterAccessAPIServer(builder.unsecureGrpcServer, handler) - accessproto.RegisterAccessAPIServer(builder.secureGrpcServer, handler) + accessproto.RegisterAccessAPIServer(builder.unsecureGrpcServer.Server, rpcHandler) + accessproto.RegisterAccessAPIServer(builder.secureGrpcServer.Server, rpcHandler) return builder.Engine, nil } diff --git a/engine/access/rpc/rate_limit_test.go b/engine/access/rpc/rate_limit_test.go index 3cce6e97fda..2d210fc358a 100644 --- a/engine/access/rpc/rate_limit_test.go +++ b/engine/access/rpc/rate_limit_test.go @@ -8,7 +8,6 @@ import ( "testing" "time" - accessproto "github.com/onflow/flow/protobuf/go/flow/access" "github.com/rs/zerolog" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" @@ -16,11 +15,14 @@ import ( "github.com/stretchr/testify/suite" "google.golang.org/grpc" "google.golang.org/grpc/codes" + "google.golang.org/grpc/credentials" "google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/status" accessmock 
"github.com/onflow/flow-go/engine/access/mock" + "github.com/onflow/flow-go/engine/access/rpc/backend" "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/module/grpcserver" "github.com/onflow/flow-go/module/irrecoverable" "github.com/onflow/flow-go/module/metrics" module "github.com/onflow/flow-go/module/mock" @@ -29,6 +31,8 @@ import ( storagemock "github.com/onflow/flow-go/storage/mock" "github.com/onflow/flow-go/utils/grpcutils" "github.com/onflow/flow-go/utils/unittest" + + accessproto "github.com/onflow/flow/protobuf/go/flow/access" ) type RateLimitTestSuite struct { @@ -61,6 +65,10 @@ type RateLimitTestSuite struct { ctx irrecoverable.SignalerContext cancel context.CancelFunc + + // grpc servers + secureGrpcServer *grpcserver.GrpcServer + unsecureGrpcServer *grpcserver.GrpcServer } func (suite *RateLimitTestSuite) SetupTest() { @@ -101,6 +109,14 @@ func (suite *RateLimitTestSuite) SetupTest() { HTTPListenAddr: unittest.DefaultAddress, } + // generate a server certificate that will be served by the GRPC server + networkingKey := unittest.NetworkingPrivKeyFixture() + x509Certificate, err := grpcutils.X509Certificate(networkingKey) + assert.NoError(suite.T(), err) + tlsConfig := grpcutils.DefaultServerTLSConfig(x509Certificate) + // set the transport credentials for the server to use + config.TransportCredentials = credentials.NewTLS(tlsConfig) + // set the rate limit to test with suite.rateLimit = 2 // set the burst limit to test with @@ -114,21 +130,77 @@ func (suite *RateLimitTestSuite) SetupTest() { "Ping": suite.rateLimit, } + suite.secureGrpcServer = grpcserver.NewGrpcServerBuilder(suite.log, + config.SecureGRPCListenAddr, + grpcutils.DefaultMaxMsgSize, + false, + apiRateLimt, + apiBurstLimt, + grpcserver.WithTransportCredentials(config.TransportCredentials)).Build() + + suite.unsecureGrpcServer = grpcserver.NewGrpcServerBuilder(suite.log, + config.UnsecureGRPCListenAddr, + grpcutils.DefaultMaxMsgSize, + false, + apiRateLimt, + apiBurstLimt).Build() + block := unittest.BlockHeaderFixture() suite.snapshot.On("Head").Return(block, nil) - rpcEngBuilder, err := NewBuilder(suite.log, suite.state, config, suite.collClient, nil, suite.blocks, suite.headers, suite.collections, suite.transactions, nil, - nil, suite.chainID, suite.metrics, 0, 0, false, false, apiRateLimt, apiBurstLimt, suite.me) + backend := backend.New( + suite.state, + suite.collClient, + nil, + suite.blocks, + suite.headers, + suite.collections, + suite.transactions, + nil, + nil, + suite.chainID, + suite.metrics, + nil, + false, + 0, + nil, + nil, + suite.log, + 0, + nil) + + rpcEngBuilder, err := NewBuilder( + suite.log, + suite.state, + config, + suite.chainID, + suite.metrics, + false, + suite.me, + backend, + backend, + suite.secureGrpcServer, + suite.unsecureGrpcServer) + require.NoError(suite.T(), err) suite.rpcEng, err = rpcEngBuilder.WithLegacy().Build() require.NoError(suite.T(), err) suite.ctx, suite.cancel = irrecoverable.NewMockSignalerContextWithCancel(suite.T(), context.Background()) + suite.rpcEng.Start(suite.ctx) - // wait for the server to startup + + suite.secureGrpcServer.Start(suite.ctx) + suite.unsecureGrpcServer.Start(suite.ctx) + + // wait for the servers to startup + unittest.AssertClosesBefore(suite.T(), suite.secureGrpcServer.Ready(), 2*time.Second) + unittest.AssertClosesBefore(suite.T(), suite.unsecureGrpcServer.Ready(), 2*time.Second) + + // wait for the engine to startup unittest.RequireCloseBefore(suite.T(), suite.rpcEng.Ready(), 2*time.Second, "engine not ready at 
startup") // create the access api client - suite.client, suite.closer, err = accessAPIClient(suite.rpcEng.UnsecureGRPCAddress().String()) + suite.client, suite.closer, err = accessAPIClient(suite.unsecureGrpcServer.GRPCAddress().String()) require.NoError(suite.T(), err) } @@ -140,8 +212,9 @@ func (suite *RateLimitTestSuite) TearDownTest() { if suite.closer != nil { suite.closer.Close() } - // close the server - unittest.AssertClosesBefore(suite.T(), suite.rpcEng.Done(), 2*time.Second) + // close servers + unittest.AssertClosesBefore(suite.T(), suite.secureGrpcServer.Done(), 2*time.Second) + unittest.AssertClosesBefore(suite.T(), suite.unsecureGrpcServer.Done(), 2*time.Second) } func TestRateLimit(t *testing.T) { diff --git a/engine/access/secure_grpcr_test.go b/engine/access/secure_grpcr_test.go index b82160668db..ca403ef6391 100644 --- a/engine/access/secure_grpcr_test.go +++ b/engine/access/secure_grpcr_test.go @@ -7,18 +7,22 @@ import ( "testing" "time" - accessproto "github.com/onflow/flow/protobuf/go/flow/access" "github.com/rs/zerolog" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" "github.com/stretchr/testify/suite" "google.golang.org/grpc" "google.golang.org/grpc/credentials" + "google.golang.org/grpc/credentials/insecure" + + accessproto "github.com/onflow/flow/protobuf/go/flow/access" "github.com/onflow/flow-go/crypto" accessmock "github.com/onflow/flow-go/engine/access/mock" "github.com/onflow/flow-go/engine/access/rpc" + "github.com/onflow/flow-go/engine/access/rpc/backend" "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/module/grpcserver" "github.com/onflow/flow-go/module/irrecoverable" "github.com/onflow/flow-go/module/metrics" module "github.com/onflow/flow-go/module/mock" @@ -55,6 +59,10 @@ type SecureGRPCTestSuite struct { ctx irrecoverable.SignalerContext cancel context.CancelFunc + + // grpc servers + secureGrpcServer *grpcserver.GrpcServer + unsecureGrpcServer *grpcserver.GrpcServer } func (suite *SecureGRPCTestSuite) SetupTest() { @@ -105,13 +113,26 @@ func (suite *SecureGRPCTestSuite) SetupTest() { // save the public key to use later in tests later suite.publicKey = networkingKey.PublicKey() + suite.secureGrpcServer = grpcserver.NewGrpcServerBuilder(suite.log, + config.SecureGRPCListenAddr, + grpcutils.DefaultMaxMsgSize, + false, + nil, + nil, + grpcserver.WithTransportCredentials(config.TransportCredentials)).Build() + + suite.unsecureGrpcServer = grpcserver.NewGrpcServerBuilder(suite.log, + config.UnsecureGRPCListenAddr, + grpcutils.DefaultMaxMsgSize, + false, + nil, + nil).Build() + block := unittest.BlockHeaderFixture() suite.snapshot.On("Head").Return(block, nil) - rpcEngBuilder, err := rpc.NewBuilder( - suite.log, + backend := backend.New( suite.state, - config, suite.collClient, nil, suite.blocks, @@ -122,25 +143,50 @@ func (suite *SecureGRPCTestSuite) SetupTest() { nil, suite.chainID, suite.metrics, - 0, - 0, - false, + nil, false, + 0, nil, nil, + suite.log, + 0, + nil) + + rpcEngBuilder, err := rpc.NewBuilder( + suite.log, + suite.state, + config, + suite.chainID, + suite.metrics, + false, suite.me, + backend, + backend, + suite.secureGrpcServer, + suite.unsecureGrpcServer, ) assert.NoError(suite.T(), err) suite.rpcEng, err = rpcEngBuilder.WithLegacy().Build() assert.NoError(suite.T(), err) suite.ctx, suite.cancel = irrecoverable.NewMockSignalerContextWithCancel(suite.T(), context.Background()) + suite.rpcEng.Start(suite.ctx) - // wait for the server to startup + + suite.secureGrpcServer.Start(suite.ctx) + 
suite.unsecureGrpcServer.Start(suite.ctx) + + // wait for the servers to startup + unittest.AssertClosesBefore(suite.T(), suite.secureGrpcServer.Ready(), 2*time.Second) + unittest.AssertClosesBefore(suite.T(), suite.unsecureGrpcServer.Ready(), 2*time.Second) + + // wait for the engine to startup unittest.AssertClosesBefore(suite.T(), suite.rpcEng.Ready(), 2*time.Second) } func (suite *SecureGRPCTestSuite) TearDownTest() { suite.cancel() + unittest.AssertClosesBefore(suite.T(), suite.secureGrpcServer.Done(), 2*time.Second) + unittest.AssertClosesBefore(suite.T(), suite.unsecureGrpcServer.Done(), 2*time.Second) unittest.AssertClosesBefore(suite.T(), suite.rpcEng.Done(), 2*time.Second) } @@ -172,6 +218,19 @@ func (suite *SecureGRPCTestSuite) TestAPICallUsingSecureGRPC() { _, err := client.Ping(ctx, req) assert.Error(suite.T(), err) }) + + suite.Run("happy path - connection fails, unsecure client can not get info from secure server connection", func() { + conn, err := grpc.Dial( + suite.secureGrpcServer.GRPCAddress().String(), grpc.WithTransportCredentials(insecure.NewCredentials())) + assert.NoError(suite.T(), err) + + client := accessproto.NewAccessAPIClient(conn) + closer := io.Closer(conn) + defer closer.Close() + + _, err = client.Ping(ctx, req) + assert.Error(suite.T(), err) + }) } // secureGRPCClient creates a secure GRPC client using the given public key @@ -180,7 +239,7 @@ func (suite *SecureGRPCTestSuite) secureGRPCClient(publicKey crypto.PublicKey) ( assert.NoError(suite.T(), err) conn, err := grpc.Dial( - suite.rpcEng.SecureGRPCAddress().String(), + suite.secureGrpcServer.GRPCAddress().String(), grpc.WithTransportCredentials(credentials.NewTLS(tlsConfig))) assert.NoError(suite.T(), err) diff --git a/engine/access/state_stream/backend_executiondata_test.go b/engine/access/state_stream/backend_executiondata_test.go index b619a94e322..361cb64aa80 100644 --- a/engine/access/state_stream/backend_executiondata_test.go +++ b/engine/access/state_stream/backend_executiondata_test.go @@ -3,7 +3,6 @@ package state_stream import ( "context" "fmt" - "math/rand" "testing" "time" @@ -65,8 +64,6 @@ func TestBackendExecutionDataSuite(t *testing.T) { } func (s *BackendExecutionDataSuite) SetupTest() { - rand.Seed(time.Now().UnixNano()) - logger := unittest.Logger() s.state = protocolmock.NewState(s.T()) diff --git a/engine/access/state_stream/engine.go b/engine/access/state_stream/engine.go index 7b50c2e3ff8..cb3a3e73813 100644 --- a/engine/access/state_stream/engine.go +++ b/engine/access/state_stream/engine.go @@ -2,20 +2,17 @@ package state_stream import ( "fmt" - "net" "time" - grpc_prometheus "github.com/grpc-ecosystem/go-grpc-prometheus" access "github.com/onflow/flow/protobuf/go/flow/executiondata" "github.com/rs/zerolog" - "google.golang.org/grpc" "github.com/onflow/flow-go/engine" - "github.com/onflow/flow-go/engine/common/rpc" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/module/component" "github.com/onflow/flow-go/module/executiondatasync/execution_data" "github.com/onflow/flow-go/module/executiondatasync/execution_data/cache" + "github.com/onflow/flow-go/module/grpcserver" "github.com/onflow/flow-go/module/irrecoverable" "github.com/onflow/flow-go/state/protocol" "github.com/onflow/flow-go/storage" @@ -61,7 +58,6 @@ type Engine struct { *component.ComponentManager log zerolog.Logger backend *StateStreamBackend - server *grpc.Server config Config chain flow.Chain handler *Handler @@ -69,8 +65,6 @@ type Engine struct { execDataBroadcaster *engine.Broadcaster 
execDataCache *cache.ExecutionDataCache headers storage.Headers - - stateStreamGrpcAddress net.Addr } // NewEng returns a new ingress server. @@ -86,45 +80,10 @@ func NewEng( chainID flow.ChainID, initialBlockHeight uint64, highestBlockHeight uint64, - apiRatelimits map[string]int, // the api rate limit (max calls per second) for each of the gRPC API e.g. Ping->100, GetExecutionDataByBlockID->300 - apiBurstLimits map[string]int, // the api burst limit (max calls at the same time) for each of the gRPC API e.g. Ping->50, GetExecutionDataByBlockID->10 + server *grpcserver.GrpcServer, ) (*Engine, error) { logger := log.With().Str("engine", "state_stream_rpc").Logger() - // create a GRPC server to serve GRPC clients - grpcOpts := []grpc.ServerOption{ - grpc.MaxRecvMsgSize(int(config.MaxExecutionDataMsgSize)), - grpc.MaxSendMsgSize(int(config.MaxExecutionDataMsgSize)), - } - - // ordered list of interceptors - var unaryInterceptors []grpc.UnaryServerInterceptor - - // if rpc metrics is enabled, add the grpc metrics interceptor as a server option - if config.RpcMetricsEnabled { - unaryInterceptors = append(unaryInterceptors, grpc_prometheus.UnaryServerInterceptor) - - // note: intentionally not adding logging or rate limit interceptors for streams. - // rate limiting is done in the handler, and we don't need log events for every message as - // that would be too noisy. - grpcOpts = append(grpcOpts, grpc.StreamInterceptor(grpc_prometheus.StreamServerInterceptor)) - } - - if len(apiRatelimits) > 0 { - // create a rate limit interceptor - rateLimitInterceptor := rpc.NewRateLimiterInterceptor(log, apiRatelimits, apiBurstLimits).UnaryServerInterceptor - // append the rate limit interceptor to the list of interceptors - unaryInterceptors = append(unaryInterceptors, rateLimitInterceptor) - } - - // add the logging interceptor, ensure it is innermost wrapper - unaryInterceptors = append(unaryInterceptors, rpc.LoggingInterceptor(log)...) - - // create a chained unary interceptor - grpcOpts = append(grpcOpts, grpc.ChainUnaryInterceptor(unaryInterceptors...)) - - server := grpc.NewServer(grpcOpts...) - broadcaster := engine.NewBroadcaster() backend, err := New( @@ -147,7 +106,6 @@ func NewEng( e := &Engine{ log: logger, backend: backend, - server: server, headers: headers, chain: chainID.Chain(), config: config, @@ -157,10 +115,13 @@ func NewEng( } e.ComponentManager = component.NewComponentManagerBuilder(). - AddWorker(e.serve). + AddWorker(func(ctx irrecoverable.SignalerContext, ready component.ReadyFunc) { + ready() + <-server.Done() + }). Build() - access.RegisterExecutionDataAPIServer(e.server, e.handler) + access.RegisterExecutionDataAPIServer(server.Server, e.handler) return e, nil } @@ -191,27 +152,3 @@ func (e *Engine) OnExecutionData(executionData *execution_data.BlockExecutionDat e.execDataBroadcaster.Publish() } - -// serve starts the gRPC server. -// When this function returns, the server is considered ready. 
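// The state stream engine now follows the same injected-server pattern. A sketch with
// elided arguments, since the full NewEng parameter list is not shown here; the builder
// flag and nil rate limits mirror how the access tests construct servers and are assumptions:
//
//	execDataServer := grpcserver.NewGrpcServerBuilder(log, config.ListenAddr,
//		config.MaxExecutionDataMsgSize, config.RpcMetricsEnabled, nil, nil).Build()
//	engine, err := state_stream.NewEng(log, config /* ... existing arguments ... */, execDataServer)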
-func (e *Engine) serve(ctx irrecoverable.SignalerContext, ready component.ReadyFunc) { - e.log.Info().Str("state_stream_address", e.config.ListenAddr).Msg("starting grpc server on address") - l, err := net.Listen("tcp", e.config.ListenAddr) - if err != nil { - ctx.Throw(fmt.Errorf("error starting grpc server: %w", err)) - } - - e.stateStreamGrpcAddress = l.Addr() - e.log.Debug().Str("state_stream_address", e.stateStreamGrpcAddress.String()).Msg("listening on port") - - go func() { - ready() - err = e.server.Serve(l) - if err != nil { - ctx.Throw(fmt.Errorf("error trying to serve grpc server: %w", err)) - } - }() - - <-ctx.Done() - e.server.GracefulStop() -} diff --git a/engine/common/grpc/forwarder/forwarder.go b/engine/common/grpc/forwarder/forwarder.go new file mode 100644 index 00000000000..b5cf6244d44 --- /dev/null +++ b/engine/common/grpc/forwarder/forwarder.go @@ -0,0 +1,145 @@ +package forwarder + +import ( + "fmt" + "sync" + "time" + + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/credentials/insecure" + "google.golang.org/grpc/status" + + "github.com/onflow/flow-go/engine/access/rpc/backend" + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/utils/grpcutils" + + "github.com/onflow/flow/protobuf/go/flow/access" +) + +// Forwarder forwards all requests to a set of upstream access nodes or observers +type Forwarder struct { + lock sync.Mutex + roundRobin int + ids flow.IdentityList + upstream []access.AccessAPIClient + connections []*grpc.ClientConn + timeout time.Duration + maxMsgSize uint +} + +func NewForwarder(identities flow.IdentityList, timeout time.Duration, maxMsgSize uint) (*Forwarder, error) { + forwarder := &Forwarder{maxMsgSize: maxMsgSize} + err := forwarder.setFlowAccessAPI(identities, timeout) + return forwarder, err +} + +// setFlowAccessAPI sets a backend access API that forwards some requests to an upstream node. +// It is used by Observer services, Blockchain Data Service, etc. +// Make sure that this is just for observation and not a staked participant in the flow network. +// This means that observers see a copy of the data but there is no interaction to ensure integrity from the root block. +func (f *Forwarder) setFlowAccessAPI(accessNodeAddressAndPort flow.IdentityList, timeout time.Duration) error { + f.timeout = timeout + f.ids = accessNodeAddressAndPort + f.upstream = make([]access.AccessAPIClient, accessNodeAddressAndPort.Count()) + f.connections = make([]*grpc.ClientConn, accessNodeAddressAndPort.Count()) + for i, identity := range accessNodeAddressAndPort { + // Store the faultTolerantClient setup parameters such as address, public key and timeout, so that + // we can refresh the API on connection loss + f.ids[i] = identity + + // We fail on any single error on startup, so that + // we identify bootstrapping errors early + err := f.reconnectingClient(i) + if err != nil { + return err + } + } + + f.roundRobin = 0 + return nil +} + +// reconnectingClient returns an active client, or +// creates one if the last one is not ready anymore.
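// A brief usage sketch for the new Forwarder; the identity list, timeout, and helper name
// are assumptions. FaultTolerantClient (below) returns a ready upstream client, rotating
// through the configured nodes and reconnecting on failure.
func examplePingUpstream(ctx context.Context, upstreamIdentities flow.IdentityList) error {
	fwd, err := NewForwarder(upstreamIdentities, 5*time.Second, grpcutils.DefaultMaxMsgSize)
	if err != nil {
		return err
	}
	client, err := fwd.FaultTolerantClient()
	if err != nil {
		return err
	}
	_, err = client.Ping(ctx, &access.PingRequest{})
	return err
}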
+func (f *Forwarder) reconnectingClient(i int) error { + timeout := f.timeout + + if f.connections[i] == nil || f.connections[i].GetState() != connectivity.Ready { + identity := f.ids[i] + var connection *grpc.ClientConn + var err error + if identity.NetworkPubKey == nil { + connection, err = grpc.Dial( + identity.Address, + grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(int(f.maxMsgSize))), + grpc.WithTransportCredentials(insecure.NewCredentials()), + backend.WithClientUnaryInterceptor(timeout)) + if err != nil { + return err + } + } else { + tlsConfig, err := grpcutils.DefaultClientTLSConfig(identity.NetworkPubKey) + if err != nil { + return fmt.Errorf("failed to get default TLS client config using public flow networking key %s %w", identity.NetworkPubKey.String(), err) + } + + connection, err = grpc.Dial( + identity.Address, + grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(int(f.maxMsgSize))), + grpc.WithTransportCredentials(credentials.NewTLS(tlsConfig)), + backend.WithClientUnaryInterceptor(timeout)) + if err != nil { + return fmt.Errorf("cannot connect to %s %w", identity.Address, err) + } + } + connection.Connect() + time.Sleep(1 * time.Second) + state := connection.GetState() + if state != connectivity.Ready && state != connectivity.Connecting { + return fmt.Errorf("%v", state) + } + f.connections[i] = connection + f.upstream[i] = access.NewAccessAPIClient(connection) + } + + return nil +} + +// FaultTolerantClient returns an upstream client, reconnecting on errors +// a reasonable number of times before giving up. +func (f *Forwarder) FaultTolerantClient() (access.AccessAPIClient, error) { + if f.upstream == nil || len(f.upstream) == 0 { + return nil, status.Errorf(codes.Unimplemented, "method not implemented") + } + + // Reasoning: A retry count of three gives an acceptable 5% failure ratio from a 37% failure ratio. + // A bigger number is problematic due to the DNS resolve and connection times, + // plus the need to log and debug each individual connection failure. + // + // This reasoning eliminates the need to make this parameter configurable. + // The logic also works when rolling over a single connection, which keeps the code clean.
+ const retryMax = 3 + + f.lock.Lock() + defer f.lock.Unlock() + + var err error + for i := 0; i < retryMax; i++ { + f.roundRobin++ + f.roundRobin = f.roundRobin % len(f.upstream) + err = f.reconnectingClient(f.roundRobin) + if err != nil { + continue + } + state := f.connections[f.roundRobin].GetState() + if state != connectivity.Ready && state != connectivity.Connecting { + continue + } + return f.upstream[f.roundRobin], nil + } + + return nil, status.Errorf(codes.Unavailable, err.Error()) +} diff --git a/engine/common/requester/engine.go b/engine/common/requester/engine.go index 674ef614f27..bc54d3dd45b 100644 --- a/engine/common/requester/engine.go +++ b/engine/common/requester/engine.go @@ -367,7 +367,11 @@ func (e *Engine) dispatchRequest() (bool, error) { if len(providers) == 0 { return false, fmt.Errorf("no valid providers available") } - providerID = providers.Sample(1)[0].NodeID + id, err := providers.Sample(1) + if err != nil { + return false, fmt.Errorf("sampling failed: %w", err) + } + providerID = id[0].NodeID } // add item to list and set retry parameters diff --git a/engine/common/rpc/convert/blocks.go b/engine/common/rpc/convert/blocks.go index 3c42fffb4c0..2e7f5689515 100644 --- a/engine/common/rpc/convert/blocks.go +++ b/engine/common/rpc/convert/blocks.go @@ -3,10 +3,11 @@ package convert import ( "fmt" - "github.com/onflow/flow/protobuf/go/flow/entities" "google.golang.org/protobuf/types/known/timestamppb" "github.com/onflow/flow-go/model/flow" + + "github.com/onflow/flow/protobuf/go/flow/entities" ) // BlockToMessage converts a flow.Block to a protobuf Block message. diff --git a/engine/common/rpc/convert/collections.go b/engine/common/rpc/convert/collections.go index 00f3f477ccb..69725636449 100644 --- a/engine/common/rpc/convert/collections.go +++ b/engine/common/rpc/convert/collections.go @@ -44,6 +44,18 @@ func LightCollectionToMessage(c *flow.LightCollection) (*entities.Collection, er }, nil } +// MessageToLightCollection converts a protobuf message to a light collection +func MessageToLightCollection(m *entities.Collection) (*flow.LightCollection, error) { + transactions := make([]flow.Identifier, 0, len(m.TransactionIds)) + for _, txId := range m.TransactionIds { + transactions = append(transactions, MessageToIdentifier(txId)) + } + + return &flow.LightCollection{ + Transactions: transactions, + }, nil +} + // CollectionGuaranteeToMessage converts a collection guarantee to a protobuf message func CollectionGuaranteeToMessage(g *flow.CollectionGuarantee) *entities.CollectionGuarantee { id := g.ID() diff --git a/engine/common/rpc/convert/collections_test.go b/engine/common/rpc/convert/collections_test.go index 75ab6f25adc..2e14a6dc225 100644 --- a/engine/common/rpc/convert/collections_test.go +++ b/engine/common/rpc/convert/collections_test.go @@ -9,6 +9,8 @@ import ( "github.com/onflow/flow-go/engine/common/rpc/convert" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/utils/unittest" + + "github.com/onflow/flow/protobuf/go/flow/entities" ) // TestConvertCollection tests that converting a collection to a protobuf message results in the correct @@ -32,10 +34,12 @@ func TestConvertCollection(t *testing.T) { } }) - t.Run("convert light collection to message", func(t *testing.T) { - lightCollection := flow.LightCollection{Transactions: txIDs} + var msg *entities.Collection + lightCollection := flow.LightCollection{Transactions: txIDs} - msg, err := convert.LightCollectionToMessage(&lightCollection) + t.Run("convert light collection to message", 
func(t *testing.T) {
+		var err error
+		msg, err = convert.LightCollectionToMessage(&lightCollection)
 		require.NoError(t, err)
 
 		assert.Len(t, msg.TransactionIds, len(txIDs))
@@ -43,6 +47,16 @@ func TestConvertCollection(t *testing.T) {
 			assert.Equal(t, txID[:], msg.TransactionIds[i])
 		}
 	})
+
+	t.Run("convert message to light collection", func(t *testing.T) {
+		lightColl, err := convert.MessageToLightCollection(msg)
+		require.NoError(t, err)
+
+		assert.Equal(t, len(txIDs), len(lightColl.Transactions))
+		for i, txID := range lightColl.Transactions {
+			assert.Equal(t, txIDs[i], txID)
+		}
+	})
 }
 
 // TestConvertCollectionGuarantee tests that converting a collection guarantee to and from a protobuf
diff --git a/engine/common/rpc/convert/events.go b/engine/common/rpc/convert/events.go
index d3bd469cd48..58ccb0ed9a1 100644
--- a/engine/common/rpc/convert/events.go
+++ b/engine/common/rpc/convert/events.go
@@ -4,12 +4,16 @@ import (
 	"encoding/json"
 	"fmt"
 
+	"google.golang.org/protobuf/types/known/timestamppb"
+
 	"github.com/onflow/cadence/encoding/ccf"
 	jsoncdc "github.com/onflow/cadence/encoding/json"
-	"github.com/onflow/flow/protobuf/go/flow/entities"
-	execproto "github.com/onflow/flow/protobuf/go/flow/execution"
 
 	"github.com/onflow/flow-go/model/flow"
+
+	accessproto "github.com/onflow/flow/protobuf/go/flow/access"
+	"github.com/onflow/flow/protobuf/go/flow/entities"
+	execproto "github.com/onflow/flow/protobuf/go/flow/execution"
 )
 
 // EventToMessage converts a flow.Event to a protobuf message
@@ -172,3 +176,51 @@ func CcfEventToJsonEvent(e flow.Event) (*flow.Event, error) {
 		Payload: convertedPayload,
 	}, nil
 }
+
+// MessagesToBlockEvents converts a slice of protobuf EventsResponse_Result messages to a slice of flow.BlockEvents.
+func MessagesToBlockEvents(blocksEvents []*accessproto.EventsResponse_Result) []flow.BlockEvents {
+	evs := make([]flow.BlockEvents, len(blocksEvents))
+	for i, ev := range blocksEvents {
+		evs[i] = MessageToBlockEvents(ev)
+	}
+
+	return evs
+}
+
+// MessageToBlockEvents converts a protobuf EventsResponse_Result message to a flow.BlockEvents.
+func MessageToBlockEvents(blockEvents *accessproto.EventsResponse_Result) flow.BlockEvents { + return flow.BlockEvents{ + BlockHeight: blockEvents.BlockHeight, + BlockID: MessageToIdentifier(blockEvents.BlockId), + BlockTimestamp: blockEvents.BlockTimestamp.AsTime(), + Events: MessagesToEvents(blockEvents.Events), + } +} + +func BlockEventsToMessages(blocks []flow.BlockEvents) ([]*accessproto.EventsResponse_Result, error) { + results := make([]*accessproto.EventsResponse_Result, len(blocks)) + + for i, block := range blocks { + event, err := BlockEventsToMessage(block) + if err != nil { + return nil, err + } + results[i] = event + } + + return results, nil +} + +func BlockEventsToMessage(block flow.BlockEvents) (*accessproto.EventsResponse_Result, error) { + eventMessages := make([]*entities.Event, len(block.Events)) + for i, event := range block.Events { + eventMessages[i] = EventToMessage(event) + } + timestamp := timestamppb.New(block.BlockTimestamp) + return &accessproto.EventsResponse_Result{ + BlockId: block.BlockID[:], + BlockHeight: block.BlockHeight, + BlockTimestamp: timestamp, + Events: eventMessages, + }, nil +} diff --git a/engine/common/rpc/convert/events_test.go b/engine/common/rpc/convert/events_test.go index 2cf010fa011..879db710f8b 100644 --- a/engine/common/rpc/convert/events_test.go +++ b/engine/common/rpc/convert/events_test.go @@ -193,3 +193,24 @@ func TestConvertServiceEventList(t *testing.T) { assert.Equal(t, serviceEvents, converted) } + +// TestConvertMessagesToBlockEvents tests that converting a protobuf EventsResponse_Result message to and from block events in the same +// block +func TestConvertMessagesToBlockEvents(t *testing.T) { + t.Parallel() + + count := 2 + blockEvents := make([]flow.BlockEvents, count) + for i := 0; i < count; i++ { + header := unittest.BlockHeaderFixture(unittest.WithHeaderHeight(uint64(i))) + blockEvents[i] = unittest.BlockEventsFixture(header, 2) + } + + msg, err := convert.BlockEventsToMessages(blockEvents) + require.NoError(t, err) + + converted := convert.MessagesToBlockEvents(msg) + require.NoError(t, err) + + assert.Equal(t, blockEvents, converted) +} diff --git a/engine/execution/computation/computer/computer.go b/engine/execution/computation/computer/computer.go index 00a19352a10..4c3749f9e8f 100644 --- a/engine/execution/computation/computer/computer.go +++ b/engine/execution/computation/computer/computer.go @@ -24,6 +24,7 @@ import ( "github.com/onflow/flow-go/module/executiondatasync/provider" "github.com/onflow/flow-go/module/mempool/entity" "github.com/onflow/flow-go/module/trace" + "github.com/onflow/flow-go/state/protocol" "github.com/onflow/flow-go/utils/logging" ) @@ -113,6 +114,7 @@ type blockComputer struct { spockHasher hash.Hasher receiptHasher hash.Hasher colResCons []result.ExecutedCollectionConsumer + protocolState protocol.State maxConcurrency int } @@ -141,6 +143,7 @@ func NewBlockComputer( signer module.Local, executionDataProvider *provider.Provider, colResCons []result.ExecutedCollectionConsumer, + state protocol.State, maxConcurrency int, ) (BlockComputer, error) { if maxConcurrency < 1 { @@ -164,6 +167,7 @@ func NewBlockComputer( spockHasher: utils.NewSPOCKHasher(), receiptHasher: utils.NewExecutionReceiptHasher(), colResCons: colResCons, + protocolState: state, maxConcurrency: maxConcurrency, }, nil } @@ -204,7 +208,15 @@ func (e *blockComputer) queueTransactionRequests( collectionCtx := fvm.NewContextFromParent( e.vmCtx, - fvm.WithBlockHeader(blockHeader)) + fvm.WithBlockHeader(blockHeader), + // 
`protocol.Snapshot` implements `EntropyProvider` interface + // Note that `Snapshot` possible errors for RandomSource() are: + // - storage.ErrNotFound if the QC is unknown. + // - state.ErrUnknownSnapshotReference if the snapshot reference block is unknown + // However, at this stage, snapshot reference block should be known and the QC should also be known, + // so no error is expected in normal operations, as required by `EntropyProvider`. + fvm.WithEntropyProvider(e.protocolState.AtBlockID(blockId)), + ) for idx, collection := range rawCollections { collectionLogger := collectionCtx.Logger.With(). @@ -237,7 +249,15 @@ func (e *blockComputer) queueTransactionRequests( systemCtx := fvm.NewContextFromParent( e.systemChunkCtx, - fvm.WithBlockHeader(blockHeader)) + fvm.WithBlockHeader(blockHeader), + // `protocol.Snapshot` implements `EntropyProvider` interface + // Note that `Snapshot` possible errors for RandomSource() are: + // - storage.ErrNotFound if the QC is unknown. + // - state.ErrUnknownSnapshotReference if the snapshot reference block is unknown + // However, at this stage, snapshot reference block should be known and the QC should also be known, + // so no error is expected in normal operations, as required by `EntropyProvider`. + fvm.WithEntropyProvider(e.protocolState.AtBlockID(blockId)), + ) systemCollectionLogger := systemCtx.Logger.With(). Str("block_id", blockIdStr). Uint64("height", blockHeader.Height). diff --git a/engine/execution/computation/computer/computer_test.go b/engine/execution/computation/computer/computer_test.go index 8f271abab32..4428f06465f 100644 --- a/engine/execution/computation/computer/computer_test.go +++ b/engine/execution/computation/computer/computer_test.go @@ -172,6 +172,7 @@ func TestBlockExecutor_ExecuteBlock(t *testing.T) { me, prov, nil, + testutil.ProtocolStateWithSourceFixture(nil), testMaxConcurrency) require.NoError(t, err) @@ -306,6 +307,7 @@ func TestBlockExecutor_ExecuteBlock(t *testing.T) { me, prov, nil, + testutil.ProtocolStateWithSourceFixture(nil), testMaxConcurrency) require.NoError(t, err) @@ -403,6 +405,7 @@ func TestBlockExecutor_ExecuteBlock(t *testing.T) { me, prov, nil, + testutil.ProtocolStateWithSourceFixture(nil), testMaxConcurrency) require.NoError(t, err) @@ -462,6 +465,7 @@ func TestBlockExecutor_ExecuteBlock(t *testing.T) { me, prov, nil, + testutil.ProtocolStateWithSourceFixture(nil), testMaxConcurrency) require.NoError(t, err) @@ -678,6 +682,7 @@ func TestBlockExecutor_ExecuteBlock(t *testing.T) { me, prov, nil, + testutil.ProtocolStateWithSourceFixture(nil), testMaxConcurrency) require.NoError(t, err) @@ -788,6 +793,7 @@ func TestBlockExecutor_ExecuteBlock(t *testing.T) { me, prov, nil, + testutil.ProtocolStateWithSourceFixture(nil), testMaxConcurrency) require.NoError(t, err) @@ -900,6 +906,7 @@ func TestBlockExecutor_ExecuteBlock(t *testing.T) { me, prov, nil, + testutil.ProtocolStateWithSourceFixture(nil), testMaxConcurrency) require.NoError(t, err) @@ -944,6 +951,7 @@ func TestBlockExecutor_ExecuteBlock(t *testing.T) { me, prov, nil, + testutil.ProtocolStateWithSourceFixture(nil), testMaxConcurrency) require.NoError(t, err) @@ -1263,6 +1271,7 @@ func Test_ExecutingSystemCollection(t *testing.T) { me, prov, nil, + testutil.ProtocolStateWithSourceFixture(nil), testMaxConcurrency) require.NoError(t, err) diff --git a/engine/execution/computation/execution_verification_test.go b/engine/execution/computation/execution_verification_test.go index 9738df52813..9b5f53641c1 100644 --- 
a/engine/execution/computation/execution_verification_test.go +++ b/engine/execution/computation/execution_verification_test.go @@ -781,6 +781,7 @@ func executeBlockAndVerifyWithParameters(t *testing.T, me, prov, nil, + testutil.ProtocolStateWithSourceFixture(nil), testVerifyMaxConcurrency) require.NoError(t, err) diff --git a/engine/execution/computation/manager.go b/engine/execution/computation/manager.go index 7d0b9211549..907e17cd8e7 100644 --- a/engine/execution/computation/manager.go +++ b/engine/execution/computation/manager.go @@ -119,8 +119,7 @@ func New( // Capability Controllers are enabled everywhere except for Mainnet CapabilityControllersEnabled: chainID != flow.Mainnet, }, - ), - ), + )), } if params.ExtensiveTracing { options = append(options, fvm.WithExtensiveTracing()) @@ -138,6 +137,7 @@ func New( me, executionDataProvider, nil, // TODO(ramtin): update me with proper consumers + protoState, params.MaxConcurrency, ) diff --git a/engine/execution/computation/manager_benchmark_test.go b/engine/execution/computation/manager_benchmark_test.go index c43183ad0ee..d5d55a50691 100644 --- a/engine/execution/computation/manager_benchmark_test.go +++ b/engine/execution/computation/manager_benchmark_test.go @@ -201,6 +201,7 @@ func benchmarkComputeBlock( me, prov, nil, + testutil.ProtocolStateWithSourceFixture(nil), maxConcurrency) require.NoError(b, err) diff --git a/engine/execution/computation/manager_test.go b/engine/execution/computation/manager_test.go index 0763eb1c3f9..3dbfcb4e527 100644 --- a/engine/execution/computation/manager_test.go +++ b/engine/execution/computation/manager_test.go @@ -143,6 +143,7 @@ func TestComputeBlockWithStorage(t *testing.T) { me, prov, nil, + testutil.ProtocolStateWithSourceFixture(nil), testMaxConcurrency) require.NoError(t, err) @@ -835,6 +836,7 @@ func Test_EventEncodingFailsOnlyTxAndCarriesOn(t *testing.T) { me, prov, nil, + testutil.ProtocolStateWithSourceFixture(nil), testMaxConcurrency) require.NoError(t, err) diff --git a/engine/execution/computation/programs_test.go b/engine/execution/computation/programs_test.go index 957f41300fa..8fe46e8ea6e 100644 --- a/engine/execution/computation/programs_test.go +++ b/engine/execution/computation/programs_test.go @@ -138,6 +138,7 @@ func TestPrograms_TestContractUpdates(t *testing.T) { me, prov, nil, + testutil.ProtocolStateWithSourceFixture(nil), testMaxConcurrency) require.NoError(t, err) @@ -249,6 +250,7 @@ func TestPrograms_TestBlockForks(t *testing.T) { me, prov, nil, + testutil.ProtocolStateWithSourceFixture(nil), testMaxConcurrency) require.NoError(t, err) diff --git a/engine/execution/computation/query/executor.go b/engine/execution/computation/query/executor.go index 44f7ec69ab6..7b5a7eb4b35 100644 --- a/engine/execution/computation/query/executor.go +++ b/engine/execution/computation/query/executor.go @@ -4,7 +4,6 @@ import ( "context" "encoding/hex" "fmt" - "math/rand" "strings" "sync" "time" @@ -18,6 +17,7 @@ import ( "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/module" "github.com/onflow/flow-go/utils/debug" + "github.com/onflow/flow-go/utils/rand" ) const ( @@ -71,7 +71,6 @@ type QueryExecutor struct { vmCtx fvm.Context derivedChainData *derived.DerivedChainData rngLock *sync.Mutex - rng *rand.Rand } var _ Executor = &QueryExecutor{} @@ -92,7 +91,6 @@ func NewQueryExecutor( vmCtx: vmCtx, derivedChainData: derivedChainData, rngLock: &sync.Mutex{}, - rng: rand.New(rand.NewSource(time.Now().UnixNano())), } } @@ -115,8 +113,11 @@ func (e *QueryExecutor) ExecuteScript( 
// TODO: this is a temporary measure, we could remove this in the future if e.logger.Debug().Enabled() { e.rngLock.Lock() - trackerID := e.rng.Uint32() - e.rngLock.Unlock() + defer e.rngLock.Unlock() + trackerID, err := rand.Uint32() + if err != nil { + return nil, fmt.Errorf("failed to generate trackerID: %w", err) + } trackedLogger := e.logger.With().Hex("script_hex", script).Uint32("trackerID", trackerID).Logger() trackedLogger.Debug().Msg("script is sent for execution") diff --git a/engine/execution/ingestion/uploader/model.go b/engine/execution/ingestion/uploader/model.go index ba01f27ca28..fc39dd08393 100644 --- a/engine/execution/ingestion/uploader/model.go +++ b/engine/execution/ingestion/uploader/model.go @@ -29,9 +29,10 @@ func ComputationResultToBlockData(computationResult *execution.ComputationResult txResults[i] = &AllResults[i] } - events := make([]*flow.Event, 0) - for _, e := range computationResult.AllEvents() { - events = append(events, &e) + eventsList := computationResult.AllEvents() + events := make([]*flow.Event, len(eventsList)) + for i := 0; i < len(eventsList); i++ { + events[i] = &eventsList[i] } trieUpdates := make( diff --git a/engine/execution/ingestion/uploader/model_test.go b/engine/execution/ingestion/uploader/model_test.go index c58979eb44f..5f78824ebe4 100644 --- a/engine/execution/ingestion/uploader/model_test.go +++ b/engine/execution/ingestion/uploader/model_test.go @@ -11,6 +11,7 @@ import ( "github.com/onflow/flow-go/ledger" "github.com/onflow/flow-go/ledger/common/pathfinder" "github.com/onflow/flow-go/ledger/complete" + "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/utils/unittest" ) @@ -29,12 +30,23 @@ func Test_ComputationResultToBlockDataConversion(t *testing.T) { assert.Equal(t, result, *blockData.TxResults[i]) } - // ramtin: warning returned events are not preserving orders, - // but since we are going to depricate this part of logic, - // I'm not going to spend more time fixing this mess + // Since returned events are not preserving orders, + // use map with event.ID() as key to confirm all events + // are included. 
allEvents := cr.AllEvents() require.Equal(t, len(allEvents), len(blockData.Events)) + eventsInBlockData := make(map[flow.Identifier]flow.Event) + for _, e := range blockData.Events { + eventsInBlockData[e.ID()] = *e + } + + for _, expectedEvent := range allEvents { + event, ok := eventsInBlockData[expectedEvent.ID()] + require.True(t, ok) + require.Equal(t, expectedEvent, event) + } + assert.Equal(t, len(expectedTrieUpdates), len(blockData.TrieUpdates)) assert.Equal(t, cr.CurrentEndState(), blockData.FinalStateCommitment) diff --git a/engine/execution/state/bootstrap/bootstrap_test.go b/engine/execution/state/bootstrap/bootstrap_test.go index db44a03230f..1d84b2938db 100644 --- a/engine/execution/state/bootstrap/bootstrap_test.go +++ b/engine/execution/state/bootstrap/bootstrap_test.go @@ -53,7 +53,7 @@ func TestBootstrapLedger(t *testing.T) { } func TestBootstrapLedger_ZeroTokenSupply(t *testing.T) { - expectedStateCommitmentBytes, _ := hex.DecodeString("60a1998dc3c2656758f76520d040e1612b14d80bae263dd0d1118aa7c7d6e4ee") + expectedStateCommitmentBytes, _ := hex.DecodeString("986c540657fdb3b4154311069d901223a3268492f678ae706010cd537cc328ad") expectedStateCommitment, err := flow.ToStateCommitment(expectedStateCommitmentBytes) require.NoError(t, err) diff --git a/engine/execution/testutil/fixtures.go b/engine/execution/testutil/fixtures.go index ecd19ee71a8..3113f2df9af 100644 --- a/engine/execution/testutil/fixtures.go +++ b/engine/execution/testutil/fixtures.go @@ -10,6 +10,7 @@ import ( "github.com/onflow/cadence" "github.com/onflow/cadence/encoding/ccf" jsoncdc "github.com/onflow/cadence/encoding/json" + "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" "github.com/onflow/flow-go/crypto" @@ -17,6 +18,8 @@ import ( "github.com/onflow/flow-go/engine/execution" "github.com/onflow/flow-go/engine/execution/utils" "github.com/onflow/flow-go/fvm" + "github.com/onflow/flow-go/fvm/environment" + envMock "github.com/onflow/flow-go/fvm/environment/mock" "github.com/onflow/flow-go/fvm/storage/snapshot" "github.com/onflow/flow-go/ledger" "github.com/onflow/flow-go/ledger/common/pathfinder" @@ -24,6 +27,8 @@ import ( "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/module/epochs" "github.com/onflow/flow-go/module/executiondatasync/execution_data" + "github.com/onflow/flow-go/state/protocol" + protocolMock "github.com/onflow/flow-go/state/protocol/mock" "github.com/onflow/flow-go/utils/unittest" ) @@ -626,3 +631,30 @@ func ComputationResultFixture(t *testing.T) *execution.ComputationResult { }, } } + +// EntropyProviderFixture returns an entropy provider mock that +// supports RandomSource(). +// If input is nil, a random source fixture is generated. +func EntropyProviderFixture(source []byte) environment.EntropyProvider { + if source == nil { + source = unittest.SignatureFixture() + } + provider := envMock.EntropyProvider{} + provider.On("RandomSource").Return(source, nil) + return &provider +} + +// ProtocolStateWithSourceFixture returns a protocol state mock that only +// supports AtBlockID to return a snapshot mock. +// The snapshot mock only supports RandomSource(). +// If input is nil, a random source fixture is generated. 
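+// The mock is intended for tests that only need the snapshot as an `EntropyProvider`
+// (e.g. the block computer); calling any other method on the returned mocks will panic.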
+func ProtocolStateWithSourceFixture(source []byte) protocol.State { + if source == nil { + source = unittest.SignatureFixture() + } + snapshot := &protocolMock.Snapshot{} + snapshot.On("RandomSource").Return(source, nil) + state := protocolMock.State{} + state.On("AtBlockID", mock.Anything).Return(snapshot) + return &state +} diff --git a/engine/verification/Readme.md b/engine/verification/Readme.md new file mode 100644 index 00000000000..ff527a432b0 --- /dev/null +++ b/engine/verification/Readme.md @@ -0,0 +1,170 @@ +# Verification Node +The Verification Node in the Flow blockchain network is a critical component responsible for +verifying `ExecutionResult`s and generating `ResultApproval`s. +Its primary role is to ensure the integrity and validity of block execution by performing verification processes. +In a nutshell, the Verification Node is responsible for the following: +1. Following the chain for new finalized blocks (`Follower` engine). +2. Processing the execution results in the finalized blocks and determining assigned chunks to the node (`Assigner` engine). +3. Requesting chunk data pack from Execution Nodes for the assigned chunks (`Fetcher` and `Requester` engines). +4. Verifying the assigned chunks and emitting `ResultApproval`s for the verified chunks to Consensus Nodes (`Verifier` engine). +![architecture.png](architecture.png) + + +## Block Consumer ([consumer.go](verification%2Fassigner%2Fblockconsumer%2Fconsumer.go)) +The `blockconsumer` package efficiently manages the processing of finalized blocks in Verification Node of Flow blockchain. +Specifically, it listens for notifications from the `Follower` engine regarding finalized blocks, and systematically +queues these blocks for processing. The package employs parallel workers, each an instance of the `Assigner` engine, +to fetch and process blocks from the queue. The `BlockConsumer` diligently coordinates this process by only assigning +a new block to a worker once it has completed processing its current block and signaled its availability. +This ensures that the processing is not only methodical but also resilient to any node crashes. +In case of a crash, the `BlockConsumer` resumes from where it left off by reading the processed block index from storage, reassigning blocks from the queue to workers, +thereby guaranteeing no loss of data. + +## Assigner Engine +The `Assigner` [engine](verification%2Fassigner%2Fengine.go) is an integral part of the verification process in Flow, +focusing on processing the execution results in the finalized blocks, performing chunk assignments on the results, and +queuing the assigned chunks for further processing. The Assigner engine is a worker of the `BlockConsumer` engine, +which assigns finalized blocks to the Assigner engine for processing. +This engine reads execution receipts included in each finalized block, +determines which chunks are assigned to the node for verification, +and stores the assigned chunks into the chunks queue for further processing (by the `Fetcher` engine). + +The core behavior of the Assigner engine is implemented in the `ProcessFinalizedBlock` function. +This function initiates the process of execution receipt indexing, chunk assignment, and processing the assigned chunks. +For every receipt in the block, the engine determines chunk assignments using the verifiable chunk assignment algorithm of Flow. +Each assigned chunk is then processed by the `processChunk` method. 
This method is responsible for storing a chunk locator in the chunks queue,
+which is a crucial step for further processing of the chunks by the fetcher engine.
+Deduplication of chunk locators is handled by the chunks queue.
+The Assigner engine provides robustness by handling the situation where a node is not authorized at a specific block ID.
+It verifies the role of the result executor, checks if the node has been ejected, and assesses the node's staked weight before granting authorization.
+Lastly, once the Assigner engine has completed processing the receipts in a block, it sends a notification to the block consumer. This is in line with the
+Assigner engine's role as a worker of the block consumer: it informs the consumer that it is ready to process the next block.
+This ensures a smooth and efficient flow of data in the system, promoting consistency across different parts of the Flow architecture.
+
+### Chunk Locator
+A chunk locator in the Flow blockchain is an internal structure of the Verification Nodes that points to a specific chunk
+within a specific execution result of a block. It's an important part of the verification process in the Flow network,
+allowing verification nodes to efficiently identify, retrieve, and verify individual chunks of computation.
+
+```go
+type ChunkLocator struct {
+    ResultID flow.Identifier // The identifier of the ExecutionResult
+    Index    uint64          // Index of the chunk
+}
+```
+- `ResultID`: This is the identifier of the execution result that the chunk is a part of. The execution result contains a list of chunks which each represent a portion of the computation carried out by execution nodes. Each execution result is linked to a specific block in the blockchain.
+- `Index`: This is the index of the chunk within the execution result's list of chunks. It's an easy way to refer to a specific chunk within a specific execution result.
+
+**Note-1**: The `ChunkLocator` doesn't contain the chunk itself but points to where the chunk can be found. In the context of the `Assigner` engine, the `ChunkLocator` is stored in a queue after chunk assignment is done, so the `Fetcher` engine can later retrieve the chunk for verification.
+**Note-2**: The `ChunkLocator` is never meant to be sent over the networking layer to another Flow node. It's an internal structure of the verification nodes, and it's only used for internal communication between the `Assigner` and `Fetcher` engines.
+
+
+## ChunkConsumer
+The `ChunkConsumer` ([consumer](verification%2Ffetcher%2Fchunkconsumer%2Fconsumer.go)) package orchestrates the processing of chunks in the Verification Node of the Flow blockchain.
+Specifically, it keeps tabs on chunks that are assigned for processing by the `Assigner` engine and systematically enqueues these chunks for further handling.
+To expedite the processing, the package deploys parallel workers, with each worker being an instance of the `Fetcher` engine, which retrieves and processes the chunks from the queue.
+The `ChunkConsumer` administers this process by ensuring that a new chunk is assigned to a worker only after it has finalized processing its current chunk and signaled that it is ready for more.
+This systematic approach guarantees not only efficiency but also robustness against any node failures. In the event that a node crashes,
+the `ChunkConsumer` picks up right where it left off, redistributing chunks from the queue to the workers, ensuring that there is no loss of data or progress.
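+
+The sketch below illustrates the hand-off pattern described above: locators are dispatched to a
+bounded pool of workers, and a new locator is only handed out once a worker signals that it is free.
+This is a minimal, illustrative example only (the real `ChunkConsumer` tracks its progress in storage
+and uses `Fetcher` engine instances as workers); all names in the sketch are made up for illustration.
+
+```go
+package main
+
+import (
+	"fmt"
+	"sync"
+)
+
+// ChunkLocator mirrors the structure above: it points to a chunk, it is not the chunk itself.
+type ChunkLocator struct {
+	ResultID string // identifier of the ExecutionResult (flow.Identifier in the real code)
+	Index    uint64 // index of the chunk within that result
+}
+
+func main() {
+	queue := []ChunkLocator{{"r1", 0}, {"r1", 1}, {"r2", 0}, {"r2", 1}}
+
+	const maxWorkers = 2
+	ready := make(chan struct{}, maxWorkers) // one token per idle worker slot
+	for i := 0; i < maxWorkers; i++ {
+		ready <- struct{}{}
+	}
+
+	var wg sync.WaitGroup
+	for _, loc := range queue {
+		<-ready // hand out a locator only once a worker has signaled it is free
+		wg.Add(1)
+		go func(loc ChunkLocator) {
+			defer wg.Done()
+			fmt.Printf("fetching and verifying chunk %d of result %s\n", loc.Index, loc.ResultID)
+			ready <- struct{}{} // signal readiness for the next locator
+		}(loc)
+	}
+	wg.Wait()
+}
+```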
+ +## Fetcher Engine - The Journey of a `ChunkLocator` to a `VerifiableChunkData` +The Fetcher [engine.go](fetcher%2Fengine.go) of the Verification Nodes focuses on the lifecycle of a `ChunkLocator` as it transitions into a `VerifiableChunkData`. + +### `VerifiableChunkData` +`VerifiableChunkData` refers to a data structure that encapsulates all the necessary components and resources required to +verify a chunk within the Flow blockchain network. It represents a chunk that has undergone processing and is ready for verification. + +The `VerifiableChunkData` object contains the following key elements: +```go +type VerifiableChunkData struct { + IsSystemChunk bool // indicates whether this is a system chunk + Chunk *flow.Chunk // the chunk to be verified + Header *flow.Header // BlockHeader that contains this chunk + Result *flow.ExecutionResult // execution result of this block + ChunkDataPack *flow.ChunkDataPack // chunk data package needed to verify this chunk + EndState flow.StateCommitment // state commitment at the end of this chunk + TransactionOffset uint32 // index of the first transaction in a chunk within a block +} +``` +1. `IsSystemChunk`: A boolean value that indicates whether the chunk is a system chunk. System chunk is a specific chunk typically representing the last chunk within an execution result. +2. `Chunk`: The actual chunk that needs to be verified. It contains the relevant data and instructions related to the execution of transactions within the blockchain. +3. `Header`: The `BlockHeader` associated with the chunk. It provides important contextual information about the block that the chunk belongs to. +4. `Result`: The `ExecutionResult` object that corresponds to the execution of the block containing the chunk. It contains information about the execution status, including any errors or exceptions encountered during the execution process. +5. `ChunkDataPack`: The `ChunkDataPack`, which is a package containing additional data and resources specific to the chunk being verified. It provides supplementary information required for the verification process. +6. `EndState`: The state commitment at the end of the chunk. It represents the final state of the blockchain after executing all the transactions within the chunk. +7. `TransactionOffset`: An index indicating the position of the first transaction within the chunk in relation to the entire block. This offset helps in locating and tracking individual transactions within the blockchain. +By combining these elements, the VerifiableChunkData object forms a comprehensive representation of a chunk ready for verification. It serves as an input to the `Verifier` engine, which utilizes this data to perform the necessary checks and validations to ensure the integrity and correctness of the chunk within the Flow blockchain network. + +### The Journey of a `ChunkLocator` to a `VerifiableChunkData` +Upon receiving the `ChunkLocator`, the `Fetcher` engine’s `validateAuthorizedExecutionNodeAtBlockID` function is responsible +for validating the authenticity of the sender. It evaluates whether the sender is an authorized execution node for the respective block. +The function cross-references the sender’s credentials against the state snapshot of the specific block. +In the case of unauthorized or invalid credentials, an error is logged, and the `ChunkLocator` is rejected. +For authorized credentials, the processing continues. + +Once authenticated, the `ChunkLocator` is utilized to retrieve the associated Chunk Data Pack. 
+The `requestChunkDataPack` function takes the Chunk Locator and generates a `ChunkDataPackRequest`. +During this stage, the function segregates execution nodes into two categories - those which agree with the execution result (`agrees`) and those which do not (`disagrees`). +This information is encapsulated within the `ChunkDataPackRequest` and is forwarded to the `Requester` Engine. +The `Requester` Engine handles the retrieval of the `ChunkDataPack` from the network of execution nodes. + +After the Chunk Data Pack is successfully retrieved by the `Requester` Engine, +the next phase involves structuring this data for verification and constructing a `VerifiableChunkData`. +It’s imperative that this construction is performed with utmost accuracy to ensure that the data is in a state that can be properly verified. + +The final step in the lifecycle is forwarding the `VerifiableChunkData` to the `Verifier` Engine. The `Verifier` Engine is tasked with the critical function +of thoroughly analyzing and verifying the data. Depending on the outcome of this verification process, +the chunk may either pass verification successfully or be rejected due to discrepancies. + +### Handling Sealed Chunks +In parallel, the `Fetcher` engine remains vigilant regarding the sealed status of chunks. +The `NotifyChunkDataPackSealed` function monitors the sealing status. +If the Consensus Nodes seal a chunk, this function ensures that the `Fetcher` Engine acknowledges this update and discards the respective +`ChunkDataPack` from its processing pipeline as it is now sealed (i.e., has been verified by an acceptable quota of Verification Nodes). + +## Requester Engine - Retrieving the `ChunkDataPack` +The `Requester` [engine](requester%2Frequester.go) is responsible for handling the request and retrieval of chunk data packs in the Flow blockchain network. +It acts as an intermediary between the `Fetcher` engine and the Execution Nodes, facilitating the communication and coordination required +to obtain the necessary `ChunkDataPack` for verification. + +The `Requester` engine receives `ChunkDataPackRequest`s from the `Fetcher`. +These requests contain information such as the chunk ID, block height, agree and disagree executors, and other relevant details. +Upon receiving a `ChunkDataPackRequest`, the `Requester` engine adds it to the pending requests cache for tracking and further processing. +The Requester engine periodically checks the pending chunk data pack requests and dispatches them to the Execution Nodes for retrieval. +It ensures that only qualified requests are dispatched based on certain criteria, such as the chunk ID and request history. +The dispatching process involves creating a `ChunkDataRequest` message and publishing it to the network. +The request is sent to a selected number of Execution Nodes, determined by the `requestTargets` parameter. + +When an Execution Node receives a `ChunkDataPackRequest`, it processes the request and generates a `ChunkDataResponse` +message containing the requested chunk data pack. The execution node sends this response back to the`Requester` engine. +The `Requester` engine receives the chunk data pack response, verifies its integrity, and passes it to the registered `ChunkDataPackHandler`, +i.e., the `Fetcher` engine. + +### Retry and Backoff Mechanism +In case a `ChunkDataPackRequest` does not receive a response within a certain period, the `Requester` engine retries the request to ensure data retrieval. 
+It implements an exponential backoff mechanism for retrying failed requests. +The retry interval, backoff multiplier, and backoff intervals can be customized using the respective configuration parameters. + +### Handling Sealed Blocks +If a `ChunkDataPackRequest` pertains to a block that has already been sealed, the `Requester` engine recognizes this and +removes the corresponding request from the pending requests cache. +It notifies the `ChunkDataPackHandler` (i.e., the `Fetcher` engine) about the sealing of the block to ensure proper handling. + +### Parallel Chunk Data Pack Retrieval +The `Requester` processes a number of chunk data pack requests in parallel, +dispatching them to execution nodes and handling the received responses. +However, it is important to note that if a chunk data pack request does not receive a response from the execution nodes, +the `Requester` engine can become stuck in processing, waiting for the missing chunk data pack. +To mitigate this, the engine implements a retry and backoff mechanism, ensuring that requests are retried and backed off if necessary. +This mechanism helps to prevent prolonged waiting and allows the engine to continue processing other requests while waiting for the missing chunk data pack response. + +## Verifier Engine - Verifying Chunks +The `Verifier` [engine](verifier%2Fengine.go) is responsible for verifying chunks, generating `ResultApproval`s, and maintaining a cache of `ResultApproval`s. +It receives verifiable chunks along with the necessary data for verification, verifies the chunks by constructing a partial trie, +executing transactions, and checking the final state commitment and other chunk metadata. +If the verification is successful, it generates a `ResultApproval` and broadcasts it to the consensus nodes. + +The `Verifier` Engine offers the following key features: +1. **Verification of Chunks**: The engine receives verifiable chunks, which include the chunk to be verified, the associated header, execution result, and chunk data pack. It performs the verification process, which involves constructing a partial trie, executing transactions, and checking the final state commitment. The verification process ensures the integrity and validity of the chunk. +2. **Generation of Result Approvals**: If the verification process is successful, the engine generates a result approval for the verified chunk. The result approval includes the block ID, execution result ID, chunk index, attestation, approver ID, attestation signature, and SPoCK (Secure Proof of Confidential Knowledge) signature. The result approval provides a cryptographic proof of the chunk's validity and is used to seal the block. +3. **Cache of Result Approvals**: The engine maintains a cache of result approvals for efficient retrieval and lookup. The result approvals are stored in a storage module, allowing quick access to the approvals associated with specific chunks and execution results. 
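+
+As a closing illustration, the sketch below shows the retry-with-exponential-backoff pattern described
+in the `Requester` section above. It assumes a hypothetical `requestChunkDataPack` helper standing in
+for publishing a `ChunkDataRequest` and waiting for a response; the real engine additionally tracks
+per-request history and exposes the retry interval and backoff multiplier as configuration parameters.
+
+```go
+package main
+
+import (
+	"errors"
+	"fmt"
+	"time"
+)
+
+// requestChunkDataPack is a stand-in for publishing a ChunkDataRequest and waiting for a response.
+// Here it simply fails on the first two attempts to exercise the retry path.
+func requestChunkDataPack(attempt int) error {
+	if attempt < 3 {
+		return errors.New("no response from execution nodes")
+	}
+	return nil
+}
+
+func main() {
+	const (
+		maxAttempts  = 5
+		baseInterval = 100 * time.Millisecond // initial retry interval
+		multiplier   = 2                      // backoff multiplier
+	)
+
+	interval := baseInterval
+	for attempt := 1; attempt <= maxAttempts; attempt++ {
+		if err := requestChunkDataPack(attempt); err == nil {
+			fmt.Printf("chunk data pack received on attempt %d\n", attempt)
+			return
+		}
+		fmt.Printf("attempt %d failed, retrying in %v\n", attempt, interval)
+		time.Sleep(interval)
+		interval *= multiplier // exponential backoff
+	}
+	fmt.Println("request kept failing; the chunk will be retried later or dropped once sealed")
+}
+```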
diff --git a/engine/verification/architecture.png b/engine/verification/architecture.png new file mode 100644 index 00000000000..a1a16dec61b Binary files /dev/null and b/engine/verification/architecture.png differ diff --git a/engine/verification/assigner/blockconsumer/consumer_test.go b/engine/verification/assigner/blockconsumer/consumer_test.go index f57bc98ae26..67ea6773194 100644 --- a/engine/verification/assigner/blockconsumer/consumer_test.go +++ b/engine/verification/assigner/blockconsumer/consumer_test.go @@ -149,7 +149,8 @@ func withConsumer( root, err := s.State.Params().FinalizedRoot() require.NoError(t, err) clusterCommittee := participants.Filter(filter.HasRole(flow.RoleCollection)) - results := vertestutils.CompleteExecutionReceiptChainFixture(t, root, blockCount/2, vertestutils.WithClusterCommittee(clusterCommittee)) + sources := unittest.RandomSourcesFixture(110) + results := vertestutils.CompleteExecutionReceiptChainFixture(t, root, blockCount/2, sources, vertestutils.WithClusterCommittee(clusterCommittee)) blocks := vertestutils.ExtendStateWithFinalizedBlocks(t, results, s.State) // makes sure that we generated a block chain of requested length. require.Len(t, blocks, blockCount) diff --git a/engine/verification/fetcher/engine.go b/engine/verification/fetcher/engine.go index 23d02c02474..fd53417b720 100644 --- a/engine/verification/fetcher/engine.go +++ b/engine/verification/fetcher/engine.go @@ -526,8 +526,8 @@ func (e *Engine) pushToVerifier(chunk *flow.Chunk, if err != nil { return fmt.Errorf("could not get block: %w", err) } - - vchunk, err := e.makeVerifiableChunkData(chunk, header, result, chunkDataPack) + snapshot := e.state.AtBlockID(header.ID()) + vchunk, err := e.makeVerifiableChunkData(chunk, header, snapshot, result, chunkDataPack) if err != nil { return fmt.Errorf("could not verify chunk: %w", err) } @@ -545,6 +545,7 @@ func (e *Engine) pushToVerifier(chunk *flow.Chunk, // chunk data to verify it. func (e *Engine) makeVerifiableChunkData(chunk *flow.Chunk, header *flow.Header, + snapshot protocol.Snapshot, result *flow.ExecutionResult, chunkDataPack *flow.ChunkDataPack, ) (*verification.VerifiableChunkData, error) { @@ -566,6 +567,7 @@ func (e *Engine) makeVerifiableChunkData(chunk *flow.Chunk, IsSystemChunk: isSystemChunk, Chunk: chunk, Header: header, + Snapshot: snapshot, Result: result, ChunkDataPack: chunkDataPack, EndState: endState, diff --git a/engine/verification/requester/requester.go b/engine/verification/requester/requester.go index 10f91780c72..2285da61025 100644 --- a/engine/verification/requester/requester.go +++ b/engine/verification/requester/requester.go @@ -331,8 +331,11 @@ func (e *Engine) requestChunkDataPack(request *verification.ChunkDataPackRequest } // publishes the chunk data request to the network - targetIDs := request.SampleTargets(int(e.requestTargets)) - err := e.con.Publish(req, targetIDs...) + targetIDs, err := request.SampleTargets(int(e.requestTargets)) + if err != nil { + return fmt.Errorf("target sampling failed: %w", err) + } + err = e.con.Publish(req, targetIDs...) 
if err != nil { return fmt.Errorf("could not publish chunk data pack request for chunk (id=%s): %w", request.ChunkID, err) } diff --git a/engine/verification/utils/unittest/fixture.go b/engine/verification/utils/unittest/fixture.go index cd6905709a0..57c9916e62d 100644 --- a/engine/verification/utils/unittest/fixture.go +++ b/engine/verification/utils/unittest/fixture.go @@ -14,7 +14,7 @@ import ( "github.com/onflow/flow-go/engine/execution/computation/committer" "github.com/onflow/flow-go/engine/execution/computation/computer" - "github.com/onflow/flow-go/engine/execution/state" + exstate "github.com/onflow/flow-go/engine/execution/state" "github.com/onflow/flow-go/engine/execution/state/bootstrap" "github.com/onflow/flow-go/engine/execution/testutil" "github.com/onflow/flow-go/fvm" @@ -189,8 +189,13 @@ func WithClusterCommittee(clusterCommittee flow.IdentityList) CompleteExecutionR // ExecutionResultFixture is a test helper that returns an execution result for the reference block header as well as the execution receipt data // for that result. -func ExecutionResultFixture(t *testing.T, chunkCount int, chain flow.Chain, refBlkHeader *flow.Header, clusterCommittee flow.IdentityList) (*flow.ExecutionResult, - *ExecutionReceiptData) { +func ExecutionResultFixture(t *testing.T, + chunkCount int, + chain flow.Chain, + refBlkHeader *flow.Header, + clusterCommittee flow.IdentityList, + source []byte, +) (*flow.ExecutionResult, *ExecutionReceiptData) { // setups up the first collection of block consists of three transactions tx1 := testutil.DeployCounterContractTransaction(chain.ServiceAddress(), chain) @@ -262,7 +267,7 @@ func ExecutionResultFixture(t *testing.T, chunkCount int, chain flow.Chain, refB ) // create state.View - snapshot := state.NewLedgerStorageSnapshot( + snapshot := exstate.NewLedgerStorageSnapshot( led, startStateCommitment) committer := committer.NewLedgerViewCommitter(led, trace.NewNoopTracer()) @@ -295,6 +300,7 @@ func ExecutionResultFixture(t *testing.T, chunkCount int, chain flow.Chain, refB me, prov, nil, + testutil.ProtocolStateWithSourceFixture(source), testMaxConcurrency) require.NoError(t, err) @@ -367,7 +373,12 @@ func ExecutionResultFixture(t *testing.T, chunkCount int, chain flow.Chain, refB // For sake of simplicity and test, container blocks (i.e., C) do not contain any guarantee. // // It returns a slice of complete execution receipt fixtures that contains a container block as well as all data to verify its contained receipts. -func CompleteExecutionReceiptChainFixture(t *testing.T, root *flow.Header, count int, opts ...CompleteExecutionReceiptBuilderOpt) []*CompleteExecutionReceipt { +func CompleteExecutionReceiptChainFixture(t *testing.T, + root *flow.Header, + count int, + sources [][]byte, + opts ...CompleteExecutionReceiptBuilderOpt, +) []*CompleteExecutionReceipt { completeERs := make([]*CompleteExecutionReceipt, 0, count) parent := root @@ -389,11 +400,14 @@ func CompleteExecutionReceiptChainFixture(t *testing.T, root *flow.Header, count require.GreaterOrEqual(t, len(builder.executorIDs), builder.executorCount, "number of executors in the tests should be greater than or equal to the number of receipts per block") + var sourcesIndex = 0 for i := 0; i < count; i++ { // Generates two blocks as parent <- R <- C where R is a reference block containing guarantees, // and C is a container block containing execution receipt for R. 
-		receipts, allData, head := ExecutionReceiptsFromParentBlockFixture(t, parent, builder)
-		containerBlock := ContainerBlockFixture(head, receipts)
+		receipts, allData, head := ExecutionReceiptsFromParentBlockFixture(t, parent, builder, sources[sourcesIndex:])
+		sourcesIndex += builder.resultsCount
+		containerBlock := ContainerBlockFixture(head, receipts, sources[sourcesIndex])
+		sourcesIndex++
 		completeERs = append(completeERs, &CompleteExecutionReceipt{
 			ContainerBlock: containerBlock,
 			Receipts:       receipts,
@@ -411,7 +425,10 @@ func CompleteExecutionReceiptChainFixture(t *testing.T, root *flow.Header, count
 // result (i.e., for the next result).
 //
 // Each result may appear in more than one receipt depending on the builder parameters.
-func ExecutionReceiptsFromParentBlockFixture(t *testing.T, parent *flow.Header, builder *CompleteExecutionReceiptBuilder) (
+func ExecutionReceiptsFromParentBlockFixture(t *testing.T,
+	parent *flow.Header,
+	builder *CompleteExecutionReceiptBuilder,
+	sources [][]byte) (
 	[]*flow.ExecutionReceipt,
 	[]*ExecutionReceiptData,
 	*flow.Header) {
@@ -419,7 +436,7 @@ func ExecutionReceiptsFromParentBlockFixture(t *testing.T, parent *flow.Header,
 	allReceipts := make([]*flow.ExecutionReceipt, 0, builder.resultsCount*builder.executorCount)
 	for i := 0; i < builder.resultsCount; i++ {
-		result, data := ExecutionResultFromParentBlockFixture(t, parent, builder)
+		result, data := ExecutionResultFromParentBlockFixture(t, parent, builder, sources[i:])
 
 		// makes several copies of the same result
 		for cp := 0; cp < builder.executorCount; cp++ {
@@ -437,16 +454,22 @@ func ExecutionReceiptsFromParentBlockFixture(t *testing.T, parent *flow.Header,
 }
 
 // ExecutionResultFromParentBlockFixture is a test helper that creates a child (reference) block from the parent, as well as an execution for it.
-func ExecutionResultFromParentBlockFixture(t *testing.T, parent *flow.Header, builder *CompleteExecutionReceiptBuilder) (*flow.ExecutionResult,
-	*ExecutionReceiptData) {
-	refBlkHeader := unittest.BlockHeaderWithParentFixture(parent)
-	return ExecutionResultFixture(t, builder.chunksCount, builder.chain, refBlkHeader, builder.clusterCommittee)
+func ExecutionResultFromParentBlockFixture(t *testing.T,
+	parent *flow.Header,
+	builder *CompleteExecutionReceiptBuilder,
+	sources [][]byte,
+) (*flow.ExecutionResult, *ExecutionReceiptData) {
+	// create the block header including a QC with the source at index `i`
+	refBlkHeader := unittest.BlockHeaderWithParentWithSoRFixture(parent, sources[0])
+	// execute the block with the source at index `i+1` (which will be included later in the child block)
+	return ExecutionResultFixture(t, builder.chunksCount, builder.chain, refBlkHeader, builder.clusterCommittee, sources[1])
 }
 
 // ContainerBlockFixture builds and returns a block that contains input execution receipts.
-func ContainerBlockFixture(parent *flow.Header, receipts []*flow.ExecutionReceipt) *flow.Block { +func ContainerBlockFixture(parent *flow.Header, receipts []*flow.ExecutionReceipt, source []byte) *flow.Block { // container block is the block that contains the execution receipt of reference block containerBlock := unittest.BlockWithParentFixture(parent) + containerBlock.Header.ParentVoterSigData = unittest.QCSigDataWithSoRFixture(source) containerBlock.SetPayload(unittest.PayloadFixture(unittest.WithReceipts(receipts...))) return containerBlock diff --git a/engine/verification/utils/unittest/helper.go b/engine/verification/utils/unittest/helper.go index 7c6e6eec323..62f26cd7f70 100644 --- a/engine/verification/utils/unittest/helper.go +++ b/engine/verification/utils/unittest/helper.go @@ -494,7 +494,11 @@ func withConsumers(t *testing.T, builder.clusterCommittee = participants.Filter(filter.HasRole(flow.RoleCollection)) }) - completeERs := CompleteExecutionReceiptChainFixture(t, root, blockCount, ops...) + // random sources for all blocks: + // - root block (block[0]) is executed with sources[0] (included in QC of child block[1]) + // - block[i] is executed with sources[i] (included in QC of child block[i+1]) + sources := unittest.RandomSourcesFixture(30) + completeERs := CompleteExecutionReceiptChainFixture(t, root, blockCount, sources, ops...) blocks := ExtendStateWithFinalizedBlocks(t, completeERs, s.State) // chunk assignment @@ -591,10 +595,10 @@ func withConsumers(t *testing.T, } // verifies memory resources are cleaned up all over pipeline - assert.True(t, verNode.BlockConsumer.Size() == 0) - assert.True(t, verNode.ChunkConsumer.Size() == 0) - assert.True(t, verNode.ChunkStatuses.Size() == 0) - assert.True(t, verNode.ChunkRequests.Size() == 0) + assert.Zero(t, verNode.BlockConsumer.Size()) + assert.Zero(t, verNode.ChunkConsumer.Size()) + assert.Zero(t, verNode.ChunkStatuses.Size()) + assert.Zero(t, verNode.ChunkRequests.Size()) } // bootstrapSystem is a test helper that bootstraps a flow system with node of each main roles (except execution nodes that are two). 
diff --git a/follower/follower_builder.go b/follower/follower_builder.go index 36486907d1c..e2eb43cb49c 100644 --- a/follower/follower_builder.go +++ b/follower/follower_builder.go @@ -56,7 +56,6 @@ import ( "github.com/onflow/flow-go/network/p2p/translator" "github.com/onflow/flow-go/network/p2p/unicast/protocols" "github.com/onflow/flow-go/network/p2p/utils" - "github.com/onflow/flow-go/network/slashing" "github.com/onflow/flow-go/network/validator" "github.com/onflow/flow-go/state/protocol" badgerState "github.com/onflow/flow-go/state/protocol/badger" @@ -605,11 +604,15 @@ func (builder *FollowerServiceBuilder) initPublicLibp2pNode(networkKey crypto.Pr pis = append(pis, pi) } - meshTracer := tracer.NewGossipSubMeshTracer( - builder.Logger, - builder.Metrics.Network, - builder.IdentityProvider, - builder.FlowConfig.NetworkConfig.GossipSubConfig.LocalMeshLogInterval) + meshTracerCfg := &tracer.GossipSubMeshTracerConfig{ + Logger: builder.Logger, + Metrics: builder.Metrics.Network, + IDProvider: builder.IdentityProvider, + LoggerInterval: builder.FlowConfig.NetworkConfig.GossipSubConfig.LocalMeshLogInterval, + RpcSentTrackerCacheCollector: metrics.GossipSubRPCSentTrackerMetricFactory(builder.HeroCacheMetricsFactory(), network.PublicNetwork), + RpcSentTrackerCacheSize: builder.FlowConfig.NetworkConfig.GossipSubConfig.RPCSentTrackerCacheSize, + } + meshTracer := tracer.NewGossipSubMeshTracer(meshTracerCfg) node, err := p2pbuilder.NewNodeBuilder( builder.Logger, @@ -748,15 +751,14 @@ func (builder *FollowerServiceBuilder) initMiddleware(nodeID flow.Identifier, validators ...network.MessageValidator, ) network.Middleware { mw := middleware.NewMiddleware(&middleware.Config{ - Logger: builder.Logger, - Libp2pNode: libp2pNode, - FlowId: nodeID, - BitSwapMetrics: builder.Metrics.Bitswap, - RootBlockID: builder.SporkID, - UnicastMessageTimeout: middleware.DefaultUnicastTimeout, - IdTranslator: builder.IDTranslator, - Codec: builder.CodecFactory(), - SlashingViolationsConsumer: slashing.NewSlashingViolationsConsumer(builder.Logger, builder.Metrics.Network), + Logger: builder.Logger, + Libp2pNode: libp2pNode, + FlowId: nodeID, + BitSwapMetrics: builder.Metrics.Bitswap, + RootBlockID: builder.SporkID, + UnicastMessageTimeout: middleware.DefaultUnicastTimeout, + IdTranslator: builder.IDTranslator, + Codec: builder.CodecFactory(), }, middleware.WithMessageValidators(validators...), ) diff --git a/fvm/blueprints/token.go b/fvm/blueprints/token.go index 92cc09e22c3..4058feb6519 100644 --- a/fvm/blueprints/token.go +++ b/fvm/blueprints/token.go @@ -22,6 +22,42 @@ func DeployFungibleTokenContractTransaction(fungibleToken flow.Address) *flow.Tr contractName) } +func DeployNonFungibleTokenContractTransaction(nonFungibleToken flow.Address) *flow.TransactionBody { + contract := contracts.NonFungibleToken() + contractName := "NonFungibleToken" + return DeployContractTransaction( + nonFungibleToken, + contract, + contractName) +} + +func DeployMetadataViewsContractTransaction(fungibleToken, nonFungibleToken flow.Address) *flow.TransactionBody { + contract := contracts.MetadataViews(fungibleToken.HexWithPrefix(), nonFungibleToken.HexWithPrefix()) + contractName := "MetadataViews" + return DeployContractTransaction( + nonFungibleToken, + contract, + contractName) +} + +func DeployViewResolverContractTransaction(nonFungibleToken flow.Address) *flow.TransactionBody { + contract := contracts.ViewResolver() + contractName := "ViewResolver" + return DeployContractTransaction( + nonFungibleToken, + contract, + 
contractName) +} + +func DeployFungibleTokenMetadataViewsContractTransaction(fungibleToken, nonFungibleToken flow.Address) *flow.TransactionBody { + contract := contracts.FungibleTokenMetadataViews(fungibleToken.Hex(), nonFungibleToken.Hex()) + contractName := "FungibleTokenMetadataViews" + return DeployContractTransaction( + fungibleToken, + contract, + contractName) +} + //go:embed scripts/deployFlowTokenTransactionTemplate.cdc var deployFlowTokenTransactionTemplate string @@ -31,8 +67,8 @@ var createFlowTokenMinterTransactionTemplate string //go:embed scripts/mintFlowTokenTransactionTemplate.cdc var mintFlowTokenTransactionTemplate string -func DeployFlowTokenContractTransaction(service, fungibleToken, flowToken flow.Address) *flow.TransactionBody { - contract := contracts.FlowToken(fungibleToken.HexWithPrefix()) +func DeployFlowTokenContractTransaction(service, fungibleToken, metadataViews, flowToken flow.Address) *flow.TransactionBody { + contract := contracts.FlowToken(fungibleToken.HexWithPrefix(), metadataViews.HexWithPrefix(), metadataViews.HexWithPrefix()) return flow.NewTransactionBody(). SetScript([]byte(deployFlowTokenTransactionTemplate)). diff --git a/fvm/bootstrap.go b/fvm/bootstrap.go index 72d75919927..dc938792d0a 100644 --- a/fvm/bootstrap.go +++ b/fvm/bootstrap.go @@ -318,7 +318,9 @@ func (b *bootstrapExecutor) Execute() error { service := b.createServiceAccount() fungibleToken := b.deployFungibleToken() - flowToken := b.deployFlowToken(service, fungibleToken) + nonFungibleToken := b.deployNonFungibleToken(service) + b.deployMetadataViews(fungibleToken, nonFungibleToken) + flowToken := b.deployFlowToken(service, fungibleToken, nonFungibleToken) storageFees := b.deployStorageFees(service, fungibleToken, flowToken) feeContract := b.deployFlowFees(service, fungibleToken, flowToken, storageFees) @@ -411,7 +413,46 @@ func (b *bootstrapExecutor) deployFungibleToken() flow.Address { return fungibleToken } -func (b *bootstrapExecutor) deployFlowToken(service, fungibleToken flow.Address) flow.Address { +func (b *bootstrapExecutor) deployNonFungibleToken(deployTo flow.Address) flow.Address { + + txError, err := b.invokeMetaTransaction( + b.ctx, + Transaction( + blueprints.DeployNonFungibleTokenContractTransaction(deployTo), + 0), + ) + panicOnMetaInvokeErrf("failed to deploy non-fungible token contract: %s", txError, err) + return deployTo +} + +func (b *bootstrapExecutor) deployMetadataViews(fungibleToken, nonFungibleToken flow.Address) { + + txError, err := b.invokeMetaTransaction( + b.ctx, + Transaction( + blueprints.DeployMetadataViewsContractTransaction(fungibleToken, nonFungibleToken), + 0), + ) + panicOnMetaInvokeErrf("failed to deploy metadata views contract: %s", txError, err) + + txError, err = b.invokeMetaTransaction( + b.ctx, + Transaction( + blueprints.DeployViewResolverContractTransaction(nonFungibleToken), + 0), + ) + panicOnMetaInvokeErrf("failed to deploy view resolver contract: %s", txError, err) + + txError, err = b.invokeMetaTransaction( + b.ctx, + Transaction( + blueprints.DeployFungibleTokenMetadataViewsContractTransaction(fungibleToken, nonFungibleToken), + 0), + ) + panicOnMetaInvokeErrf("failed to deploy fungible token metadata views contract: %s", txError, err) +} + +func (b *bootstrapExecutor) deployFlowToken(service, fungibleToken, metadataViews flow.Address) flow.Address { flowToken := b.createAccount(b.accountKeys.FlowTokenAccountPublicKeys) txError, err := b.invokeMetaTransaction( b.ctx, @@ -419,6 +460,7 @@ func (b *bootstrapExecutor) 
deployFlowToken(service, fungibleToken flow.Address) blueprints.DeployFlowTokenContractTransaction( service, fungibleToken, + metadataViews, flowToken), 0), ) diff --git a/fvm/context.go b/fvm/context.go index a1c25541360..250955d2082 100644 --- a/fvm/context.go +++ b/fvm/context.go @@ -160,10 +160,19 @@ func WithEventCollectionSizeLimit(limit uint64) Option { } } +// WithEntropyProvider sets the entropy provider of a virtual machine context. +// +// The VM uses the input to provide entropy to the Cadence runtime randomness functions. +func WithEntropyProvider(source environment.EntropyProvider) Option { + return func(ctx Context) Context { + ctx.EntropyProvider = source + return ctx + } +} + // WithBlockHeader sets the block header for a virtual machine context. // -// The VM uses the header to provide current block information to the Cadence runtime, -// as well as to seed the pseudorandom number generator. +// The VM uses the header to provide current block information to the Cadence runtime. func WithBlockHeader(header *flow.Header) Option { return func(ctx Context) Context { ctx.BlockHeader = header diff --git a/fvm/crypto/hash_test.go b/fvm/crypto/hash_test.go index bb9bb64172b..58d15d19b17 100644 --- a/fvm/crypto/hash_test.go +++ b/fvm/crypto/hash_test.go @@ -1,7 +1,7 @@ package crypto_test import ( - "math/rand" + "crypto/rand" "testing" "crypto/sha256" diff --git a/fvm/environment/env.go b/fvm/environment/env.go index 886a82be701..518b49a737a 100644 --- a/fvm/environment/env.go +++ b/fvm/environment/env.go @@ -98,6 +98,8 @@ type EnvironmentParams struct { BlockInfoParams TransactionInfoParams + EntropyProvider + ContractUpdaterParams } diff --git a/fvm/environment/facade_env.go b/fvm/environment/facade_env.go index 35806914e0e..76ac5205725 100644 --- a/fvm/environment/facade_env.go +++ b/fvm/environment/facade_env.go @@ -24,7 +24,7 @@ type facadeEnvironment struct { *ProgramLogger EventEmitter - UnsafeRandomGenerator + RandomGenerator CryptoLibrary BlockInfo @@ -75,11 +75,6 @@ func newFacadeEnvironment( ProgramLogger: logger, EventEmitter: NoEventEmitter{}, - UnsafeRandomGenerator: NewUnsafeRandomGenerator( - tracer, - params.BlockHeader, - params.TxIndex, - ), CryptoLibrary: NewCryptoLibrary(tracer, meter), BlockInfo: NewBlockInfo( @@ -175,9 +170,8 @@ func NewScriptEnv( params, txnState, NewCancellableMeter(ctx, txnState)) - + env.RandomGenerator = NewDummyRandomGenerator() env.addParseRestrictedChecks() - return env } @@ -232,6 +226,12 @@ func NewTransactionEnvironment( txnState, env) + env.RandomGenerator = NewRandomGenerator( + tracer, + params.EntropyProvider, + params.TxId, + ) + env.addParseRestrictedChecks() return env @@ -272,9 +272,9 @@ func (env *facadeEnvironment) addParseRestrictedChecks() { env.TransactionInfo = NewParseRestrictedTransactionInfo( env.txnState, env.TransactionInfo) - env.UnsafeRandomGenerator = NewParseRestrictedUnsafeRandomGenerator( + env.RandomGenerator = NewParseRestrictedRandomGenerator( env.txnState, - env.UnsafeRandomGenerator) + env.RandomGenerator) env.UUIDGenerator = NewParseRestrictedUUIDGenerator( env.txnState, env.UUIDGenerator) diff --git a/fvm/environment/mock/entropy_provider.go b/fvm/environment/mock/entropy_provider.go new file mode 100644 index 00000000000..cf3f19fb306 --- /dev/null +++ b/fvm/environment/mock/entropy_provider.go @@ -0,0 +1,51 @@ +// Code generated by mockery v2.21.4. DO NOT EDIT. 
+ +package mock + +import mock "github.com/stretchr/testify/mock" + +// EntropyProvider is an autogenerated mock type for the EntropyProvider type +type EntropyProvider struct { + mock.Mock +} + +// RandomSource provides a mock function with given fields: +func (_m *EntropyProvider) RandomSource() ([]byte, error) { + ret := _m.Called() + + var r0 []byte + var r1 error + if rf, ok := ret.Get(0).(func() ([]byte, error)); ok { + return rf() + } + if rf, ok := ret.Get(0).(func() []byte); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]byte) + } + } + + if rf, ok := ret.Get(1).(func() error); ok { + r1 = rf() + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +type mockConstructorTestingTNewEntropyProvider interface { + mock.TestingT + Cleanup(func()) +} + +// NewEntropyProvider creates a new instance of EntropyProvider. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +func NewEntropyProvider(t mockConstructorTestingTNewEntropyProvider) *EntropyProvider { + mock := &EntropyProvider{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/fvm/environment/mock/unsafe_random_generator.go b/fvm/environment/mock/random_generator.go similarity index 52% rename from fvm/environment/mock/unsafe_random_generator.go rename to fvm/environment/mock/random_generator.go index c92560981dd..0d0f1cf00e4 100644 --- a/fvm/environment/mock/unsafe_random_generator.go +++ b/fvm/environment/mock/random_generator.go @@ -4,13 +4,13 @@ package mock import mock "github.com/stretchr/testify/mock" -// UnsafeRandomGenerator is an autogenerated mock type for the UnsafeRandomGenerator type -type UnsafeRandomGenerator struct { +// RandomGenerator is an autogenerated mock type for the RandomGenerator type +type RandomGenerator struct { mock.Mock } // UnsafeRandom provides a mock function with given fields: -func (_m *UnsafeRandomGenerator) UnsafeRandom() (uint64, error) { +func (_m *RandomGenerator) UnsafeRandom() (uint64, error) { ret := _m.Called() var r0 uint64 @@ -33,14 +33,14 @@ func (_m *UnsafeRandomGenerator) UnsafeRandom() (uint64, error) { return r0, r1 } -type mockConstructorTestingTNewUnsafeRandomGenerator interface { +type mockConstructorTestingTNewRandomGenerator interface { mock.TestingT Cleanup(func()) } -// NewUnsafeRandomGenerator creates a new instance of UnsafeRandomGenerator. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewUnsafeRandomGenerator(t mockConstructorTestingTNewUnsafeRandomGenerator) *UnsafeRandomGenerator { - mock := &UnsafeRandomGenerator{} +// NewRandomGenerator creates a new instance of RandomGenerator. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. 
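+//
+// A minimal usage sketch (hypothetical test snippet following the standard
+// testify/mockery pattern; it is not taken from any call site in this change):
+//
+//	gen := mock.NewRandomGenerator(t)
+//	gen.On("UnsafeRandom").Return(uint64(7), nil)
+//	v, err := gen.UnsafeRandom() // v == 7, err == nil; expectations are asserted at cleanup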
+func NewRandomGenerator(t mockConstructorTestingTNewRandomGenerator) *RandomGenerator { + mock := &RandomGenerator{} mock.Mock.Test(t) t.Cleanup(func() { mock.AssertExpectations(t) }) diff --git a/fvm/environment/programs_test.go b/fvm/environment/programs_test.go index 1ea4e0ac581..9cb50d273b5 100644 --- a/fvm/environment/programs_test.go +++ b/fvm/environment/programs_test.go @@ -209,7 +209,7 @@ func Test_Programs(t *testing.T) { }) t.Run("register touches are captured for simple contract A", func(t *testing.T) { - fmt.Println("---------- Real transaction here ------------") + t.Log("---------- Real transaction here ------------") // run a TX using contract A diff --git a/fvm/environment/random_generator.go b/fvm/environment/random_generator.go new file mode 100644 index 00000000000..63562ff06bf --- /dev/null +++ b/fvm/environment/random_generator.go @@ -0,0 +1,146 @@ +package environment + +import ( + "encoding/binary" + "fmt" + + "github.com/onflow/flow-go/crypto/random" + "github.com/onflow/flow-go/fvm/errors" + "github.com/onflow/flow-go/fvm/storage/state" + "github.com/onflow/flow-go/fvm/tracing" + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/module/trace" + "github.com/onflow/flow-go/state/protocol/prg" +) + +// EntropyProvider represents an entropy (source of randomness) provider +type EntropyProvider interface { + // RandomSource provides a source of entropy that can be + // expanded into randoms (using a pseudo-random generator). + // The returned slice should have at least 128 bits of entropy. + // The function doesn't error in normal operations, any + // error should be treated as an exception. + RandomSource() ([]byte, error) +} + +type RandomGenerator interface { + // UnsafeRandom returns a random uint64 + // The name follows Cadence interface + UnsafeRandom() (uint64, error) +} + +var _ RandomGenerator = (*randomGenerator)(nil) + +// randomGenerator implements RandomGenerator and is used +// for the transactions execution environment +type randomGenerator struct { + tracer tracing.TracerSpan + entropySource EntropyProvider + txId flow.Identifier + prg random.Rand + isPRGCreated bool +} + +type ParseRestrictedRandomGenerator struct { + txnState state.NestedTransactionPreparer + impl RandomGenerator +} + +func NewParseRestrictedRandomGenerator( + txnState state.NestedTransactionPreparer, + impl RandomGenerator, +) RandomGenerator { + return ParseRestrictedRandomGenerator{ + txnState: txnState, + impl: impl, + } +} + +func (gen ParseRestrictedRandomGenerator) UnsafeRandom() ( + uint64, + error, +) { + return parseRestrict1Ret( + gen.txnState, + trace.FVMEnvRandom, + gen.impl.UnsafeRandom) +} + +func NewRandomGenerator( + tracer tracing.TracerSpan, + entropySource EntropyProvider, + txId flow.Identifier, +) RandomGenerator { + gen := &randomGenerator{ + tracer: tracer, + entropySource: entropySource, + txId: txId, + isPRGCreated: false, // PRG is not created + } + + return gen +} + +func (gen *randomGenerator) createPRG() (random.Rand, error) { + // Use the protocol state source of randomness [SoR] for the current block's + // execution + source, err := gen.entropySource.RandomSource() + // `RandomSource` does not error in normal operations. + // Any error should be treated as an exception. 
+ if err != nil { + return nil, fmt.Errorf("reading random source from state failed: %w", err) + } + + // Use the state/protocol PRG derivation from the source of randomness: + // - for the transaction execution case, the PRG used must be a CSPRG + // - use the state/protocol/prg customizer defined for the execution environment + // - use the transaction ID as an extra diversifier of the CSPRG. Although this + // does not add any extra entropy to the output, it allows creating an independent + // PRG for each transaction. + csprg, err := prg.New(source, prg.ExecutionEnvironment, gen.txId[:]) + if err != nil { + return nil, fmt.Errorf("failed to create a CSPRG from source: %w", err) + } + + return csprg, nil +} + +// UnsafeRandom returns a random uint64 using the underlying PRG (currently +// using a crypto-secure one). This function is not thread safe, due to the gen.prg +// instance currently used. This is fine because a +// single transaction has a single RandomGenerator and is run in a single +// thread. +func (gen *randomGenerator) UnsafeRandom() (uint64, error) { + defer gen.tracer.StartExtensiveTracingChildSpan( + trace.FVMEnvRandom).End() + + // PRG creation is only done once. + if !gen.isPRGCreated { + newPRG, err := gen.createPRG() + if err != nil { + return 0, err + } + gen.prg = newPRG + gen.isPRGCreated = true + } + + buf := make([]byte, 8) + gen.prg.Read(buf) // Note: prg.Read does not return error + return binary.LittleEndian.Uint64(buf), nil +} + +var _ RandomGenerator = (*dummyRandomGenerator)(nil) + +// dummyRandomGenerator implements RandomGenerator and is used +// for the scripts execution environment +type dummyRandomGenerator struct{} + +func NewDummyRandomGenerator() RandomGenerator { + return &dummyRandomGenerator{} +} + +// UnsafeRandom() returns an error because executing scripts +// does not support randomness APIs. 
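+// The script environment installs this generator (see the NewScriptEnv change in
+// facade_env.go above), so a script that reaches the randomness API fails with an
+// OperationNotSupportedError instead of receiving a value from an ad-hoc seed.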
+func (gen *dummyRandomGenerator) UnsafeRandom() (uint64, error) { + return 0, errors.NewOperationNotSupportedError("Random") +} diff --git a/fvm/environment/random_generator_test.go b/fvm/environment/random_generator_test.go new file mode 100644 index 00000000000..539aa99423f --- /dev/null +++ b/fvm/environment/random_generator_test.go @@ -0,0 +1,80 @@ +package environment_test + +import ( + "math" + mrand "math/rand" + "testing" + + "github.com/stretchr/testify/require" + + "github.com/onflow/flow-go/crypto/random" + "github.com/onflow/flow-go/fvm/environment" + "github.com/onflow/flow-go/fvm/environment/mock" + "github.com/onflow/flow-go/fvm/tracing" + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/utils/unittest" +) + +func TestRandomGenerator(t *testing.T) { + entropyProvider := &mock.EntropyProvider{} + entropyProvider.On("RandomSource").Return(unittest.RandomBytes(48), nil) + + getRandoms := func(txId flow.Identifier, N int) []uint64 { + // seed the RG with the same block header + urg := environment.NewRandomGenerator( + tracing.NewTracerSpan(), + entropyProvider, + txId) + numbers := make([]uint64, N) + for i := 0; i < N; i++ { + u, err := urg.UnsafeRandom() + require.NoError(t, err) + numbers[i] = u + } + return numbers + } + + // basic randomness test to check outputs are "uniformly" spread over the + // output space + t.Run("randomness test", func(t *testing.T) { + for i := 0; i < 10; i++ { + txId := unittest.TransactionFixture().ID() + urg := environment.NewRandomGenerator( + tracing.NewTracerSpan(), + entropyProvider, + txId) + + // make sure n is a power of 2 so that there is no bias in the last class + // n is a random power of 2 (from 2 to 2^10) + n := 1 << (1 + mrand.Intn(10)) + classWidth := (math.MaxUint64 / uint64(n)) + 1 + random.BasicDistributionTest(t, uint64(n), uint64(classWidth), urg.UnsafeRandom) + } + }) + + // tests that has deterministic outputs. 
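+	// Determinism here means: for a fixed entropy source and a fixed transaction ID,
+	// two independently built generators must return the identical sequence, which the
+	// subtest below checks by drawing N values twice and comparing the two slices.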
+ t.Run("PRG-based Random", func(t *testing.T) { + for i := 0; i < 10; i++ { + txId := unittest.TransactionFixture().ID() + N := 100 + r1 := getRandoms(txId, N) + r2 := getRandoms(txId, N) + require.Equal(t, r1, r2) + } + }) + + t.Run("transaction specific randomness", func(t *testing.T) { + txns := [][]uint64{} + for i := 0; i < 10; i++ { + txId := unittest.TransactionFixture().ID() + N := 2 + txns = append(txns, getRandoms(txId, N)) + } + + for i, txn := range txns { + for _, otherTxn := range txns[i+1:] { + require.NotEqual(t, txn, otherTxn) + } + } + }) +} diff --git a/fvm/environment/unsafe_random_generator.go b/fvm/environment/unsafe_random_generator.go deleted file mode 100644 index 548753d90ca..00000000000 --- a/fvm/environment/unsafe_random_generator.go +++ /dev/null @@ -1,156 +0,0 @@ -package environment - -import ( - "crypto/sha256" - "encoding/binary" - "fmt" - "hash" - "io" - "sync" - - "golang.org/x/crypto/hkdf" - - "github.com/onflow/flow-go/crypto/random" - "github.com/onflow/flow-go/fvm/errors" - "github.com/onflow/flow-go/fvm/storage/state" - "github.com/onflow/flow-go/fvm/tracing" - "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/module/trace" -) - -type UnsafeRandomGenerator interface { - // UnsafeRandom returns a random uint64 - UnsafeRandom() (uint64, error) -} - -type unsafeRandomGenerator struct { - tracer tracing.TracerSpan - - blockHeader *flow.Header - txnIndex uint32 - - prg random.Rand - createOnce sync.Once - createErr error -} - -type ParseRestrictedUnsafeRandomGenerator struct { - txnState state.NestedTransactionPreparer - impl UnsafeRandomGenerator -} - -func NewParseRestrictedUnsafeRandomGenerator( - txnState state.NestedTransactionPreparer, - impl UnsafeRandomGenerator, -) UnsafeRandomGenerator { - return ParseRestrictedUnsafeRandomGenerator{ - txnState: txnState, - impl: impl, - } -} - -func (gen ParseRestrictedUnsafeRandomGenerator) UnsafeRandom() ( - uint64, - error, -) { - return parseRestrict1Ret( - gen.txnState, - trace.FVMEnvUnsafeRandom, - gen.impl.UnsafeRandom) -} - -func NewUnsafeRandomGenerator( - tracer tracing.TracerSpan, - blockHeader *flow.Header, - txnIndex uint32, -) UnsafeRandomGenerator { - gen := &unsafeRandomGenerator{ - tracer: tracer, - blockHeader: blockHeader, - txnIndex: txnIndex, - } - - return gen -} - -func (gen *unsafeRandomGenerator) createRandomGenerator() ( - random.Rand, - error, -) { - if gen.blockHeader == nil { - return nil, nil - } - - // The block header ID is currently used as the entropy source. - // This should evolve to become the beacon signature (safer entropy - // source than the block ID) - source := gen.blockHeader.ID() - - // Provide additional randomness for each transaction. - salt := make([]byte, 4) - binary.LittleEndian.PutUint32(salt, gen.txnIndex) - - // Extract the entropy from the source and expand it into the required - // seed length. Note that we can use any implementation which provide - // similar properties. - hkdf := hkdf.New( - func() hash.Hash { return sha256.New() }, - source[:], - salt, - nil) - seed := make([]byte, random.Chacha20SeedLen) - _, err := io.ReadFull(hkdf, seed) - if err != nil { - return nil, fmt.Errorf("extracting seed with HKDF failed: %w", err) - } - - // initialize a fresh crypto-secure PRG with the seed (here ChaCha20) - // This PRG provides all outputs of Cadence UnsafeRandom. 
- prg, err := random.NewChacha20PRG(seed, []byte{}) - if err != nil { - return nil, fmt.Errorf("creating random generator failed: %w", err) - } - - return prg, nil -} - -// maybeCreateRandomGenerator seeds the pseudo-random number generator using the -// block header ID and transaction index as an entropy source. The seed -// function is currently called for each tranaction, the PRG is used to -// provide all the randoms the transaction needs through UnsafeRandom. -// -// This allows lazy seeding of the random number generator, since not a lot of -// transactions/scripts use it and the time it takes to seed it is not -// negligible. -func (gen *unsafeRandomGenerator) maybeCreateRandomGenerator() error { - gen.createOnce.Do(func() { - gen.prg, gen.createErr = gen.createRandomGenerator() - }) - - return gen.createErr -} - -// UnsafeRandom returns a random uint64 using the underlying PRG (currently -// using a crypto-secure one). This is not thread safe, due to the gen.prg -// instance currently used. Its also not thread safe because each thread needs -// to be deterministically seeded with a different seed. This is Ok because a -// single transaction has a single UnsafeRandomGenerator and is run in a single -// thread. -func (gen *unsafeRandomGenerator) UnsafeRandom() (uint64, error) { - defer gen.tracer.StartExtensiveTracingChildSpan( - trace.FVMEnvUnsafeRandom).End() - - // The internal seeding is only done once. - err := gen.maybeCreateRandomGenerator() - if err != nil { - return 0, err - } - - if gen.prg == nil { - return 0, errors.NewOperationNotSupportedError("UnsafeRandom") - } - - buf := make([]byte, 8) - gen.prg.Read(buf) // Note: prg.Read does not return error - return binary.LittleEndian.Uint64(buf), nil -} diff --git a/fvm/environment/unsafe_random_generator_test.go b/fvm/environment/unsafe_random_generator_test.go deleted file mode 100644 index bb6f13b87e0..00000000000 --- a/fvm/environment/unsafe_random_generator_test.go +++ /dev/null @@ -1,108 +0,0 @@ -package environment_test - -import ( - "fmt" - "math" - mrand "math/rand" - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - "gonum.org/v1/gonum/stat" - - "github.com/onflow/flow-go/fvm/environment" - "github.com/onflow/flow-go/fvm/tracing" - "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/utils/unittest" -) - -// TODO: these functions are copied from flow-go/crypto/rand -// Once the new flow-go/crypto/ module version is tagged, flow-go would upgrade -// to the new version and import these functions -func BasicDistributionTest(t *testing.T, n uint64, classWidth uint64, randf func() (uint64, error)) { - // sample size should ideally be a high number multiple of `n` - // but if `n` is too small, we could use a small sample size so that the test - // isn't too slow - sampleSize := 1000 * n - if n < 100 { - sampleSize = (80000 / n) * n // highest multiple of n less than 80000 - } - distribution := make([]float64, n) - // populate the distribution - for i := uint64(0); i < sampleSize; i++ { - r, err := randf() - require.NoError(t, err) - if n*classWidth != 0 { - require.Less(t, r, n*classWidth) - } - distribution[r/classWidth] += 1.0 - } - EvaluateDistributionUniformity(t, distribution) -} - -func EvaluateDistributionUniformity(t *testing.T, distribution []float64) { - tolerance := 0.05 - stdev := stat.StdDev(distribution, nil) - mean := stat.Mean(distribution, nil) - assert.Greater(t, tolerance*mean, stdev, fmt.Sprintf("basic randomness test failed: n: %d, stdev: %v, 
mean: %v", len(distribution), stdev, mean)) -} - -func TestUnsafeRandomGenerator(t *testing.T) { - bh := unittest.BlockHeaderFixtureOnChain(flow.Mainnet.Chain().ChainID()) - - getRandoms := func(txnIndex uint32, N int) []uint64 { - // seed the RG with the same block header - urg := environment.NewUnsafeRandomGenerator( - tracing.NewTracerSpan(), - bh, - txnIndex) - numbers := make([]uint64, N) - for i := 0; i < N; i++ { - u, err := urg.UnsafeRandom() - require.NoError(t, err) - numbers[i] = u - } - return numbers - } - - // basic randomness test to check outputs are "uniformly" spread over the - // output space - t.Run("randomness test", func(t *testing.T) { - for txnIndex := uint32(0); txnIndex < 10; txnIndex++ { - urg := environment.NewUnsafeRandomGenerator( - tracing.NewTracerSpan(), - bh, - txnIndex) - - // make sure n is a power of 2 so that there is no bias in the last class - // n is a random power of 2 (from 2 to 2^10) - n := 1 << (1 + mrand.Intn(10)) - classWidth := (math.MaxUint64 / uint64(n)) + 1 - BasicDistributionTest(t, uint64(n), uint64(classWidth), urg.UnsafeRandom) - } - }) - - // tests that unsafeRandom is PRG based and hence has deterministic outputs. - t.Run("PRG-based UnsafeRandom", func(t *testing.T) { - for txnIndex := uint32(0); txnIndex < 10; txnIndex++ { - N := 100 - r1 := getRandoms(txnIndex, N) - r2 := getRandoms(txnIndex, N) - require.Equal(t, r1, r2) - } - }) - - t.Run("transaction specific randomness", func(t *testing.T) { - txns := [][]uint64{} - for txnIndex := uint32(0); txnIndex < 10; txnIndex++ { - N := 100 - txns = append(txns, getRandoms(txnIndex, N)) - } - - for i, txn := range txns { - for _, otherTxn := range txns[i+1:] { - require.NotEqual(t, txn, otherTxn) - } - } - }) -} diff --git a/fvm/fvm_bench_test.go b/fvm/fvm_bench_test.go index 8c8eb4f66fc..276c8cb69b8 100644 --- a/fvm/fvm_bench_test.go +++ b/fvm/fvm_bench_test.go @@ -225,6 +225,7 @@ func NewBasicBlockExecutor(tb testing.TB, chain flow.Chain, logger zerolog.Logge me, prov, nil, + nil, 1) // We're interested in fvm's serial execution time require.NoError(tb, err) diff --git a/fvm/fvm_blockcontext_test.go b/fvm/fvm_blockcontext_test.go index 9fea269b7cb..e4148fcc5c7 100644 --- a/fvm/fvm_blockcontext_test.go +++ b/fvm/fvm_blockcontext_test.go @@ -1664,17 +1664,19 @@ func TestBlockContext_GetAccount(t *testing.T) { }) } -func TestBlockContext_UnsafeRandom(t *testing.T) { +func TestBlockContext_Random(t *testing.T) { t.Parallel() chain, vm := createChainAndVm(flow.Mainnet) header := &flow.Header{Height: 42} + source := testutil.EntropyProviderFixture(nil) ctx := fvm.NewContext( fvm.WithChain(chain), fvm.WithBlockHeader(header), + fvm.WithEntropyProvider(source), fvm.WithCadenceLogging(true), ) @@ -1701,9 +1703,10 @@ func TestBlockContext_UnsafeRandom(t *testing.T) { require.Len(t, output.Logs, 1) - num, err := strconv.ParseUint(output.Logs[0], 10, 64) + // output cannot be deterministic because transaction signature is not deterministic + // (which makes the tx hash and the PRG seed used by the execution not deterministic) + _, err = strconv.ParseUint(output.Logs[0], 10, 64) require.NoError(t, err) - require.Equal(t, uint64(0x7515f254adc6f8af), num) }) } diff --git a/fvm/fvm_test.go b/fvm/fvm_test.go index 587df638ee9..e93c19c575a 100644 --- a/fvm/fvm_test.go +++ b/fvm/fvm_test.go @@ -2023,6 +2023,87 @@ func TestScriptAccountKeyMutationsFailure(t *testing.T) { ) } +func TestScriptExecutionLimit(t *testing.T) { + + t.Parallel() + + script := fvm.Script([]byte(` + pub fun main() { + var s: Int256 
= 1024102410241024 + var i: Int256 = 0 + var a: Int256 = 7 + var b: Int256 = 5 + var c: Int256 = 2 + + while i < 150000 { + s = s * a + s = s / b + s = s / c + i = i + 1 + } + } + `)) + + bootstrapProcedureOptions := []fvm.BootstrapProcedureOption{ + fvm.WithTransactionFee(fvm.DefaultTransactionFees), + fvm.WithExecutionMemoryLimit(math.MaxUint32), + fvm.WithExecutionEffortWeights(map[common.ComputationKind]uint64{ + common.ComputationKindStatement: 1569, + common.ComputationKindLoop: 1569, + common.ComputationKindFunctionInvocation: 1569, + environment.ComputationKindGetValue: 808, + environment.ComputationKindCreateAccount: 2837670, + environment.ComputationKindSetValue: 765, + }), + fvm.WithExecutionMemoryWeights(meter.DefaultMemoryWeights), + fvm.WithMinimumStorageReservation(fvm.DefaultMinimumStorageReservation), + fvm.WithAccountCreationFee(fvm.DefaultAccountCreationFee), + fvm.WithStorageMBPerFLOW(fvm.DefaultStorageMBPerFLOW), + } + + t.Run("Exceeding computation limit", + newVMTest().withBootstrapProcedureOptions( + bootstrapProcedureOptions..., + ).withContextOptions( + fvm.WithTransactionFeesEnabled(true), + fvm.WithAccountStorageLimit(true), + fvm.WithComputationLimit(10000), + ).run( + func(t *testing.T, vm fvm.VM, chain flow.Chain, ctx fvm.Context, snapshotTree snapshot.SnapshotTree) { + scriptCtx := fvm.NewContextFromParent(ctx) + + _, output, err := vm.Run(scriptCtx, script, snapshotTree) + require.NoError(t, err) + require.Error(t, output.Err) + require.True(t, errors.IsComputationLimitExceededError(output.Err)) + require.ErrorContains(t, output.Err, "computation exceeds limit (10000)") + require.GreaterOrEqual(t, output.ComputationUsed, uint64(10000)) + require.GreaterOrEqual(t, output.MemoryEstimate, uint64(548020260)) + }, + ), + ) + + t.Run("Sufficient computation limit", + newVMTest().withBootstrapProcedureOptions( + bootstrapProcedureOptions..., + ).withContextOptions( + fvm.WithTransactionFeesEnabled(true), + fvm.WithAccountStorageLimit(true), + fvm.WithComputationLimit(20000), + ).run( + func(t *testing.T, vm fvm.VM, chain flow.Chain, ctx fvm.Context, snapshotTree snapshot.SnapshotTree) { + scriptCtx := fvm.NewContextFromParent(ctx) + + _, output, err := vm.Run(scriptCtx, script, snapshotTree) + require.NoError(t, err) + require.NoError(t, output.Err) + require.GreaterOrEqual(t, output.ComputationUsed, uint64(17955)) + require.GreaterOrEqual(t, output.MemoryEstimate, uint64(984017413)) + }, + ), + ) +} + func TestInteractionLimit(t *testing.T) { type testCase struct { name string diff --git a/fvm/script.go b/fvm/script.go index 10bd5d68717..c979fb309f5 100644 --- a/fvm/script.go +++ b/fvm/script.go @@ -4,6 +4,7 @@ import ( "context" "fmt" + "github.com/hashicorp/go-multierror" "github.com/onflow/cadence/runtime" "github.com/onflow/cadence/runtime/common" @@ -198,11 +199,13 @@ func (executor *scriptExecutor) executeScript() error { Source: executor.proc.Script, Arguments: executor.proc.Arguments, }, - common.ScriptLocation(executor.proc.ID)) + common.ScriptLocation(executor.proc.ID), + ) + populateErr := executor.output.PopulateEnvironmentValues(executor.env) if err != nil { - return err + return multierror.Append(err, populateErr) } executor.output.Value = value - return executor.output.PopulateEnvironmentValues(executor.env) + return populateErr } diff --git a/go.mod b/go.mod index ef9c29a3a43..b4b7cbe7f62 100644 --- a/go.mod +++ b/go.mod @@ -1,6 +1,6 @@ module github.com/onflow/flow-go -go 1.19 +go 1.20 require ( cloud.google.com/go/compute/metadata v0.2.3 @@ 
-51,13 +51,13 @@ require ( github.com/multiformats/go-multiaddr-dns v0.3.1 github.com/multiformats/go-multihash v0.2.3 github.com/onflow/atree v0.6.0 - github.com/onflow/cadence v0.39.12 + github.com/onflow/cadence v0.39.14 github.com/onflow/flow v0.3.4 - github.com/onflow/flow-core-contracts/lib/go/contracts v1.2.3 + github.com/onflow/flow-core-contracts/lib/go/contracts v1.2.4-0.20230703193002-53362441b57d github.com/onflow/flow-core-contracts/lib/go/templates v1.2.3 - github.com/onflow/flow-go-sdk v0.41.6 - github.com/onflow/flow-go/crypto v0.24.7 - github.com/onflow/flow/protobuf/go/flow v0.3.2-0.20230602212908-08fc6536d391 + github.com/onflow/flow-go-sdk v0.41.9 + github.com/onflow/flow-go/crypto v0.24.9 + github.com/onflow/flow/protobuf/go/flow v0.3.2-0.20230628215638-83439d22e0ce github.com/onflow/go-bitswap v0.0.0-20230703214630-6d3db958c73d github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 github.com/pierrec/lz4 v2.6.1+incompatible @@ -103,7 +103,6 @@ require ( github.com/mitchellh/mapstructure v1.5.0 github.com/onflow/wal v0.0.0-20230529184820-bc9f8244608d github.com/slok/go-http-metrics v0.10.0 - gonum.org/v1/gonum v0.13.0 ) require ( @@ -228,6 +227,7 @@ require ( github.com/multiformats/go-multistream v0.4.1 // indirect github.com/multiformats/go-varint v0.0.7 // indirect github.com/onflow/flow-ft/lib/go/contracts v0.7.0 // indirect + github.com/onflow/flow-nft/lib/go/contracts v1.1.0 // indirect github.com/onflow/sdks v0.5.0 // indirect github.com/onsi/ginkgo/v2 v2.9.7 // indirect github.com/opencontainers/runtime-spec v1.0.2 // indirect @@ -275,6 +275,7 @@ require ( golang.org/x/oauth2 v0.6.0 // indirect golang.org/x/term v0.9.0 // indirect golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect + gonum.org/v1/gonum v0.13.0 // indirect google.golang.org/appengine v1.6.7 // indirect gopkg.in/ini.v1 v1.67.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect diff --git a/go.sum b/go.sum index ecf5dfae9f4..97ca856b74d 100644 --- a/go.sum +++ b/go.sum @@ -53,6 +53,7 @@ cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1 cloud.google.com/go/iam v0.3.0/go.mod h1:XzJPvDayI+9zsASAFO68Hk07u3z+f+JrT2xXNdp4bnY= cloud.google.com/go/iam v0.12.0 h1:DRtTY29b75ciH6Ov1PHb4/iat2CLCvrOm40Q0a6DFpE= cloud.google.com/go/iam v0.12.0/go.mod h1:knyHGviacl11zrtZUoDuYpDgLjvr28sLQaG0YB2GYAY= +cloud.google.com/go/kms v1.0.0/go.mod h1:nhUehi+w7zht2XrUfvTRNpxrfayBHqP4lu2NSywui/0= cloud.google.com/go/longrunning v0.4.1 h1:v+yFJOfKC3yZdY6ZUI933pIYdhyhV8S3NpWrXWmg7jM= cloud.google.com/go/profiler v0.3.0 h1:R6y/xAeifaUXxd2x6w+jIwKxoKl8Cv5HJvcvASTPWJo= cloud.google.com/go/profiler v0.3.0/go.mod h1:9wYk9eY4iZHsev8TQb61kh3wiOiSyz/xOYixWPzweCU= @@ -105,6 +106,7 @@ github.com/VictoriaMetrics/fastcache v1.5.3/go.mod h1:+jv9Ckb+za/P1ZRg/sulP5Ni1v github.com/VividCortex/gohistogram v1.0.0/go.mod h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/4+TcAqDqk/vUH7g= github.com/aead/siphash v1.0.1/go.mod h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBAUSII= github.com/afex/hystrix-go v0.0.0-20180502004556-fa1af6a1f4f5/go.mod h1:SkGFH1ia65gfNATL8TAiHDNxPzPdmEL5uirI2Uyuz6c= +github.com/ajstarks/svgo v0.0.0-20180226025133-644b8db467af/go.mod h1:K08gAheRH3/J6wwsYMMT4xOr94bZjxIelGM0+d/wbFw= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod 
h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= @@ -200,6 +202,8 @@ github.com/btcsuite/snappy-go v1.0.0/go.mod h1:8woku9dyThutzjeg+3xrA5iCpBRH8XEEg github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792/go.mod h1:ghJtEyQwv5/p4Mg4C0fgbePVuGr935/5ddU9Z3TmDRY= github.com/btcsuite/winsvc v1.0.0/go.mod h1:jsenWakMcC0zFBFurPLEAyrnc/teJEM1O46fmI40EZs= github.com/buger/jsonparser v0.0.0-20181115193947-bf1c66bbce23/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s= +github.com/bytecodealliance/wasmtime-go v0.22.0/go.mod h1:q320gUxqyI8yB+ZqRuaJOEnGkAnHh6WtJjMaT2CW4wI= +github.com/c-bata/go-prompt v0.2.5/go.mod h1:vFnjEGDIIA/Lib7giyE4E9c50Lvl8j0S+7FVlAwDAVw= github.com/casbin/casbin/v2 v2.1.2/go.mod h1:YcPU1XXisHhLzuxH9coDNf2FbKpjGlbCg3n9yuLkIJQ= github.com/cenkalti/backoff v2.2.1+incompatible h1:tNowT99t7UNflLxfYYSlKYsBpXdEet03Pg2g16Swow4= github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= @@ -319,6 +323,7 @@ github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.m github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/ethereum/go-ethereum v1.9.9/go.mod h1:a9TqabFudpDu1nucId+k9S8R9whYaHnGBLKFouA5EAo= github.com/ethereum/go-ethereum v1.9.13 h1:rOPqjSngvs1VSYH2H+PMPiWt4VEulvNRbFgqiGqJM3E= github.com/ethereum/go-ethereum v1.9.13/go.mod h1:qwN9d1GLyDh0N7Ab8bMGd0H9knaji2jOBm2RrMGjXls= github.com/fatih/color v1.3.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= @@ -330,6 +335,7 @@ github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI github.com/flynn/noise v0.0.0-20180327030543-2492fe189ae6/go.mod h1:1i71OnUq3iUe1ma7Lr6yG6/rjvM3emb6yoL7xLFzcVQ= github.com/flynn/noise v1.0.0 h1:DlTHqmzmvcEiKj+4RYo/imoswx/4r6iBlCMfVtrMXpQ= github.com/flynn/noise v1.0.0/go.mod h1:xbMo+0i6+IGbYdJhF31t2eR1BIU0CYc12+BNAKwUTag= +github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k= github.com/francoispqt/gojay v1.2.13 h1:d2m3sFjloqoIUQU3TsHBgj6qg/BVGlTBeHDUmyJnXKk= github.com/francoispqt/gojay v1.2.13/go.mod h1:ehT5mTG4ua4581f1++1WLG0vPdaA9HaiDsoyrBGkyDY= github.com/franela/goblin v0.0.0-20200105215937-c9ffbefa60db/go.mod h1:7dvUGVsVBjqR7JHJk0brhHOZYGmfBYOrK0ZhYMEtBr4= @@ -340,8 +346,10 @@ github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMo github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY= github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw= +github.com/fxamacker/cbor/v2 v2.2.1-0.20210927235116-3d6d5d1de29b/go.mod h1:TA1xS00nchWmaBnEIxPSE5oHLuJBAVvqrtAnWBwBCVo= github.com/fxamacker/cbor/v2 v2.4.1-0.20230228173756-c0c9f774e40c h1:5tm/Wbs9d9r+qZaUFXk59CWDD0+77PBqDREffYkyi5c= github.com/fxamacker/cbor/v2 v2.4.1-0.20230228173756-c0c9f774e40c/go.mod h1:TA1xS00nchWmaBnEIxPSE5oHLuJBAVvqrtAnWBwBCVo= +github.com/fxamacker/circlehash v0.1.0/go.mod h1:3aq3OfVvsWtkWMb6A1owjOQFA+TLsD5FgJflnaQwtMM= github.com/fxamacker/circlehash v0.3.0 h1:XKdvTtIJV9t7DDUtsf0RIpC1OcxZtPbmgIH7ekx28WA= github.com/fxamacker/circlehash v0.3.0/go.mod 
h1:3aq3OfVvsWtkWMb6A1owjOQFA+TLsD5FgJflnaQwtMM= github.com/gabriel-vasile/mimetype v1.4.2 h1:w5qFW6JKBz9Y393Y4q372O9A7cUSequkh1Q7OhCmWKU= @@ -402,6 +410,7 @@ github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI= github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls= +github.com/go-test/deep v1.0.5/go.mod h1:QV8Hv/iy04NyLBxAdO9njL0iVPN1S4d/A3NVv1V36o8= github.com/go-test/deep v1.1.0 h1:WOcxcdHcvdgThNXjw0t76K42FXTU7HpNQWHpA2HHNlg= github.com/go-test/deep v1.1.0/go.mod h1:5C2ZWiW0ErCdrYzpqxLbTX7MG14M9iiw8DgHncVwcsE= github.com/go-yaml/yaml v2.1.0+incompatible/go.mod h1:w2MrLa16VYP0jy6N7M5kHaCkaLENm+P+Tv+MfurjSw0= @@ -423,6 +432,7 @@ github.com/gogo/protobuf v1.3.0/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXP github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= +github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4= github.com/golang/glog v1.1.0 h1:/d3pCKDPWNnvIWe0vVUpNP32qc8U3PDVxySP/y360qE= @@ -780,9 +790,11 @@ github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfV github.com/julienschmidt/httprouter v1.1.1-0.20170430222011-975b5c4c7c21/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= +github.com/jung-kurt/gofpdf v1.0.3-0.20190309125859-24315acbbda5/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes= github.com/k0kubun/go-ansi v0.0.0-20180517002512-3bf9e2903213/go.mod h1:vNUNkEQ1e29fT/6vq2aBdFsgNPmy8qMdSay1npru+Sw= github.com/kami-zh/go-capturer v0.0.0-20171211120116-e492ea43421d/go.mod h1:P2viExyCEfeWGU259JnaQ34Inuec4R38JCyBx2edgD0= github.com/karalabe/usb v0.0.0-20190919080040-51dc0efba356/go.mod h1:Od972xHfMJowv7NGVDiWVxk2zxnWgjLlJzE+F4F7AGU= +github.com/kevinburke/go-bindata v3.22.0+incompatible/go.mod h1:/pEEZ72flUW2p0yi30bslSp9YqD9pysLxunQDdb2CPM= github.com/kevinburke/go-bindata v3.23.0+incompatible h1:rqNOXZlqrYhMVVAsQx8wuc+LaA73YcfbQ407wAykyS8= github.com/kevinburke/go-bindata v3.23.0+incompatible/go.mod h1:/pEEZ72flUW2p0yi30bslSp9YqD9pysLxunQDdb2CPM= github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= @@ -1032,6 +1044,7 @@ github.com/libp2p/go-yamux/v4 v4.0.0 h1:+Y80dV2Yx/kv7Y7JKu0LECyVdMXm1VUoko+VQ9rB github.com/libp2p/go-yamux/v4 v4.0.0/go.mod h1:NWjl8ZTLOGlozrXSOZ/HlfG++39iKNnM5wwmtQP1YB4= github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM= github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4= +github.com/logrusorgru/aurora v0.0.0-20200102142835-e9ef32dff381/go.mod h1:7rIyQOR62GCctdiQpZ/zOJlFyk6y+94wXzv6RNZgaR4= 
github.com/logrusorgru/aurora/v4 v4.0.0 h1:sRjfPpun/63iADiSvGGjgA1cAYegEWMPCJdUpJYn9JA= github.com/logrusorgru/aurora/v4 v4.0.0/go.mod h1:lP0iIa2nrnT/qoFXcOZSrZQpJ1o6n2CUf/hyHi2Q4ZQ= github.com/lucas-clemente/quic-go v0.19.3/go.mod h1:ADXpNbTQjq1hIzCpB+y/k5iz4n4z4IwqoLb94Kh5Hu8= @@ -1053,6 +1066,8 @@ github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaO github.com/mattn/go-colorable v0.1.0/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ= github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= +github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= +github.com/mattn/go-colorable v0.1.7/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4= github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= @@ -1064,6 +1079,7 @@ github.com/mattn/go-isatty v0.0.5-0.20180830101745-3fb116b82035/go.mod h1:M+lRXT github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= github.com/mattn/go-isatty v0.0.9/go.mod h1:YNRxwqDuOph6SZLI9vUUz6OYw3QyUt7WiY2yME+cCiQ= +github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84= github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= github.com/mattn/go-isatty v0.0.13/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= @@ -1074,8 +1090,12 @@ github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= github.com/mattn/go-runewidth v0.0.3/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= github.com/mattn/go-runewidth v0.0.4/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= +github.com/mattn/go-runewidth v0.0.6/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= +github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= +github.com/mattn/go-runewidth v0.0.13/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= github.com/mattn/go-runewidth v0.0.14 h1:+xnbZSEeDbOIg5/mE6JF0w6n9duR1l3/WmbinWVwUuU= github.com/mattn/go-runewidth v0.0.14/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= +github.com/mattn/go-tty v0.0.3/go.mod h1:ihxohKRERHTVzN+aSVRwACLCeqIoZAWpoICkkvrWyR0= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo= github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= @@ -1216,24 +1236,31 @@ github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= github.com/olekukonko/tablewriter v0.0.1/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= github.com/olekukonko/tablewriter v0.0.2-0.20190409134802-7e037d187b0c/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= 
+github.com/onflow/atree v0.1.0-beta1.0.20211027184039-559ee654ece9/go.mod h1:+6x071HgCF/0v5hQcaE5qqjc2UqN5gCU8h5Mk6uqpOg= github.com/onflow/atree v0.6.0 h1:j7nQ2r8npznx4NX39zPpBYHmdy45f4xwoi+dm37Jk7c= github.com/onflow/atree v0.6.0/go.mod h1:gBHU0M05qCbv9NN0kijLWMgC47gHVNBIp4KmsVFi0tc= -github.com/onflow/cadence v0.39.12 h1:bb3UdOe7nClUcaLbxSWGLSIJKuCrivpgxhPow99ikv0= -github.com/onflow/cadence v0.39.12/go.mod h1:OIJLyVBPa339DCBQXBfGaorT4tBjQh9gSKe+ZAIyyh0= +github.com/onflow/cadence v0.20.1/go.mod h1:7mzUvPZUIJztIbr9eTvs+fQjWWHTF8veC+yk4ihcNIA= +github.com/onflow/cadence v0.39.14 h1:YoR3YFUga49rqzVY1xwI6I2ZDBmvwGh13jENncsleC8= +github.com/onflow/cadence v0.39.14/go.mod h1:OIJLyVBPa339DCBQXBfGaorT4tBjQh9gSKe+ZAIyyh0= github.com/onflow/flow v0.3.4 h1:FXUWVdYB90f/rjNcY0Owo30gL790tiYff9Pb/sycXYE= github.com/onflow/flow v0.3.4/go.mod h1:lzyAYmbu1HfkZ9cfnL5/sjrrsnJiUU8fRL26CqLP7+c= -github.com/onflow/flow-core-contracts/lib/go/contracts v1.2.3 h1:wV+gcgOY0oJK4HLZQYQoK+mm09rW1XSxf83yqJwj0n4= -github.com/onflow/flow-core-contracts/lib/go/contracts v1.2.3/go.mod h1:Osvy81E/+tscQM+d3kRFjktcIcZj2bmQ9ESqRQWDEx8= +github.com/onflow/flow-core-contracts/lib/go/contracts v1.2.4-0.20230703193002-53362441b57d h1:B7PdhdUNkve5MVrekWDuQf84XsGBxNZ/D3x+QQ8XeVs= +github.com/onflow/flow-core-contracts/lib/go/contracts v1.2.4-0.20230703193002-53362441b57d/go.mod h1:xAiV/7TKhw863r6iO3CS5RnQ4F+pBY1TxD272BsILlo= github.com/onflow/flow-core-contracts/lib/go/templates v1.2.3 h1:X25A1dNajNUtE+KoV76wQ6BR6qI7G65vuuRXxDDqX7E= github.com/onflow/flow-core-contracts/lib/go/templates v1.2.3/go.mod h1:dqAUVWwg+NlOhsuBHex7bEWmsUjsiExzhe/+t4xNH6A= github.com/onflow/flow-ft/lib/go/contracts v0.7.0 h1:XEKE6qJUw3luhsYmIOteXP53gtxNxrwTohgxJXCYqBE= github.com/onflow/flow-ft/lib/go/contracts v0.7.0/go.mod h1:kTMFIySzEJJeupk+7EmXs0EJ6CBWY/MV9fv9iYQk+RU= -github.com/onflow/flow-go-sdk v0.41.6 h1:x5HhmRDvbCWXRCzHITJxOp0Komq5JJ9zphoR2u6NOCg= -github.com/onflow/flow-go-sdk v0.41.6/go.mod h1:AYypQvn6ecMONhF3M1vBOUX9b4oHKFWkkrw8bO4VEik= -github.com/onflow/flow-go/crypto v0.24.7 h1:RCLuB83At4z5wkAyUCF7MYEnPoIIOHghJaODuJyEoW0= -github.com/onflow/flow-go/crypto v0.24.7/go.mod h1:fqCzkIBBMRRkciVrvW21rECKq1oD7Q6u+bCI78lfNX0= -github.com/onflow/flow/protobuf/go/flow v0.3.2-0.20230602212908-08fc6536d391 h1:6uKg0gpLKpTZKMihrsFR0Gkq++1hykzfR1tQCKuOfw4= -github.com/onflow/flow/protobuf/go/flow v0.3.2-0.20230602212908-08fc6536d391/go.mod h1:NA2pX2nw8zuaxfKphhKsk00kWLwfd+tv8mS23YXO4Sk= +github.com/onflow/flow-go-sdk v0.24.0/go.mod h1:IoptMLPyFXWvyd9yYA6/4EmSeeozl6nJoIv4FaEMg74= +github.com/onflow/flow-go-sdk v0.41.9 h1:cyplhhhc0RnfOAan2t7I/7C9g1hVGDDLUhWj6ZHAkk4= +github.com/onflow/flow-go-sdk v0.41.9/go.mod h1:e9Q5TITCy7g08lkdQJxP8fAKBnBoC5FjALvUKr36j4I= +github.com/onflow/flow-go/crypto v0.21.3/go.mod h1:vI6V4CY3R6c4JKBxdcRiR/AnjBfL8OSD97bJc60cLuQ= +github.com/onflow/flow-go/crypto v0.24.9 h1:0EQp+kSZYJepMIiSypfJVe7tzsPcb6UXOdOtsTCDhBs= +github.com/onflow/flow-go/crypto v0.24.9/go.mod h1:fqCzkIBBMRRkciVrvW21rECKq1oD7Q6u+bCI78lfNX0= +github.com/onflow/flow-nft/lib/go/contracts v1.1.0 h1:rhUDeD27jhLwOqQKI/23008CYfnqXErrJvc4EFRP2a0= +github.com/onflow/flow-nft/lib/go/contracts v1.1.0/go.mod h1:YsvzYng4htDgRB9sa9jxdwoTuuhjK8WYWXTyLkIigZY= +github.com/onflow/flow/protobuf/go/flow v0.2.2/go.mod h1:gQxYqCfkI8lpnKsmIjwtN2mV/N2PIwc1I+RUK4HPIc8= +github.com/onflow/flow/protobuf/go/flow v0.3.2-0.20230628215638-83439d22e0ce h1:YQKijiQaq8SF1ayNqp3VVcwbBGXSnuHNHq4GQmVGybE= +github.com/onflow/flow/protobuf/go/flow v0.3.2-0.20230628215638-83439d22e0ce/go.mod 
h1:NA2pX2nw8zuaxfKphhKsk00kWLwfd+tv8mS23YXO4Sk= github.com/onflow/go-bitswap v0.0.0-20230703214630-6d3db958c73d h1:QcOAeEyF3iAUHv21LQ12sdcsr0yFrJGoGLyCAzYYtvI= github.com/onflow/go-bitswap v0.0.0-20230703214630-6d3db958c73d/go.mod h1:GCPpiyRoHncdqPj++zPr9ZOYBX4hpJ0pYZRYqSE8VKk= github.com/onflow/sdks v0.5.0 h1:2HCRibwqDaQ1c9oUApnkZtEAhWiNY2GTpRD5+ftdkN8= @@ -1292,6 +1319,7 @@ github.com/pkg/profile v1.2.1/go.mod h1:hJw3o1OdXxsrSjjVksARp5W95eeEaEfptyVZyv6J github.com/pkg/profile v1.7.0 h1:hnbDkaNWPCLMO9wGLdBFTIZvzDrDfBM2072E1S9gJkA= github.com/pkg/profile v1.7.0/go.mod h1:8Uer0jas47ZQMJ7VD+OHknK4YDY07LPUC6dEvqDjvNo= github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg= +github.com/pkg/term v1.1.0/go.mod h1:E25nymQcrSllhX42Ok8MRm1+hyBdHY0dCeiKZ9jpNGw= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/polydawn/refmt v0.0.0-20201211092308-30ac6d18308e/go.mod h1:uIp+gprXxxrWSjjklXD+mN4wed/tMfjMMmN/9+JsA9o= @@ -1361,6 +1389,7 @@ github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJ github.com/rivo/uniseg v0.4.4 h1:8TfxU8dW6PdqD27gjM8MVNuicgxIjxpm4K7x4jp8sis= github.com/rivo/uniseg v0.4.4/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88= github.com/rjeczalik/notify v0.9.1/go.mod h1:rKwnCoCGeuQnwBtTSPL9Dad03Vh2n40ePRrjvIXnJho= +github.com/robertkrimen/otto v0.0.0-20170205013659-6a77b7cbc37d/go.mod h1:xvqspoSXJTIpemEonrMDFq6XzwHYYgToXWj5eRX1OtY= github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= @@ -1380,6 +1409,7 @@ github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQD github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E= +github.com/schollz/progressbar/v3 v3.8.3/go.mod h1:pWnVCjSBZsT2X3nx9HfRdnCDrpbevliMeoEVhStwHko= github.com/schollz/progressbar/v3 v3.13.1 h1:o8rySDYiQ59Mwzy2FELeHY5ZARXZTVJC7iHD6PEFUiE= github.com/schollz/progressbar/v3 v3.13.1/go.mod h1:xvrbki8kfT1fzWzBT/UZd9L6GA+jdL7HAgq2RFnO6fQ= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= @@ -1482,6 +1512,7 @@ github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcU github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/subosito/gotenv v1.4.2 h1:X1TuBLAMDFbaTAChgCBLu3DU3UPyELpnF2jjJ2cz/S8= github.com/subosito/gotenv v1.4.2/go.mod h1:ayKnFf/c6rvx/2iiLrJUk1e6plDbT3edrFNGqEflhK0= +github.com/supranational/blst v0.3.4/go.mod h1:jZJtfjgudtNl4en1tzwPIV3KjUnQUvG3/j+w+fVonLw= github.com/supranational/blst v0.3.10 h1:CMciDZ/h4pXDDXQASe8ZGTNKUiVNxVVA5hpci2Uuhuk= github.com/syndtr/goleveldb v1.0.0/go.mod h1:ZVVdQEZoIme9iO1Ch2Jdy24qqXrMMOU6lpPAyBWyWuQ= github.com/syndtr/goleveldb v1.0.1-0.20190923125748-758128399b1d/go.mod h1:9OrXJhf154huy1nPWmuSrkgjPUtUNhA+Zmy+6AESzuA= @@ -1543,8 +1574,10 @@ github.com/yusufpapurcu/wmi v1.2.2 
h1:KBNDSne4vP5mbSWnJbO+51IMOXJB67QiYCSBrubbPR github.com/yusufpapurcu/wmi v1.2.2/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= github.com/zeebo/assert v1.1.0 h1:hU1L1vLTHsnO8x8c9KAR5GmM5QscxHg5RNU5z5qbUWY= github.com/zeebo/assert v1.1.0/go.mod h1:Pq9JiuJQpG8JLJdtkwrJESF0Foym2/D9XMU5ciN/wJ0= +github.com/zeebo/blake3 v0.2.0/go.mod h1:G9pM4qQwjRzF1/v7+vabMj/c5mWpGZ2Wzo3Eb4z0pb4= github.com/zeebo/blake3 v0.2.3 h1:TFoLXsjeXqRNFxSbk35Dk4YtszE/MQQGK10BH4ptoTg= github.com/zeebo/blake3 v0.2.3/go.mod h1:mjJjZpnsyIVtVgTOSpJ9vmRE4wgDeyt2HU3qXvvKCaQ= +github.com/zeebo/pcg v1.0.0/go.mod h1:09F0S9iiKrwn9rlI5yjLkmrug154/YRW6KnnXVDM/l4= github.com/zeebo/pcg v1.0.1 h1:lyqfGeWiv4ahac6ttHs+I5hwtH/+1mrhlCtVNQM2kHo= github.com/zeebo/pcg v1.0.1/go.mod h1:09F0S9iiKrwn9rlI5yjLkmrug154/YRW6KnnXVDM/l4= go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= @@ -1592,6 +1625,7 @@ go.uber.org/dig v1.17.0/go.mod h1:rTxpf7l5I0eBTlE6/9RL+lDybC7WFwY2QH55ZSjy1mU= go.uber.org/fx v1.19.2 h1:SyFgYQFr1Wl0AYstE8vyYIzP4bFz2URrScjwC4cwUvY= go.uber.org/fx v1.19.2/go.mod h1:43G1VcqSzbIv77y00p1DRAsyZS8WdzuYdhZXmEUkMyQ= go.uber.org/goleak v1.0.0/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= +go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= go.uber.org/goleak v1.1.11-0.20210813005559-691160354723/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= go.uber.org/goleak v1.2.1 h1:NBol2c7O1ZokfZ0LEU9K6Whx/KnwvepVetCUhtKja4A= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= @@ -1631,6 +1665,7 @@ golang.org/x/crypto v0.0.0-20190618222545-ea8f1a30c443/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200115085410-6d4e4cb37c7d/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200117160349-530e935923ad/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200221231518-2aa609cf4a9d/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200311171314-f7b00557c8c4/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200423211502-4bdfaf469ed5/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= @@ -1643,11 +1678,15 @@ golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/crypto v0.0.0-20210506145944-38f3c27a63bf/go.mod h1:P+XmwS30IXTQdn5tA2iutPOUgjI07+tq3H3K9MVA1s8= golang.org/x/crypto v0.0.0-20210513164829-c07d793c2f9a/go.mod h1:P+XmwS30IXTQdn5tA2iutPOUgjI07+tq3H3K9MVA1s8= +golang.org/x/crypto v0.0.0-20210817164053-32db794688a5/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20211108221036-ceb1ce70b4fa/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.10.0 h1:LKqV2xt9+kDzSTfOhx4FrkEBcMrAgHSYgzywV9zcGmM= golang.org/x/crypto v0.10.0/go.mod h1:o4eNf7Ede1fv+hwOwZsTHl9EsPFO6q6ZvYR8vYfY45I= +golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod 
h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190125153040-c74c464bbbf2/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= @@ -1660,6 +1699,7 @@ golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMk golang.org/x/exp v0.0.0-20200331195152-e8c3332aa8e5/go.mod h1:4M0jN8W1tt0AVLNr8HDosyJCDCDuyL9N9+3m7wDWgKw= golang.org/x/exp v0.0.0-20230321023759-10a507213a29 h1:ooxPy7fPvB4kwsA2h+iBNHkAbp/4JxTSwCmvdjEYmug= golang.org/x/exp v0.0.0-20230321023759-10a507213a29/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc= +golang.org/x/image v0.0.0-20180708004352-c73c2afc3b81/go.mod h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= @@ -1816,6 +1856,7 @@ golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190610200419-93c9922d18ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190712062909-fae7ac547cb7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1824,12 +1865,14 @@ golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191220142924-d4481acd189f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200107162124-548cf772de50/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1851,7 +1894,10 @@ golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200909081042-eff7692f9009/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200918174421-af09f7315aff/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201014080544-cc95f250f6bc/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1881,6 +1927,8 @@ golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20210816074244-15123e1e1f71/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210910150752-751e447fb3d0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210917161153-d61c044b1678/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211025112917-711f33c9992c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -1902,6 +1950,7 @@ golang.org/x/sys v0.9.0 h1:KS/R3tvhPqvJvwcKfnBHJwwthS11LRhmM5D59eEXa0s= golang.org/x/sys v0.9.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U= golang.org/x/term v0.9.0 h1:GRRCnKYhdQrD8kfRAdQ6Zcw1P0OcELxGLKJvtjVMZ28= @@ -1924,12 +1973,14 @@ golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxb golang.org/x/time v0.1.0 h1:xYY+Bajn2a7VBmTM5GikTmnK8ZuX8YgnQCqZpbBNtmA= golang.org/x/time v0.1.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools 
v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181030000716-a0a13e073c7b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181130052023-1c3d964395ce/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190206041539-40960b6deb8e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= @@ -1976,6 +2027,7 @@ golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roY golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200828161849-5deb26317202/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= @@ -1998,8 +2050,12 @@ golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20220411194840-2f41105eb62f/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 h1:H2TDz8ibqkAF6YGhCdN3jS9O0/s90v0rJh3X/OLHEUk= golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= +gonum.org/v1/gonum v0.0.0-20180816165407-929014505bf4/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo= +gonum.org/v1/gonum v0.6.1/go.mod h1:9mxDZsDKxgMAuccQkewq682L+0eCu4dCN2yonUJTCLU= gonum.org/v1/gonum v0.13.0 h1:a0T3bh+7fhRyqeNbiC3qVHYmkiQgit3wnNan/2c0HMM= gonum.org/v1/gonum v0.13.0/go.mod h1:/WPYRckkfWrhWefxyYTfrTtQR0KH4iyHNuzxqXAKyAU= +gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw= +gonum.org/v1/plot v0.0.0-20190515093506-e2840ee46a6b/go.mod h1:Wt8AAjI+ypCyYX3nZBvf6cAIx93T+c/OS2HFAYskSZc= google.golang.org/api v0.0.0-20180910000450-7ca32eb868bf/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= google.golang.org/api v0.0.0-20181030000543-1d582fd0359e/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= google.golang.org/api v0.1.0/go.mod h1:UGEZY7KEX120AnNLIHFMKIo4obdJhkp2tPbaPlQx13Y= @@ -2033,6 +2089,7 @@ google.golang.org/api v0.54.0/go.mod 
h1:7C4bFFOvVDGXjfDTAsgGwDgAxRDeQ4X8NvUedIt6 google.golang.org/api v0.55.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE= google.golang.org/api v0.56.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE= google.golang.org/api v0.57.0/go.mod h1:dVPlbZyBo2/OjBpmvNdpn2GRm6rPy75jyU7bmhdrMgI= +google.golang.org/api v0.58.0/go.mod h1:cAbP2FsxoGVNwtgNAmmn3y5G1TWAiVYRmg4yku3lv+E= google.golang.org/api v0.61.0/go.mod h1:xQRti5UdCmoCEqFxcz93fTl338AVqDgyaDRuOZ3hg9I= google.golang.org/api v0.63.0/go.mod h1:gs4ij2ffTRXwuzzgJl/56BdwJaA194ijkfn++9tDuPo= google.golang.org/api v0.67.0/go.mod h1:ShHKP8E60yPsKNw/w8w+VYaj9H6buA5UqDp8dhbQZ6g= @@ -2118,7 +2175,10 @@ google.golang.org/genproto v0.0.0-20210828152312-66f60bf46e71/go.mod h1:eFjDcFEc google.golang.org/genproto v0.0.0-20210831024726-fe130286e0e2/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= google.golang.org/genproto v0.0.0-20210903162649-d08c68adba83/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= google.golang.org/genproto v0.0.0-20210909211513-a8c4777a87af/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210917145530-b395a37504d4/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210921142501-181ce0d877f6/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/genproto v0.0.0-20210924002016-3dee208752a0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211007155348-82e027067bd4/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/genproto v0.0.0-20211206160659-862468c7d6e0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= @@ -2215,8 +2275,10 @@ gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce/go.mod h1:5AcXVHNjg+BDxry382+8OKon8SEWiKktQR07RKPsv1c= +gopkg.in/olebedev/go-duktape.v3 v3.0.0-20190213234257-ec84240a7772/go.mod h1:uAJfkITjFhyEEuUfm7bsmCZRbW5WRq8s9EY8HZ6hCns= gopkg.in/olebedev/go-duktape.v3 v3.0.0-20200316214253-d7b0ff38cac9/go.mod h1:uAJfkITjFhyEEuUfm7bsmCZRbW5WRq8s9EY8HZ6hCns= gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= +gopkg.in/sourcemap.v1 v1.0.5/go.mod h1:2RlvNNSMglmRrcvhfuzp4hQHwOtjxlbjX7UPY/GXb78= gopkg.in/src-d/go-cli.v0 v0.0.0-20181105080154-d492247bbc0d/go.mod h1:z+K8VcOYVYcSwSjGebuDL6176A1XskgbtNl64NSg+n8= gopkg.in/src-d/go-log.v1 v1.0.1/go.mod h1:GN34hKP0g305ysm2/hctJ0Y8nWP3zxXXJ8GFabTyABE= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= @@ -2255,6 +2317,7 @@ nhooyr.io/websocket v1.8.7/go.mod h1:B70DZP8IakI65RVQ51MsWP/8jndNma26DVA/nFSCgW0 pgregory.net/rapid v0.4.7 h1:MTNRktPuv5FNqOO151TM9mDTa+XHcX6ypYeISDVD14g= pgregory.net/rapid v0.4.7/go.mod h1:UYpPVyjFHzYBGHIxLFoupi8vwk6rXNzRY9OMvVxFIOU= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= +rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod 
h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o=
diff --git a/insecure/corruptlibp2p/gossipsub_spammer.go b/insecure/corruptlibp2p/gossipsub_spammer.go
index 2ec81a89e53..d38e1dfa12b 100644
--- a/insecure/corruptlibp2p/gossipsub_spammer.go
+++ b/insecure/corruptlibp2p/gossipsub_spammer.go
@@ -26,9 +26,33 @@ type GossipSubRouterSpammer struct {
 	SpammerId flow.Identity
 }
 
-// NewGossipSubRouterSpammer is the main method tests call for spamming attacks.
-func NewGossipSubRouterSpammer(t *testing.T, sporkId flow.Identifier, role flow.Role, provider module.IdentityProvider, opts ...p2ptest.NodeFixtureParameterOption) *GossipSubRouterSpammer {
-	spammerNode, spammerId, router := createSpammerNode(t, sporkId, role, provider, opts...)
+// NewGossipSubRouterSpammer creates a new GossipSubRouterSpammer.
+// Args:
+// - t: the test object.
+// - sporkId: the spork node's ID.
+// - role: the role of the spork node.
+// - provider: the identity provider.
+// Returns:
+// - the GossipSubRouterSpammer.
+func NewGossipSubRouterSpammer(t *testing.T, sporkId flow.Identifier, role flow.Role, provider module.IdentityProvider) *GossipSubRouterSpammer {
+	return NewGossipSubRouterSpammerWithRpcInspector(t, sporkId, role, provider, func(id peer.ID, rpc *corrupt.RPC) error {
+		return nil // no-op
+	})
+}
+
+// NewGossipSubRouterSpammerWithRpcInspector creates a new GossipSubRouterSpammer with a custom RPC inspector.
+// The RPC inspector is called before each incoming RPC is processed by the router.
+// If the inspector returns an error, the RPC is dropped.
+// Args:
+// - t: the test object.
+// - sporkId: the spork node's ID.
+// - role: the role of the spork node.
+// - provider: the identity provider.
+// - inspector: the RPC inspector.
+// Returns:
+// - the GossipSubRouterSpammer.
+func NewGossipSubRouterSpammerWithRpcInspector(t *testing.T, sporkId flow.Identifier, role flow.Role, provider module.IdentityProvider, inspector func(id peer.ID, rpc *corrupt.RPC) error) *GossipSubRouterSpammer {
+	spammerNode, spammerId, router := newSpammerNodeWithRpcInspector(t, sporkId, role, provider, inspector)
 	return &GossipSubRouterSpammer{
 		router:      router,
 		SpammerNode: spammerNode,
@@ -65,17 +89,31 @@ func (s *GossipSubRouterSpammer) Start(t *testing.T) {
 	s.router.set(s.router.Get())
 }
 
-func createSpammerNode(t *testing.T, sporkId flow.Identifier, role flow.Role, provider module.IdentityProvider, opts ...p2ptest.NodeFixtureParameterOption) (p2p.LibP2PNode, flow.Identity, *atomicRouter) {
+// newSpammerNodeWithRpcInspector creates a new spammer node, which is capable of sending spam control and actual messages to other nodes.
+// It also creates a new atomic router that allows us to set the router to a new instance of the corrupt router.
+// Args:
+// - sporkId: the spork id of the spammer node.
+// - role: the role of the spammer node.
+// - provider: the identity provider of the spammer node.
+// - inspector: the inspector function that is called when a message is received by the spammer node.
+// Returns:
+// - p2p.LibP2PNode: the spammer node.
+// - flow.Identity: the identity of the spammer node.
+// - *atomicRouter: the atomic router that allows us to set the router to a new instance of the corrupt router.
+func newSpammerNodeWithRpcInspector(
+	t *testing.T,
+	sporkId flow.Identifier,
+	role flow.Role,
+	provider module.IdentityProvider,
+	inspector func(id peer.ID, rpc *corrupt.RPC) error) (p2p.LibP2PNode, flow.Identity, *atomicRouter) {
 	router := newAtomicRouter()
+	var opts []p2ptest.NodeFixtureParameterOption
 	opts = append(opts,
 		p2ptest.WithRole(role),
 		internal.WithCorruptGossipSub(CorruptGossipSubFactory(func(r *corrupt.GossipSubRouter) {
 			require.NotNil(t, r)
 			router.set(r)
 		}),
-			CorruptGossipSubConfigFactoryWithInspector(func(id peer.ID, rpc *corrupt.RPC) error {
-				// here we can inspect the incoming RPC message to the spammer node
-				return nil
-			})))
+			CorruptGossipSubConfigFactoryWithInspector(inspector)))
 	spammerNode, spammerId := p2ptest.NodeFixture(
 		t,
 		sporkId,
diff --git a/insecure/corruptlibp2p/spam_test.go b/insecure/corruptlibp2p/spam_test.go
index 06f6183c03e..2886b598c66 100644
--- a/insecure/corruptlibp2p/spam_test.go
+++ b/insecure/corruptlibp2p/spam_test.go
@@ -71,9 +71,9 @@ func TestSpam_IHave(t *testing.T) {
 	// this is vital as the spammer will circumvent the normal pubsub subscription mechanism and send iHAVE messages directly to the victim.
 	// without a prior connection established, directly spamming pubsub messages may cause a race condition in the pubsub implementation.
 	p2ptest.TryConnectionAndEnsureConnected(t, ctx, nodes)
-	p2ptest.EnsurePubsubMessageExchange(t, ctx, nodes, func() (interface{}, channels.Topic) {
-		blockTopic := channels.TopicFromChannel(channels.PushBlocks, sporkId)
-		return unittest.ProposalFixture(), blockTopic
+	blockTopic := channels.TopicFromChannel(channels.PushBlocks, sporkId)
+	p2ptest.EnsurePubsubMessageExchange(t, ctx, nodes, blockTopic, 1, func() interface{} {
+		return unittest.ProposalFixture()
 	})
 
 	// prepare to spam - generate iHAVE control messages
diff --git a/insecure/go.mod b/insecure/go.mod
index fba888f2997..9a64f440592 100644
--- a/insecure/go.mod
+++ b/insecure/go.mod
@@ -1,6 +1,6 @@
 module github.com/onflow/flow-go/insecure
 
-go 1.19
+go 1.20
 
 require (
 	github.com/golang/protobuf v1.5.3
@@ -9,22 +9,22 @@ require (
 	github.com/libp2p/go-libp2p v0.28.1
 	github.com/libp2p/go-libp2p-pubsub v0.9.3
 	github.com/multiformats/go-multiaddr-dns v0.3.1
-	github.com/onflow/flow-go v0.29.8
-	github.com/onflow/flow-go/crypto v0.24.7
+	github.com/onflow/flow-go v0.31.1-0.20230718164039-e3411eff1e9d
+	github.com/onflow/flow-go/crypto v0.24.9
 	github.com/rs/zerolog v1.29.0
 	github.com/spf13/pflag v1.0.5
 	github.com/stretchr/testify v1.8.4
 	github.com/yhassanzadeh13/go-libp2p-pubsub v0.6.11-flow-expose-msg.0.20230703223453-544e2fe28a26
 	go.uber.org/atomic v1.11.0
-	google.golang.org/grpc v1.55.0
+	google.golang.org/grpc v1.56.1
 	google.golang.org/protobuf v1.30.0
 )
 
 require (
 	cloud.google.com/go v0.110.0 // indirect
-	cloud.google.com/go/compute v1.18.0 // indirect
+	cloud.google.com/go/compute v1.19.1 // indirect
 	cloud.google.com/go/compute/metadata v0.2.3 // indirect
-	cloud.google.com/go/iam v0.12.0 // indirect
+	cloud.google.com/go/iam v0.13.0 // indirect
 	cloud.google.com/go/storage v1.28.1 // indirect
 	github.com/aws/aws-sdk-go-v2 v1.17.7 // indirect
 	github.com/aws/aws-sdk-go-v2/config v1.18.19 // indirect
@@ -59,7 +59,7 @@ require (
 	github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0 // indirect
 	github.com/desertbit/timer v0.0.0-20180107155436-c41aec40b27f // indirect
 	github.com/dgraph-io/badger/v2 v2.2007.4 // indirect
-	github.com/dgraph-io/ristretto v0.0.3-0.20200630154024-f66de99634de // indirect
+	github.com/dgraph-io/ristretto v0.1.0 // indirect
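For context on the spammer API change above: a minimal sketch of how a test might plug a counting RPC inspector into NewGossipSubRouterSpammerWithRpcInspector. It assumes the import paths the insecure module already uses (corruptlibp2p, the corrupt pubsub fork, libp2p's peer package); the helper name rpcCountingSpammer and the choice of flow.RoleConsensus are illustrative only.

package corruptlibp2p_test

import (
	"sync/atomic"
	"testing"

	"github.com/libp2p/go-libp2p/core/peer"
	corrupt "github.com/yhassanzadeh13/go-libp2p-pubsub"

	"github.com/onflow/flow-go/insecure/corruptlibp2p"
	"github.com/onflow/flow-go/model/flow"
	"github.com/onflow/flow-go/module"
)

// rpcCountingSpammer (illustrative helper, not part of this PR) builds a spammer whose
// inspector counts every RPC delivered to the spammer node; returning a non-nil error
// from the inspector would drop the RPC before the corrupt router processes it.
func rpcCountingSpammer(t *testing.T, sporkId flow.Identifier, provider module.IdentityProvider) (*corruptlibp2p.GossipSubRouterSpammer, *atomic.Int64) {
	seen := new(atomic.Int64)
	spammer := corruptlibp2p.NewGossipSubRouterSpammerWithRpcInspector(
		t, sporkId, flow.RoleConsensus, provider,
		func(_ peer.ID, _ *corrupt.RPC) error {
			seen.Add(1) // observe only; never drop
			return nil
		})
	return spammer, seen
}

A test would then start the spammer and victim nodes, connect them, send spam as in the existing spam tests, and assert on seen.Load().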
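Similarly, the spam_test.go hunk reflects a changed p2ptest.EnsurePubsubMessageExchange signature: the topic and what appears to be a message count are now explicit arguments, and the fixture closure returns only the payload. A small sketch of the new call shape, with argument roles inferred from this one call site; parameter names, import paths, and the plain context.Context for ctx are assumptions rather than the helper's documented API.

package corruptlibp2p_test

import (
	"context"
	"testing"

	"github.com/onflow/flow-go/model/flow"
	"github.com/onflow/flow-go/network/channels"
	"github.com/onflow/flow-go/network/p2p"
	p2ptest "github.com/onflow/flow-go/network/p2p/test"
	"github.com/onflow/flow-go/utils/unittest"
)

// ensureBlockProposalExchange (illustrative) wraps the updated helper call:
// EnsurePubsubMessageExchange(t, ctx, nodes, topic, count, payloadFactory).
func ensureBlockProposalExchange(t *testing.T, ctx context.Context, nodes []p2p.LibP2PNode, sporkId flow.Identifier) {
	blockTopic := channels.TopicFromChannel(channels.PushBlocks, sporkId)
	p2ptest.EnsurePubsubMessageExchange(t, ctx, nodes, blockTopic, 1, func() interface{} {
		return unittest.ProposalFixture() // payload only; the topic is no longer returned by the factory
	})
}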
github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2 // indirect github.com/docker/go-units v0.5.0 // indirect github.com/dustin/go-humanize v1.0.1 // indirect @@ -74,7 +74,6 @@ require ( github.com/gabriel-vasile/mimetype v1.4.2 // indirect github.com/gammazero/deque v0.1.0 // indirect github.com/gammazero/workerpool v1.1.2 // indirect - github.com/ghodss/yaml v1.0.0 // indirect github.com/go-kit/kit v0.12.0 // indirect github.com/go-kit/log v0.2.1 // indirect github.com/go-logfmt/logfmt v0.5.1 // indirect @@ -103,14 +102,14 @@ require ( github.com/grpc-ecosystem/go-grpc-middleware/providers/zerolog/v2 v2.0.0-rc.2 // indirect github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.0.0-20200501113911-9a95f0fdbfea // indirect github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 // indirect - github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0 // indirect + github.com/grpc-ecosystem/grpc-gateway/v2 v2.11.3 // indirect github.com/hashicorp/errwrap v1.1.0 // indirect github.com/hashicorp/golang-lru v0.5.4 // indirect github.com/hashicorp/golang-lru/v2 v2.0.2 // indirect github.com/hashicorp/hcl v1.0.0 // indirect github.com/huin/goupnp v1.2.0 // indirect github.com/improbable-eng/grpc-web v0.15.0 // indirect - github.com/inconshreveable/mousetrap v1.0.1 // indirect + github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/ipfs/bbloom v0.0.4 // indirect github.com/ipfs/boxo v0.10.0 // indirect github.com/ipfs/go-block-format v0.1.2 // indirect @@ -181,12 +180,13 @@ require ( github.com/multiformats/go-multistream v0.4.1 // indirect github.com/multiformats/go-varint v0.0.7 // indirect github.com/onflow/atree v0.6.0 // indirect - github.com/onflow/cadence v0.39.12 // indirect - github.com/onflow/flow-core-contracts/lib/go/contracts v1.2.3 // indirect + github.com/onflow/cadence v0.39.14 // indirect + github.com/onflow/flow-core-contracts/lib/go/contracts v1.2.4-0.20230703193002-53362441b57d // indirect github.com/onflow/flow-core-contracts/lib/go/templates v1.2.3 // indirect github.com/onflow/flow-ft/lib/go/contracts v0.7.0 // indirect - github.com/onflow/flow-go-sdk v0.41.6 // indirect - github.com/onflow/flow/protobuf/go/flow v0.3.2-0.20230602212908-08fc6536d391 // indirect + github.com/onflow/flow-go-sdk v0.41.9 // indirect + github.com/onflow/flow-nft/lib/go/contracts v1.1.0 // indirect + github.com/onflow/flow/protobuf/go/flow v0.3.2-0.20230628215638-83439d22e0ce // indirect github.com/onflow/go-bitswap v0.0.0-20230703214630-6d3db958c73d // indirect github.com/onflow/sdks v0.5.0 // indirect github.com/onflow/wal v0.0.0-20230529184820-bc9f8244608d // indirect @@ -220,7 +220,7 @@ require ( github.com/spaolacci/murmur3 v1.1.0 // indirect github.com/spf13/afero v1.9.3 // indirect github.com/spf13/cast v1.5.0 // indirect - github.com/spf13/cobra v1.6.1 // indirect + github.com/spf13/cobra v1.7.0 // indirect github.com/spf13/jwalterweatherman v1.1.0 // indirect github.com/spf13/viper v1.15.0 // indirect github.com/stretchr/objx v0.5.0 // indirect @@ -253,7 +253,7 @@ require ( golang.org/x/exp v0.0.0-20230321023759-10a507213a29 // indirect golang.org/x/mod v0.10.0 // indirect golang.org/x/net v0.10.0 // indirect - golang.org/x/oauth2 v0.6.0 // indirect + golang.org/x/oauth2 v0.7.0 // indirect golang.org/x/sync v0.2.0 // indirect golang.org/x/sys v0.9.0 // indirect golang.org/x/term v0.9.0 // indirect @@ -264,10 +264,9 @@ require ( gonum.org/v1/gonum v0.13.0 // indirect google.golang.org/api v0.114.0 // indirect google.golang.org/appengine v1.6.7 // indirect - google.golang.org/genproto 
v0.0.0-20230306155012-7f2fa6fef1f4 // indirect + google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 // indirect google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.2.0 // indirect gopkg.in/ini.v1 v1.67.0 // indirect - gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect lukechampine.com/blake3 v1.2.1 // indirect nhooyr.io/websocket v1.8.7 // indirect diff --git a/insecure/go.sum b/insecure/go.sum index 82276186e6f..25f044da73e 100644 --- a/insecure/go.sum +++ b/insecure/go.sum @@ -19,6 +19,16 @@ cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHOb cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI= cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk= cloud.google.com/go v0.75.0/go.mod h1:VGuuCn7PG0dwsd5XPVm2Mm3wlh3EL55/79EKB6hlPTY= +cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg= +cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8= +cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0= +cloud.google.com/go v0.83.0/go.mod h1:Z7MJUsANfY0pYPdw0lbnivPx4/vhy/e2FEkSkF7vAVY= +cloud.google.com/go v0.84.0/go.mod h1:RazrYuxIK6Kb7YrzzhPoLmCVzl7Sup4NrbKPg8KHSUM= +cloud.google.com/go v0.87.0/go.mod h1:TpDYlFy7vuLzZMMZ+B6iRiELaY7z/gJPaqbMx6mlWcY= +cloud.google.com/go v0.90.0/go.mod h1:kRX0mNRHe0e2rC6oNakvwQqzyDmg57xJ+SZU1eT2aDQ= +cloud.google.com/go v0.93.3/go.mod h1:8utlLll2EF5XMAV15woO4lSbWQlk8rer9aLOfLh7+YI= +cloud.google.com/go v0.94.1/go.mod h1:qAlAugsXlC+JWO+Bke5vCtc9ONxjQT3drlTTnAplMW4= +cloud.google.com/go v0.97.0/go.mod h1:GF7l59pYBVlXQIBLx3a761cZ41F9bBH3JUlihCt2Udc= cloud.google.com/go v0.110.0 h1:Zc8gqp3+a9/Eyph2KDmcGaPtbKRIoqq4YTlL4NMD0Ys= cloud.google.com/go v0.110.0/go.mod h1:SJnCLqQ0FCFGSZMUNUf84MV3Aia54kn7pi8st7tMzaY= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= @@ -27,14 +37,15 @@ cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvf cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= -cloud.google.com/go/compute v1.18.0 h1:FEigFqoDbys2cvFkZ9Fjq4gnHBP55anJ0yQyau2f9oY= -cloud.google.com/go/compute v1.18.0/go.mod h1:1X7yHxec2Ga+Ss6jPyjxRxpu2uu7PLgsOVXvgU0yacs= +cloud.google.com/go/compute v1.19.1 h1:am86mquDUgjGNWxiGn+5PGLbmgiWXlE/yNWpIpNvuXY= +cloud.google.com/go/compute v1.19.1/go.mod h1:6ylj3a05WF8leseCdIf77NK0g1ey+nj5IKd5/kvShxE= cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY= cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= -cloud.google.com/go/iam v0.12.0 h1:DRtTY29b75ciH6Ov1PHb4/iat2CLCvrOm40Q0a6DFpE= -cloud.google.com/go/iam v0.12.0/go.mod h1:knyHGviacl11zrtZUoDuYpDgLjvr28sLQaG0YB2GYAY= +cloud.google.com/go/iam v0.13.0 h1:+CmB+K0J/33d0zSQ9SlFWUeCCEn5XJA0ZMZ3pHE9u8k= +cloud.google.com/go/iam v0.13.0/go.mod h1:ljOg+rcNfzZ5d6f1nAUJ8ZIxOaZUVoS14bKCtaLZ/D0= +cloud.google.com/go/kms v1.0.0/go.mod h1:nhUehi+w7zht2XrUfvTRNpxrfayBHqP4lu2NSywui/0= cloud.google.com/go/longrunning v0.4.1 
h1:v+yFJOfKC3yZdY6ZUI933pIYdhyhV8S3NpWrXWmg7jM= cloud.google.com/go/profiler v0.3.0 h1:R6y/xAeifaUXxd2x6w+jIwKxoKl8Cv5HJvcvASTPWJo= cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= @@ -85,6 +96,7 @@ github.com/VictoriaMetrics/fastcache v1.5.3/go.mod h1:+jv9Ckb+za/P1ZRg/sulP5Ni1v github.com/VividCortex/gohistogram v1.0.0/go.mod h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/4+TcAqDqk/vUH7g= github.com/aead/siphash v1.0.1/go.mod h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBAUSII= github.com/afex/hystrix-go v0.0.0-20180502004556-fa1af6a1f4f5/go.mod h1:SkGFH1ia65gfNATL8TAiHDNxPzPdmEL5uirI2Uyuz6c= +github.com/ajstarks/svgo v0.0.0-20180226025133-644b8db467af/go.mod h1:K08gAheRH3/J6wwsYMMT4xOr94bZjxIelGM0+d/wbFw= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= @@ -179,6 +191,8 @@ github.com/btcsuite/snappy-go v1.0.0/go.mod h1:8woku9dyThutzjeg+3xrA5iCpBRH8XEEg github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792/go.mod h1:ghJtEyQwv5/p4Mg4C0fgbePVuGr935/5ddU9Z3TmDRY= github.com/btcsuite/winsvc v1.0.0/go.mod h1:jsenWakMcC0zFBFurPLEAyrnc/teJEM1O46fmI40EZs= github.com/buger/jsonparser v0.0.0-20181115193947-bf1c66bbce23/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s= +github.com/bytecodealliance/wasmtime-go v0.22.0/go.mod h1:q320gUxqyI8yB+ZqRuaJOEnGkAnHh6WtJjMaT2CW4wI= +github.com/c-bata/go-prompt v0.2.5/go.mod h1:vFnjEGDIIA/Lib7giyE4E9c50Lvl8j0S+7FVlAwDAVw= github.com/casbin/casbin/v2 v2.1.2/go.mod h1:YcPU1XXisHhLzuxH9coDNf2FbKpjGlbCg3n9yuLkIJQ= github.com/cenkalti/backoff v2.2.1+incompatible h1:tNowT99t7UNflLxfYYSlKYsBpXdEet03Pg2g16Swow4= github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= @@ -258,8 +272,9 @@ github.com/dgraph-io/badger/v2 v2.2007.3/go.mod h1:26P/7fbL4kUZVEVKLAKXkBXKOydDm github.com/dgraph-io/badger/v2 v2.2007.4 h1:TRWBQg8UrlUhaFdco01nO2uXwzKS7zd+HVdwV/GHc4o= github.com/dgraph-io/badger/v2 v2.2007.4/go.mod h1:vSw/ax2qojzbN6eXHIx6KPKtCSHJN/Uz0X0VPruTIhk= github.com/dgraph-io/ristretto v0.0.2/go.mod h1:KPxhHT9ZxKefz+PCeOGsrHpl1qZ7i70dGTu2u+Ahh6E= -github.com/dgraph-io/ristretto v0.0.3-0.20200630154024-f66de99634de h1:t0UHb5vdojIDUqktM6+xJAfScFBsVpXZmqC9dsgJmeA= github.com/dgraph-io/ristretto v0.0.3-0.20200630154024-f66de99634de/go.mod h1:KPxhHT9ZxKefz+PCeOGsrHpl1qZ7i70dGTu2u+Ahh6E= +github.com/dgraph-io/ristretto v0.1.0 h1:Jv3CGQHp9OjuMBSne1485aDpUkTKEcUqF+jm/LuerPI= +github.com/dgraph-io/ristretto v0.1.0/go.mod h1:fux0lOrBhrVCJd3lcTHsIJhq1T2rokOu6v9Vcb3Q9ug= github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/dgryski/go-farm v0.0.0-20190104051053-3adb47b1fb0f/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2 h1:tdlZCpZ/P9DhczCTSixgIKmwPv6+wP5DGjqLYw5SUiA= @@ -292,9 +307,11 @@ github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.m github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po= github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod 
h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= +github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/ethereum/go-ethereum v1.9.9/go.mod h1:a9TqabFudpDu1nucId+k9S8R9whYaHnGBLKFouA5EAo= github.com/ethereum/go-ethereum v1.9.13 h1:rOPqjSngvs1VSYH2H+PMPiWt4VEulvNRbFgqiGqJM3E= github.com/ethereum/go-ethereum v1.9.13/go.mod h1:qwN9d1GLyDh0N7Ab8bMGd0H9knaji2jOBm2RrMGjXls= github.com/fatih/color v1.3.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= @@ -304,6 +321,7 @@ github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI github.com/flynn/noise v0.0.0-20180327030543-2492fe189ae6/go.mod h1:1i71OnUq3iUe1ma7Lr6yG6/rjvM3emb6yoL7xLFzcVQ= github.com/flynn/noise v1.0.0 h1:DlTHqmzmvcEiKj+4RYo/imoswx/4r6iBlCMfVtrMXpQ= github.com/flynn/noise v1.0.0/go.mod h1:xbMo+0i6+IGbYdJhF31t2eR1BIU0CYc12+BNAKwUTag= +github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k= github.com/francoispqt/gojay v1.2.13 h1:d2m3sFjloqoIUQU3TsHBgj6qg/BVGlTBeHDUmyJnXKk= github.com/francoispqt/gojay v1.2.13/go.mod h1:ehT5mTG4ua4581f1++1WLG0vPdaA9HaiDsoyrBGkyDY= github.com/franela/goblin v0.0.0-20200105215937-c9ffbefa60db/go.mod h1:7dvUGVsVBjqR7JHJk0brhHOZYGmfBYOrK0ZhYMEtBr4= @@ -314,8 +332,10 @@ github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMo github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY= github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw= +github.com/fxamacker/cbor/v2 v2.2.1-0.20210927235116-3d6d5d1de29b/go.mod h1:TA1xS00nchWmaBnEIxPSE5oHLuJBAVvqrtAnWBwBCVo= github.com/fxamacker/cbor/v2 v2.4.1-0.20230228173756-c0c9f774e40c h1:5tm/Wbs9d9r+qZaUFXk59CWDD0+77PBqDREffYkyi5c= github.com/fxamacker/cbor/v2 v2.4.1-0.20230228173756-c0c9f774e40c/go.mod h1:TA1xS00nchWmaBnEIxPSE5oHLuJBAVvqrtAnWBwBCVo= +github.com/fxamacker/circlehash v0.1.0/go.mod h1:3aq3OfVvsWtkWMb6A1owjOQFA+TLsD5FgJflnaQwtMM= github.com/fxamacker/circlehash v0.3.0 h1:XKdvTtIJV9t7DDUtsf0RIpC1OcxZtPbmgIH7ekx28WA= github.com/fxamacker/circlehash v0.3.0/go.mod h1:3aq3OfVvsWtkWMb6A1owjOQFA+TLsD5FgJflnaQwtMM= github.com/gabriel-vasile/mimetype v1.4.2 h1:w5qFW6JKBz9Y393Y4q372O9A7cUSequkh1Q7OhCmWKU= @@ -325,7 +345,6 @@ github.com/gammazero/deque v0.1.0/go.mod h1:KQw7vFau1hHuM8xmI9RbgKFbAsQFWmBpqQ2K github.com/gammazero/workerpool v1.1.2 h1:vuioDQbgrz4HoaCi2q1HLlOXdpbap5AET7xu5/qj87g= github.com/gammazero/workerpool v1.1.2/go.mod h1:UelbXcO0zCIGFcufcirHhq2/xtLXJdQ29qZNlXG9OjQ= github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff/go.mod h1:x7DCsMOv1taUwEWCzT4cmDeAkigA5/QCwUodaVOe8Ww= -github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/gin-contrib/sse v0.1.0 h1:Y/yl/+YNO8GZSjAhjMsSuLt29uWRFHdHYUb5lYOV9qE= github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI= @@ -376,6 +395,7 @@ 
github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI= github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls= +github.com/go-test/deep v1.0.5/go.mod h1:QV8Hv/iy04NyLBxAdO9njL0iVPN1S4d/A3NVv1V36o8= github.com/go-test/deep v1.1.0 h1:WOcxcdHcvdgThNXjw0t76K42FXTU7HpNQWHpA2HHNlg= github.com/go-test/deep v1.1.0/go.mod h1:5C2ZWiW0ErCdrYzpqxLbTX7MG14M9iiw8DgHncVwcsE= github.com/go-yaml/yaml v2.1.0+incompatible/go.mod h1:w2MrLa16VYP0jy6N7M5kHaCkaLENm+P+Tv+MfurjSw0= @@ -397,6 +417,7 @@ github.com/gogo/protobuf v1.3.0/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXP github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= +github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4= github.com/golang/glog v1.1.0 h1:/d3pCKDPWNnvIWe0vVUpNP32qc8U3PDVxySP/y360qE= @@ -417,6 +438,7 @@ github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= +github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8= github.com/golang/mock v1.6.0 h1:ErTB+efbowRARo13NNdxyJji2egdxLGQhRaY+DUumQc= github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= @@ -436,6 +458,7 @@ github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QD github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM= github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= @@ -472,6 +495,7 @@ github.com/google/martian v2.1.0+incompatible h1:/CP5g8u/VJHijgedC/Legn3BAbAaWPg github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= +github.com/google/martian/v3 v3.2.1/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk= github.com/google/martian/v3 v3.3.2 h1:IqNFLAmvJOgVlpdEBiQbDc2EwKW77amAycfTuWKdfvw= github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod 
h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= @@ -483,6 +507,11 @@ github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hf github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20201218002935-b9804c9f04c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20230602150820-91b7bce49751 h1:hR7/MlvK23p6+lIw9SN1TigNLn9ZnF3W4SYRKq2gAHs= github.com/google/pprof v0.0.0-20230602150820-91b7bce49751/go.mod h1:Jh3hGz2jkYak8qXPD19ryItVnUgpgeqzdkY/D0EaeuA= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= @@ -498,6 +527,8 @@ github.com/googleapis/gax-go v2.0.0+incompatible/go.mod h1:SFVmujtThgffbyetf+mdk github.com/googleapis/gax-go/v2 v2.0.3/go.mod h1:LLvjysVCY1JZeum8Z6l8qUty8fiNwE08qbEPm1M08qg= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= +github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0= +github.com/googleapis/gax-go/v2 v2.1.1/go.mod h1:hddJymUZASv3XPyGkUpKj8pPO47Rmb0eJc8R6ouapiM= github.com/googleapis/gax-go/v2 v2.7.1 h1:gF4c0zjUP2H/s/hEGyLA3I0fA2ZWjzYiONAD6cvPr8A= github.com/googleapis/gax-go/v2 v2.7.1/go.mod h1:4orTrqY6hXxxaUL4LHIPl6lGo8vAE38/qKbhSAKP6QI= github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g= @@ -530,8 +561,9 @@ github.com/grpc-ecosystem/grpc-gateway v1.5.0/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpg github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0 h1:BZHcxBETFHIdVyhyEfOvn/RdU/QGdLI4y34qQGjGWO0= github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0/go.mod h1:hgWBS7lorOAVIJEQMi4ZsPv9hVvWI6+ch50m39Pf2Ks= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.11.3 h1:lLT7ZLSzGLI08vc9cpd+tYmNWjdKDqyr/2L+f6U12Fk= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.11.3/go.mod h1:o//XUCC/F+yRGJoPO/VU0GSB0f8Nhgmxx0VIRUvaC0w= github.com/gxed/hashland/keccakpg v0.0.1/go.mod h1:kRzw3HkwxFU1mpmPP8v1WyQzwdGfmKFJ6tItnhQ67kU= github.com/gxed/hashland/murmur3 v0.0.1/go.mod h1:KjXop02n4/ckmZSnY2+HKcLud/tcmvhST0bie/0lS48= github.com/hashicorp/consul/api v1.3.0/go.mod h1:MmDNSzIMUjNpY/mQ398R4bk2FnqQLoPndWW5VkKPlCE= @@ -577,8 +609,8 @@ github.com/ianlancetaylor/demangle 
v0.0.0-20200824232613-28f6c0f3b639/go.mod h1: github.com/improbable-eng/grpc-web v0.15.0 h1:BN+7z6uNXZ1tQGcNAuaU1YjsLTApzkjt2tzCixLaUPQ= github.com/improbable-eng/grpc-web v0.15.0/go.mod h1:1sy9HKV4Jt9aEs9JSnkWlRJPuPtwNr0l57L4f878wP8= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= -github.com/inconshreveable/mousetrap v1.0.1 h1:U3uMjPSQEBMNp1lFxmllqCPM6P5u/Xq7Pgzkat/bFNc= -github.com/inconshreveable/mousetrap v1.0.1/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= +github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= +github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= github.com/influxdata/influxdb v1.2.3-0.20180221223340-01288bdb0883/go.mod h1:qZna6X/4elxqT3yI9iZYdZrWWdeFOOprn86kgg4+IzY= github.com/influxdata/influxdb1-client v0.0.0-20191209144304-8bf82d3c094d/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo= github.com/ipfs/bbloom v0.0.1/go.mod h1:oqo8CVWsJFMOZqTglBG4wydCE4IQA/G2/SEofB0rjUI= @@ -736,9 +768,11 @@ github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfV github.com/julienschmidt/httprouter v1.1.1-0.20170430222011-975b5c4c7c21/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= +github.com/jung-kurt/gofpdf v1.0.3-0.20190309125859-24315acbbda5/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes= github.com/k0kubun/go-ansi v0.0.0-20180517002512-3bf9e2903213/go.mod h1:vNUNkEQ1e29fT/6vq2aBdFsgNPmy8qMdSay1npru+Sw= github.com/kami-zh/go-capturer v0.0.0-20171211120116-e492ea43421d/go.mod h1:P2viExyCEfeWGU259JnaQ34Inuec4R38JCyBx2edgD0= github.com/karalabe/usb v0.0.0-20190919080040-51dc0efba356/go.mod h1:Od972xHfMJowv7NGVDiWVxk2zxnWgjLlJzE+F4F7AGU= +github.com/kevinburke/go-bindata v3.22.0+incompatible/go.mod h1:/pEEZ72flUW2p0yi30bslSp9YqD9pysLxunQDdb2CPM= github.com/kevinburke/go-bindata v3.23.0+incompatible h1:rqNOXZlqrYhMVVAsQx8wuc+LaA73YcfbQ407wAykyS8= github.com/kevinburke/go-bindata v3.23.0+incompatible/go.mod h1:/pEEZ72flUW2p0yi30bslSp9YqD9pysLxunQDdb2CPM= github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= @@ -988,6 +1022,7 @@ github.com/libp2p/go-yamux/v4 v4.0.0 h1:+Y80dV2Yx/kv7Y7JKu0LECyVdMXm1VUoko+VQ9rB github.com/libp2p/go-yamux/v4 v4.0.0/go.mod h1:NWjl8ZTLOGlozrXSOZ/HlfG++39iKNnM5wwmtQP1YB4= github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM= github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4= +github.com/logrusorgru/aurora v0.0.0-20200102142835-e9ef32dff381/go.mod h1:7rIyQOR62GCctdiQpZ/zOJlFyk6y+94wXzv6RNZgaR4= github.com/logrusorgru/aurora/v4 v4.0.0 h1:sRjfPpun/63iADiSvGGjgA1cAYegEWMPCJdUpJYn9JA= github.com/logrusorgru/aurora/v4 v4.0.0/go.mod h1:lP0iIa2nrnT/qoFXcOZSrZQpJ1o6n2CUf/hyHi2Q4ZQ= github.com/lucas-clemente/quic-go v0.19.3/go.mod h1:ADXpNbTQjq1hIzCpB+y/k5iz4n4z4IwqoLb94Kh5Hu8= @@ -1009,6 +1044,8 @@ github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaO github.com/mattn/go-colorable v0.1.0/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ= github.com/mattn/go-colorable v0.1.2/go.mod 
h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= +github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= +github.com/mattn/go-colorable v0.1.7/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4= github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= @@ -1020,6 +1057,7 @@ github.com/mattn/go-isatty v0.0.5-0.20180830101745-3fb116b82035/go.mod h1:M+lRXT github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= github.com/mattn/go-isatty v0.0.9/go.mod h1:YNRxwqDuOph6SZLI9vUUz6OYw3QyUt7WiY2yME+cCiQ= +github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84= github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= github.com/mattn/go-isatty v0.0.13/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= @@ -1030,8 +1068,12 @@ github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= github.com/mattn/go-runewidth v0.0.3/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= github.com/mattn/go-runewidth v0.0.4/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= +github.com/mattn/go-runewidth v0.0.6/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= +github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= +github.com/mattn/go-runewidth v0.0.13/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= github.com/mattn/go-runewidth v0.0.14 h1:+xnbZSEeDbOIg5/mE6JF0w6n9duR1l3/WmbinWVwUuU= github.com/mattn/go-runewidth v0.0.14/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= +github.com/mattn/go-tty v0.0.3/go.mod h1:ihxohKRERHTVzN+aSVRwACLCeqIoZAWpoICkkvrWyR0= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo= github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= @@ -1170,22 +1212,29 @@ github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= github.com/olekukonko/tablewriter v0.0.1/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= github.com/olekukonko/tablewriter v0.0.2-0.20190409134802-7e037d187b0c/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= +github.com/onflow/atree v0.1.0-beta1.0.20211027184039-559ee654ece9/go.mod h1:+6x071HgCF/0v5hQcaE5qqjc2UqN5gCU8h5Mk6uqpOg= github.com/onflow/atree v0.6.0 h1:j7nQ2r8npznx4NX39zPpBYHmdy45f4xwoi+dm37Jk7c= github.com/onflow/atree v0.6.0/go.mod h1:gBHU0M05qCbv9NN0kijLWMgC47gHVNBIp4KmsVFi0tc= -github.com/onflow/cadence v0.39.12 h1:bb3UdOe7nClUcaLbxSWGLSIJKuCrivpgxhPow99ikv0= -github.com/onflow/cadence v0.39.12/go.mod h1:OIJLyVBPa339DCBQXBfGaorT4tBjQh9gSKe+ZAIyyh0= -github.com/onflow/flow-core-contracts/lib/go/contracts v1.2.3 h1:wV+gcgOY0oJK4HLZQYQoK+mm09rW1XSxf83yqJwj0n4= 
-github.com/onflow/flow-core-contracts/lib/go/contracts v1.2.3/go.mod h1:Osvy81E/+tscQM+d3kRFjktcIcZj2bmQ9ESqRQWDEx8= +github.com/onflow/cadence v0.20.1/go.mod h1:7mzUvPZUIJztIbr9eTvs+fQjWWHTF8veC+yk4ihcNIA= +github.com/onflow/cadence v0.39.14 h1:YoR3YFUga49rqzVY1xwI6I2ZDBmvwGh13jENncsleC8= +github.com/onflow/cadence v0.39.14/go.mod h1:OIJLyVBPa339DCBQXBfGaorT4tBjQh9gSKe+ZAIyyh0= +github.com/onflow/flow-core-contracts/lib/go/contracts v1.2.4-0.20230703193002-53362441b57d h1:B7PdhdUNkve5MVrekWDuQf84XsGBxNZ/D3x+QQ8XeVs= +github.com/onflow/flow-core-contracts/lib/go/contracts v1.2.4-0.20230703193002-53362441b57d/go.mod h1:xAiV/7TKhw863r6iO3CS5RnQ4F+pBY1TxD272BsILlo= github.com/onflow/flow-core-contracts/lib/go/templates v1.2.3 h1:X25A1dNajNUtE+KoV76wQ6BR6qI7G65vuuRXxDDqX7E= github.com/onflow/flow-core-contracts/lib/go/templates v1.2.3/go.mod h1:dqAUVWwg+NlOhsuBHex7bEWmsUjsiExzhe/+t4xNH6A= github.com/onflow/flow-ft/lib/go/contracts v0.7.0 h1:XEKE6qJUw3luhsYmIOteXP53gtxNxrwTohgxJXCYqBE= github.com/onflow/flow-ft/lib/go/contracts v0.7.0/go.mod h1:kTMFIySzEJJeupk+7EmXs0EJ6CBWY/MV9fv9iYQk+RU= -github.com/onflow/flow-go-sdk v0.41.6 h1:x5HhmRDvbCWXRCzHITJxOp0Komq5JJ9zphoR2u6NOCg= -github.com/onflow/flow-go-sdk v0.41.6/go.mod h1:AYypQvn6ecMONhF3M1vBOUX9b4oHKFWkkrw8bO4VEik= -github.com/onflow/flow-go/crypto v0.24.7 h1:RCLuB83At4z5wkAyUCF7MYEnPoIIOHghJaODuJyEoW0= -github.com/onflow/flow-go/crypto v0.24.7/go.mod h1:fqCzkIBBMRRkciVrvW21rECKq1oD7Q6u+bCI78lfNX0= -github.com/onflow/flow/protobuf/go/flow v0.3.2-0.20230602212908-08fc6536d391 h1:6uKg0gpLKpTZKMihrsFR0Gkq++1hykzfR1tQCKuOfw4= -github.com/onflow/flow/protobuf/go/flow v0.3.2-0.20230602212908-08fc6536d391/go.mod h1:NA2pX2nw8zuaxfKphhKsk00kWLwfd+tv8mS23YXO4Sk= +github.com/onflow/flow-go-sdk v0.24.0/go.mod h1:IoptMLPyFXWvyd9yYA6/4EmSeeozl6nJoIv4FaEMg74= +github.com/onflow/flow-go-sdk v0.41.9 h1:cyplhhhc0RnfOAan2t7I/7C9g1hVGDDLUhWj6ZHAkk4= +github.com/onflow/flow-go-sdk v0.41.9/go.mod h1:e9Q5TITCy7g08lkdQJxP8fAKBnBoC5FjALvUKr36j4I= +github.com/onflow/flow-go/crypto v0.21.3/go.mod h1:vI6V4CY3R6c4JKBxdcRiR/AnjBfL8OSD97bJc60cLuQ= +github.com/onflow/flow/protobuf/go/flow v0.3.2-0.20230628215638-83439d22e0ce h1:YQKijiQaq8SF1ayNqp3VVcwbBGXSnuHNHq4GQmVGybE= +github.com/onflow/flow/protobuf/go/flow v0.3.2-0.20230628215638-83439d22e0ce/go.mod h1:NA2pX2nw8zuaxfKphhKsk00kWLwfd+tv8mS23YXO4Sk= +github.com/onflow/flow-go/crypto v0.24.9 h1:0EQp+kSZYJepMIiSypfJVe7tzsPcb6UXOdOtsTCDhBs= +github.com/onflow/flow-go/crypto v0.24.9/go.mod h1:fqCzkIBBMRRkciVrvW21rECKq1oD7Q6u+bCI78lfNX0= +github.com/onflow/flow-nft/lib/go/contracts v1.1.0 h1:rhUDeD27jhLwOqQKI/23008CYfnqXErrJvc4EFRP2a0= +github.com/onflow/flow-nft/lib/go/contracts v1.1.0/go.mod h1:YsvzYng4htDgRB9sa9jxdwoTuuhjK8WYWXTyLkIigZY= +github.com/onflow/flow/protobuf/go/flow v0.2.2/go.mod h1:gQxYqCfkI8lpnKsmIjwtN2mV/N2PIwc1I+RUK4HPIc8= github.com/onflow/go-bitswap v0.0.0-20230703214630-6d3db958c73d h1:QcOAeEyF3iAUHv21LQ12sdcsr0yFrJGoGLyCAzYYtvI= github.com/onflow/go-bitswap v0.0.0-20230703214630-6d3db958c73d/go.mod h1:GCPpiyRoHncdqPj++zPr9ZOYBX4hpJ0pYZRYqSE8VKk= github.com/onflow/sdks v0.5.0 h1:2HCRibwqDaQ1c9oUApnkZtEAhWiNY2GTpRD5+ftdkN8= @@ -1242,6 +1291,7 @@ github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/profile v1.2.1/go.mod h1:hJw3o1OdXxsrSjjVksARp5W95eeEaEfptyVZyv6JUPA= github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg= +github.com/pkg/term v1.1.0/go.mod 
h1:E25nymQcrSllhX42Ok8MRm1+hyBdHY0dCeiKZ9jpNGw= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/polydawn/refmt v0.0.0-20201211092308-30ac6d18308e/go.mod h1:uIp+gprXxxrWSjjklXD+mN4wed/tMfjMMmN/9+JsA9o= @@ -1311,6 +1361,7 @@ github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJ github.com/rivo/uniseg v0.4.4 h1:8TfxU8dW6PdqD27gjM8MVNuicgxIjxpm4K7x4jp8sis= github.com/rivo/uniseg v0.4.4/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88= github.com/rjeczalik/notify v0.9.1/go.mod h1:rKwnCoCGeuQnwBtTSPL9Dad03Vh2n40ePRrjvIXnJho= +github.com/robertkrimen/otto v0.0.0-20170205013659-6a77b7cbc37d/go.mod h1:xvqspoSXJTIpemEonrMDFq6XzwHYYgToXWj5eRX1OtY= github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= @@ -1330,6 +1381,7 @@ github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQD github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E= +github.com/schollz/progressbar/v3 v3.8.3/go.mod h1:pWnVCjSBZsT2X3nx9HfRdnCDrpbevliMeoEVhStwHko= github.com/schollz/progressbar/v3 v3.13.1 h1:o8rySDYiQ59Mwzy2FELeHY5ZARXZTVJC7iHD6PEFUiE= github.com/schollz/progressbar/v3 v3.13.1/go.mod h1:xvrbki8kfT1fzWzBT/UZd9L6GA+jdL7HAgq2RFnO6fQ= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= @@ -1393,8 +1445,8 @@ github.com/spf13/cast v1.5.0 h1:rj3WzYc11XZaIZMPKmwP96zkFEnnAmV8s6XbB2aY32w= github.com/spf13/cast v1.5.0/go.mod h1:SpXXQ5YoyJw6s3/6cMTQuxvgRl3PCJiyaX9p6b155UU= github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= -github.com/spf13/cobra v1.6.1 h1:o94oiPyS4KD1mPy2fmcYYHHfCxLqYjJOhGsCHFZtEzA= -github.com/spf13/cobra v1.6.1/go.mod h1:IOw/AERYS7UzyrGinqmz6HLUo219MORXGxhbaJUqzrY= +github.com/spf13/cobra v1.7.0 h1:hyqWnYt1ZQShIddO5kBpj3vu05/++x6tJ6dg8EC572I= +github.com/spf13/cobra v1.7.0/go.mod h1:uLxZILRyS/50WlhOIKD7W6V5bgeIt+4sICxh6uRMrb0= github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk= github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo= @@ -1432,6 +1484,7 @@ github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcU github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/subosito/gotenv v1.4.2 h1:X1TuBLAMDFbaTAChgCBLu3DU3UPyELpnF2jjJ2cz/S8= github.com/subosito/gotenv v1.4.2/go.mod h1:ayKnFf/c6rvx/2iiLrJUk1e6plDbT3edrFNGqEflhK0= +github.com/supranational/blst v0.3.4/go.mod h1:jZJtfjgudtNl4en1tzwPIV3KjUnQUvG3/j+w+fVonLw= github.com/supranational/blst v0.3.10 h1:CMciDZ/h4pXDDXQASe8ZGTNKUiVNxVVA5hpci2Uuhuk= github.com/syndtr/goleveldb v1.0.0/go.mod 
h1:ZVVdQEZoIme9iO1Ch2Jdy24qqXrMMOU6lpPAyBWyWuQ= github.com/syndtr/goleveldb v1.0.1-0.20190923125748-758128399b1d/go.mod h1:9OrXJhf154huy1nPWmuSrkgjPUtUNhA+Zmy+6AESzuA= @@ -1495,8 +1548,10 @@ github.com/yusufpapurcu/wmi v1.2.2 h1:KBNDSne4vP5mbSWnJbO+51IMOXJB67QiYCSBrubbPR github.com/yusufpapurcu/wmi v1.2.2/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= github.com/zeebo/assert v1.1.0 h1:hU1L1vLTHsnO8x8c9KAR5GmM5QscxHg5RNU5z5qbUWY= github.com/zeebo/assert v1.1.0/go.mod h1:Pq9JiuJQpG8JLJdtkwrJESF0Foym2/D9XMU5ciN/wJ0= +github.com/zeebo/blake3 v0.2.0/go.mod h1:G9pM4qQwjRzF1/v7+vabMj/c5mWpGZ2Wzo3Eb4z0pb4= github.com/zeebo/blake3 v0.2.3 h1:TFoLXsjeXqRNFxSbk35Dk4YtszE/MQQGK10BH4ptoTg= github.com/zeebo/blake3 v0.2.3/go.mod h1:mjJjZpnsyIVtVgTOSpJ9vmRE4wgDeyt2HU3qXvvKCaQ= +github.com/zeebo/pcg v1.0.0/go.mod h1:09F0S9iiKrwn9rlI5yjLkmrug154/YRW6KnnXVDM/l4= github.com/zeebo/pcg v1.0.1 h1:lyqfGeWiv4ahac6ttHs+I5hwtH/+1mrhlCtVNQM2kHo= github.com/zeebo/pcg v1.0.1/go.mod h1:09F0S9iiKrwn9rlI5yjLkmrug154/YRW6KnnXVDM/l4= go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= @@ -1544,6 +1599,7 @@ go.uber.org/dig v1.17.0/go.mod h1:rTxpf7l5I0eBTlE6/9RL+lDybC7WFwY2QH55ZSjy1mU= go.uber.org/fx v1.19.2 h1:SyFgYQFr1Wl0AYstE8vyYIzP4bFz2URrScjwC4cwUvY= go.uber.org/fx v1.19.2/go.mod h1:43G1VcqSzbIv77y00p1DRAsyZS8WdzuYdhZXmEUkMyQ= go.uber.org/goleak v1.0.0/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= +go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= go.uber.org/goleak v1.1.11-0.20210813005559-691160354723/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= go.uber.org/goleak v1.2.1 h1:NBol2c7O1ZokfZ0LEU9K6Whx/KnwvepVetCUhtKja4A= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= @@ -1583,6 +1639,7 @@ golang.org/x/crypto v0.0.0-20190618222545-ea8f1a30c443/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200115085410-6d4e4cb37c7d/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200117160349-530e935923ad/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200221231518-2aa609cf4a9d/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200311171314-f7b00557c8c4/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200423211502-4bdfaf469ed5/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= @@ -1595,11 +1652,15 @@ golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/crypto v0.0.0-20210506145944-38f3c27a63bf/go.mod h1:P+XmwS30IXTQdn5tA2iutPOUgjI07+tq3H3K9MVA1s8= golang.org/x/crypto v0.0.0-20210513164829-c07d793c2f9a/go.mod h1:P+XmwS30IXTQdn5tA2iutPOUgjI07+tq3H3K9MVA1s8= +golang.org/x/crypto v0.0.0-20210817164053-32db794688a5/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20211108221036-ceb1ce70b4fa/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.10.0 h1:LKqV2xt9+kDzSTfOhx4FrkEBcMrAgHSYgzywV9zcGmM= golang.org/x/crypto v0.10.0/go.mod 
h1:o4eNf7Ede1fv+hwOwZsTHl9EsPFO6q6ZvYR8vYfY45I= +golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190125153040-c74c464bbbf2/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= @@ -1612,6 +1673,7 @@ golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMk golang.org/x/exp v0.0.0-20200331195152-e8c3332aa8e5/go.mod h1:4M0jN8W1tt0AVLNr8HDosyJCDCDuyL9N9+3m7wDWgKw= golang.org/x/exp v0.0.0-20230321023759-10a507213a29 h1:ooxPy7fPvB4kwsA2h+iBNHkAbp/4JxTSwCmvdjEYmug= golang.org/x/exp v0.0.0-20230321023759-10a507213a29/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc= +golang.org/x/image v0.0.0-20180708004352-c73c2afc3b81/go.mod h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= @@ -1626,6 +1688,7 @@ golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRu golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= @@ -1694,6 +1757,7 @@ golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= golang.org/x/net v0.0.0-20210423184538-5f58ad60dda6/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= +golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.10.0 h1:X2//UzNDwYmtCLn7To6G58Wr6f5ahEAQgKNzv9Y951M= golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= @@ -1708,9 +1772,15 @@ golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 
v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.6.0 h1:Lh8GPgSKBfWSwFvtuWOfeI3aAAnbXTSutYxJiOJFgIw= -golang.org/x/oauth2 v0.6.0/go.mod h1:ycmewcwgD4Rpr3eZJLSB4Kyyljb3qDh40vJ8STE5HKw= +golang.org/x/oauth2 v0.7.0 h1:qe6s0zUXlPX80/dITx3440hWZ7GwMwgDDyrSGTPJG/g= +golang.org/x/oauth2 v0.7.0/go.mod h1:hPLQkd9LyjfXTiRohC/41GhcFqxisoUQ99sCUOHO9x4= golang.org/x/perf v0.0.0-20180704124530-6e6d33e29852/go.mod h1:JLpeXjPJfIyPr5TlbXLkXWLhP8nz10XfvxElABhCtcw= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -1752,6 +1822,7 @@ golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190610200419-93c9922d18ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190712062909-fae7ac547cb7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1760,12 +1831,14 @@ golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191220142924-d4481acd189f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 
golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200107162124-548cf772de50/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1787,28 +1860,41 @@ golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200909081042-eff7692f9009/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200918174421-af09f7315aff/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201014080544-cc95f250f6bc/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210303074136-134d130e1a04/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210309074719-68d13333faf2/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210317225723-c4fcb01b228e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210426080607-c94f62235c83/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys 
v0.0.0-20210603125802-9665404d3644/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210816074244-15123e1e1f71/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210910150752-751e447fb3d0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210917161153-d61c044b1678/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211025112917-711f33c9992c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -1821,6 +1907,7 @@ golang.org/x/sys v0.9.0 h1:KS/R3tvhPqvJvwcKfnBHJwwthS11LRhmM5D59eEXa0s= golang.org/x/sys v0.9.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U= golang.org/x/term v0.9.0 h1:GRRCnKYhdQrD8kfRAdQ6Zcw1P0OcELxGLKJvtjVMZ28= golang.org/x/term v0.9.0/go.mod h1:M6DEAAIenWoTxdKrOltXcmDY3rSplQUkrvaDU5FcQyo= @@ -1841,12 +1928,14 @@ golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxb golang.org/x/time v0.1.0 h1:xYY+Bajn2a7VBmTM5GikTmnK8ZuX8YgnQCqZpbBNtmA= golang.org/x/time v0.1.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181030000716-a0a13e073c7b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181130052023-1c3d964395ce/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190206041539-40960b6deb8e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod 
h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= @@ -1893,6 +1982,7 @@ golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roY golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200828161849-5deb26317202/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= @@ -1902,6 +1992,9 @@ golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4f golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.9.1 h1:8WMNJAz3zrtPmnYC7ISf5dEn3MT0gY7jBJfw27yrrLo= golang.org/x/tools v0.9.1/go.mod h1:owI94Op576fPu3cIGQeHs3joujW/2Oc6MtlxbF5dfNc= @@ -1911,8 +2004,12 @@ golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 h1:H2TDz8ibqkAF6YGhCdN3jS9O0/s90v0rJh3X/OLHEUk= golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= +gonum.org/v1/gonum v0.0.0-20180816165407-929014505bf4/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo= +gonum.org/v1/gonum v0.6.1/go.mod h1:9mxDZsDKxgMAuccQkewq682L+0eCu4dCN2yonUJTCLU= gonum.org/v1/gonum v0.13.0 h1:a0T3bh+7fhRyqeNbiC3qVHYmkiQgit3wnNan/2c0HMM= gonum.org/v1/gonum v0.13.0/go.mod h1:/WPYRckkfWrhWefxyYTfrTtQR0KH4iyHNuzxqXAKyAU= +gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw= +gonum.org/v1/plot v0.0.0-20190515093506-e2840ee46a6b/go.mod h1:Wt8AAjI+ypCyYX3nZBvf6cAIx93T+c/OS2HFAYskSZc= google.golang.org/api v0.0.0-20180910000450-7ca32eb868bf/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= google.golang.org/api v0.0.0-20181030000543-1d582fd0359e/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= google.golang.org/api v0.1.0/go.mod h1:UGEZY7KEX120AnNLIHFMKIo4obdJhkp2tPbaPlQx13Y= @@ -1936,6 +2033,17 @@ google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz513 google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= google.golang.org/api v0.36.0/go.mod 
h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE= google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= +google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU= +google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94= +google.golang.org/api v0.47.0/go.mod h1:Wbvgpq1HddcWVtzsVLyfLp8lDg6AA241LmgIL59tHXo= +google.golang.org/api v0.48.0/go.mod h1:71Pr1vy+TAZRPkPs/xlCf5SsU8WjuAWv1Pfjbtukyy4= +google.golang.org/api v0.50.0/go.mod h1:4bNT5pAuq5ji4SRZm+5QIkjny9JAyVD/3gaSihNefaw= +google.golang.org/api v0.51.0/go.mod h1:t4HdrdoNgyN5cbEfm7Lum0lcLDLiise1F8qDKX00sOU= +google.golang.org/api v0.54.0/go.mod h1:7C4bFFOvVDGXjfDTAsgGwDgAxRDeQ4X8NvUedIt6z3k= +google.golang.org/api v0.55.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE= +google.golang.org/api v0.56.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE= +google.golang.org/api v0.57.0/go.mod h1:dVPlbZyBo2/OjBpmvNdpn2GRm6rPy75jyU7bmhdrMgI= +google.golang.org/api v0.58.0/go.mod h1:cAbP2FsxoGVNwtgNAmmn3y5G1TWAiVYRmg4yku3lv+E= google.golang.org/api v0.114.0 h1:1xQPji6cO2E2vLiI+C/XiFAnsn1WV3mjaEwGLhi3grE= google.golang.org/api v0.114.0/go.mod h1:ifYI2ZsFK6/uGddGfAD5BMxlnkBqCmqHSDUVi45N5Yg= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= @@ -1991,10 +2099,34 @@ google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6D google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210126160654-44e461bb6506/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210226172003-ab064af71705/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= +google.golang.org/genproto v0.0.0-20210513213006-bf773b8c8384/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A= +google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto v0.0.0-20210604141403-392c879c8b08/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto v0.0.0-20210608205507-b6d2f5bf0d7d/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto v0.0.0-20210624195500-8bfb893ecb84/go.mod h1:SzzZ/N+nwJDaO1kznhnlzqS8ocJICar6hYhVyhi++24= +google.golang.org/genproto v0.0.0-20210713002101-d411969a0d9a/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= +google.golang.org/genproto v0.0.0-20210716133855-ce7ef5c701ea/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= +google.golang.org/genproto v0.0.0-20210728212813-7823e685a01f/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48= +google.golang.org/genproto v0.0.0-20210805201207-89edb61ffb67/go.mod 
h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48= +google.golang.org/genproto v0.0.0-20210813162853-db860fec028c/go.mod h1:cFeNkxwySK631ADgubI+/XFU/xp8FD5KIVV4rj8UC5w= +google.golang.org/genproto v0.0.0-20210821163610-241b8fcbd6c8/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210828152312-66f60bf46e71/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210831024726-fe130286e0e2/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210903162649-d08c68adba83/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210909211513-a8c4777a87af/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210917145530-b395a37504d4/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210921142501-181ce0d877f6/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20210924002016-3dee208752a0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211007155348-82e027067bd4/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4 h1:DdoeryqhaXp1LtT/emMP1BRJPHHKFi5akj/nbx/zNTA= -google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4/go.mod h1:NWraEVixdDnqcqQ30jipen1STv2r/n24Wb7twVTGR4s= +google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 h1:KpwkzHKEF7B9Zxg18WzOa7djJ+Ha5DzthMyZYQfEn2A= +google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1/go.mod h1:nKE/iIaLqn2bQwXBg8f1g2Ylh6r5MN5CmZvuzZCgsCU= google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.16.0/go.mod h1:0JHn/cJsOMiMfNA9+DeHDlAU7KAAB5GDlYFpa9MZMio= google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= @@ -2022,10 +2154,17 @@ google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +google.golang.org/grpc v1.37.1/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +google.golang.org/grpc v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= +google.golang.org/grpc v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= -google.golang.org/grpc v1.55.0 h1:3Oj82/tFSCeUrRTg/5E/7d/W5A1tj6Ky1ABAuZuv5ag= -google.golang.org/grpc v1.55.0/go.mod h1:iYEXKGkEBhg1PjZQvoYEVPTDkHo1/bjTnfwTeGONTY8= +google.golang.org/grpc v1.56.1 h1:z0dNfjIl0VpaZ9iSVjA6daGatAYwPGstTjt5vkRMFkQ= +google.golang.org/grpc v1.56.1/go.mod h1:I9bI3vqKfayGqPUAwGdOSu7kt6oIJLixfffKrpXqQ9s= +google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod 
h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.2.0 h1:TLkBREm4nIsEcexnCjgQd5GQWaHcqMzwQV0TX9pq8S0= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.2.0/go.mod h1:DNq5QpG7LJqD2AamLZ7zvKE0DEpVl2BSEVjFycAAjRY= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= @@ -2059,8 +2198,10 @@ gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce/go.mod h1:5AcXVHNjg+BDxry382+8OKon8SEWiKktQR07RKPsv1c= +gopkg.in/olebedev/go-duktape.v3 v3.0.0-20190213234257-ec84240a7772/go.mod h1:uAJfkITjFhyEEuUfm7bsmCZRbW5WRq8s9EY8HZ6hCns= gopkg.in/olebedev/go-duktape.v3 v3.0.0-20200316214253-d7b0ff38cac9/go.mod h1:uAJfkITjFhyEEuUfm7bsmCZRbW5WRq8s9EY8HZ6hCns= gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= +gopkg.in/sourcemap.v1 v1.0.5/go.mod h1:2RlvNNSMglmRrcvhfuzp4hQHwOtjxlbjX7UPY/GXb78= gopkg.in/src-d/go-cli.v0 v0.0.0-20181105080154-d492247bbc0d/go.mod h1:z+K8VcOYVYcSwSjGebuDL6176A1XskgbtNl64NSg+n8= gopkg.in/src-d/go-log.v1 v1.0.1/go.mod h1:GN34hKP0g305ysm2/hctJ0Y8nWP3zxXXJ8GFabTyABE= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= @@ -2075,7 +2216,6 @@ gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= -gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= @@ -2097,7 +2237,9 @@ nhooyr.io/websocket v1.8.6/go.mod h1:B70DZP8IakI65RVQ51MsWP/8jndNma26DVA/nFSCgW0 nhooyr.io/websocket v1.8.7 h1:usjR2uOr/zjjkVMy0lW+PPohFok7PCow5sDjLgX4P4g= nhooyr.io/websocket v1.8.7/go.mod h1:B70DZP8IakI65RVQ51MsWP/8jndNma26DVA/nFSCgW0= pgregory.net/rapid v0.4.7 h1:MTNRktPuv5FNqOO151TM9mDTa+XHcX6ypYeISDVD14g= +pgregory.net/rapid v0.4.7/go.mod h1:UYpPVyjFHzYBGHIxLFoupi8vwk6rXNzRY9OMvVxFIOU= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= +rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= diff --git a/insecure/integration/functional/test/gossipsub/rpc_inspector/utils.go b/insecure/integration/functional/test/gossipsub/rpc_inspector/utils.go index 164634236bc..2307c57f0ab 100644 --- a/insecure/integration/functional/test/gossipsub/rpc_inspector/utils.go +++ b/insecure/integration/functional/test/gossipsub/rpc_inspector/utils.go @@ -20,9 +20,9 @@ func startNodesAndEnsureConnected(t *testing.T, ctx irrecoverable.SignalerContex // this is vital as the spammer will circumvent the normal pubsub subscription mechanism and send iHAVE messages directly to the victim. 
// without a prior connection established, directly spamming pubsub messages may cause a race condition in the pubsub implementation. p2ptest.TryConnectionAndEnsureConnected(t, ctx, nodes) - p2ptest.EnsurePubsubMessageExchange(t, ctx, nodes, func() (interface{}, channels.Topic) { - blockTopic := channels.TopicFromChannel(channels.PushBlocks, sporkID) - return unittest.ProposalFixture(), blockTopic + blockTopic := channels.TopicFromChannel(channels.PushBlocks, sporkID) + p2ptest.EnsurePubsubMessageExchange(t, ctx, nodes, blockTopic, 1, func() interface{} { + return unittest.ProposalFixture() }) } diff --git a/insecure/integration/functional/test/gossipsub/rpc_inspector/validation_inspector_test.go b/insecure/integration/functional/test/gossipsub/rpc_inspector/validation_inspector_test.go index 5917ceee31e..3dd873d0e7c 100644 --- a/insecure/integration/functional/test/gossipsub/rpc_inspector/validation_inspector_test.go +++ b/insecure/integration/functional/test/gossipsub/rpc_inspector/validation_inspector_test.go @@ -1145,7 +1145,7 @@ func TestGossipSubSpamMitigationIntegration(t *testing.T) { t.Name(), idProvider, p2ptest.WithRole(flow.RoleConsensus), - p2ptest.WithPeerScoringEnabled(idProvider), + p2ptest.EnablePeerScoringWithOverride(p2p.PeerScoringConfigNoOverride), ) ids := flow.IdentityList{&victimId, &spammer.SpammerId} @@ -1196,9 +1196,9 @@ func TestGossipSubSpamMitigationIntegration(t *testing.T) { p2ptest.LetNodesDiscoverEachOther(t, ctx, nodes, ids) // as nodes started fresh and no spamming has happened yet, the nodes should be able to exchange messages on the topic. - p2ptest.EnsurePubsubMessageExchange(t, ctx, nodes, func() (interface{}, channels.Topic) { - blockTopic := channels.TopicFromChannel(channels.PushBlocks, sporkID) - return unittest.ProposalFixture(), blockTopic + blockTopic := channels.TopicFromChannel(channels.PushBlocks, sporkID) + p2ptest.EnsurePubsubMessageExchange(t, ctx, nodes, blockTopic, 1, func() interface{} { + return unittest.ProposalFixture() }) // prepares spam graft and prune messages with different strategies. @@ -1228,9 +1228,8 @@ func TestGossipSubSpamMitigationIntegration(t *testing.T) { // now we expect the detection and mitigation to kick in and the victim node to disconnect from the spammer node. // so the spammer and victim nodes should not be able to exchange messages on the topic. 
- p2ptest.EnsureNoPubsubExchangeBetweenGroups(t, ctx, []p2p.LibP2PNode{victimNode}, []p2p.LibP2PNode{spammer.SpammerNode}, func() (interface{}, channels.Topic) { - blockTopic := channels.TopicFromChannel(channels.PushBlocks, sporkID) - return unittest.ProposalFixture(), blockTopic + p2ptest.EnsureNoPubsubExchangeBetweenGroups(t, ctx, []p2p.LibP2PNode{victimNode}, []p2p.LibP2PNode{spammer.SpammerNode}, blockTopic, 1, func() interface{} { + return unittest.ProposalFixture() }) } diff --git a/insecure/integration/functional/test/gossipsub/scoring/ihave_spam_test.go b/insecure/integration/functional/test/gossipsub/scoring/ihave_spam_test.go new file mode 100644 index 00000000000..3342b3ac4dc --- /dev/null +++ b/insecure/integration/functional/test/gossipsub/scoring/ihave_spam_test.go @@ -0,0 +1,347 @@ +package scoring + +import ( + "context" + "fmt" + "testing" + "time" + + pubsub "github.com/libp2p/go-libp2p-pubsub" + "github.com/libp2p/go-libp2p/core/peer" + "github.com/stretchr/testify/require" + corrupt "github.com/yhassanzadeh13/go-libp2p-pubsub" + + "github.com/onflow/flow-go/insecure/corruptlibp2p" + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/module/irrecoverable" + "github.com/onflow/flow-go/module/mock" + "github.com/onflow/flow-go/network/channels" + "github.com/onflow/flow-go/network/p2p" + "github.com/onflow/flow-go/network/p2p/scoring" + p2ptest "github.com/onflow/flow-go/network/p2p/test" + "github.com/onflow/flow-go/utils/unittest" +) + +// TestGossipSubIHaveBrokenPromises_Below_Threshold tests that as long as the spammer stays below the ihave spam thresholds, it is not caught and +// penalized by the victim node. +// The thresholds are: +// Maximum messages that include iHave per heartbeat is: 10 (gossipsub parameter). +// Threshold for broken promises of iHave per heartbeat is: 10 (Flow-specific) parameter. It means that GossipSub samples one iHave id out of the +// entire RPC and if that iHave id is not eventually delivered within 3 seconds (gossipsub parameter), then the promise is considered broken. We set +// this threshold to 10 meaning that the first 10 broken promises are ignored. This is to allow for some network churn. +// Also, per hearbeat (i.e., decay interval), the spammer is allowed to send at most 5000 ihave messages (gossip sub parameter) on aggregate, and +// excess messages are dropped (without being counted as broken promises). +func TestGossipSubIHaveBrokenPromises_Below_Threshold(t *testing.T) { + t.Parallel() + + role := flow.RoleConsensus + sporkId := unittest.IdentifierFixture() + blockTopic := channels.TopicFromChannel(channels.PushBlocks, sporkId) + + receivedIWants := unittest.NewProtectedMap[string, struct{}]() + idProvider := mock.NewIdentityProvider(t) + spammer := corruptlibp2p.NewGossipSubRouterSpammerWithRpcInspector(t, sporkId, role, idProvider, func(id peer.ID, rpc *corrupt.RPC) error { + // override rpc inspector of the spammer node to keep track of the iwants it has received. + if rpc.RPC.Control == nil || rpc.RPC.Control.Iwant == nil { + return nil + } + for _, iwant := range rpc.RPC.Control.Iwant { + for _, msgId := range iwant.MessageIDs { + receivedIWants.Add(msgId, struct{}{}) + } + } + return nil + }) + + ctx, cancel := context.WithCancel(context.Background()) + signalerCtx := irrecoverable.NewMockSignalerContext(t, ctx) + // we override some of the default scoring parameters in order to speed up the test in a time-efficient manner. 
+ blockTopicOverrideParams := scoring.DefaultTopicScoreParams() + blockTopicOverrideParams.MeshMessageDeliveriesActivation = 1 * time.Second // we start observing the mesh message deliveries after 1 second of the node startup. + // we disable invalid message delivery parameters, as the way we implement spammer, when it spams ihave messages, it does not sign them. Hence, without decaying the invalid message deliveries, + // the node would be penalized for invalid message delivery way sooner than it can mount an ihave broken-promises spam attack. + blockTopicOverrideParams.InvalidMessageDeliveriesWeight = 0.0 + blockTopicOverrideParams.InvalidMessageDeliveriesDecay = 0.0 + victimNode, victimIdentity := p2ptest.NodeFixture( + t, + sporkId, + t.Name(), + idProvider, + p2ptest.WithRole(role), + p2ptest.WithPeerScoreTracerInterval(1*time.Second), + p2ptest.EnablePeerScoringWithOverride(&p2p.PeerScoringConfigOverride{ + TopicScoreParams: map[channels.Topic]*pubsub.TopicScoreParams{ + blockTopic: blockTopicOverrideParams, + }, + DecayInterval: 1 * time.Second, // we override the decay interval to 1 second so that the score is updated within 1 second intervals. + }), + ) + + idProvider.On("ByPeerID", victimNode.Host().ID()).Return(&victimIdentity, true).Maybe() + idProvider.On("ByPeerID", spammer.SpammerNode.Host().ID()).Return(&spammer.SpammerId, true).Maybe() + ids := flow.IdentityList{&spammer.SpammerId, &victimIdentity} + nodes := []p2p.LibP2PNode{spammer.SpammerNode, victimNode} + + p2ptest.StartNodes(t, signalerCtx, nodes, 100*time.Millisecond) + defer p2ptest.StopNodes(t, nodes, cancel, 2*time.Second) + + p2ptest.LetNodesDiscoverEachOther(t, ctx, nodes, ids) + p2ptest.TryConnectionAndEnsureConnected(t, ctx, nodes) + + // checks end-to-end message delivery works on GossipSub + p2ptest.EnsurePubsubMessageExchange(t, ctx, nodes, blockTopic, 1, func() interface{} { + return unittest.ProposalFixture() + }) + + // creates 10 RPCs each with 10 iHave messages, each iHave message has 50 message ids, hence overall, we have 5000 iHave message ids. + spamIHaveBrokenPromise(t, spammer, blockTopic.String(), receivedIWants, victimNode) + + // wait till victim counts the spam iHaves as broken promises (one per RPC for a total of 10). + initialBehavioralPenalty := float64(0) // keeps track of the initial behavioral penalty of the spammer node for decay testing. + require.Eventually(t, func() bool { + behavioralPenalty, ok := victimNode.PeerScoreExposer().GetBehaviourPenalty(spammer.SpammerNode.Host().ID()) + if !ok { + return false + } + if behavioralPenalty < 9 { // ideally it must be 10 (one per RPC), but we give it a buffer of 1 to account for decays and floating point errors. + return false + } + + initialBehavioralPenalty = behavioralPenalty + return true + // Note: we have to wait at least 3 seconds for an iHave to be considered as broken promise (gossipsub parameters), we set it to 10 + // seconds to be on the safe side. + }, 10*time.Second, 100*time.Millisecond) + + spammerScore, ok := victimNode.PeerScoreExposer().GetScore(spammer.SpammerNode.Host().ID()) + require.True(t, ok, "sanity check failed, we should have a score for the spammer node") + // since spammer is not yet considered to be penalized, its score must be greater than the gossipsub health thresholds. 
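As context for the three threshold assertions that follow: gossipsub applies its peer-score thresholds in order of severity. A score below the gossip threshold stops iHave/iWant gossip with the peer, below the publish threshold the node's own published messages are no longer forwarded to that peer, and below the graylist threshold all RPCs from the peer are dropped. A minimal, self-contained sketch of that classification is below; the threshold values used are illustrative placeholders, not Flow's DefaultGossipThreshold / DefaultPublishThreshold / DefaultGraylistThreshold.

package main

import "fmt"

// classifyPeer is an illustrative sketch of how gossipsub-style score thresholds
// gate a peer's participation. Thresholds are negative and ordered
// graylist < publish < gossip < 0; the values passed in main are placeholders.
func classifyPeer(score, gossipThreshold, publishThreshold, graylistThreshold float64) string {
	switch {
	case score < graylistThreshold:
		return "graylisted: all RPCs to/from this peer are dropped"
	case score < publishThreshold:
		return "publish-suppressed: self-published messages are not sent to this peer"
	case score < gossipThreshold:
		return "gossip-suppressed: no iHave/iWant gossip is exchanged with this peer"
	default:
		return "healthy: full mesh participation"
	}
}

func main() {
	fmt.Println(classifyPeer(-500, -100, -1000, -10000)) // gossip-suppressed
}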
+ require.Greaterf(t, spammerScore, scoring.DefaultGossipThreshold, "sanity check failed, the score of the spammer node must be greater than gossip threshold: %f, actual: %f", scoring.DefaultGossipThreshold, spammerScore) + require.Greaterf(t, spammerScore, scoring.DefaultPublishThreshold, "sanity check failed, the score of the spammer node must be greater than publish threshold: %f, actual: %f", scoring.DefaultPublishThreshold, spammerScore) + require.Greaterf(t, spammerScore, scoring.DefaultGraylistThreshold, "sanity check failed, the score of the spammer node must be greater than graylist threshold: %f, actual: %f", scoring.DefaultGraylistThreshold, spammerScore) + + // eventually, after a heartbeat the spammer behavioral counter must be decayed + require.Eventually(t, func() bool { + behavioralPenalty, ok := victimNode.PeerScoreExposer().GetBehaviourPenalty(spammer.SpammerNode.Host().ID()) + if !ok { + return false + } + if behavioralPenalty >= initialBehavioralPenalty { // after a heartbeat the spammer behavioral counter must be decayed. + return false + } + + return true + }, 2*time.Second, 100*time.Millisecond, "sanity check failed, the spammer behavioral counter must be decayed after a heartbeat") + + // since spammer stays below the threshold, it should be able to exchange messages with the victim node over pubsub. + p2ptest.EnsurePubsubMessageExchange(t, ctx, nodes, blockTopic, 1, func() interface{} { + return unittest.ProposalFixture() + }) +} + +// TestGossipSubIHaveBrokenPromises_Above_Threshold tests that a continuous stream of spam iHave broken promises will +// eventually cause the spammer node to be graylisted (i.e., no incoming RPCs from the spammer node will be accepted, and +// no outgoing RPCs to the spammer node will be sent). +// The test performs 3 rounds of attacks: each round with 10 RPCs, each RPC with 10 iHave messages, each iHave message with 50 message ids, hence overall, we have 5000 iHave message ids. +// Note that based on GossipSub parameters 5000 iHave is the most one can send within one decay interval. +// First round of attack makes spammers broken promises still below the threshold of 10 RPCs (broken promises are counted per RPC), hence no degradation of the spammers score. +// Second round of attack makes spammers broken promises above the threshold of 10 RPCs, hence a degradation of the spammers score. +// Third round of attack makes spammers broken promises to around 20 RPCs above the threshold, which causes the graylisting of the spammer node. +func TestGossipSubIHaveBrokenPromises_Above_Threshold(t *testing.T) { + t.Parallel() + + role := flow.RoleConsensus + sporkId := unittest.IdentifierFixture() + blockTopic := channels.TopicFromChannel(channels.PushBlocks, sporkId) + + receivedIWants := unittest.NewProtectedMap[string, struct{}]() + idProvider := mock.NewIdentityProvider(t) + spammer := corruptlibp2p.NewGossipSubRouterSpammerWithRpcInspector(t, sporkId, role, idProvider, func(id peer.ID, rpc *corrupt.RPC) error { + // override rpc inspector of the spammer node to keep track of the iwants it has received. 
+ if rpc.RPC.Control == nil || rpc.RPC.Control.Iwant == nil { + return nil + } + for _, iwant := range rpc.RPC.Control.Iwant { + for _, msgId := range iwant.MessageIDs { + receivedIWants.Add(msgId, struct{}{}) + } + } + return nil + }) + + ctx, cancel := context.WithCancel(context.Background()) + signalerCtx := irrecoverable.NewMockSignalerContext(t, ctx) + // we override some of the default scoring parameters in order to speed up the test in a time-efficient manner. + blockTopicOverrideParams := scoring.DefaultTopicScoreParams() + blockTopicOverrideParams.MeshMessageDeliveriesActivation = 1 * time.Second // we start observing the mesh message deliveries after 1 second of the node startup. + // we disable invalid message delivery parameters, as the way we implement spammer, when it spams ihave messages, it does not sign them. Hence, without decaying the invalid message deliveries, + // the node would be penalized for invalid message delivery way sooner than it can mount an ihave broken-promises spam attack. + blockTopicOverrideParams.InvalidMessageDeliveriesWeight = 0.0 + blockTopicOverrideParams.InvalidMessageDeliveriesDecay = 0.0 + victimNode, victimIdentity := p2ptest.NodeFixture( + t, + sporkId, + t.Name(), + idProvider, + p2ptest.WithRole(role), + p2ptest.WithPeerScoreTracerInterval(1*time.Second), + p2ptest.EnablePeerScoringWithOverride(&p2p.PeerScoringConfigOverride{ + TopicScoreParams: map[channels.Topic]*pubsub.TopicScoreParams{ + blockTopic: blockTopicOverrideParams, + }, + DecayInterval: 1 * time.Second, // we override the decay interval to 1 second so that the score is updated within 1 second intervals. + }), + ) + + idProvider.On("ByPeerID", victimNode.Host().ID()).Return(&victimIdentity, true).Maybe() + idProvider.On("ByPeerID", spammer.SpammerNode.Host().ID()).Return(&spammer.SpammerId, true).Maybe() + ids := flow.IdentityList{&spammer.SpammerId, &victimIdentity} + nodes := []p2p.LibP2PNode{spammer.SpammerNode, victimNode} + + p2ptest.StartNodes(t, signalerCtx, nodes, 100*time.Millisecond) + defer p2ptest.StopNodes(t, nodes, cancel, 2*time.Second) + + p2ptest.LetNodesDiscoverEachOther(t, ctx, nodes, ids) + p2ptest.TryConnectionAndEnsureConnected(t, ctx, nodes) + + // checks end-to-end message delivery works on GossipSub + p2ptest.EnsurePubsubMessageExchange(t, ctx, nodes, blockTopic, 1, func() interface{} { + return unittest.ProposalFixture() + }) + + initScore, ok := victimNode.PeerScoreExposer().GetScore(spammer.SpammerNode.Host().ID()) + require.True(t, ok, "score for spammer node must be present") + + // FIRST ROUND OF ATTACK: spammer sends 10 RPCs to the victim node, each containing 500 iHave messages. + spamIHaveBrokenPromise(t, spammer, blockTopic.String(), receivedIWants, victimNode) + + // wait till victim counts the spam iHaves as broken promises for the second round of attack (one per RPC for a total of 10). + require.Eventually(t, func() bool { + behavioralPenalty, ok := victimNode.PeerScoreExposer().GetBehaviourPenalty(spammer.SpammerNode.Host().ID()) + if !ok { + return false + } + // ideally it must be 10 (one per RPC), but we give it a buffer of 1 to account for decays and floating point errors. + // note that we intentionally override the decay speed to be 60-times faster in this test. + if behavioralPenalty < 9 { + return false + } + + return true + // Note: we have to wait at least 3 seconds for an iHave to be considered as broken promise (gossipsub parameters), we set it to 10 + // seconds to be on the safe side. 
+ }, 10*time.Second, 100*time.Millisecond) + + scoreAfterFirstRound, ok := victimNode.PeerScoreExposer().GetScore(spammer.SpammerNode.Host().ID()) + require.True(t, ok, "score for spammer node must be present") + // spammer score after first round must not be decreased severely, we account for 10% drop due to under-performing + // (on sending fresh new messages since that is not part of the test). + require.Greater(t, scoreAfterFirstRound, 0.9*initScore) + + // SECOND ROUND OF ATTACK: spammer sends 10 RPCs to the victim node, each containing 500 iHave messages. + spamIHaveBrokenPromise(t, spammer, blockTopic.String(), receivedIWants, victimNode) + + // wait till victim counts the spam iHaves as broken promises for the second round of attack (one per RPC for a total of 10). + require.Eventually(t, func() bool { + behavioralPenalty, ok := victimNode.PeerScoreExposer().GetBehaviourPenalty(spammer.SpammerNode.Host().ID()) + if !ok { + return false + } + + // ideally we should have 20 (10 from the first round, 10 from the second round), but we give it a buffer of 2 to account for decays and floating point errors. + // note that we intentionally override the decay speed to be 60-times faster in this test. + if behavioralPenalty < 18 { + return false + } + + return true + // Note: we have to wait at least 3 seconds for an iHave to be considered as broken promise (gossipsub parameters), we set it to 10 + // seconds to be on the safe side. + }, 10*time.Second, 100*time.Millisecond) + + spammerScore, ok := victimNode.PeerScoreExposer().GetScore(spammer.SpammerNode.Host().ID()) + require.True(t, ok, "sanity check failed, we should have a score for the spammer node") + // with the second round of the attack, the spammer is about 10 broken promises above the threshold (total ~20 broken promises, but the first 10 are not counted). + // we expect the score to be dropped to initScore - 10 * 10 * 0.01 * scoring.MaxAppSpecificReward, however, instead of 10, we consider 8 about the threshold, to account for decays. + require.LessOrEqual(t, spammerScore, initScore-8*8*0.01*scoring.MaxAppSpecificReward, "sanity check failed, the score of the spammer node must be less than the initial score minus 8 * 8 * 0.01 * scoring.MaxAppSpecificReward: %f, actual: %f", initScore-10*10*10-2*scoring.MaxAppSpecificReward, spammerScore) + require.Greaterf(t, spammerScore, scoring.DefaultGossipThreshold, "sanity check failed, the score of the spammer node must be greater than gossip threshold: %f, actual: %f", scoring.DefaultGossipThreshold, spammerScore) + require.Greaterf(t, spammerScore, scoring.DefaultPublishThreshold, "sanity check failed, the score of the spammer node must be greater than publish threshold: %f, actual: %f", scoring.DefaultPublishThreshold, spammerScore) + require.Greaterf(t, spammerScore, scoring.DefaultGraylistThreshold, "sanity check failed, the score of the spammer node must be greater than graylist threshold: %f, actual: %f", scoring.DefaultGraylistThreshold, spammerScore) + + // since the spammer score is above the gossip, graylist and publish thresholds, it should be still able to exchange messages with victim. + p2ptest.EnsurePubsubMessageExchange(t, ctx, nodes, blockTopic, 1, func() interface{} { + return unittest.ProposalFixture() + }) + + // THIRD ROUND OF ATTACK: spammer sends 10 RPCs to the victim node, each containing 500 iHave messages, we expect spammer to be graylisted. 
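The round-by-round expectations in this test follow from gossipsub's behaviour penalty growing quadratically in the number of broken promises beyond the threshold: round one sits at the threshold and costs nothing, round two is roughly 10 over and produces a noticeable drop, round three is roughly 20 over and pushes the score past the graylist threshold. A rough, self-contained sketch of that arithmetic follows; the threshold of 10 and the 0.01 x MaxAppSpecificReward weight are taken from this test's own comments and assertions, not read out of the scoring package, and the reward magnitude is a placeholder.

package main

import "fmt"

// brokenPromisePenalty sketches gossipsub's behaviour penalty: broken-promise
// counts above the threshold are squared and scaled by a (negative) weight.
func brokenPromisePenalty(brokenPromises, threshold, weight float64) float64 {
	excess := brokenPromises - threshold
	if excess <= 0 {
		return 0
	}
	return -weight * excess * excess
}

func main() {
	const maxAppSpecificReward = 100.0 // placeholder magnitude
	weight := 0.01 * maxAppSpecificReward

	fmt.Println(brokenPromisePenalty(10, 10, weight)) // round 1: at the threshold, no penalty
	fmt.Println(brokenPromisePenalty(20, 10, weight)) // round 2: ~10 over threshold, visible score drop
	fmt.Println(brokenPromisePenalty(30, 10, weight)) // round 3: ~20 over threshold, enough to cross the graylist threshold
}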
+ spamIHaveBrokenPromise(t, spammer, blockTopic.String(), receivedIWants, victimNode) + + // wait till victim counts the spam iHaves as broken promises for the third round of attack (one per RPC for a total of 10). + require.Eventually(t, func() bool { + behavioralPenalty, ok := victimNode.PeerScoreExposer().GetBehaviourPenalty(spammer.SpammerNode.Host().ID()) + if !ok { + return false + } + // ideally we should have 30 (10 from the first round, 10 from the second round, 10 from the third round), but we give it a buffer of 3 to account for decays and floating point errors. + // note that we intentionally override the decay speed to be 60-times faster in this test. + if behavioralPenalty < 27 { + return false + } + + return true + // Note: we have to wait at least 3 seconds for an iHave to be considered as broken promise (gossipsub parameters), we set it to 10 + // seconds to be on the safe side. + }, 10*time.Second, 100*time.Millisecond) + + spammerScore, ok = victimNode.PeerScoreExposer().GetScore(spammer.SpammerNode.Host().ID()) + require.True(t, ok, "sanity check failed, we should have a score for the spammer node") + // with the third round of the attack, the spammer is about 20 broken promises above the threshold (total ~30 broken promises), hence its overall score must be below the gossip, publish, and graylist thresholds, meaning that + // victim will not exchange messages with it anymore, and also that it will be graylisted meaning all incoming and outgoing RPCs to and from the spammer will be dropped by the victim. + require.Lessf(t, spammerScore, scoring.DefaultGossipThreshold, "sanity check failed, the score of the spammer node must be less than gossip threshold: %f, actual: %f", scoring.DefaultGossipThreshold, spammerScore) + require.Lessf(t, spammerScore, scoring.DefaultPublishThreshold, "sanity check failed, the score of the spammer node must be less than publish threshold: %f, actual: %f", scoring.DefaultPublishThreshold, spammerScore) + require.Lessf(t, spammerScore, scoring.DefaultGraylistThreshold, "sanity check failed, the score of the spammer node must be less than graylist threshold: %f, actual: %f", scoring.DefaultGraylistThreshold, spammerScore) + + // since the spammer score is below the gossip, graylist and publish thresholds, it should not be able to exchange messages with victim anymore. + p2ptest.EnsureNoPubsubExchangeBetweenGroups(t, ctx, []p2p.LibP2PNode{spammer.SpammerNode}, []p2p.LibP2PNode{victimNode}, blockTopic, 1, func() interface{} { + return unittest.ProposalFixture() + }) +} + +// spamIHaveBrokenPromises is a test utility function that is exclusive for the TestGossipSubIHaveBrokenPromises tests. +// It creates and sends 10 RPCs each with 10 iHave messages, each iHave message has 50 message ids, hence overall, we have 5000 iHave message ids. +// It then sends those iHave spams to the victim node and waits till the victim node receives them. +// Args: +// - t: the test instance. +// - spammer: the spammer node. +// - topic: the topic to spam. +// - receivedIWants: a map to keep track of the iWants received by the victim node (exclusive to TestGossipSubIHaveBrokenPromises). +// - victimNode: the victim node. 
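A quick sanity check of the volume each attack round generates, using the sizes named in the helper's doc comment (10 RPCs, each with 10 iHave messages, each advertising 50 message ids), which is also the per-decay-interval iHave budget the test comments mention. A tiny sketch, with all numbers mirrored from those comments:

package main

import "fmt"

func main() {
	// Sizes taken from the helper's doc comment.
	const rpcs, iHavesPerRPC, idsPerIHave = 10, 10, 50
	totalIDs := rpcs * iHavesPerRPC * idsPerIHave
	fmt.Println(totalIDs) // 5000: the full iHave budget for one decay interval, per the test comments

	// GossipSub samples a single advertised id per RPC, so one round contributes
	// at most one broken promise per RPC.
	fmt.Println(rpcs) // 10 broken promises per round
}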
+func spamIHaveBrokenPromise(t *testing.T, spammer *corruptlibp2p.GossipSubRouterSpammer, topic string, receivedIWants *unittest.ProtectedMap[string, struct{}], victimNode p2p.LibP2PNode) { + spamMsgs := spammer.GenerateCtlMessages(10, corruptlibp2p.WithIHave(10, 50, topic)) + var sentIHaves []string + for _, msg := range spamMsgs { + for _, iHave := range msg.Ihave { + for _, msgId := range iHave.MessageIDs { + require.NotContains(t, sentIHaves, msgId) + sentIHaves = append(sentIHaves, msgId) + } + } + } + require.Len(t, sentIHaves, 5000, "sanity check failed, we should have 5000 iHave message ids, actual: %d", len(sentIHaves)) + + // spams the victim node with 1000 spam iHave messages, since iHave messages are for junk message ids, there will be no + // reply from spammer to victim over the iWants. Hence, the victim must count this towards 10 broken promises. + // This sums up to 10 broken promises (1 per RPC). + spammer.SpamControlMessage(t, victimNode, spamMsgs, p2ptest.PubsubMessageFixture(t, p2ptest.WithTopic(topic))) + + // wait till all the spam iHaves are responded with iWants. + require.Eventually(t, func() bool { + for _, msgId := range sentIHaves { + if _, ok := receivedIWants.Get(msgId); !ok { + return false + } + } + + return true + }, 5*time.Second, 100*time.Millisecond, fmt.Sprintf("sanity check failed, we should have received all the iWants for the spam iHaves, expected: %d, actual: %d", len(sentIHaves), receivedIWants.Size())) +} diff --git a/insecure/integration/functional/test/gossipsub/scoring/scoring_test.go b/insecure/integration/functional/test/gossipsub/scoring/scoring_test.go index ec024775cf0..d8baf4be735 100644 --- a/insecure/integration/functional/test/gossipsub/scoring/scoring_test.go +++ b/insecure/integration/functional/test/gossipsub/scoring/scoring_test.go @@ -5,18 +5,21 @@ import ( "testing" "time" + pubsub "github.com/libp2p/go-libp2p-pubsub" pubsub_pb "github.com/libp2p/go-libp2p-pubsub/pb" "github.com/libp2p/go-libp2p/core/peer" "github.com/stretchr/testify/require" "github.com/onflow/flow-go/insecure/corruptlibp2p" "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/model/messages" "github.com/onflow/flow-go/module/irrecoverable" "github.com/onflow/flow-go/module/mock" "github.com/onflow/flow-go/network/channels" "github.com/onflow/flow-go/network/p2p" "github.com/onflow/flow-go/network/p2p/scoring" p2ptest "github.com/onflow/flow-go/network/p2p/test" + validator "github.com/onflow/flow-go/network/validator/pubsub" "github.com/onflow/flow-go/utils/unittest" ) @@ -24,6 +27,8 @@ import ( // a spammer peer, the victim will eventually penalize the spammer and stop receiving messages from them. // Note: the term integration is used here because it requires integrating all components of the libp2p stack. func TestGossipSubInvalidMessageDelivery_Integration(t *testing.T) { + t.Parallel() + tt := []struct { name string spamMsgFactory func(spammerId peer.ID, victimId peer.ID, topic channels.Topic) *pubsub_pb.Message @@ -91,6 +96,7 @@ func TestGossipSubInvalidMessageDelivery_Integration(t *testing.T) { // - t: the test instance. // - spamMsgFactory: a function that creates unique invalid messages to spam the victim with. 
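For orientation before the invalid-message-delivery walkthrough below: in a gossipsub topic score, invalid deliveries contribute a penalty that grows with the square of the (decaying) per-topic counter, which is why retaining even ~60% of the 20 spam messages is enough to sink the spammer's score. A hedged sketch of that term follows; the weight and counter values are illustrative, not Flow's production parameters.

package main

import "fmt"

// invalidDeliveryPenalty sketches the quadratic invalid-message-delivery term of a
// gossipsub topic score: penalty = weight * counter^2, with a negative weight.
func invalidDeliveryPenalty(counter, weight float64) float64 {
	return weight * counter * counter
}

func main() {
	weight := -1.0 // illustrative negative weight
	// Even if decay erodes the counter to ~60% of the 20 spam messages sent,
	// the squared term keeps the penalty large.
	fmt.Println(invalidDeliveryPenalty(0.6*20, weight)) // -144
}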
func testGossipSubInvalidMessageDeliveryScoring(t *testing.T, spamMsgFactory func(peer.ID, peer.ID, channels.Topic) *pubsub_pb.Message) { + role := flow.RoleConsensus sporkId := unittest.IdentifierFixture() blockTopic := channels.TopicFromChannel(channels.PushBlocks, sporkId) @@ -107,7 +113,7 @@ func testGossipSubInvalidMessageDeliveryScoring(t *testing.T, spamMsgFactory fun idProvider, p2ptest.WithRole(role), p2ptest.WithPeerScoreTracerInterval(1*time.Second), - p2ptest.WithPeerScoringEnabled(idProvider), + p2ptest.EnablePeerScoringWithOverride(p2p.PeerScoringConfigNoOverride), ) idProvider.On("ByPeerID", victimNode.Host().ID()).Return(&victimIdentity, true).Maybe() @@ -122,8 +128,8 @@ func testGossipSubInvalidMessageDeliveryScoring(t *testing.T, spamMsgFactory fun p2ptest.TryConnectionAndEnsureConnected(t, ctx, nodes) // checks end-to-end message delivery works on GossipSub - p2ptest.EnsurePubsubMessageExchange(t, ctx, nodes, func() (interface{}, channels.Topic) { - return unittest.ProposalFixture(), blockTopic + p2ptest.EnsurePubsubMessageExchange(t, ctx, nodes, blockTopic, 1, func() interface{} { + return unittest.ProposalFixture() }) totalSpamMessages := 20 @@ -167,7 +173,375 @@ func testGossipSubInvalidMessageDeliveryScoring(t *testing.T, spamMsgFactory fun // ensure that the topic snapshot of the spammer contains a record of at least (60%) of the spam messages sent. The 60% is to account for the messages that were delivered before the score was updated, after the spammer is PRUNED, as well as to account for decay. require.True(t, blkTopicSnapshot.InvalidMessageDeliveries > 0.6*float64(totalSpamMessages), "invalid message deliveries must be greater than %f. invalid message deliveries: %f", 0.9*float64(totalSpamMessages), blkTopicSnapshot.InvalidMessageDeliveries) - p2ptest.EnsureNoPubsubExchangeBetweenGroups(t, ctx, []p2p.LibP2PNode{victimNode}, []p2p.LibP2PNode{spammer.SpammerNode}, func() (interface{}, channels.Topic) { - return unittest.ProposalFixture(), blockTopic + p2ptest.EnsureNoPubsubExchangeBetweenGroups(t, ctx, []p2p.LibP2PNode{victimNode}, []p2p.LibP2PNode{spammer.SpammerNode}, blockTopic, 1, func() interface{} { + return unittest.ProposalFixture() + }) +} + +// TestGossipSubMeshDeliveryScoring_UnderDelivery_SingleTopic tests that when a peer is under-performing in a topic mesh, its score is (slightly) penalized. +func TestGossipSubMeshDeliveryScoring_UnderDelivery_SingleTopic(t *testing.T) { + t.Parallel() + + role := flow.RoleConsensus + sporkId := unittest.IdentifierFixture() + + idProvider := mock.NewIdentityProvider(t) + ctx, cancel := context.WithCancel(context.Background()) + signalerCtx := irrecoverable.NewMockSignalerContext(t, ctx) + + blockTopic := channels.TopicFromChannel(channels.PushBlocks, sporkId) + + // we override some of the default scoring parameters in order to speed up the test in a time-efficient manner. + blockTopicOverrideParams := scoring.DefaultTopicScoreParams() + blockTopicOverrideParams.MeshMessageDeliveriesActivation = 1 * time.Second // we start observing the mesh message deliveries after 1 second of the node startup. + thisNode, thisId := p2ptest.NodeFixture( // this node is the one that will be penalizing the under-performer node. 
+ t, + sporkId, + t.Name(), + idProvider, + p2ptest.WithRole(role), + p2ptest.WithPeerScoreTracerInterval(1*time.Second), + p2ptest.EnablePeerScoringWithOverride( + &p2p.PeerScoringConfigOverride{ + TopicScoreParams: map[channels.Topic]*pubsub.TopicScoreParams{ + blockTopic: blockTopicOverrideParams, + }, + DecayInterval: 1 * time.Second, // we override the decay interval to 1 second so that the score is updated within 1 second intervals. + }), + ) + + underPerformerNode, underPerformerId := p2ptest.NodeFixture( + t, + sporkId, + t.Name(), + idProvider, + p2ptest.WithRole(role), + ) + + idProvider.On("ByPeerID", thisNode.Host().ID()).Return(&thisId, true).Maybe() + idProvider.On("ByPeerID", underPerformerNode.Host().ID()).Return(&underPerformerId, true).Maybe() + ids := flow.IdentityList{&underPerformerId, &thisId} + nodes := []p2p.LibP2PNode{underPerformerNode, thisNode} + + p2ptest.StartNodes(t, signalerCtx, nodes, 100*time.Millisecond) + defer p2ptest.StopNodes(t, nodes, cancel, 2*time.Second) + + p2ptest.LetNodesDiscoverEachOther(t, ctx, nodes, ids) + p2ptest.TryConnectionAndEnsureConnected(t, ctx, nodes) + + // initially both nodes should be able to publish and receive messages from each other in the topic mesh. + p2ptest.EnsurePubsubMessageExchange(t, ctx, nodes, blockTopic, 1, func() interface{} { + return unittest.ProposalFixture() + }) + + // Also initially the under-performing node should have a score that is at least equal to the MaxAppSpecificReward. + // The reason is in our scoring system, we reward the staked nodes by MaxAppSpecificReward, and the under-performing node is considered staked + // as it is in the id provider of thisNode. + require.Eventually(t, func() bool { + underPerformingNodeScore, ok := thisNode.PeerScoreExposer().GetScore(underPerformerNode.Host().ID()) + if !ok { + return false + } + if underPerformingNodeScore < scoring.MaxAppSpecificReward { + // ensure the score is high enough so that gossip is routed by victim node to spammer node. + return false + } + + return true + }, 1*time.Second, 100*time.Millisecond) + + // however, after one decay interval, we expect the score of the under-performing node to be penalized by -0.05 * MaxAppSpecificReward as + // it has not been able to deliver messages to this node in the topic mesh since the past decay interval. + require.Eventually(t, func() bool { + underPerformingNodeScore, ok := thisNode.PeerScoreExposer().GetScore(underPerformerNode.Host().ID()) + if !ok { + return false + } + if underPerformingNodeScore > 0.96*scoring.MaxAppSpecificReward { // score must be penalized by -0.05 * MaxAppSpecificReward. + // 0.96 is to account for floating point errors. + return false + } + if underPerformingNodeScore < scoring.DefaultGossipThreshold { // even the node is slightly penalized, it should still be able to gossip with this node. + return false + } + if underPerformingNodeScore < scoring.DefaultPublishThreshold { // even the node is slightly penalized, it should still be able to publish to this node. + return false + } + if underPerformingNodeScore < scoring.DefaultGraylistThreshold { // even the node is slightly penalized, it should still be able to establish rpc connection with this node. + return false + } + + return true + }, 3*time.Second, 100*time.Millisecond) + + // even though the under-performing node is penalized, it should still be able to publish and receive messages from this node in the topic mesh. 
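To make the numbers in the checks above concrete: the test expects roughly a 5% cut of MaxAppSpecificReward per under-delivering topic per decay interval, compounding to roughly 10% in the two-topic variant further below, while staying far above the gossip, publish, and graylist thresholds. A small sketch of that expectation follows; the percentages mirror the test comments and the reward magnitude is a placeholder, not the scoring package's actual constant.

package main

import "fmt"

func main() {
	const maxAppSpecificReward = 100.0 // placeholder magnitude
	// Per the test comments, each topic in which the peer under-delivers costs
	// roughly 5% of MaxAppSpecificReward per decay interval.
	penaltyPerTopic := 0.05 * maxAppSpecificReward

	oneTopic := maxAppSpecificReward - 1*penaltyPerTopic  // ~0.95x reward, cf. the 0.96 bound in the single-topic test
	twoTopics := maxAppSpecificReward - 2*penaltyPerTopic // ~0.90x reward, cf. the 0.91 bound in the two-topic test
	fmt.Println(oneTopic, twoTopics)
	// Both values remain far above the (negative) gossip/publish/graylist thresholds,
	// so the under-performer is nudged, not disconnected.
}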
+ p2ptest.EnsurePubsubMessageExchange(t, ctx, nodes, blockTopic, 1, func() interface{} { + return unittest.ProposalFixture() + }) +} + +// TestGossipSubMeshDeliveryScoring_UnderDelivery_TwoTopics tests that when a peer is under-performing in two topics, it is penalized in both topics. +func TestGossipSubMeshDeliveryScoring_UnderDelivery_TwoTopics(t *testing.T) { + t.Parallel() + + role := flow.RoleConsensus + sporkId := unittest.IdentifierFixture() + + idProvider := mock.NewIdentityProvider(t) + ctx, cancel := context.WithCancel(context.Background()) + signalerCtx := irrecoverable.NewMockSignalerContext(t, ctx) + + blockTopic := channels.TopicFromChannel(channels.PushBlocks, sporkId) + dkgTopic := channels.TopicFromChannel(channels.DKGCommittee, sporkId) + + // we override some of the default scoring parameters in order to speed up the test in a time-efficient manner. + blockTopicOverrideParams := scoring.DefaultTopicScoreParams() + blockTopicOverrideParams.MeshMessageDeliveriesActivation = 1 * time.Second // we start observing the mesh message deliveries after 1 second of the node startup. + dkgTopicOverrideParams := scoring.DefaultTopicScoreParams() + dkgTopicOverrideParams.MeshMessageDeliveriesActivation = 1 * time.Second // we start observing the mesh message deliveries after 1 second of the node startup. + thisNode, thisId := p2ptest.NodeFixture( // this node is the one that will be penalizing the under-performer node. + t, + sporkId, + t.Name(), + idProvider, + p2ptest.WithRole(role), + p2ptest.WithPeerScoreTracerInterval(1*time.Second), + p2ptest.EnablePeerScoringWithOverride( + &p2p.PeerScoringConfigOverride{ + TopicScoreParams: map[channels.Topic]*pubsub.TopicScoreParams{ + blockTopic: blockTopicOverrideParams, + dkgTopic: dkgTopicOverrideParams, + }, + DecayInterval: 1 * time.Second, // we override the decay interval to 1 second so that the score is updated within 1 second intervals. + }), + ) + + underPerformerNode, underPerformerId := p2ptest.NodeFixture( + t, + sporkId, + t.Name(), + idProvider, + p2ptest.WithRole(role), + ) + + idProvider.On("ByPeerID", thisNode.Host().ID()).Return(&thisId, true).Maybe() + idProvider.On("ByPeerID", underPerformerNode.Host().ID()).Return(&underPerformerId, true).Maybe() + ids := flow.IdentityList{&underPerformerId, &thisId} + nodes := []p2p.LibP2PNode{underPerformerNode, thisNode} + + p2ptest.StartNodes(t, signalerCtx, nodes, 100*time.Millisecond) + defer p2ptest.StopNodes(t, nodes, cancel, 2*time.Second) + + p2ptest.LetNodesDiscoverEachOther(t, ctx, nodes, ids) + p2ptest.TryConnectionAndEnsureConnected(t, ctx, nodes) + + // subscribe to the topics. + for _, node := range nodes { + for _, topic := range []channels.Topic{blockTopic, dkgTopic} { + _, err := node.Subscribe(topic, validator.TopicValidator(unittest.Logger(), unittest.AllowAllPeerFilter())) + require.NoError(t, err) + } + } + + // Initially the under-performing node should have a score that is at least equal to the MaxAppSpecificReward. + // The reason is in our scoring system, we reward the staked nodes by MaxAppSpecificReward, and the under-performing node is considered staked + // as it is in the id provider of thisNode. + require.Eventually(t, func() bool { + underPerformingNodeScore, ok := thisNode.PeerScoreExposer().GetScore(underPerformerNode.Host().ID()) + if !ok { + return false + } + if underPerformingNodeScore < scoring.MaxAppSpecificReward { + // ensure the score is high enough so that gossip is routed by victim node to spammer node. 
+ return false + } + + return true + }, 2*time.Second, 100*time.Millisecond) + + // No message delivery happens intentionally, so that the under-performing node is penalized. + + // however, after one decay interval, we expect the score of the under-performing node to be penalized by ~ 2 * -0.05 * MaxAppSpecificReward. + require.Eventually(t, func() bool { + underPerformingNodeScore, ok := thisNode.PeerScoreExposer().GetScore(underPerformerNode.Host().ID()) + if !ok { + return false + } + if underPerformingNodeScore > 0.91*scoring.MaxAppSpecificReward { // score must be penalized by ~ 2 * -0.05 * MaxAppSpecificReward. + // 0.91 is to account for the floating point errors. + return false + } + if underPerformingNodeScore < scoring.DefaultGossipThreshold { // even the node is slightly penalized, it should still be able to gossip with this node. + return false + } + if underPerformingNodeScore < scoring.DefaultPublishThreshold { // even the node is slightly penalized, it should still be able to publish to this node. + return false + } + if underPerformingNodeScore < scoring.DefaultGraylistThreshold { // even the node is slightly penalized, it should still be able to establish rpc connection with this node. + return false + } + + return true + }, 3*time.Second, 100*time.Millisecond) + + // even though the under-performing node is penalized, it should still be able to publish and receive messages from this node in both topic meshes. + p2ptest.EnsurePubsubMessageExchange(t, ctx, nodes, blockTopic, 1, func() interface{} { + return unittest.ProposalFixture() + }) + p2ptest.EnsurePubsubMessageExchange(t, ctx, nodes, dkgTopic, 1, func() interface{} { + return unittest.DKGMessageFixture() + }) +} + +// TestGossipSubMeshDeliveryScoring_Replay_Will_Not_Counted tests that replayed messages will not be counted towards the mesh message deliveries. +func TestGossipSubMeshDeliveryScoring_Replay_Will_Not_Counted(t *testing.T) { + t.Parallel() + + role := flow.RoleConsensus + sporkId := unittest.IdentifierFixture() + + idProvider := mock.NewIdentityProvider(t) + ctx, cancel := context.WithCancel(context.Background()) + signalerCtx := irrecoverable.NewMockSignalerContext(t, ctx) + + blockTopic := channels.TopicFromChannel(channels.PushBlocks, sporkId) + + // we override some of the default scoring parameters in order to speed up the test in a time-efficient manner. + blockTopicOverrideParams := scoring.DefaultTopicScoreParams() + blockTopicOverrideParams.MeshMessageDeliveriesActivation = 1 * time.Second // we start observing the mesh message deliveries after 1 second of the node startup. + thisNode, thisId := p2ptest.NodeFixture( // this node is the one that will be penalizing the under-performer node. + t, + sporkId, + t.Name(), + idProvider, + p2ptest.WithRole(role), + p2ptest.WithPeerScoreTracerInterval(1*time.Second), + p2ptest.EnablePeerScoringWithOverride( + &p2p.PeerScoringConfigOverride{ + TopicScoreParams: map[channels.Topic]*pubsub.TopicScoreParams{ + blockTopic: blockTopicOverrideParams, + }, + DecayInterval: 1 * time.Second, // we override the decay interval to 1 second so that the score is updated within 1 second intervals. 
+ }), + ) + + replayingNode, replayingId := p2ptest.NodeFixture( + t, + sporkId, + t.Name(), + idProvider, + p2ptest.WithRole(role), + ) + + idProvider.On("ByPeerID", thisNode.Host().ID()).Return(&thisId, true).Maybe() + idProvider.On("ByPeerID", replayingNode.Host().ID()).Return(&replayingId, true).Maybe() + ids := flow.IdentityList{&replayingId, &thisId} + nodes := []p2p.LibP2PNode{replayingNode, thisNode} + + p2ptest.StartNodes(t, signalerCtx, nodes, 100*time.Millisecond) + defer p2ptest.StopNodes(t, nodes, cancel, 2*time.Second) + + p2ptest.LetNodesDiscoverEachOther(t, ctx, nodes, ids) + p2ptest.TryConnectionAndEnsureConnected(t, ctx, nodes) + + // initially both nodes should be able to publish and receive messages from each other in the block topic mesh. + p2ptest.EnsurePubsubMessageExchange(t, ctx, nodes, blockTopic, 1, func() interface{} { + return unittest.ProposalFixture() + }) + + // Initially the replaying node should have a score that is at least equal to the MaxAppSpecificReward. + // The reason is in our scoring system, we reward the staked nodes by MaxAppSpecificReward, and initially every node is considered staked + // as it is in the id provider of thisNode. + initialReplayingNodeScore := float64(0) + require.Eventually(t, func() bool { + replayingNodeScore, ok := thisNode.PeerScoreExposer().GetScore(replayingNode.Host().ID()) + if !ok { + return false + } + if replayingNodeScore < scoring.MaxAppSpecificReward { + // ensure the score is high enough so that gossip is routed by victim node to spammer node. + return false + } + + initialReplayingNodeScore = replayingNodeScore + return true + }, 2*time.Second, 100*time.Millisecond) + + // replaying node acts honestly and sends 200 block proposals on the topic mesh. This is twice the + // defaultTopicMeshMessageDeliveryThreshold, which prevents the replaying node to be penalized. + proposalList := make([]*messages.BlockProposal, 200) + for i := 0; i < len(proposalList); i++ { + proposalList[i] = unittest.ProposalFixture() + } + i := -1 + p2ptest.EnsurePubsubMessageExchangeFromNode(t, ctx, replayingNode, thisNode, blockTopic, len(proposalList), func() interface{} { + i += 1 + return proposalList[i] + }) + + // as the replaying node is not penalized, we expect its score to be equal to the initial score. + require.Eventually(t, func() bool { + replayingNodeScore, ok := thisNode.PeerScoreExposer().GetScore(replayingNode.Host().ID()) + if !ok { + return false + } + if replayingNodeScore < scoring.MaxAppSpecificReward { + // ensure the score is high enough so that gossip is routed by victim node to spammer node. + return false + } + if replayingNodeScore != initialReplayingNodeScore { + // ensure the score is not penalized. + return false + } + + initialReplayingNodeScore = replayingNodeScore + return true + }, 2*time.Second, 100*time.Millisecond) + + // now the replaying node acts maliciously and just replays the same messages again. + i = -1 + p2ptest.EnsureNoPubsubMessageExchange(t, ctx, []p2p.LibP2PNode{replayingNode}, []p2p.LibP2PNode{thisNode}, blockTopic, len(proposalList), func() interface{} { + i += 1 + return proposalList[i] + }) + + // since the last decay interval, the replaying node has not delivered anything new, so its score should be penalized for under-performing. 
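The reason the replay below does not help the replaying node: gossipsub remembers message ids it has already seen, and only first deliveries advance a peer's mesh-delivery counters; duplicates are discarded rather than credited. A minimal, self-contained sketch of that bookkeeping follows; the cache here is a plain map, whereas the real implementation uses a time-bounded seen cache.

package main

import "fmt"

// deliveryTracker is an illustrative stand-in for gossipsub's first-delivery
// bookkeeping: only the first time a message id is seen does the delivering
// peer's mesh-delivery counter advance.
type deliveryTracker struct {
	seen           map[string]struct{}
	meshDeliveries map[string]int // per-peer counter
}

func newDeliveryTracker() *deliveryTracker {
	return &deliveryTracker{seen: map[string]struct{}{}, meshDeliveries: map[string]int{}}
}

func (d *deliveryTracker) onMessage(peerID, msgID string) {
	if _, dup := d.seen[msgID]; dup {
		return // replayed message: no delivery credit
	}
	d.seen[msgID] = struct{}{}
	d.meshDeliveries[peerID]++
}

func main() {
	tr := newDeliveryTracker()
	for i := 0; i < 200; i++ {
		tr.onMessage("replayer", fmt.Sprintf("msg-%d", i))
	}
	for i := 0; i < 200; i++ {
		tr.onMessage("replayer", fmt.Sprintf("msg-%d", i)) // replay of the same 200 ids
	}
	fmt.Println(tr.meshDeliveries["replayer"]) // 200, not 400
}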
+ require.Eventually(t, func() bool { + replayingNodeScore, ok := thisNode.PeerScoreExposer().GetScore(replayingNode.Host().ID()) + if !ok { + return false + } + + if replayingNodeScore >= initialReplayingNodeScore { + // node must be penalized for just replaying the same messages. + return false + } + + if replayingNodeScore >= scoring.MaxAppSpecificReward { + // node must be penalized for just replaying the same messages. + return false + } + + // following if-statements check that even though the node is penalized, it is not penalized too much, and + // can still participate in the network. We don't desire to disallow list a node for just under-performing. + if replayingNodeScore < scoring.DefaultGossipThreshold { + return false + } + + if replayingNodeScore < scoring.DefaultPublishThreshold { + return false + } + + if replayingNodeScore < scoring.DefaultGraylistThreshold { + return false + } + + initialReplayingNodeScore = replayingNodeScore + return true + }, 2*time.Second, 100*time.Millisecond) + + // even though the replaying node is penalized, it should still be able to publish and receive messages from this node in both topic meshes. + p2ptest.EnsurePubsubMessageExchange(t, ctx, nodes, blockTopic, 1, func() interface{} { + return unittest.ProposalFixture() }) } diff --git a/integration/benchmark/cmd/manual/Dockerfile b/integration/benchmark/cmd/manual/Dockerfile index 1ad38985a43..58f2b71d42b 100644 --- a/integration/benchmark/cmd/manual/Dockerfile +++ b/integration/benchmark/cmd/manual/Dockerfile @@ -1,7 +1,7 @@ # syntax = docker/dockerfile:experimental # NOTE: Must be run in the context of the repo's root directory -FROM golang:1.19-buster AS build-setup +FROM golang:1.20-buster AS build-setup RUN apt-get update RUN apt-get -y install cmake zip diff --git a/integration/go.mod b/integration/go.mod index f59d02427b4..e47e8bac3c3 100644 --- a/integration/go.mod +++ b/integration/go.mod @@ -1,9 +1,9 @@ module github.com/onflow/flow-go/integration -go 1.19 +go 1.20 require ( - cloud.google.com/go/bigquery v1.48.0 + cloud.google.com/go/bigquery v1.50.0 github.com/VividCortex/ewma v1.2.0 github.com/coreos/go-semver v0.3.0 github.com/dapperlabs/testingdock v0.4.4 @@ -17,15 +17,15 @@ require ( github.com/ipfs/go-datastore v0.6.0 github.com/ipfs/go-ds-badger2 v0.1.3 github.com/ipfs/go-ipfs-blockstore v1.3.0 - github.com/onflow/cadence v0.39.12 - github.com/onflow/flow-core-contracts/lib/go/contracts v1.2.3 + github.com/onflow/cadence v0.39.14 + github.com/onflow/flow-core-contracts/lib/go/contracts v1.2.4-0.20230703193002-53362441b57d github.com/onflow/flow-core-contracts/lib/go/templates v1.2.3 - github.com/onflow/flow-emulator v0.50.6 - github.com/onflow/flow-go v0.31.1-0.20230607185125-e75265a6c631 - github.com/onflow/flow-go-sdk v0.41.6 - github.com/onflow/flow-go/crypto v0.24.7 + github.com/onflow/flow-emulator v0.53.0 + github.com/onflow/flow-go v0.31.1-0.20230718164039-e3411eff1e9d + github.com/onflow/flow-go-sdk v0.41.9 + github.com/onflow/flow-go/crypto v0.24.9 github.com/onflow/flow-go/insecure v0.0.0-00010101000000-000000000000 - github.com/onflow/flow/protobuf/go/flow v0.3.2-0.20230602212908-08fc6536d391 + github.com/onflow/flow/protobuf/go/flow v0.3.2-0.20230628215638-83439d22e0ce github.com/plus3it/gorecurcopy v0.0.1 github.com/prometheus/client_golang v1.14.0 github.com/prometheus/client_model v0.4.0 @@ -36,23 +36,23 @@ require ( go.uber.org/atomic v1.11.0 golang.org/x/exp v0.0.0-20230321023759-10a507213a29 golang.org/x/sync v0.2.0 - google.golang.org/grpc v1.55.0 + 
google.golang.org/grpc v1.56.1 google.golang.org/protobuf v1.30.0 ) require ( cloud.google.com/go v0.110.0 // indirect - cloud.google.com/go/compute v1.18.0 // indirect + cloud.google.com/go/compute v1.19.1 // indirect cloud.google.com/go/compute/metadata v0.2.3 // indirect - cloud.google.com/go/iam v0.12.0 // indirect - cloud.google.com/go/storage v1.28.1 // indirect + cloud.google.com/go/iam v0.13.0 // indirect + cloud.google.com/go/storage v1.29.0 // indirect github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78 // indirect github.com/Microsoft/go-winio v0.5.2 // indirect github.com/Microsoft/hcsshim v0.8.7 // indirect github.com/ProtonMail/go-crypto v0.0.0-20221026131551-cf6655e29de4 // indirect github.com/acomagu/bufpipe v1.0.3 // indirect github.com/andybalholm/brotli v1.0.4 // indirect - github.com/apache/arrow/go/v10 v10.0.1 // indirect + github.com/apache/arrow/go/v11 v11.0.0 // indirect github.com/apache/thrift v0.16.0 // indirect github.com/aws/aws-sdk-go-v2 v1.17.7 // indirect github.com/aws/aws-sdk-go-v2/config v1.18.19 // indirect @@ -228,8 +228,7 @@ require ( github.com/multiformats/go-varint v0.0.7 // indirect github.com/onflow/atree v0.6.0 // indirect github.com/onflow/flow-ft/lib/go/contracts v0.7.0 // indirect - github.com/onflow/flow-nft/lib/go/contracts v0.0.0-20220727161549-d59b1e547ac4 // indirect - github.com/onflow/fusd/lib/go/contracts v0.0.0-20211021081023-ae9de8fb2c7e // indirect + github.com/onflow/flow-nft/lib/go/contracts v1.1.0 // indirect github.com/onflow/go-bitswap v0.0.0-20230703214630-6d3db958c73d // indirect github.com/onflow/nft-storefront/lib/go/contracts v0.0.0-20221222181731-14b90207cead // indirect github.com/onflow/sdks v0.5.0 // indirect @@ -306,7 +305,7 @@ require ( golang.org/x/crypto v0.10.0 // indirect golang.org/x/mod v0.10.0 // indirect golang.org/x/net v0.10.0 // indirect - golang.org/x/oauth2 v0.6.0 // indirect + golang.org/x/oauth2 v0.7.0 // indirect golang.org/x/sys v0.9.0 // indirect golang.org/x/term v0.9.0 // indirect golang.org/x/text v0.10.0 // indirect @@ -316,7 +315,7 @@ require ( gonum.org/v1/gonum v0.13.0 // indirect google.golang.org/api v0.114.0 // indirect google.golang.org/appengine v1.6.7 // indirect - google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4 // indirect + google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 // indirect google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.2.0 // indirect gopkg.in/ini.v1 v1.67.0 // indirect gopkg.in/warnings.v0 v0.1.2 // indirect diff --git a/integration/go.sum b/integration/go.sum index 0b3079bed0f..4aac8d7305d 100644 --- a/integration/go.sum +++ b/integration/go.sum @@ -40,19 +40,19 @@ cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvf cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= -cloud.google.com/go/bigquery v1.48.0 h1:u+fhS1jJOkPO9vdM84M8HO5VznTfVUicBeoXNKD26ho= -cloud.google.com/go/bigquery v1.48.0/go.mod h1:QAwSz+ipNgfL5jxiaK7weyOhzdoAy1zFm0Nf1fysJac= +cloud.google.com/go/bigquery v1.50.0 h1:RscMV6LbnAmhAzD893Lv9nXXy2WCaJmbxYPWDLbGqNQ= +cloud.google.com/go/bigquery v1.50.0/go.mod h1:YrleYEh2pSEbgTBZYMJ5SuSr0ML3ypjRB1zgf7pvQLU= cloud.google.com/go/bigtable v1.2.0/go.mod h1:JcVAOl45lrTmQfLj7T6TxyMzIN/3FGGcFm+2xVAli2o= -cloud.google.com/go/compute v1.18.0 
h1:FEigFqoDbys2cvFkZ9Fjq4gnHBP55anJ0yQyau2f9oY= -cloud.google.com/go/compute v1.18.0/go.mod h1:1X7yHxec2Ga+Ss6jPyjxRxpu2uu7PLgsOVXvgU0yacs= +cloud.google.com/go/compute v1.19.1 h1:am86mquDUgjGNWxiGn+5PGLbmgiWXlE/yNWpIpNvuXY= +cloud.google.com/go/compute v1.19.1/go.mod h1:6ylj3a05WF8leseCdIf77NK0g1ey+nj5IKd5/kvShxE= cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY= cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= -cloud.google.com/go/datacatalog v1.12.0 h1:3uaYULZRLByPdbuUvacGeqneudztEM4xqKQsBcxbDnY= +cloud.google.com/go/datacatalog v1.13.0 h1:4H5IJiyUE0X6ShQBqgFFZvGGcrwGVndTwUSLP4c52gw= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk= -cloud.google.com/go/iam v0.12.0 h1:DRtTY29b75ciH6Ov1PHb4/iat2CLCvrOm40Q0a6DFpE= -cloud.google.com/go/iam v0.12.0/go.mod h1:knyHGviacl11zrtZUoDuYpDgLjvr28sLQaG0YB2GYAY= +cloud.google.com/go/iam v0.13.0 h1:+CmB+K0J/33d0zSQ9SlFWUeCCEn5XJA0ZMZ3pHE9u8k= +cloud.google.com/go/iam v0.13.0/go.mod h1:ljOg+rcNfzZ5d6f1nAUJ8ZIxOaZUVoS14bKCtaLZ/D0= cloud.google.com/go/kms v1.0.0/go.mod h1:nhUehi+w7zht2XrUfvTRNpxrfayBHqP4lu2NSywui/0= cloud.google.com/go/longrunning v0.4.1 h1:v+yFJOfKC3yZdY6ZUI933pIYdhyhV8S3NpWrXWmg7jM= cloud.google.com/go/profiler v0.3.0 h1:R6y/xAeifaUXxd2x6w+jIwKxoKl8Cv5HJvcvASTPWJo= @@ -66,8 +66,8 @@ cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohl cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= cloud.google.com/go/storage v1.14.0/go.mod h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3fOKtUw0Xmo= -cloud.google.com/go/storage v1.28.1 h1:F5QDG5ChchaAVQhINh24U99OWHURqrW8OmQcGKXcbgI= -cloud.google.com/go/storage v1.28.1/go.mod h1:Qnisd4CqDdo6BGs2AD5LLnEsmSQ80wQ5ogcBBKhU86Y= +cloud.google.com/go/storage v1.29.0 h1:6weCgzRvMg7lzuUurI4697AqIRPU1SvzHhynwpW31jI= +cloud.google.com/go/storage v1.29.0/go.mod h1:4puEjyTKnku6gfKoTfNOU/W+a9JyuVNxjpS5GBrB8h4= collectd.org v0.3.0/go.mod h1:A/8DzQBkF6abtvrT2j/AU/4tiBgJWYyh0y/oB/4MlWE= dmitri.shuralyov.com/app/changes v0.0.0-20180602232624-0a106ad413e3/go.mod h1:Yl+fi1br7+Rr3LqpNJf1/uxUdtRUV+Tnj0o93V2B9MU= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= @@ -136,8 +136,8 @@ github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be h1:9AeTilPcZAjCFI github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be/go.mod h1:ySMOLuWl6zY27l47sB3qLNK6tF2fkHG55UZxx8oIVo4= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= github.com/apache/arrow/go/arrow v0.0.0-20191024131854-af6fa24be0db/go.mod h1:VTxUBvSJ3s3eHAg65PNgrsn5BtqCRPdmyXh6rAfdxN0= -github.com/apache/arrow/go/v10 v10.0.1 h1:n9dERvixoC/1JjDmBcs9FPaEryoANa2sCgVFo6ez9cI= -github.com/apache/arrow/go/v10 v10.0.1/go.mod h1:YvhnlEePVnBS4+0z3fhPfUy7W1Ikj0Ih0vcRo/gZ1M0= +github.com/apache/arrow/go/v11 v11.0.0 h1:hqauxvFQxww+0mEU/2XHG6LT7eZternCZq+A5Yly2uM= +github.com/apache/arrow/go/v11 v11.0.0/go.mod h1:Eg5OsL5H+e299f7u5ssuXsuHQVEGC4xei5aX110hRiI= github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= github.com/apache/thrift v0.13.0/go.mod 
h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= github.com/apache/thrift v0.16.0 h1:qEy6UW60iVOlUy+b9ZR0d5WzUWYGOo4HfopoyBaNmoY= @@ -1354,29 +1354,27 @@ github.com/onflow/atree v0.1.0-beta1.0.20211027184039-559ee654ece9/go.mod h1:+6x github.com/onflow/atree v0.6.0 h1:j7nQ2r8npznx4NX39zPpBYHmdy45f4xwoi+dm37Jk7c= github.com/onflow/atree v0.6.0/go.mod h1:gBHU0M05qCbv9NN0kijLWMgC47gHVNBIp4KmsVFi0tc= github.com/onflow/cadence v0.20.1/go.mod h1:7mzUvPZUIJztIbr9eTvs+fQjWWHTF8veC+yk4ihcNIA= -github.com/onflow/cadence v0.39.12 h1:bb3UdOe7nClUcaLbxSWGLSIJKuCrivpgxhPow99ikv0= -github.com/onflow/cadence v0.39.12/go.mod h1:OIJLyVBPa339DCBQXBfGaorT4tBjQh9gSKe+ZAIyyh0= -github.com/onflow/flow-core-contracts/lib/go/contracts v1.2.3 h1:wV+gcgOY0oJK4HLZQYQoK+mm09rW1XSxf83yqJwj0n4= -github.com/onflow/flow-core-contracts/lib/go/contracts v1.2.3/go.mod h1:Osvy81E/+tscQM+d3kRFjktcIcZj2bmQ9ESqRQWDEx8= +github.com/onflow/cadence v0.39.14 h1:YoR3YFUga49rqzVY1xwI6I2ZDBmvwGh13jENncsleC8= +github.com/onflow/cadence v0.39.14/go.mod h1:OIJLyVBPa339DCBQXBfGaorT4tBjQh9gSKe+ZAIyyh0= +github.com/onflow/flow-core-contracts/lib/go/contracts v1.2.4-0.20230703193002-53362441b57d h1:B7PdhdUNkve5MVrekWDuQf84XsGBxNZ/D3x+QQ8XeVs= +github.com/onflow/flow-core-contracts/lib/go/contracts v1.2.4-0.20230703193002-53362441b57d/go.mod h1:xAiV/7TKhw863r6iO3CS5RnQ4F+pBY1TxD272BsILlo= github.com/onflow/flow-core-contracts/lib/go/templates v1.2.3 h1:X25A1dNajNUtE+KoV76wQ6BR6qI7G65vuuRXxDDqX7E= github.com/onflow/flow-core-contracts/lib/go/templates v1.2.3/go.mod h1:dqAUVWwg+NlOhsuBHex7bEWmsUjsiExzhe/+t4xNH6A= -github.com/onflow/flow-emulator v0.50.6 h1:grt62kVQC5Jsma7bY8k65ts7BZ6+E0XBXroRA75RAPA= -github.com/onflow/flow-emulator v0.50.6/go.mod h1:0avs83tvFDt8vyMcm4AYOcHDSRJHY5eX/XXQW2F4jEg= +github.com/onflow/flow-emulator v0.53.0 h1:VIMljBL77VnO+CeeJX1N5GVmF245XwZrFGv63dLPQGk= +github.com/onflow/flow-emulator v0.53.0/go.mod h1:o7O+b3fQYs26vJ+4SeMY/T9kA1rT09tFxQccTFyM5b4= github.com/onflow/flow-ft/lib/go/contracts v0.7.0 h1:XEKE6qJUw3luhsYmIOteXP53gtxNxrwTohgxJXCYqBE= github.com/onflow/flow-ft/lib/go/contracts v0.7.0/go.mod h1:kTMFIySzEJJeupk+7EmXs0EJ6CBWY/MV9fv9iYQk+RU= github.com/onflow/flow-go-sdk v0.24.0/go.mod h1:IoptMLPyFXWvyd9yYA6/4EmSeeozl6nJoIv4FaEMg74= -github.com/onflow/flow-go-sdk v0.41.6 h1:x5HhmRDvbCWXRCzHITJxOp0Komq5JJ9zphoR2u6NOCg= -github.com/onflow/flow-go-sdk v0.41.6/go.mod h1:AYypQvn6ecMONhF3M1vBOUX9b4oHKFWkkrw8bO4VEik= +github.com/onflow/flow-go-sdk v0.41.9 h1:cyplhhhc0RnfOAan2t7I/7C9g1hVGDDLUhWj6ZHAkk4= +github.com/onflow/flow-go-sdk v0.41.9/go.mod h1:e9Q5TITCy7g08lkdQJxP8fAKBnBoC5FjALvUKr36j4I= github.com/onflow/flow-go/crypto v0.21.3/go.mod h1:vI6V4CY3R6c4JKBxdcRiR/AnjBfL8OSD97bJc60cLuQ= -github.com/onflow/flow-go/crypto v0.24.7 h1:RCLuB83At4z5wkAyUCF7MYEnPoIIOHghJaODuJyEoW0= -github.com/onflow/flow-go/crypto v0.24.7/go.mod h1:fqCzkIBBMRRkciVrvW21rECKq1oD7Q6u+bCI78lfNX0= -github.com/onflow/flow-nft/lib/go/contracts v0.0.0-20220727161549-d59b1e547ac4 h1:5AnM9jIwkyHaY6+C3cWnt07oTOYctmwxvpiL25HRJws= -github.com/onflow/flow-nft/lib/go/contracts v0.0.0-20220727161549-d59b1e547ac4/go.mod h1:YsvzYng4htDgRB9sa9jxdwoTuuhjK8WYWXTyLkIigZY= +github.com/onflow/flow-go/crypto v0.24.9 h1:0EQp+kSZYJepMIiSypfJVe7tzsPcb6UXOdOtsTCDhBs= +github.com/onflow/flow-go/crypto v0.24.9/go.mod h1:fqCzkIBBMRRkciVrvW21rECKq1oD7Q6u+bCI78lfNX0= +github.com/onflow/flow-nft/lib/go/contracts v1.1.0 h1:rhUDeD27jhLwOqQKI/23008CYfnqXErrJvc4EFRP2a0= +github.com/onflow/flow-nft/lib/go/contracts v1.1.0/go.mod h1:YsvzYng4htDgRB9sa9jxdwoTuuhjK8WYWXTyLkIigZY= 
github.com/onflow/flow/protobuf/go/flow v0.2.2/go.mod h1:gQxYqCfkI8lpnKsmIjwtN2mV/N2PIwc1I+RUK4HPIc8= -github.com/onflow/flow/protobuf/go/flow v0.3.2-0.20230602212908-08fc6536d391 h1:6uKg0gpLKpTZKMihrsFR0Gkq++1hykzfR1tQCKuOfw4= -github.com/onflow/flow/protobuf/go/flow v0.3.2-0.20230602212908-08fc6536d391/go.mod h1:NA2pX2nw8zuaxfKphhKsk00kWLwfd+tv8mS23YXO4Sk= -github.com/onflow/fusd/lib/go/contracts v0.0.0-20211021081023-ae9de8fb2c7e h1:RHaXPHvWCy3VM62+HTyu6DYq5T8rrK1gxxqogKuJ4S4= -github.com/onflow/fusd/lib/go/contracts v0.0.0-20211021081023-ae9de8fb2c7e/go.mod h1:CRX9eXtc9zHaRVTW1Xh4Cf5pZgKkQuu1NuSEVyHXr/0= +github.com/onflow/flow/protobuf/go/flow v0.3.2-0.20230628215638-83439d22e0ce h1:YQKijiQaq8SF1ayNqp3VVcwbBGXSnuHNHq4GQmVGybE= +github.com/onflow/flow/protobuf/go/flow v0.3.2-0.20230628215638-83439d22e0ce/go.mod h1:NA2pX2nw8zuaxfKphhKsk00kWLwfd+tv8mS23YXO4Sk= github.com/onflow/go-bitswap v0.0.0-20230703214630-6d3db958c73d h1:QcOAeEyF3iAUHv21LQ12sdcsr0yFrJGoGLyCAzYYtvI= github.com/onflow/go-bitswap v0.0.0-20230703214630-6d3db958c73d/go.mod h1:GCPpiyRoHncdqPj++zPr9ZOYBX4hpJ0pYZRYqSE8VKk= github.com/onflow/nft-storefront/lib/go/contracts v0.0.0-20221222181731-14b90207cead h1:2j1Unqs76Z1b95Gu4C3Y28hzNUHBix7wL490e61SMSw= @@ -1995,8 +1993,8 @@ golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.6.0 h1:Lh8GPgSKBfWSwFvtuWOfeI3aAAnbXTSutYxJiOJFgIw= -golang.org/x/oauth2 v0.6.0/go.mod h1:ycmewcwgD4Rpr3eZJLSB4Kyyljb3qDh40vJ8STE5HKw= +golang.org/x/oauth2 v0.7.0 h1:qe6s0zUXlPX80/dITx3440hWZ7GwMwgDDyrSGTPJG/g= +golang.org/x/oauth2 v0.7.0/go.mod h1:hPLQkd9LyjfXTiRohC/41GhcFqxisoUQ99sCUOHO9x4= golang.org/x/perf v0.0.0-20180704124530-6e6d33e29852/go.mod h1:JLpeXjPJfIyPr5TlbXLkXWLhP8nz10XfvxElABhCtcw= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -2367,8 +2365,8 @@ google.golang.org/genproto v0.0.0-20210921142501-181ce0d877f6/go.mod h1:5CzLGKJ6 google.golang.org/genproto v0.0.0-20210924002016-3dee208752a0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/genproto v0.0.0-20211007155348-82e027067bd4/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4 h1:DdoeryqhaXp1LtT/emMP1BRJPHHKFi5akj/nbx/zNTA= -google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4/go.mod h1:NWraEVixdDnqcqQ30jipen1STv2r/n24Wb7twVTGR4s= +google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 h1:KpwkzHKEF7B9Zxg18WzOa7djJ+Ha5DzthMyZYQfEn2A= +google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1/go.mod h1:nKE/iIaLqn2bQwXBg8f1g2Ylh6r5MN5CmZvuzZCgsCU= google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.16.0/go.mod h1:0JHn/cJsOMiMfNA9+DeHDlAU7KAAB5GDlYFpa9MZMio= google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= @@ -2403,8 +2401,8 @@ google.golang.org/grpc v1.39.0/go.mod 
h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnD google.golang.org/grpc v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= -google.golang.org/grpc v1.55.0 h1:3Oj82/tFSCeUrRTg/5E/7d/W5A1tj6Ky1ABAuZuv5ag= -google.golang.org/grpc v1.55.0/go.mod h1:iYEXKGkEBhg1PjZQvoYEVPTDkHo1/bjTnfwTeGONTY8= +google.golang.org/grpc v1.56.1 h1:z0dNfjIl0VpaZ9iSVjA6daGatAYwPGstTjt5vkRMFkQ= +google.golang.org/grpc v1.56.1/go.mod h1:I9bI3vqKfayGqPUAwGdOSu7kt6oIJLixfffKrpXqQ9s= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.2.0 h1:TLkBREm4nIsEcexnCjgQd5GQWaHcqMzwQV0TX9pq8S0= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.2.0/go.mod h1:DNq5QpG7LJqD2AamLZ7zvKE0DEpVl2BSEVjFycAAjRY= diff --git a/integration/localnet/builder/bootstrap.go b/integration/localnet/builder/bootstrap.go index 0bd709b9203..dc54dba8cdb 100644 --- a/integration/localnet/builder/bootstrap.go +++ b/integration/localnet/builder/bootstrap.go @@ -453,12 +453,14 @@ func prepareObserverService(i int, observerName string, agPublicKey string) Serv fmt.Sprintf("--rpc-addr=%s:%s", observerName, testnet.GRPCPort), fmt.Sprintf("--secure-rpc-addr=%s:%s", observerName, testnet.GRPCSecurePort), fmt.Sprintf("--http-addr=%s:%s", observerName, testnet.GRPCWebPort), + fmt.Sprintf("--rest-addr=%s:%s", observerName, testnet.RESTPort), ) service.AddExposedPorts( testnet.GRPCPort, testnet.GRPCSecurePort, testnet.GRPCWebPort, + testnet.RESTPort, ) // observer services rely on the access gateway diff --git a/integration/localnet/client/Dockerfile b/integration/localnet/client/Dockerfile index ac1fbb8d8e7..4908e287624 100644 --- a/integration/localnet/client/Dockerfile +++ b/integration/localnet/client/Dockerfile @@ -1,13 +1,13 @@ -FROM golang:1.17 +FROM golang:1.20 COPY flow-localnet.json /go WORKDIR /go -RUN curl -L https://github.com/onflow/flow-cli/archive/refs/tags/v0.36.2.tar.gz | tar -xzv -RUN cd flow-cli-0.36.2 && go mod download -RUN cd flow-cli-0.36.2 && make -RUN /go/flow-cli-0.36.2/cmd/flow/flow version -RUN cp /go/flow-cli-0.36.2/cmd/flow/flow /go/flow +RUN curl -L https://github.com/onflow/flow-cli/archive/refs/tags/v1.3.3.tar.gz | tar -xzv +RUN cd flow-cli-1.3.3 && go mod download +RUN cd flow-cli-1.3.3 && make +RUN /go/flow-cli-1.3.3/cmd/flow/flow version +RUN cp /go/flow-cli-1.3.3/cmd/flow/flow /go/flow CMD /go/flow -f /go/flow-localnet.json -n observer blocks get latest diff --git a/integration/testnet/network.go b/integration/testnet/network.go index 9f060dd0532..8b9522d7ba6 100644 --- a/integration/testnet/network.go +++ b/integration/testnet/network.go @@ -768,6 +768,9 @@ func (net *FlowNetwork) addObserver(t *testing.T, conf ObserverConfig) { nodeContainer.exposePort(AdminPort, testingdock.RandomPort(t)) nodeContainer.AddFlag("admin-addr", nodeContainer.ContainerAddr(AdminPort)) + nodeContainer.exposePort(RESTPort, testingdock.RandomPort(t)) + nodeContainer.AddFlag("rest-addr", nodeContainer.ContainerAddr(RESTPort)) + nodeContainer.opts.HealthCheck = testingdock.HealthCheckCustom(nodeContainer.HealthcheckCallback()) suiteContainer := net.suite.Container(containerOpts) diff --git a/integration/tests/access/observer_test.go b/integration/tests/access/observer_test.go index 29b96da49e6..25bfeab2f3a 100644 --- a/integration/tests/access/observer_test.go +++ 
b/integration/tests/access/observer_test.go @@ -1,22 +1,29 @@ package access import ( + "bytes" "context" + "encoding/json" + "fmt" + "net/http" "testing" - "github.com/rs/zerolog" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - "github.com/stretchr/testify/suite" "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/status" - accessproto "github.com/onflow/flow/protobuf/go/flow/access" + "github.com/rs/zerolog" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" + "github.com/onflow/flow-go/engine/access/rest/util" "github.com/onflow/flow-go/integration/testnet" "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/utils/unittest" + + accessproto "github.com/onflow/flow/protobuf/go/flow/access" ) func TestObserver(t *testing.T) { @@ -25,9 +32,10 @@ func TestObserver(t *testing.T) { type ObserverSuite struct { suite.Suite - net *testnet.FlowNetwork - teardown func() - local map[string]struct{} + net *testnet.FlowNetwork + teardown func() + localRpc map[string]struct{} + localRest map[string]struct{} cancel context.CancelFunc } @@ -44,7 +52,7 @@ func (s *ObserverSuite) TearDownTest() { } func (s *ObserverSuite) SetupTest() { - s.local = map[string]struct{}{ + s.localRpc = map[string]struct{}{ "Ping": {}, "GetLatestBlockHeader": {}, "GetBlockHeaderByID": {}, @@ -56,18 +64,26 @@ func (s *ObserverSuite) SetupTest() { "GetNetworkParameters": {}, } + s.localRest = map[string]struct{}{ + "getBlocksByIDs": {}, + "getBlocksByHeight": {}, + "getBlockPayloadByID": {}, + "getNetworkParameters": {}, + "getNodeVersionInfo": {}, + } + nodeConfigs := []testnet.NodeConfig{ // access node with unstaked nodes supported testnet.NewNodeConfig(flow.RoleAccess, testnet.WithLogLevel(zerolog.InfoLevel), testnet.WithAdditionalFlag("--supports-observer=true")), - // need one dummy execution node (unused ghost) - testnet.NewNodeConfig(flow.RoleExecution, testnet.WithLogLevel(zerolog.FatalLevel), testnet.AsGhost()), + // need one dummy execution node + testnet.NewNodeConfig(flow.RoleExecution, testnet.WithLogLevel(zerolog.FatalLevel)), // need one dummy verification node (unused ghost) testnet.NewNodeConfig(flow.RoleVerification, testnet.WithLogLevel(zerolog.FatalLevel), testnet.AsGhost()), - // need one controllable collection node (unused ghost) - testnet.NewNodeConfig(flow.RoleCollection, testnet.WithLogLevel(zerolog.FatalLevel), testnet.AsGhost()), + // need one controllable collection node + testnet.NewNodeConfig(flow.RoleCollection, testnet.WithLogLevel(zerolog.FatalLevel)), // need three consensus nodes (unused ghost) testnet.NewNodeConfig(flow.RoleConsensus, testnet.WithLogLevel(zerolog.FatalLevel), testnet.AsGhost()), @@ -90,11 +106,11 @@ func (s *ObserverSuite) SetupTest() { s.net.Start(ctx) } -// TestObserver runs the following tests: +// TestObserverRPC runs the following tests: // 1. CompareRPCs: verifies that the observer client returns the same errors as the access client for rpcs proxied to the upstream AN // 2. HandledByUpstream: stops the upstream AN and verifies that the observer client returns errors for all rpcs handled by the upstream // 3. 
HandledByObserver: stops the upstream AN and verifies that the observer client handles all other queries -func (s *ObserverSuite) TestObserver() { +func (s *ObserverSuite) TestObserverRPC() { ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -111,7 +127,7 @@ func (s *ObserverSuite) TestObserver() { // verify that both clients return the same errors for proxied rpcs for _, rpc := range s.getRPCs() { // skip rpcs handled locally by observer - if _, local := s.local[rpc.name]; local { + if _, local := s.localRpc[rpc.name]; local { continue } t.Run(rpc.name, func(t *testing.T) { @@ -129,7 +145,7 @@ func (s *ObserverSuite) TestObserver() { t.Run("HandledByUpstream", func(t *testing.T) { // verify that we receive Unavailable errors from all rpcs handled upstream for _, rpc := range s.getRPCs() { - if _, local := s.local[rpc.name]; local { + if _, local := s.localRpc[rpc.name]; local { continue } t.Run(rpc.name, func(t *testing.T) { @@ -142,7 +158,7 @@ func (s *ObserverSuite) TestObserver() { t.Run("HandledByObserver", func(t *testing.T) { // verify that we receive NotFound or no error from all rpcs handled locally for _, rpc := range s.getRPCs() { - if _, local := s.local[rpc.name]; !local { + if _, local := s.localRpc[rpc.name]; !local { continue } t.Run(rpc.name, func(t *testing.T) { @@ -154,7 +170,90 @@ func (s *ObserverSuite) TestObserver() { }) } }) +} + +// TestObserverRest runs the following tests: +// 1. CompareEndpoints: verifies that the observer client returns the same errors as the access client for rests proxied to the upstream AN +// 2. HandledByUpstream: stops the upstream AN and verifies that the observer client returns errors for all rests handled by the upstream +// 3. HandledByObserver: stops the upstream AN and verifies that the observer client handles all other queries +func (s *ObserverSuite) TestObserverRest() { + t := s.T() + + accessAddr := s.net.ContainerByName(testnet.PrimaryAN).Addr(testnet.RESTPort) + observerAddr := s.net.ContainerByName("observer_1").Addr(testnet.RESTPort) + + httpClient := http.DefaultClient + makeHttpCall := func(method string, url string, body interface{}) (*http.Response, error) { + switch method { + case http.MethodGet: + return httpClient.Get(url) + case http.MethodPost: + jsonBody, _ := json.Marshal(body) + return httpClient.Post(url, "application/json", bytes.NewBuffer(jsonBody)) + } + panic("not supported") + } + makeObserverCall := func(method string, path string, body interface{}) (*http.Response, error) { + return makeHttpCall(method, "http://"+observerAddr+"/v1"+path, body) + } + makeAccessCall := func(method string, path string, body interface{}) (*http.Response, error) { + return makeHttpCall(method, "http://"+accessAddr+"/v1"+path, body) + } + + t.Run("CompareEndpoints", func(t *testing.T) { + // verify that both clients return the same errors for proxied rests + for _, endpoint := range s.getRestEndpoints() { + // skip rest handled locally by observer + if _, local := s.localRest[endpoint.name]; local { + continue + } + t.Run(endpoint.name, func(t *testing.T) { + accessResp, accessErr := makeAccessCall(endpoint.method, endpoint.path, endpoint.body) + observerResp, observerErr := makeObserverCall(endpoint.method, endpoint.path, endpoint.body) + assert.NoError(t, accessErr) + assert.NoError(t, observerErr) + assert.Equal(t, accessResp.Status, observerResp.Status) + assert.Equal(t, accessResp.StatusCode, observerResp.StatusCode) + assert.Contains(t, [...]int{ + http.StatusNotFound, + http.StatusOK, + }, 
observerResp.StatusCode) + }) + } + }) + + // stop the upstream access container + err := s.net.StopContainerByName(context.Background(), testnet.PrimaryAN) + require.NoError(t, err) + + t.Run("HandledByUpstream", func(t *testing.T) { + // verify that we receive StatusInternalServerError, StatusServiceUnavailable errors from all rests handled upstream + for _, endpoint := range s.getRestEndpoints() { + if _, local := s.localRest[endpoint.name]; local { + continue + } + t.Run(endpoint.name, func(t *testing.T) { + observerResp, observerErr := makeObserverCall(endpoint.method, endpoint.path, endpoint.body) + require.NoError(t, observerErr) + assert.Contains(t, [...]int{ + http.StatusServiceUnavailable}, observerResp.StatusCode) + }) + } + }) + t.Run("HandledByObserver", func(t *testing.T) { + // verify that we receive NotFound or no error from all rests handled locally + for _, endpoint := range s.getRestEndpoints() { + if _, local := s.localRest[endpoint.name]; !local { + continue + } + t.Run(endpoint.name, func(t *testing.T) { + observerResp, observerErr := makeObserverCall(endpoint.method, endpoint.path, endpoint.body) + require.NoError(t, observerErr) + assert.Contains(t, [...]int{http.StatusNotFound, http.StatusOK}, observerResp.StatusCode) + }) + } + }) } func (s *ObserverSuite) getAccessClient() (accessproto.AccessAPIClient, error) { @@ -287,3 +386,126 @@ func (s *ObserverSuite) getRPCs() []RPCTest { }}, } } + +type RestEndpointTest struct { + name string + method string + path string + body interface{} +} + +func (s *ObserverSuite) getRestEndpoints() []RestEndpointTest { + transactionId := unittest.IdentifierFixture().String() + account := flow.Localnet.Chain().ServiceAddress().String() + block := unittest.BlockFixture() + executionResult := unittest.ExecutionResultFixture() + collection := unittest.CollectionFixture(2) + eventType := "A.0123456789abcdef.flow.event" + + return []RestEndpointTest{ + { + name: "getTransactionByID", + method: http.MethodGet, + path: "/transactions/" + transactionId, + }, + { + name: "createTransaction", + method: http.MethodPost, + path: "/transactions", + body: createTx(s.net), + }, + { + name: "getTransactionResultByID", + method: http.MethodGet, + path: fmt.Sprintf("/transaction_results/%s?block_id=%s&collection_id=%s", transactionId, block.ID().String(), collection.ID().String()), + }, + { + name: "getBlocksByIDs", + method: http.MethodGet, + path: "/blocks/" + block.ID().String(), + }, + { + name: "getBlocksByHeight", + method: http.MethodGet, + path: "/blocks?height=1", + }, + { + name: "getBlockPayloadByID", + method: http.MethodGet, + path: "/blocks/" + block.ID().String() + "/payload", + }, + { + name: "getExecutionResultByID", + method: http.MethodGet, + path: "/execution_results/" + executionResult.ID().String(), + }, + { + name: "getExecutionResultByBlockID", + method: http.MethodGet, + path: "/execution_results?block_id=" + block.ID().String(), + }, + { + name: "getCollectionByID", + method: http.MethodGet, + path: "/collections/" + collection.ID().String(), + }, + { + name: "executeScript", + method: http.MethodPost, + path: "/scripts", + body: createScript(), + }, + { + name: "getAccount", + method: http.MethodGet, + path: "/accounts/" + account + "?block_height=1", + }, + { + name: "getEvents", + method: http.MethodGet, + path: fmt.Sprintf("/events?type=%s&start_height=%d&end_height=%d", eventType, 0, 3), + }, + { + name: "getNetworkParameters", + method: http.MethodGet, + path: "/network/parameters", + }, + { + name: 
"getNodeVersionInfo", + method: http.MethodGet, + path: "/node_version_info", + }, + } +} + +func createTx(net *testnet.FlowNetwork) interface{} { + flowAddr := flow.Localnet.Chain().ServiceAddress() + payloadSignature := unittest.TransactionSignatureFixture() + envelopeSignature := unittest.TransactionSignatureFixture() + + payloadSignature.Address = flowAddr + + envelopeSignature.Address = flowAddr + envelopeSignature.KeyIndex = 2 + + tx := flow.NewTransactionBody(). + AddAuthorizer(flowAddr). + SetPayer(flowAddr). + SetScript(unittest.NoopTxScript()). + SetReferenceBlockID(net.Root().ID()). + SetProposalKey(flowAddr, 1, 0) + tx.PayloadSignatures = []flow.TransactionSignature{payloadSignature} + tx.EnvelopeSignatures = []flow.TransactionSignature{envelopeSignature} + + return unittest.CreateSendTxHttpPayload(*tx) +} + +func createScript() interface{} { + validCode := []byte(`pub fun main(foo: String): String { return foo }`) + validArgs := []byte(`{ "type": "String", "value": "hello world" }`) + body := map[string]interface{}{ + "script": util.ToBase64(validCode), + "arguments": []string{util.ToBase64(validArgs)}, + } + return body +} diff --git a/integration/tests/consensus/inclusion_test.go b/integration/tests/consensus/inclusion_test.go index e36ef7dae8e..85ef3fd8046 100644 --- a/integration/tests/consensus/inclusion_test.go +++ b/integration/tests/consensus/inclusion_test.go @@ -43,8 +43,6 @@ func (is *InclusionSuite) SetupTest() { is.log = unittest.LoggerForTest(is.Suite.T(), zerolog.InfoLevel) is.log.Info().Msgf("================> SetupTest") - // seed random generator - // to collect node confiis... var nodeConfigs []testnet.NodeConfig diff --git a/ledger/common/testutils/testutils.go b/ledger/common/testutils/testutils.go index ab30000c47c..e0e100ee46c 100644 --- a/ledger/common/testutils/testutils.go +++ b/ledger/common/testutils/testutils.go @@ -206,7 +206,7 @@ func RandomValues(n int, minByteSize, maxByteSize int) []l.Value { byteSize = minByteSize + rand.Intn(maxByteSize-minByteSize) } value := make([]byte, byteSize) - _, err := rand.Read(value) + _, err := crand.Read(value) if err != nil { panic("random generation failed") } diff --git a/model/flow/address_test.go b/model/flow/address_test.go index b71a0d567ed..28e99efa315 100644 --- a/model/flow/address_test.go +++ b/model/flow/address_test.go @@ -5,7 +5,6 @@ import ( "math/bits" "math/rand" "testing" - "time" "github.com/onflow/cadence" "github.com/onflow/cadence/runtime/common" @@ -167,9 +166,6 @@ func testAddressConstants(t *testing.T) { const invalidCodeWord = uint64(0xab2ae42382900010) func testAddressGeneration(t *testing.T) { - // seed random generator - rand.Seed(time.Now().UnixNano()) - // loops in each test const loop = 50 @@ -260,9 +256,6 @@ func testAddressGeneration(t *testing.T) { } func testAddressesIntersection(t *testing.T) { - // seed random generator - rand.Seed(time.Now().UnixNano()) - // loops in each test const loop = 25 @@ -329,9 +322,6 @@ func testAddressesIntersection(t *testing.T) { } func testIndexFromAddress(t *testing.T) { - // seed random generator - rand.Seed(time.Now().UnixNano()) - // loops in each test const loop = 50 @@ -370,9 +360,6 @@ func testIndexFromAddress(t *testing.T) { } func TestUint48(t *testing.T) { - // seed random generator - rand.Seed(time.Now().UnixNano()) - const loop = 50 // test consistensy of putUint48 and uint48 for i := 0; i < loop; i++ { diff --git a/model/flow/identifier.go b/model/flow/identifier.go index 62ad2a64735..e205e74a716 100644 --- 
a/model/flow/identifier.go +++ b/model/flow/identifier.go @@ -6,7 +6,6 @@ import ( "encoding/binary" "encoding/hex" "fmt" - "math/rand" "reflect" "github.com/ipfs/go-cid" @@ -16,6 +15,7 @@ import ( "github.com/onflow/flow-go/crypto/hash" "github.com/onflow/flow-go/model/fingerprint" "github.com/onflow/flow-go/storage/merkle" + "github.com/onflow/flow-go/utils/rand" ) const IdentifierLen = 32 @@ -179,21 +179,24 @@ func CheckConcatSum(sum Identifier, fps ...Identifier) bool { return sum == computed } -// Sample returns random sample of length 'size' of the ids -// [Fisher-Yates shuffle](https://en.wikipedia.org/wiki/Fisher-Yates_shuffle). -func Sample(size uint, ids ...Identifier) []Identifier { +// Sample returns non-deterministic random sample of length 'size' of the ids +func Sample(size uint, ids ...Identifier) ([]Identifier, error) { n := uint(len(ids)) dup := make([]Identifier, 0, n) dup = append(dup, ids...) // if sample size is greater than total size, return all the elements if n <= size { - return dup + return dup, nil } - for i := uint(0); i < size; i++ { - j := uint(rand.Intn(int(n - i))) - dup[i], dup[j+i] = dup[j+i], dup[i] + swap := func(i, j uint) { + dup[i], dup[j] = dup[j], dup[i] } - return dup[:size] + + err := rand.Samples(n, size, swap) + if err != nil { + return nil, fmt.Errorf("generating randoms failed: %w", err) + } + return dup[:size], nil } func CidToId(c cid.Cid) (Identifier, error) { diff --git a/model/flow/identifierList.go b/model/flow/identifierList.go index ec77a04a98f..1cf3e0263a8 100644 --- a/model/flow/identifierList.go +++ b/model/flow/identifierList.go @@ -2,7 +2,6 @@ package flow import ( "bytes" - "math/rand" "sort" "github.com/rs/zerolog/log" @@ -103,15 +102,8 @@ func (il IdentifierList) Union(other IdentifierList) IdentifierList { return union } -// DeterministicSample returns deterministic random sample from the `IdentifierList` using the given seed -func (il IdentifierList) DeterministicSample(size uint, seed int64) IdentifierList { - rand.Seed(seed) - return il.Sample(size) -} - // Sample returns random sample of length 'size' of the ids -// [Fisher-Yates shuffle](https://en.wikipedia.org/wiki/Fisher–Yates_shuffle). -func (il IdentifierList) Sample(size uint) IdentifierList { +func (il IdentifierList) Sample(size uint) (IdentifierList, error) { return Sample(size, il...) } diff --git a/model/flow/identifierList_test.go b/model/flow/identifierList_test.go index b878938a5e3..7e18b6ee921 100644 --- a/model/flow/identifierList_test.go +++ b/model/flow/identifierList_test.go @@ -5,7 +5,6 @@ import ( "math/rand" "sort" "testing" - "time" "github.com/stretchr/testify/require" @@ -21,7 +20,7 @@ func TestIdentifierListSort(t *testing.T) { var ids flow.IdentifierList = unittest.IdentifierListFixture(count) // shuffles array before sorting to enforce some pseudo-randomness - rand.Seed(time.Now().UnixNano()) + rand.Shuffle(ids.Len(), ids.Swap) sort.Sort(ids) diff --git a/model/flow/identifier_test.go b/model/flow/identifier_test.go index a4362e95f37..7ac5dd3df89 100644 --- a/model/flow/identifier_test.go +++ b/model/flow/identifier_test.go @@ -1,10 +1,10 @@ package flow_test import ( + "crypto/rand" "encoding/binary" "encoding/json" "fmt" - "math/rand" "testing" blocks "github.com/ipfs/go-block-format" @@ -66,20 +66,23 @@ func TestIdentifierSample(t *testing.T) { t.Run("Sample creates a random sample", func(t *testing.T) { sampleSize := uint(5) - sample := flow.Sample(sampleSize, ids...) + sample, err := flow.Sample(sampleSize, ids...) 
+ require.NoError(t, err) require.Len(t, sample, int(sampleSize)) require.NotEqual(t, sample, ids[:sampleSize]) }) t.Run("sample size greater than total size results in the original list", func(t *testing.T) { sampleSize := uint(len(ids) + 1) - sample := flow.Sample(sampleSize, ids...) + sample, err := flow.Sample(sampleSize, ids...) + require.NoError(t, err) require.Equal(t, sample, ids) }) t.Run("sample size of zero results in an empty list", func(t *testing.T) { sampleSize := uint(0) - sample := flow.Sample(sampleSize, ids...) + sample, err := flow.Sample(sampleSize, ids...) + require.NoError(t, err) require.Empty(t, sample) }) } @@ -131,7 +134,8 @@ func TestCIDConversion(t *testing.T) { // generate random CID data := make([]byte, 4) - rand.Read(data) + _, err = rand.Read(data) + require.NoError(t, err) cid = blocks.NewBlock(data).Cid() id, err = flow.CidToId(cid) diff --git a/model/flow/identity.go b/model/flow/identity.go index f05188988e6..c44c394cb06 100644 --- a/model/flow/identity.go +++ b/model/flow/identity.go @@ -6,7 +6,6 @@ import ( "fmt" "io" "math" - "math/rand" "regexp" "strconv" @@ -18,6 +17,7 @@ import ( "github.com/vmihailenco/msgpack" "github.com/onflow/flow-go/crypto" + "github.com/onflow/flow-go/utils/rand" ) // DefaultInitialWeight is the default initial weight for a node identity. @@ -461,40 +461,28 @@ func (il IdentityList) ByNetworkingKey(key crypto.PublicKey) (*Identity, bool) { return nil, false } -// Sample returns simple random sample from the `IdentityList` -func (il IdentityList) Sample(size uint) IdentityList { - return il.sample(size, rand.Intn) -} - -// DeterministicSample returns deterministic random sample from the `IdentityList` using the given seed -func (il IdentityList) DeterministicSample(size uint, seed int64) IdentityList { - rng := rand.New(rand.NewSource(seed)) - return il.sample(size, rng.Intn) -} - -func (il IdentityList) sample(size uint, intn func(int) int) IdentityList { +// Sample returns non-deterministic random sample from the `IdentityList` +func (il IdentityList) Sample(size uint) (IdentityList, error) { n := uint(len(il)) - if size > n { + dup := make([]*Identity, 0, n) + dup = append(dup, il...) + if n < size { size = n } - - dup := il.Copy() - for i := uint(0); i < size; i++ { - j := uint(intn(int(n - i))) - dup[i], dup[j+i] = dup[j+i], dup[i] + swap := func(i, j uint) { + dup[i], dup[j] = dup[j], dup[i] } - return dup[:size] + err := rand.Samples(n, size, swap) + if err != nil { + return nil, fmt.Errorf("failed to sample identity list: %w", err) + } + return dup[:size], nil } -// DeterministicShuffle randomly and deterministically shuffles the identity -// list, returning the shuffled list without modifying the receiver. -func (il IdentityList) DeterministicShuffle(seed int64) IdentityList { - dup := il.Copy() - rng := rand.New(rand.NewSource(seed)) - rng.Shuffle(len(il), func(i, j int) { - dup[i], dup[j] = dup[j], dup[i] - }) - return dup +// Shuffle randomly shuffles the identity list (non-deterministic), +// and returns the shuffled list without modifying the receiver. +func (il IdentityList) Shuffle() (IdentityList, error) { + return il.Sample(uint(len(il))) } // SamplePct returns a random sample from the receiver identity list. The @@ -502,9 +490,9 @@ func (il IdentityList) DeterministicShuffle(seed int64) IdentityList { // if `pct>0`, so this will always select at least one identity. // // NOTE: The input must be between 0-1. 
-func (il IdentityList) SamplePct(pct float64) IdentityList { +func (il IdentityList) SamplePct(pct float64) (IdentityList, error) { if pct <= 0 { - return IdentityList{} + return IdentityList{}, nil } count := float64(il.Count()) * pct diff --git a/model/flow/identity_test.go b/model/flow/identity_test.go index 9c1a137d8ab..891a854aca6 100644 --- a/model/flow/identity_test.go +++ b/model/flow/identity_test.go @@ -2,10 +2,8 @@ package flow_test import ( "encoding/json" - "math/rand" "strings" "testing" - "time" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -198,28 +196,35 @@ func TestIdentityList_Union(t *testing.T) { func TestSample(t *testing.T) { t.Run("Sample max", func(t *testing.T) { il := unittest.IdentityListFixture(10) - require.Equal(t, uint(10), il.Sample(10).Count()) + sam, err := il.Sample(10) + require.NoError(t, err) + require.Equal(t, uint(10), sam.Count()) }) t.Run("Sample oversized", func(t *testing.T) { il := unittest.IdentityListFixture(10) - require.Equal(t, uint(10), il.Sample(11).Count()) + sam, err := il.Sample(11) + require.NoError(t, err) + require.Equal(t, uint(10), sam.Count()) }) } func TestShuffle(t *testing.T) { t.Run("should be shuffled", func(t *testing.T) { il := unittest.IdentityListFixture(15) // ~1/billion chance of shuffling to input state - shuffled := il.DeterministicShuffle(rand.Int63()) + shuffled, err := il.Shuffle() + require.NoError(t, err) assert.Equal(t, len(il), len(shuffled)) assert.ElementsMatch(t, il, shuffled) }) - t.Run("should be deterministic", func(t *testing.T) { + t.Run("should not be deterministic", func(t *testing.T) { il := unittest.IdentityListFixture(10) - seed := rand.Int63() - shuffled1 := il.DeterministicShuffle(seed) - shuffled2 := il.DeterministicShuffle(seed) - assert.Equal(t, shuffled1, shuffled2) + shuffled1, err := il.Shuffle() + require.NoError(t, err) + shuffled2, err := il.Shuffle() + require.NoError(t, err) + assert.NotEqual(t, shuffled1, shuffled2) + assert.ElementsMatch(t, shuffled1, shuffled2) }) } @@ -238,7 +243,8 @@ func TestIdentity_ID(t *testing.T) { func TestIdentity_Sort(t *testing.T) { il := unittest.IdentityListFixture(20) - random := il.DeterministicShuffle(time.Now().UnixNano()) + random, err := il.Shuffle() + require.NoError(t, err) assert.False(t, random.Sorted(order.Canonical)) canonical := il.Sort(order.Canonical) diff --git a/model/verification/chunkDataPackRequest.go b/model/verification/chunkDataPackRequest.go index 0c0cd4cd92a..9f2bf42c52c 100644 --- a/model/verification/chunkDataPackRequest.go +++ b/model/verification/chunkDataPackRequest.go @@ -1,6 +1,8 @@ package verification import ( + "fmt" + "github.com/onflow/flow-go/model/chunks" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/model/flow/filter" @@ -23,10 +25,14 @@ type ChunkDataPackRequestInfo struct { // SampleTargets returns identifier of execution nodes that can be asked for the chunk data pack, based on // the agreeing and disagreeing execution nodes of the chunk data pack request. -func (c ChunkDataPackRequestInfo) SampleTargets(count int) flow.IdentifierList { +func (c ChunkDataPackRequestInfo) SampleTargets(count int) (flow.IdentifierList, error) { // if there are enough receipts produced the same result (agrees), we sample from them. 
if len(c.Agrees) >= count { - return c.Targets.Filter(filter.HasNodeID(c.Agrees...)).Sample(uint(count)).NodeIDs() + sample, err := c.Targets.Filter(filter.HasNodeID(c.Agrees...)).Sample(uint(count)) + if err != nil { + return nil, fmt.Errorf("sampling target failed: %w", err) + } + return sample.NodeIDs(), nil } // since there is at least one agree, then usually, we just need `count - 1` extra nodes as backup. @@ -35,8 +41,11 @@ func (c ChunkDataPackRequestInfo) SampleTargets(count int) flow.IdentifierList { // fetch from the one produced the same result (the only agree) need := uint(count - len(c.Agrees)) - nonResponders := c.Targets.Filter(filter.Not(filter.HasNodeID(c.Disagrees...))).Sample(need).NodeIDs() - return append(c.Agrees, nonResponders...) + nonResponders, err := c.Targets.Filter(filter.Not(filter.HasNodeID(c.Disagrees...))).Sample(need) + if err != nil { + return nil, fmt.Errorf("sampling target failed: %w", err) + } + return append(c.Agrees, nonResponders.NodeIDs()...), nil } type ChunkDataPackRequestInfoList []*ChunkDataPackRequestInfo diff --git a/model/verification/verifiableChunkData.go b/model/verification/verifiableChunkData.go index 298beece37f..2f6f1e22579 100644 --- a/model/verification/verifiableChunkData.go +++ b/model/verification/verifiableChunkData.go @@ -2,6 +2,7 @@ package verification import ( "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/state/protocol" ) // VerifiableChunkData represents a ready-to-verify chunk @@ -10,6 +11,7 @@ type VerifiableChunkData struct { IsSystemChunk bool // indicates whether this is a system chunk Chunk *flow.Chunk // the chunk to be verified Header *flow.Header // BlockHeader that contains this chunk + Snapshot protocol.Snapshot // state snapshot at the chunk's block Result *flow.ExecutionResult // execution result of this block ChunkDataPack *flow.ChunkDataPack // chunk data package needed to verify this chunk EndState flow.StateCommitment // state commitment at the end of this chunk diff --git a/module/builder/collection/builder_test.go b/module/builder/collection/builder_test.go index 829a6c007e3..9641b7c934a 100644 --- a/module/builder/collection/builder_test.go +++ b/module/builder/collection/builder_test.go @@ -5,7 +5,6 @@ import ( "math/rand" "os" "testing" - "time" "github.com/dgraph-io/badger/v2" "github.com/rs/zerolog" @@ -63,9 +62,6 @@ type BuilderSuite struct { func (suite *BuilderSuite) SetupTest() { var err error - // seed the RNG - rand.Seed(time.Now().UnixNano()) - suite.genesis = model.Genesis() suite.chainID = suite.genesis.Header.ChainID diff --git a/module/chunks/chunkVerifier.go b/module/chunks/chunkVerifier.go index 11b3a2d6c2b..fb1b81fbf98 100644 --- a/module/chunks/chunkVerifier.go +++ b/module/chunks/chunkVerifier.go @@ -57,7 +57,15 @@ func (fcv *ChunkVerifier) Verify( if vc.IsSystemChunk { ctx = fvm.NewContextFromParent( fcv.systemChunkCtx, - fvm.WithBlockHeader(vc.Header)) + fvm.WithBlockHeader(vc.Header), + // `protocol.Snapshot` implements `EntropyProvider` interface + // Note that `Snapshot` possible errors for RandomSource() are: + // - storage.ErrNotFound if the QC is unknown. + // - state.ErrUnknownSnapshotReference if the snapshot reference block is unknown + // However, at this stage, snapshot reference block should be known and the QC should also be known, + // so no error is expected in normal operations, as required by `EntropyProvider`. 
+ fvm.WithEntropyProvider(vc.Snapshot), + ) txBody, err := blueprints.SystemChunkTransaction(fcv.vmCtx.Chain) if err != nil { @@ -70,7 +78,15 @@ func (fcv *ChunkVerifier) Verify( } else { ctx = fvm.NewContextFromParent( fcv.vmCtx, - fvm.WithBlockHeader(vc.Header)) + fvm.WithBlockHeader(vc.Header), + // `protocol.Snapshot` implements `EntropyProvider` interface + // Note that `Snapshot` possible errors for RandomSource() are: + // - storage.ErrNotFound if the QC is unknown. + // - state.ErrUnknownSnapshotReference if the snapshot reference block is unknown + // However, at this stage, snapshot reference block should be known and the QC should also be known, + // so no error is expected in normal operations, as required by `EntropyProvider`. + fvm.WithEntropyProvider(vc.Snapshot), + ) transactions = make( []*fvm.TransactionProcedure, diff --git a/module/chunks/chunkVerifier_test.go b/module/chunks/chunkVerifier_test.go index 5f049e21b4e..ba01a33c49b 100644 --- a/module/chunks/chunkVerifier_test.go +++ b/module/chunks/chunkVerifier_test.go @@ -2,9 +2,7 @@ package chunks_test import ( "fmt" - "math/rand" "testing" - "time" "github.com/onflow/cadence/runtime" "github.com/rs/zerolog" @@ -68,9 +66,6 @@ type ChunkVerifierTestSuite struct { // Make sure variables are set properly // SetupTest is executed prior to each individual test in this test suite func (s *ChunkVerifierTestSuite) SetupSuite() { - // seed the RNG - rand.Seed(time.Now().UnixNano()) - vm := new(vmMock) systemOkVm := new(vmSystemOkMock) systemBadVm := new(vmSystemBadMock) diff --git a/module/chunks/chunk_assigner_test.go b/module/chunks/chunk_assigner_test.go index 21586afad89..ea6907c2e70 100644 --- a/module/chunks/chunk_assigner_test.go +++ b/module/chunks/chunk_assigner_test.go @@ -1,7 +1,7 @@ package chunks import ( - "math/rand" + "crypto/rand" "testing" "github.com/stretchr/testify/mock" @@ -10,7 +10,7 @@ import ( chmodels "github.com/onflow/flow-go/model/chunks" "github.com/onflow/flow-go/model/flow" - protocolMock "github.com/onflow/flow-go/state/protocol/mock" + protocol "github.com/onflow/flow-go/state/protocol/mock" "github.com/onflow/flow-go/state/protocol/prg" "github.com/onflow/flow-go/utils/unittest" ) @@ -21,7 +21,7 @@ type PublicAssignmentTestSuite struct { } // Setup test with n verification nodes -func (a *PublicAssignmentTestSuite) SetupTest(n int) (*flow.Header, *protocolMock.Snapshot, *protocolMock.State) { +func (a *PublicAssignmentTestSuite) SetupTest(n int) (*flow.Header, *protocol.Snapshot, *protocol.State) { nodes := make([]flow.Role, 0) for i := 1; i < n; i++ { nodes = append(nodes, flow.RoleVerification) diff --git a/module/dkg/controller.go b/module/dkg/controller.go index 5c9adf4994a..ae4b54ecb38 100644 --- a/module/dkg/controller.go +++ b/module/dkg/controller.go @@ -3,7 +3,6 @@ package dkg import ( "fmt" "math" - "math/rand" "sync" "time" @@ -12,6 +11,7 @@ import ( "github.com/onflow/flow-go/crypto" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/module" + "github.com/onflow/flow-go/utils/rand" ) const ( @@ -304,7 +304,10 @@ func (c *Controller) doBackgroundWork() { isFirstMessage := false c.once.Do(func() { isFirstMessage = true - delay := c.preHandleFirstBroadcastDelay() + delay, err := c.preHandleFirstBroadcastDelay() + if err != nil { + c.log.Err(err).Msg("pre handle first broadcast delay failed") + } c.log.Info().Msgf("sleeping for %s before processing first phase 1 broadcast message", delay) time.Sleep(delay) }) @@ -337,12 +340,15 @@ func (c *Controller) start() error { // 
before starting the DKG, sleep for a random delay to avoid synchronizing // this expensive operation across all consensus nodes - delay := c.preStartDelay() + delay, err := c.preStartDelay() + if err != nil { + return fmt.Errorf("pre start delay failed: %w", err) + } c.log.Debug().Msgf("sleeping for %s before starting DKG", delay) time.Sleep(delay) c.dkgLock.Lock() - err := c.dkg.Start(c.seed) + err = c.dkg.Start(c.seed) c.dkgLock.Unlock() if err != nil { return fmt.Errorf("Error starting DKG: %w", err) @@ -421,18 +427,16 @@ func (c *Controller) phase3() error { // preStartDelay returns a duration to delay prior to starting the DKG process. // This prevents synchronization of the DKG starting (an expensive operation) // across the network, which can impact finalization. -func (c *Controller) preStartDelay() time.Duration { - delay := computePreprocessingDelay(c.config.BaseStartDelay, c.dkg.Size()) - return delay +func (c *Controller) preStartDelay() (time.Duration, error) { + return computePreprocessingDelay(c.config.BaseStartDelay, c.dkg.Size()) } // preHandleFirstBroadcastDelay returns a duration to delay prior to handling // the first broadcast message. This delay is used only during phase 1 of the DKG. // This prevents synchronization of processing verification vectors (an // expensive operation) across the network, which can impact finalization. -func (c *Controller) preHandleFirstBroadcastDelay() time.Duration { - delay := computePreprocessingDelay(c.config.BaseHandleFirstBroadcastDelay, c.dkg.Size()) - return delay +func (c *Controller) preHandleFirstBroadcastDelay() (time.Duration, error) { + return computePreprocessingDelay(c.config.BaseHandleFirstBroadcastDelay, c.dkg.Size()) } // computePreprocessingDelay computes a random delay to introduce before an @@ -441,15 +445,18 @@ func (c *Controller) preHandleFirstBroadcastDelay() time.Duration { // The maximum delay is m=b*n^2 where: // * b is a configurable base delay // * n is the size of the DKG committee -func computePreprocessingDelay(baseDelay time.Duration, dkgSize int) time.Duration { +func computePreprocessingDelay(baseDelay time.Duration, dkgSize int) (time.Duration, error) { maxDelay := computePreprocessingDelayMax(baseDelay, dkgSize) if maxDelay <= 0 { - return 0 + return 0, nil } // select delay from [0,m) - delay := time.Duration(rand.Int63n(maxDelay.Nanoseconds())) - return delay + r, err := rand.Uint64n(uint64(maxDelay.Nanoseconds())) + if err != nil { + return time.Duration(0), fmt.Errorf("delay generation failed %w", err) + } + return time.Duration(r), nil } // computePreprocessingDelayMax computes the maximum dely for computePreprocessingDelay. 
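For reference, the bounded random delay introduced above picks a uniform value in [0, b*n^2), where b is the configured base delay and n is the DKG committee size, and it now surfaces sampling errors instead of drawing from the global math/rand source. The following standalone sketch only illustrates that pattern with the Go standard library; the helper name computeDelaySketch and the crypto/rand + math/big sampling are illustrative assumptions, not the flow-go utils/rand implementation.

package main

import (
	"crypto/rand"
	"fmt"
	"math/big"
	"time"
)

// computeDelaySketch draws a uniformly random delay from [0, baseDelay*n*n),
// mirroring the shape of the pre-processing delay described above.
// Non-positive inputs yield a zero delay.
func computeDelaySketch(baseDelay time.Duration, n int) (time.Duration, error) {
	if baseDelay <= 0 || n <= 0 {
		return 0, nil
	}
	maxDelay := baseDelay * time.Duration(n*n)
	r, err := rand.Int(rand.Reader, big.NewInt(maxDelay.Nanoseconds()))
	if err != nil {
		return 0, fmt.Errorf("delay generation failed: %w", err)
	}
	return time.Duration(r.Int64()), nil
}

func main() {
	// with a 500ms base delay and a 10-node committee, the delay falls in [0, 50s)
	d, err := computeDelaySketch(500*time.Millisecond, 10)
	if err != nil {
		panic(err)
	}
	fmt.Println("sleeping for", d)
}

Keeping the guard for non-positive inputs mirrors the behaviour exercised by the tests in the next hunk, which expect a zero delay in those cases.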
diff --git a/module/dkg/controller_test.go b/module/dkg/controller_test.go index 03f10adf1c1..e8f8d253537 100644 --- a/module/dkg/controller_test.go +++ b/module/dkg/controller_test.go @@ -333,20 +333,26 @@ func checkArtifacts(t *testing.T, nodes []*node, totalNodes int) { func TestDelay(t *testing.T) { t.Run("should return 0 delay for <=0 inputs", func(t *testing.T) { - delay := computePreprocessingDelay(0, 100) + delay, err := computePreprocessingDelay(0, 100) + require.NoError(t, err) assert.Equal(t, delay, time.Duration(0)) - delay = computePreprocessingDelay(time.Hour, 0) + delay, err = computePreprocessingDelay(time.Hour, 0) + require.NoError(t, err) assert.Equal(t, delay, time.Duration(0)) - delay = computePreprocessingDelay(time.Millisecond, -1) + delay, err = computePreprocessingDelay(time.Millisecond, -1) + require.NoError(t, err) assert.Equal(t, delay, time.Duration(0)) - delay = computePreprocessingDelay(-time.Millisecond, 100) + delay, err = computePreprocessingDelay(-time.Millisecond, 100) + require.NoError(t, err) assert.Equal(t, delay, time.Duration(0)) }) // NOTE: this is a probabilistic test. It will (extremely infrequently) fail. t.Run("should return different values for same inputs", func(t *testing.T) { - d1 := computePreprocessingDelay(time.Hour, 100) - d2 := computePreprocessingDelay(time.Hour, 100) + d1, err := computePreprocessingDelay(time.Hour, 100) + require.NoError(t, err) + d2, err := computePreprocessingDelay(time.Hour, 100) + require.NoError(t, err) assert.NotEqual(t, d1, d2) }) @@ -360,7 +366,8 @@ func TestDelay(t *testing.T) { maxDelay := computePreprocessingDelayMax(baseDelay, dkgSize) assert.Equal(t, expectedMaxDelay, maxDelay) - delay := computePreprocessingDelay(baseDelay, dkgSize) + delay, err := computePreprocessingDelay(baseDelay, dkgSize) + require.NoError(t, err) assert.LessOrEqual(t, minDelay, delay) assert.GreaterOrEqual(t, expectedMaxDelay, delay) }) @@ -375,7 +382,8 @@ func TestDelay(t *testing.T) { maxDelay := computePreprocessingDelayMax(baseDelay, dkgSize) assert.Equal(t, expectedMaxDelay, maxDelay) - delay := computePreprocessingDelay(baseDelay, dkgSize) + delay, err := computePreprocessingDelay(baseDelay, dkgSize) + require.NoError(t, err) assert.LessOrEqual(t, minDelay, delay) assert.GreaterOrEqual(t, expectedMaxDelay, delay) }) diff --git a/module/epochs/qc_voter_test.go b/module/epochs/qc_voter_test.go index 71a2fdd3b97..47a54483200 100644 --- a/module/epochs/qc_voter_test.go +++ b/module/epochs/qc_voter_test.go @@ -69,7 +69,7 @@ func (suite *Suite) SetupTest() { suite.counter = rand.Uint64() suite.nodes = unittest.IdentityListFixture(4, unittest.WithRole(flow.RoleCollection)) - suite.me = suite.nodes.Sample(1)[0] + suite.me = suite.nodes[rand.Intn(len(suite.nodes))] suite.local.On("NodeID").Return(func() flow.Identifier { return suite.me.NodeID }) diff --git a/module/executiondatasync/execution_data/store_test.go b/module/executiondatasync/execution_data/store_test.go index 4dae95ecebf..f1784201766 100644 --- a/module/executiondatasync/execution_data/store_test.go +++ b/module/executiondatasync/execution_data/store_test.go @@ -3,9 +3,10 @@ package execution_data_test import ( "bytes" "context" + "crypto/rand" "fmt" "io" - "math/rand" + mrand "math/rand" "testing" "github.com/ipfs/go-cid" @@ -134,7 +135,7 @@ type corruptedTailSerializer struct { func newCorruptedTailSerializer(numChunks int) *corruptedTailSerializer { return &corruptedTailSerializer{ - corruptedChunk: rand.Intn(numChunks) + 1, + corruptedChunk: mrand.Intn(numChunks) + 
1, } } @@ -197,7 +198,7 @@ func TestGetIncompleteData(t *testing.T) { cids := getAllKeys(t, blobstore) t.Logf("%d blobs in blob tree", len(cids)) - cidToDelete := cids[rand.Intn(len(cids))] + cidToDelete := cids[mrand.Intn(len(cids))] require.NoError(t, blobstore.DeleteBlob(context.Background(), cidToDelete)) _, err = eds.Get(context.Background(), rootID) diff --git a/module/finalizer/collection/finalizer_test.go b/module/finalizer/collection/finalizer_test.go index f8224105482..fa92d3eeafe 100644 --- a/module/finalizer/collection/finalizer_test.go +++ b/module/finalizer/collection/finalizer_test.go @@ -1,9 +1,7 @@ package collection_test import ( - "math/rand" "testing" - "time" "github.com/dgraph-io/badger/v2" "github.com/stretchr/testify/assert" @@ -25,10 +23,6 @@ import ( func TestFinalizer(t *testing.T) { unittest.RunWithBadgerDB(t, func(db *badger.DB) { - - // seed the RNG - rand.Seed(time.Now().UnixNano()) - // reference block on the main consensus chain refBlock := unittest.BlockHeaderFixture() // genesis block for the cluster chain diff --git a/module/grpcserver/server.go b/module/grpcserver/server.go new file mode 100644 index 00000000000..309cb9315f2 --- /dev/null +++ b/module/grpcserver/server.go @@ -0,0 +1,88 @@ +package grpcserver + +import ( + "net" + "sync" + + "github.com/rs/zerolog" + + "google.golang.org/grpc" + + "github.com/onflow/flow-go/module/component" + "github.com/onflow/flow-go/module/irrecoverable" +) + +// GrpcServer wraps `grpc.Server` and allows to manage it using `component.Component` interface. It can be injected +// into different engines making it possible to use single grpc server for multiple services which live in different modules. +type GrpcServer struct { + component.Component + log zerolog.Logger + Server *grpc.Server + + grpcListenAddr string // the GRPC server address as ip:port + + addrLock sync.RWMutex + grpcAddress net.Addr +} + +var _ component.Component = (*GrpcServer)(nil) + +// NewGrpcServer returns a new grpc server. +func NewGrpcServer(log zerolog.Logger, + grpcListenAddr string, + grpcServer *grpc.Server, +) *GrpcServer { + server := &GrpcServer{ + log: log, + Server: grpcServer, + grpcListenAddr: grpcListenAddr, + } + server.Component = component.NewComponentManagerBuilder(). + AddWorker(server.serveGRPCWorker). + AddWorker(server.shutdownWorker). + Build() + return server +} + +// serveGRPCWorker is a worker routine which starts the gRPC server. +// The ready callback is called after the server address is bound and set. +func (g *GrpcServer) serveGRPCWorker(ctx irrecoverable.SignalerContext, ready component.ReadyFunc) { + g.log = g.log.With().Str("grpc_address", g.grpcListenAddr).Logger() + g.log.Info().Msg("starting grpc server on address") + + l, err := net.Listen("tcp", g.grpcListenAddr) + if err != nil { + g.log.Err(err).Msg("failed to start the grpc server") + ctx.Throw(err) + return + } + + // save the actual address on which we are listening (may be different from g.config.GRPCListenAddr if not port + // was specified) + g.addrLock.Lock() + g.grpcAddress = l.Addr() + g.addrLock.Unlock() + g.log.Debug().Msg("listening on port") + ready() + + err = g.Server.Serve(l) // blocking call + if err != nil { + g.log.Err(err).Msg("fatal error in grpc server") + ctx.Throw(err) + } +} + +// GRPCAddress returns the listen address of the GRPC server. +// Guaranteed to be non-nil after Engine.Ready is closed. 
+func (g *GrpcServer) GRPCAddress() net.Addr { + g.addrLock.RLock() + defer g.addrLock.RUnlock() + return g.grpcAddress +} + +// shutdownWorker is a worker routine which shuts down server when the context is cancelled. +func (g *GrpcServer) shutdownWorker(ctx irrecoverable.SignalerContext, ready component.ReadyFunc) { + ready() + <-ctx.Done() + g.Server.GracefulStop() +} diff --git a/module/grpcserver/server_builder.go b/module/grpcserver/server_builder.go new file mode 100644 index 00000000000..d42196cdf12 --- /dev/null +++ b/module/grpcserver/server_builder.go @@ -0,0 +1,107 @@ +package grpcserver + +import ( + "github.com/rs/zerolog" + + "google.golang.org/grpc" + "google.golang.org/grpc/credentials" + + grpc_prometheus "github.com/grpc-ecosystem/go-grpc-prometheus" + + "github.com/onflow/flow-go/engine/common/rpc" +) + +type Option func(*GrpcServerBuilder) + +// WithTransportCredentials sets the transport credentials parameters for a grpc server builder. +func WithTransportCredentials(transportCredentials credentials.TransportCredentials) Option { + return func(c *GrpcServerBuilder) { + c.transportCredentials = transportCredentials + } +} + +// WithStreamInterceptor sets the StreamInterceptor option to grpc server. +func WithStreamInterceptor() Option { + return func(c *GrpcServerBuilder) { + c.stateStreamInterceptorEnable = true + } +} + +// GrpcServerBuilder created for separating the creation and starting GrpcServer, +// cause services need to be registered before the server starts. +type GrpcServerBuilder struct { + log zerolog.Logger + gRPCListenAddr string + server *grpc.Server + + transportCredentials credentials.TransportCredentials // the GRPC credentials + stateStreamInterceptorEnable bool +} + +// NewGrpcServerBuilder helps to build a new grpc server. +func NewGrpcServerBuilder(log zerolog.Logger, + gRPCListenAddr string, + maxMsgSize uint, + rpcMetricsEnabled bool, + apiRateLimits map[string]int, // the api rate limit (max calls per second) for each of the Access API e.g. Ping->100, GetTransaction->300 + apiBurstLimits map[string]int, // the api burst limit (max calls at the same time) for each of the Access API e.g. Ping->50, GetTransaction->10 + opts ...Option, +) *GrpcServerBuilder { + log = log.With().Str("component", "grpc_server").Logger() + + grpcServerBuilder := &GrpcServerBuilder{ + gRPCListenAddr: gRPCListenAddr, + } + + for _, applyOption := range opts { + applyOption(grpcServerBuilder) + } + + // create a GRPC server to serve GRPC clients + grpcOpts := []grpc.ServerOption{ + grpc.MaxRecvMsgSize(int(maxMsgSize)), + grpc.MaxSendMsgSize(int(maxMsgSize)), + } + var interceptors []grpc.UnaryServerInterceptor // ordered list of interceptors + // if rpc metrics is enabled, first create the grpc metrics interceptor + if rpcMetricsEnabled { + interceptors = append(interceptors, grpc_prometheus.UnaryServerInterceptor) + + if grpcServerBuilder.stateStreamInterceptorEnable { + // note: intentionally not adding logging or rate limit interceptors for streams. + // rate limiting is done in the handler, and we don't need log events for every message as + // that would be too noisy. 
+ log.Info().Msg("stateStreamInterceptorEnable true") + grpcOpts = append(grpcOpts, grpc.StreamInterceptor(grpc_prometheus.StreamServerInterceptor)) + } else { + log.Info().Msg("stateStreamInterceptorEnable false") + } + } + if len(apiRateLimits) > 0 { + // create a rate limit interceptor + rateLimitInterceptor := rpc.NewRateLimiterInterceptor(log, apiRateLimits, apiBurstLimits).UnaryServerInterceptor + // append the rate limit interceptor to the list of interceptors + interceptors = append(interceptors, rateLimitInterceptor) + } + // add the logging interceptor, ensure it is innermost wrapper + interceptors = append(interceptors, rpc.LoggingInterceptor(log)...) + // create a chained unary interceptor + // create an unsecured grpc server + grpcOpts = append(grpcOpts, grpc.ChainUnaryInterceptor(interceptors...)) + + if grpcServerBuilder.transportCredentials != nil { + log = log.With().Str("endpoint", "secure").Logger() + // create a secure server by using the secure grpc credentials that are passed in as part of config + grpcOpts = append(grpcOpts, grpc.Creds(grpcServerBuilder.transportCredentials)) + } else { + log = log.With().Str("endpoint", "unsecure").Logger() + } + grpcServerBuilder.log = log + grpcServerBuilder.server = grpc.NewServer(grpcOpts...) + + return grpcServerBuilder +} + +func (b *GrpcServerBuilder) Build() *GrpcServer { + return NewGrpcServer(b.log, b.gRPCListenAddr, b.server) +} diff --git a/module/jobqueue/finalized_block_reader_test.go b/module/jobqueue/finalized_block_reader_test.go index 679a63e6f2f..8349828d272 100644 --- a/module/jobqueue/finalized_block_reader_test.go +++ b/module/jobqueue/finalized_block_reader_test.go @@ -65,7 +65,8 @@ func withReader( root, err := s.State.Params().FinalizedRoot() require.NoError(t, err) clusterCommittee := participants.Filter(filter.HasRole(flow.RoleCollection)) - results := vertestutils.CompleteExecutionReceiptChainFixture(t, root, blockCount/2, vertestutils.WithClusterCommittee(clusterCommittee)) + sources := unittest.RandomSourcesFixture(10) + results := vertestutils.CompleteExecutionReceiptChainFixture(t, root, blockCount/2, sources, vertestutils.WithClusterCommittee(clusterCommittee)) blocks := vertestutils.ExtendStateWithFinalizedBlocks(t, results, s.State) withBlockReader(reader, blocks) diff --git a/module/mempool/herocache/backdata/cache.go b/module/mempool/herocache/backdata/cache.go index 9849c73584e..2ac93e38957 100644 --- a/module/mempool/herocache/backdata/cache.go +++ b/module/mempool/herocache/backdata/cache.go @@ -135,7 +135,7 @@ func NewCache(sizeLimit uint32, sizeLimit: sizeLimit, buckets: make([]slotBucket, bucketNum), ejectionMode: ejectionMode, - entities: heropool.NewHeroPool(sizeLimit, ejectionMode), + entities: heropool.NewHeroPool(sizeLimit, ejectionMode, logger), availableSlotHistogram: make([]uint64, slotsPerBucket+1), // +1 is to account for empty buckets as well. 
interactionCounter: atomic.NewUint64(0), lastTelemetryDump: atomic.NewInt64(0), @@ -267,7 +267,7 @@ func (c *Cache) Clear() { defer c.logTelemetry() c.buckets = make([]slotBucket, c.bucketNum) - c.entities = heropool.NewHeroPool(c.sizeLimit, c.ejectionMode) + c.entities = heropool.NewHeroPool(c.sizeLimit, c.ejectionMode, c.logger) c.availableSlotHistogram = make([]uint64, slotsPerBucket+1) c.interactionCounter = atomic.NewUint64(0) c.lastTelemetryDump = atomic.NewInt64(0) diff --git a/module/mempool/herocache/backdata/heropool/pool.go b/module/mempool/herocache/backdata/heropool/pool.go index 12fb34c9614..92f926c2239 100644 --- a/module/mempool/herocache/backdata/heropool/pool.go +++ b/module/mempool/herocache/backdata/heropool/pool.go @@ -1,9 +1,10 @@ package heropool import ( - "math/rand" + "github.com/rs/zerolog" "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/utils/rand" ) type EjectionMode string @@ -47,6 +48,7 @@ func (p PoolEntity) Entity() flow.Entity { } type Pool struct { + logger zerolog.Logger size uint32 free state // keeps track of free slots. used state // keeps track of allocated slots to cachedEntities. @@ -54,7 +56,7 @@ type Pool struct { ejectionMode EjectionMode } -func NewHeroPool(sizeLimit uint32, ejectionMode EjectionMode) *Pool { +func NewHeroPool(sizeLimit uint32, ejectionMode EjectionMode, logger zerolog.Logger) *Pool { l := &Pool{ free: state{ head: poolIndex{index: 0}, @@ -66,6 +68,7 @@ func NewHeroPool(sizeLimit uint32, ejectionMode EjectionMode) *Pool { }, poolEntities: make([]poolEntity, sizeLimit), ejectionMode: ejectionMode, + logger: logger, } l.initFreeEntities() @@ -93,7 +96,8 @@ func (p *Pool) initFreeEntities() { // // If the pool has no available slots and an ejection is set, ejection occurs when adding a new entity. // If an ejection occurred, ejectedEntity holds the ejected entity. -func (p *Pool) Add(entityId flow.Identifier, entity flow.Entity, owner uint64) (entityIndex EIndex, slotAvailable bool, ejectedEntity flow.Entity) { +func (p *Pool) Add(entityId flow.Identifier, entity flow.Entity, owner uint64) ( + entityIndex EIndex, slotAvailable bool, ejectedEntity flow.Entity) { entityIndex, slotAvailable, ejectedEntity = p.sliceIndexForEntity() if slotAvailable { p.poolEntities[entityIndex].entity = entity @@ -159,22 +163,34 @@ func (p *Pool) Head() (flow.Entity, bool) { // Ejection happens if there is no available slot, and there is an ejection mode set. // If an ejection occurred, ejectedEntity holds the ejected entity. func (p *Pool) sliceIndexForEntity() (i EIndex, hasAvailableSlot bool, ejectedEntity flow.Entity) { + lruEject := func() (EIndex, bool, flow.Entity) { + // LRU ejection + // the used head is the oldest entity, so we turn the used head to a free head here. + invalidatedEntity := p.invalidateUsedHead() + return p.claimFreeHead(), true, invalidatedEntity + } + if p.free.head.isUndefined() { // the free list is empty, so we are out of space, and we need to eject. switch p.ejectionMode { case NoEjection: // pool is set for no ejection, hence, no slice index is selected, abort immediately. return 0, false, nil - case LRUEjection: - // LRU ejection - // the used head is the oldest entity, so we turn the used head to a free head here. - invalidatedEntity := p.invalidateUsedHead() - return p.claimFreeHead(), true, invalidatedEntity case RandomEjection: // we only eject randomly when the pool is full and random ejection is on. 
- randomIndex := EIndex(rand.Uint32() % p.size) + random, err := rand.Uint32n(p.size) + if err != nil { + p.logger.Fatal().Err(err). + Msg("hero pool random ejection failed - falling back to LRU ejection") + // fall back to LRU ejection only for this instance + return lruEject() + } + randomIndex := EIndex(random) invalidatedEntity := p.invalidateEntityAtIndex(randomIndex) return p.claimFreeHead(), true, invalidatedEntity + case LRUEjection: + // LRU ejection + return lruEject() } } diff --git a/module/mempool/herocache/backdata/heropool/pool_test.go b/module/mempool/herocache/backdata/heropool/pool_test.go index 8f3a83db681..9b8b15bea3a 100644 --- a/module/mempool/herocache/backdata/heropool/pool_test.go +++ b/module/mempool/herocache/backdata/heropool/pool_test.go @@ -645,7 +645,7 @@ func withTestScenario(t *testing.T, ejectionMode EjectionMode, helpers ...func(*testing.T, *Pool, []*unittest.MockEntity)) { - pool := NewHeroPool(limit, ejectionMode) + pool := NewHeroPool(limit, ejectionMode, unittest.Logger()) // head on underlying linked-list value should be uninitialized require.True(t, pool.used.head.isUndefined()) diff --git a/module/mempool/herocache/dns_cache.go b/module/mempool/herocache/dns_cache.go index db4c9a9b67b..9af171c39ae 100644 --- a/module/mempool/herocache/dns_cache.go +++ b/module/mempool/herocache/dns_cache.go @@ -19,7 +19,8 @@ type DNSCache struct { txtCache *stdmap.Backend } -func NewDNSCache(sizeLimit uint32, logger zerolog.Logger, ipCollector module.HeroCacheMetrics, txtCollector module.HeroCacheMetrics) *DNSCache { +func NewDNSCache(sizeLimit uint32, logger zerolog.Logger, ipCollector module.HeroCacheMetrics, txtCollector module.HeroCacheMetrics, +) *DNSCache { return &DNSCache{ txtCache: stdmap.NewBackend( stdmap.WithBackData( diff --git a/module/mempool/queue/heroQueue.go b/module/mempool/queue/heroQueue.go index ec1269147b8..ece206fec17 100644 --- a/module/mempool/queue/heroQueue.go +++ b/module/mempool/queue/heroQueue.go @@ -19,7 +19,8 @@ type HeroQueue struct { sizeLimit uint } -func NewHeroQueue(sizeLimit uint32, logger zerolog.Logger, collector module.HeroCacheMetrics) *HeroQueue { +func NewHeroQueue(sizeLimit uint32, logger zerolog.Logger, collector module.HeroCacheMetrics, +) *HeroQueue { return &HeroQueue{ cache: herocache.NewCache( sizeLimit, diff --git a/module/mempool/queue/heroQueue_test.go b/module/mempool/queue/heroQueue_test.go index 75396a9b1ed..f0775a206c5 100644 --- a/module/mempool/queue/heroQueue_test.go +++ b/module/mempool/queue/heroQueue_test.go @@ -59,10 +59,8 @@ func TestHeroQueue_Sequential(t *testing.T) { func TestHeroQueue_Concurrent(t *testing.T) { sizeLimit := 100 q := queue.NewHeroQueue(uint32(sizeLimit), unittest.Logger(), metrics.NewNoopCollector()) - // initially queue must be zero require.Zero(t, q.Size()) - // initially there should be nothing to pop entity, ok := q.Pop() require.False(t, ok) diff --git a/module/mempool/queue/heroStore.go b/module/mempool/queue/heroStore.go index 8a9e4805c63..03c478e1893 100644 --- a/module/mempool/queue/heroStore.go +++ b/module/mempool/queue/heroStore.go @@ -33,7 +33,8 @@ type HeroStore struct { q *HeroQueue } -func NewHeroStore(sizeLimit uint32, logger zerolog.Logger, collector module.HeroCacheMetrics) *HeroStore { +func NewHeroStore(sizeLimit uint32, logger zerolog.Logger, collector module.HeroCacheMetrics, +) *HeroStore { return &HeroStore{ q: NewHeroQueue(sizeLimit, logger, collector), } diff --git a/module/mempool/stdmap/backend.go b/module/mempool/stdmap/backend.go index 
cb0dca2640d..fb42e5297d5 100644 --- a/module/mempool/stdmap/backend.go +++ b/module/mempool/stdmap/backend.go @@ -23,12 +23,12 @@ type Backend struct { } // NewBackend creates a new memory pool backend. -// This is using EjectTrueRandomFast() +// This is using EjectRandomFast() func NewBackend(options ...OptionFunc) *Backend { b := Backend{ backData: backdata.NewMapBackData(), guaranteedCapacity: uint(math.MaxUint32), - batchEject: EjectTrueRandomFast, + batchEject: EjectRandomFast, eject: nil, ejectionCallbacks: nil, } @@ -185,14 +185,14 @@ func (b *Backend) reduce() { //defer binstat.Leave(bs) // we keep reducing the cache size until we are at limit again - // this was a loop, but the loop is now in EjectTrueRandomFast() + // this was a loop, but the loop is now in EjectRandomFast() // the ejections are batched, so this call to eject() may not actually // do anything until the batch threshold is reached (currently 128) if b.backData.Size() > b.guaranteedCapacity { // get the key from the eject function // we don't do anything if there is an error if b.batchEject != nil { - _ = b.batchEject(b) + _, _ = b.batchEject(b) } else { _, _, _ = b.eject(b) } diff --git a/module/mempool/stdmap/eject.go b/module/mempool/stdmap/eject.go index 3ed2d59683a..7cea5214b3d 100644 --- a/module/mempool/stdmap/eject.go +++ b/module/mempool/stdmap/eject.go @@ -3,12 +3,13 @@ package stdmap import ( + "fmt" "math" - "math/rand" "sort" "sync" "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/utils/rand" ) // this is the threshold for how much over the guaranteed capacity the @@ -31,49 +32,33 @@ const overCapacityThreshold = 128 // concurrency (specifically, it locks the mempool during ejection). // - The implementation should be non-blocking (though, it is allowed to // take a bit of time; the mempool will just be locked during this time). -type BatchEjectFunc func(b *Backend) bool +type BatchEjectFunc func(b *Backend) (bool, error) type EjectFunc func(b *Backend) (flow.Identifier, flow.Entity, bool) -// EjectTrueRandom relies on a random generator to pick a random entity to eject from the -// entity set. It will, on average, iterate through half the entities of the set. However, -// it provides us with a truly evenly distributed random selection. -func EjectTrueRandom(b *Backend) (flow.Identifier, flow.Entity, bool) { - var entity flow.Entity - var entityID flow.Identifier - - bFound := false - i := 0 - n := rand.Intn(int(b.backData.Size())) - for entityID, entity = range b.backData.All() { - if i == n { - bFound = true - break - } - i++ - } - return entityID, entity, bFound -} - -// EjectTrueRandomFast checks if the map size is beyond the +// EjectRandomFast checks if the map size is beyond the // threshold size, and will iterate through them and eject unneeded // entries if that is the case. Return values are unused -func EjectTrueRandomFast(b *Backend) bool { +func EjectRandomFast(b *Backend) (bool, error) { currentSize := b.backData.Size() if b.guaranteedCapacity >= currentSize { - return false + return false, nil } // At this point, we know that currentSize > b.guaranteedCapacity. As // currentSize fits into an int, b.guaranteedCapacity must also fit. 
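	// Illustrative example (not part of this change): with guaranteedCapacity = 1000
	// and currentSize = 1200, the overcapacity computed below is 200, which exceeds
	// overCapacityThreshold (128), so 200 random map indices are drawn and that many
	// entries are ejected in a single batch. With currentSize = 1100, overcapacity is
	// 100 <= 128 and the call returns (false, nil) without ejecting anything.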
overcapacity := currentSize - b.guaranteedCapacity if overcapacity <= overCapacityThreshold { - return false + return false, nil } // Randomly select indices of elements to remove: mapIndices := make([]int, 0, overcapacity) for i := overcapacity; i > 0; i-- { - mapIndices = append(mapIndices, rand.Intn(int(currentSize))) + rand, err := rand.Uintn(currentSize) + if err != nil { + return false, fmt.Errorf("random generation failed: %w", err) + } + mapIndices = append(mapIndices, int(rand)) } sort.Ints(mapIndices) // inplace @@ -99,13 +84,13 @@ func EjectTrueRandomFast(b *Backend) bool { } if idx == int(overcapacity) { - return true + return true, nil } next2Remove = mapIndices[idx] } i++ } - return true + return true, nil } // EjectPanic simply panics, crashing the program. Useful when cache is not expected @@ -158,7 +143,7 @@ func (q *LRUEjector) Untrack(entityID flow.Identifier) { // Eject implements EjectFunc for LRUEjector. It finds the entity with the lowest sequence number (i.e., // the oldest entity). It also untracks. This is using a linear search -func (q *LRUEjector) Eject(b *Backend) (flow.Identifier, flow.Entity, bool) { +func (q *LRUEjector) Eject(b *Backend) flow.Identifier { q.Lock() defer q.Unlock() @@ -171,19 +156,11 @@ func (q *LRUEjector) Eject(b *Backend) (flow.Identifier, flow.Entity, bool) { oldestID = id oldestSQ = sq } - } } - // TODO: don't do a lookup if it isn't necessary - oldestEntity, ok := b.backData.ByID(oldestID) - - if !ok { - oldestID, oldestEntity, ok = EjectTrueRandom(b) - } - // untracks the oldest id as it is supposed to be ejected delete(q.table, oldestID) - return oldestID, oldestEntity, ok + return oldestID } diff --git a/module/mempool/stdmap/eject_test.go b/module/mempool/stdmap/eject_test.go index cee1974e840..398c74938aa 100644 --- a/module/mempool/stdmap/eject_test.go +++ b/module/mempool/stdmap/eject_test.go @@ -196,7 +196,7 @@ func TestLRUEjector_UntrackEject(t *testing.T) { ejector.Untrack(items[0]) // next ejectable item should be the second oldest item - id, _, _ := ejector.Eject(backEnd) + id := ejector.Eject(backEnd) assert.Equal(t, id, items[1]) } @@ -224,7 +224,7 @@ func TestLRUEjector_EjectAll(t *testing.T) { // ejects one by one for i := 0; i < size; i++ { - id, _, _ := ejector.Eject(backEnd) + id := ejector.Eject(backEnd) require.Equal(t, id, items[i]) } } diff --git a/module/metrics.go b/module/metrics.go index 338f87c1ecc..2b889b98c44 100644 --- a/module/metrics.go +++ b/module/metrics.go @@ -42,6 +42,10 @@ type NetworkSecurityMetrics interface { // OnRateLimitedPeer tracks the number of rate limited unicast messages seen on the network. OnRateLimitedPeer(pid peer.ID, role, msgType, topic, reason string) + + // OnViolationReportSkipped tracks the number of slashing violations consumer violations that were not + // reported for misbehavior when the identity of the sender not known. + OnViolationReportSkipped() } // GossipSubRouterMetrics encapsulates the metrics collectors for GossipSubRouter module of the networking layer. @@ -182,6 +186,8 @@ type NetworkInboundQueueMetrics interface { type NetworkCoreMetrics interface { NetworkInboundQueueMetrics AlspMetrics + NetworkSecurityMetrics + // OutboundMessageSent collects metrics related to a message sent by the node. OutboundMessageSent(sizeBytes int, topic string, protocol string, messageType string) // InboundMessageReceived collects metrics related to a message received by the node. 
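For context on the ejection changes above, a minimal sketch of the sampling pattern this patch adopts across the mempool code: the error-returning helpers in flow-go/utils/rand replace math/rand, and callers either propagate the error (as EjectRandomFast now does) or fall back to a deterministic path (as HeroPool does). The wrapper function below is hypothetical; only the rand.Uintn call and the error-wrapping style come from the diff itself.

package example

import (
	"fmt"

	"github.com/onflow/flow-go/utils/rand"
)

// pickRandomIndex is a hypothetical helper showing the pattern: draw a uniform
// index in [0, n) from the error-returning RNG helpers and surface the error to
// the caller instead of panicking or silently degrading.
func pickRandomIndex(n uint) (int, error) {
	idx, err := rand.Uintn(n)
	if err != nil {
		return 0, fmt.Errorf("random generation failed: %w", err)
	}
	return int(idx), nil
}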
@@ -223,7 +229,6 @@ type AlspMetrics interface { // NetworkMetrics is the blanket abstraction that encapsulates the metrics collectors for the networking layer. type NetworkMetrics interface { LibP2PMetrics - NetworkSecurityMetrics NetworkCoreMetrics } diff --git a/module/metrics/access.go b/module/metrics/access.go index e1021c93a42..1116f87f433 100644 --- a/module/metrics/access.go +++ b/module/metrics/access.go @@ -3,7 +3,6 @@ package metrics import ( "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promauto" - metricsProm "github.com/slok/go-http-metrics/metrics/prometheus" "github.com/onflow/flow-go/module" "github.com/onflow/flow-go/module/counters" @@ -23,6 +22,12 @@ func WithBackendScriptsMetrics(m module.BackendScriptsMetrics) AccessCollectorOp } } +func WithRestMetrics(m module.RestMetrics) AccessCollectorOpts { + return func(ac *AccessCollector) { + ac.RestMetrics = m + } +} + type AccessCollector struct { module.RestMetrics module.TransactionMetrics @@ -101,8 +106,6 @@ func NewAccessCollector(opts ...AccessCollectorOpts) *AccessCollector { Help: "gauge to track the maximum block height of execution receipts received", }), maxReceiptHeightValue: counters.NewMonotonousCounter(0), - - RestMetrics: NewRestCollector(metricsProm.Config{Prefix: "access_rest_api"}), } for _, opt := range opts { diff --git a/module/metrics/herocache.go b/module/metrics/herocache.go index 54e287bdb1b..f3a88341c87 100644 --- a/module/metrics/herocache.go +++ b/module/metrics/herocache.go @@ -146,6 +146,15 @@ func GossipSubRPCInspectorQueueMetricFactory(f HeroCacheMetricsFactory, networkT return f(namespaceNetwork, r) } +func GossipSubRPCSentTrackerMetricFactory(f HeroCacheMetricsFactory, networkType network.NetworkingType) module.HeroCacheMetrics { + // we don't use the public prefix for the metrics here for sake of backward compatibility of metric name. 
+ r := ResourceNetworkingRPCSentTrackerCache + if networkType == network.PublicNetwork { + r = PrependPublicPrefix(r) + } + return f(namespaceNetwork, r) +} + func RpcInspectorNotificationQueueMetricFactory(f HeroCacheMetricsFactory, networkType network.NetworkingType) module.HeroCacheMetrics { r := ResourceNetworkingRpcInspectorNotificationQueue if networkType == network.PublicNetwork { diff --git a/module/metrics/labels.go b/module/metrics/labels.go index 353e1b3ca25..4efb72b1152 100644 --- a/module/metrics/labels.go +++ b/module/metrics/labels.go @@ -20,6 +20,10 @@ const ( LabelSuccess = "success" LabelCtrlMsgType = "control_message" LabelMisbehavior = "misbehavior" + LabelHandler = "handler" + LabelStatusCode = "code" + LabelMethod = "method" + LabelService = "service" ) const ( @@ -92,6 +96,7 @@ const ( ResourceNetworkingApplicationLayerSpamReportQueue = "application_layer_spam_report_queue" ResourceNetworkingRpcClusterPrefixReceivedCache = "rpc_cluster_prefixed_received_cache" ResourceNetworkingDisallowListCache = "disallow_list_cache" + ResourceNetworkingRPCSentTrackerCache = "gossipsub_rpc_sent_tracker_cache" ResourceFollowerPendingBlocksCache = "follower_pending_block_cache" // follower engine ResourceFollowerLoopCertifiedBlocksChannel = "follower_loop_certified_blocks_channel" // follower loop, certified blocks buffered channel diff --git a/module/metrics/namespaces.go b/module/metrics/namespaces.go index 31995538992..f89f2a530ae 100644 --- a/module/metrics/namespaces.go +++ b/module/metrics/namespaces.go @@ -15,6 +15,7 @@ const ( namespaceExecutionDataSync = "execution_data_sync" namespaceChainsync = "chainsync" namespaceFollowerEngine = "follower" + namespaceRestAPI = "access_rest_api" ) // Network subsystems represent the various layers of networking. @@ -28,6 +29,7 @@ const ( subsystemAuth = "authorization" subsystemRateLimiting = "ratelimit" subsystemAlsp = "alsp" + subsystemSecurity = "security" ) // Storage subsystems represent the various components of the storage layer. 
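The new REST labels and the access_rest_api namespace above feed the reworked RestCollector later in this patch, whose constructor now takes a URL-to-route mapper and a Prometheus registerer. A hedged usage sketch follows; the identity mapper is a stand-in, since the real mapper is expected to come from the REST router and to strip dynamic path segments.

package example

import (
	"github.com/prometheus/client_golang/prometheus"

	"github.com/onflow/flow-go/module/metrics"
)

// newRestMetrics wires up the REST collector as this patch's constructor expects.
// The mapper keeps metric label cardinality bounded by normalizing raw URLs into
// static route names; if it returns an error, the collector falls back to "unknown".
func newRestMetrics() (*metrics.RestCollector, error) {
	mapper := func(url string) (string, error) {
		return url, nil // stand-in only; a real mapper resolves the URL to a named route
	}
	return metrics.NewRestCollector(mapper, prometheus.DefaultRegisterer)
}

The resulting collector is then handed to the access collector via the WithRestMetrics option added in module/metrics/access.go.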
@@ -42,6 +44,7 @@ const ( subsystemTransactionTiming = "transaction_timing" subsystemTransactionSubmission = "transaction_submission" subsystemConnectionPool = "connection_pool" + subsystemHTTP = "http" ) // Observer subsystem diff --git a/module/metrics/network.go b/module/metrics/network.go index f064ca10f6e..311dbba9f15 100644 --- a/module/metrics/network.go +++ b/module/metrics/network.go @@ -45,9 +45,10 @@ type NetworkCollector struct { dnsLookupRequestDroppedCount prometheus.Counter routingTableSize prometheus.Gauge - // authorization, rate limiting metrics + // security metrics unAuthorizedMessagesCount *prometheus.CounterVec rateLimitedUnicastMessagesCount *prometheus.CounterVec + violationReportSkippedCount prometheus.Counter prefix string } @@ -245,6 +246,15 @@ func NewNetworkCollector(logger zerolog.Logger, opts ...NetworkCollectorOpt) *Ne }, []string{LabelNodeRole, LabelMessage, LabelChannel, LabelRateLimitReason}, ) + nc.violationReportSkippedCount = promauto.NewCounter( + prometheus.CounterOpts{ + Namespace: namespaceNetwork, + Subsystem: subsystemSecurity, + Name: nc.prefix + "slashing_violation_reports_skipped_count", + Help: "number of slashing violations consumer violations that were not reported for misbehavior because the identity of the sender not known", + }, + ) + return nc } @@ -358,3 +368,9 @@ func (nc *NetworkCollector) OnRateLimitedPeer(peerID peer.ID, role, msgType, top Msg("unicast peer rate limited") nc.rateLimitedUnicastMessagesCount.WithLabelValues(role, msgType, topic, reason).Inc() } + +// OnViolationReportSkipped tracks the number of slashing violations consumer violations that were not +// reported for misbehavior when the identity of the sender not known. +func (nc *NetworkCollector) OnViolationReportSkipped() { + nc.violationReportSkippedCount.Inc() +} diff --git a/module/metrics/noop.go b/module/metrics/noop.go index 2dd33d133cc..9bf5be48f0d 100644 --- a/module/metrics/noop.go +++ b/module/metrics/noop.go @@ -4,6 +4,8 @@ import ( "context" "time" + "google.golang.org/grpc/codes" + "github.com/libp2p/go-libp2p/core/network" "github.com/libp2p/go-libp2p/core/peer" "github.com/libp2p/go-libp2p/core/protocol" @@ -303,3 +305,8 @@ func (nc *NoopCollector) AsyncProcessingStarted(string) func (nc *NoopCollector) AsyncProcessingFinished(string, time.Duration) {} func (nc *NoopCollector) OnMisbehaviorReported(string, string) {} +func (nc *NoopCollector) OnViolationReportSkipped() {} + +var _ ObserverMetrics = (*NoopCollector)(nil) + +func (nc *NoopCollector) RecordRPC(handler, rpc string, code codes.Code) {} diff --git a/module/metrics/observer.go b/module/metrics/observer.go index 4e885c9bf4c..375aa66a2ac 100644 --- a/module/metrics/observer.go +++ b/module/metrics/observer.go @@ -6,10 +6,16 @@ import ( "google.golang.org/grpc/codes" ) +type ObserverMetrics interface { + RecordRPC(handler, rpc string, code codes.Code) +} + type ObserverCollector struct { rpcs *prometheus.CounterVec } +var _ ObserverMetrics = (*ObserverCollector)(nil) + func NewObserverCollector() *ObserverCollector { return &ObserverCollector{ rpcs: promauto.NewCounterVec(prometheus.CounterOpts{ diff --git a/module/metrics/rest_api.go b/module/metrics/rest_api.go index 0ab08d0a3ca..e9132f243c6 100644 --- a/module/metrics/rest_api.go +++ b/module/metrics/rest_api.go @@ -2,14 +2,13 @@ package metrics import ( "context" + "fmt" "time" "github.com/prometheus/client_golang/prometheus" + httpmetrics "github.com/slok/go-http-metrics/metrics" "github.com/onflow/flow-go/module" - - httpmetrics 
"github.com/slok/go-http-metrics/metrics" - metricsProm "github.com/slok/go-http-metrics/metrics/prometheus" ) type RestCollector struct { @@ -17,97 +16,98 @@ type RestCollector struct { httpResponseSizeHistogram *prometheus.HistogramVec httpRequestsInflight *prometheus.GaugeVec httpRequestsTotal *prometheus.GaugeVec + + // urlToRouteMapper is a callback that converts a URL to a route name + urlToRouteMapper func(string) (string, error) } var _ module.RestMetrics = (*RestCollector)(nil) // NewRestCollector returns a new metrics RestCollector that implements the RestCollector // using Prometheus as the backend. -func NewRestCollector(cfg metricsProm.Config) module.RestMetrics { - if len(cfg.DurationBuckets) == 0 { - cfg.DurationBuckets = prometheus.DefBuckets - } - - if len(cfg.SizeBuckets) == 0 { - cfg.SizeBuckets = prometheus.ExponentialBuckets(100, 10, 8) - } - - if cfg.Registry == nil { - cfg.Registry = prometheus.DefaultRegisterer - } - - if cfg.HandlerIDLabel == "" { - cfg.HandlerIDLabel = "handler" - } - - if cfg.StatusCodeLabel == "" { - cfg.StatusCodeLabel = "code" - } - - if cfg.MethodLabel == "" { - cfg.MethodLabel = "method" - } - - if cfg.ServiceLabel == "" { - cfg.ServiceLabel = "service" +func NewRestCollector(urlToRouteMapper func(string) (string, error), registerer prometheus.Registerer) (*RestCollector, error) { + if urlToRouteMapper == nil { + return nil, fmt.Errorf("urlToRouteMapper cannot be nil") } r := &RestCollector{ + urlToRouteMapper: urlToRouteMapper, httpRequestDurHistogram: prometheus.NewHistogramVec(prometheus.HistogramOpts{ - Namespace: cfg.Prefix, - Subsystem: "http", + Namespace: namespaceRestAPI, + Subsystem: subsystemHTTP, Name: "request_duration_seconds", Help: "The latency of the HTTP requests.", - Buckets: cfg.DurationBuckets, - }, []string{cfg.ServiceLabel, cfg.HandlerIDLabel, cfg.MethodLabel, cfg.StatusCodeLabel}), + Buckets: prometheus.DefBuckets, + }, []string{LabelService, LabelHandler, LabelMethod, LabelStatusCode}), httpResponseSizeHistogram: prometheus.NewHistogramVec(prometheus.HistogramOpts{ - Namespace: cfg.Prefix, - Subsystem: "http", + Namespace: namespaceRestAPI, + Subsystem: subsystemHTTP, Name: "response_size_bytes", Help: "The size of the HTTP responses.", - Buckets: cfg.SizeBuckets, - }, []string{cfg.ServiceLabel, cfg.HandlerIDLabel, cfg.MethodLabel, cfg.StatusCodeLabel}), + Buckets: prometheus.ExponentialBuckets(100, 10, 8), + }, []string{LabelService, LabelHandler, LabelMethod, LabelStatusCode}), httpRequestsInflight: prometheus.NewGaugeVec(prometheus.GaugeOpts{ - Namespace: cfg.Prefix, - Subsystem: "http", + Namespace: namespaceRestAPI, + Subsystem: subsystemHTTP, Name: "requests_inflight", Help: "The number of inflight requests being handled at the same time.", - }, []string{cfg.ServiceLabel, cfg.HandlerIDLabel}), + }, []string{LabelService, LabelHandler}), httpRequestsTotal: prometheus.NewGaugeVec(prometheus.GaugeOpts{ - Namespace: cfg.Prefix, - Subsystem: "http", + Namespace: namespaceRestAPI, + Subsystem: subsystemHTTP, Name: "requests_total", Help: "The number of requests handled over time.", - }, []string{cfg.MethodLabel, cfg.HandlerIDLabel}), + }, []string{LabelMethod, LabelHandler}), } - cfg.Registry.MustRegister( + registerer.MustRegister( r.httpRequestDurHistogram, r.httpResponseSizeHistogram, r.httpRequestsInflight, r.httpRequestsTotal, ) - return r + return r, nil } -// These methods are called automatically by go-http-metrics/middleware +// ObserveHTTPRequestDuration records the duration of the REST request. 
+// This method is called automatically by go-http-metrics/middleware func (r *RestCollector) ObserveHTTPRequestDuration(_ context.Context, p httpmetrics.HTTPReqProperties, duration time.Duration) { - r.httpRequestDurHistogram.WithLabelValues(p.Service, p.ID, p.Method, p.Code).Observe(duration.Seconds()) + handler := r.mapURLToRoute(p.ID) + r.httpRequestDurHistogram.WithLabelValues(p.Service, handler, p.Method, p.Code).Observe(duration.Seconds()) } +// ObserveHTTPResponseSize records the response size of the REST request. +// This method is called automatically by go-http-metrics/middleware func (r *RestCollector) ObserveHTTPResponseSize(_ context.Context, p httpmetrics.HTTPReqProperties, sizeBytes int64) { - r.httpResponseSizeHistogram.WithLabelValues(p.Service, p.ID, p.Method, p.Code).Observe(float64(sizeBytes)) + handler := r.mapURLToRoute(p.ID) + r.httpResponseSizeHistogram.WithLabelValues(p.Service, handler, p.Method, p.Code).Observe(float64(sizeBytes)) } +// AddInflightRequests increments and decrements the number of inflight request being processed. +// This method is called automatically by go-http-metrics/middleware func (r *RestCollector) AddInflightRequests(_ context.Context, p httpmetrics.HTTPProperties, quantity int) { - r.httpRequestsInflight.WithLabelValues(p.Service, p.ID).Add(float64(quantity)) + handler := r.mapURLToRoute(p.ID) + r.httpRequestsInflight.WithLabelValues(p.Service, handler).Add(float64(quantity)) +} + +// AddTotalRequests records all REST requests +// This is a custom method called by the REST handler +func (r *RestCollector) AddTotalRequests(_ context.Context, method, path string) { + handler := r.mapURLToRoute(path) + r.httpRequestsTotal.WithLabelValues(method, handler).Inc() } -// New custom method to track all requests made for every REST API request -func (r *RestCollector) AddTotalRequests(_ context.Context, method string, routeName string) { - r.httpRequestsTotal.WithLabelValues(method, routeName).Inc() +// mapURLToRoute uses the urlToRouteMapper callback to convert a URL to a route name +// This normalizes the URL, removing dynamic information converting it to a static string +func (r *RestCollector) mapURLToRoute(url string) string { + route, err := r.urlToRouteMapper(url) + if err != nil { + return "unknown" + } + + return route } diff --git a/module/mock/network_core_metrics.go b/module/mock/network_core_metrics.go index 63c849fbf27..d78c3355449 100644 --- a/module/mock/network_core_metrics.go +++ b/module/mock/network_core_metrics.go @@ -5,6 +5,8 @@ package mock import ( mock "github.com/stretchr/testify/mock" + peer "github.com/libp2p/go-libp2p/core/peer" + time "time" ) @@ -48,6 +50,21 @@ func (_m *NetworkCoreMetrics) OnMisbehaviorReported(channel string, misbehaviorT _m.Called(channel, misbehaviorType) } +// OnRateLimitedPeer provides a mock function with given fields: pid, role, msgType, topic, reason +func (_m *NetworkCoreMetrics) OnRateLimitedPeer(pid peer.ID, role string, msgType string, topic string, reason string) { + _m.Called(pid, role, msgType, topic, reason) +} + +// OnUnauthorizedMessage provides a mock function with given fields: role, msgType, topic, offense +func (_m *NetworkCoreMetrics) OnUnauthorizedMessage(role string, msgType string, topic string, offense string) { + _m.Called(role, msgType, topic, offense) +} + +// OnViolationReportSkipped provides a mock function with given fields: +func (_m *NetworkCoreMetrics) OnViolationReportSkipped() { + _m.Called() +} + // OutboundMessageSent provides a mock function with given 
fields: sizeBytes, topic, protocol, messageType func (_m *NetworkCoreMetrics) OutboundMessageSent(sizeBytes int, topic string, protocol string, messageType string) { _m.Called(sizeBytes, topic, protocol, messageType) diff --git a/module/mock/network_metrics.go b/module/mock/network_metrics.go index 851565d5724..2909f7d677f 100644 --- a/module/mock/network_metrics.go +++ b/module/mock/network_metrics.go @@ -300,6 +300,11 @@ func (_m *NetworkMetrics) OnUnauthorizedMessage(role string, msgType string, top _m.Called(role, msgType, topic, offense) } +// OnViolationReportSkipped provides a mock function with given fields: +func (_m *NetworkMetrics) OnViolationReportSkipped() { + _m.Called() +} + // OutboundConnections provides a mock function with given fields: connectionCount func (_m *NetworkMetrics) OutboundConnections(connectionCount uint) { _m.Called(connectionCount) diff --git a/module/mock/network_security_metrics.go b/module/mock/network_security_metrics.go index 51d045c2a12..a48a693c0ab 100644 --- a/module/mock/network_security_metrics.go +++ b/module/mock/network_security_metrics.go @@ -23,6 +23,11 @@ func (_m *NetworkSecurityMetrics) OnUnauthorizedMessage(role string, msgType str _m.Called(role, msgType, topic, offense) } +// OnViolationReportSkipped provides a mock function with given fields: +func (_m *NetworkSecurityMetrics) OnViolationReportSkipped() { + _m.Called() +} + type mockConstructorTestingTNewNetworkSecurityMetrics interface { mock.TestingT Cleanup(func()) diff --git a/module/signature/aggregation_test.go b/module/signature/aggregation_test.go index aacd0a89f06..aebc696b091 100644 --- a/module/signature/aggregation_test.go +++ b/module/signature/aggregation_test.go @@ -4,7 +4,6 @@ package signature import ( - "crypto/rand" "errors" mrand "math/rand" "sort" @@ -17,6 +16,13 @@ import ( "github.com/onflow/flow-go/crypto" ) +func getPRG(t *testing.T) *mrand.Rand { + random := time.Now().UnixNano() + t.Logf("rng seed is %d", random) + rng := mrand.New(mrand.NewSource(random)) + return rng +} + // Utility function that flips a point sign bit to negate the point // this is shortcut which works only for zcash BLS12-381 compressed serialization // that is currently supported by the flow crypto module @@ -25,7 +31,7 @@ func negatePoint(pointbytes []byte) { pointbytes[0] ^= 0x20 } -func createAggregationData(t *testing.T, signersNumber int) ( +func createAggregationData(t *testing.T, rand *mrand.Rand, signersNumber int) ( []byte, string, []crypto.Signature, []crypto.PublicKey, ) { // create message and tag @@ -54,7 +60,7 @@ func createAggregationData(t *testing.T, signersNumber int) ( } func TestAggregatorSameMessage(t *testing.T) { - + rand := getPRG(t) signersNum := 20 // constructor edge cases @@ -79,7 +85,7 @@ func TestAggregatorSameMessage(t *testing.T) { // Happy paths // all signatures are valid t.Run("happy path", func(t *testing.T) { - msg, tag, sigs, pks := createAggregationData(t, signersNum) + msg, tag, sigs, pks := createAggregationData(t, rand, signersNum) aggregator, err := NewSignatureAggregatorSameMessage(msg, tag, pks) require.NoError(t, err) @@ -150,7 +156,7 @@ func TestAggregatorSameMessage(t *testing.T) { // Unhappy paths t.Run("invalid inputs", func(t *testing.T) { - msg, tag, sigs, pks := createAggregationData(t, signersNum) + msg, tag, sigs, pks := createAggregationData(t, rand, signersNum) aggregator, err := NewSignatureAggregatorSameMessage(msg, tag, pks) require.NoError(t, err) // invalid indices for different methods @@ -184,7 +190,7 @@ func 
TestAggregatorSameMessage(t *testing.T) { }) t.Run("duplicate signers", func(t *testing.T) { - msg, tag, sigs, pks := createAggregationData(t, signersNum) + msg, tag, sigs, pks := createAggregationData(t, rand, signersNum) aggregator, err := NewSignatureAggregatorSameMessage(msg, tag, pks) require.NoError(t, err) @@ -223,7 +229,7 @@ func TestAggregatorSameMessage(t *testing.T) { // 1: No signature has been added. t.Run("aggregate with no signatures", func(t *testing.T) { - msg, tag, _, pks := createAggregationData(t, 1) + msg, tag, _, pks := createAggregationData(t, rand, 1) aggregator, err := NewSignatureAggregatorSameMessage(msg, tag, pks) require.NoError(t, err) // Aggregation should error with sentinel InsufficientSignaturesError @@ -239,7 +245,7 @@ func TestAggregatorSameMessage(t *testing.T) { // 2.a. aggregated public key is not identity // 2.b. aggregated public key is identity t.Run("invalid signature serialization", func(t *testing.T) { - msg, tag, sigs, pks := createAggregationData(t, 2) + msg, tag, sigs, pks := createAggregationData(t, rand, 2) invalidStructureSig := (crypto.Signature)([]byte{0, 0}) t.Run("with non-identity aggregated public key", func(t *testing.T) { @@ -305,7 +311,7 @@ func TestAggregatorSameMessage(t *testing.T) { // 3.a. aggregated public key is not identity // 3.b. aggregated public key is identity t.Run("correct serialization and invalid signature", func(t *testing.T) { - msg, tag, sigs, pks := createAggregationData(t, 2) + msg, tag, sigs, pks := createAggregationData(t, rand, 2) t.Run("with non-identity aggregated public key", func(t *testing.T) { aggregator, err := NewSignatureAggregatorSameMessage(msg, tag, pks) @@ -374,7 +380,7 @@ func TestAggregatorSameMessage(t *testing.T) { // 4. All signatures are valid but aggregated key is identity t.Run("all valid signatures and identity aggregated key", func(t *testing.T) { - msg, tag, sigs, pks := createAggregationData(t, 2) + msg, tag, sigs, pks := createAggregationData(t, rand, 2) // public key at index 1 is opposite of public key at index 0 (pks[1] = -pks[0]) // so that aggregation of pks[0] and pks[1] is identity @@ -413,9 +419,7 @@ func TestAggregatorSameMessage(t *testing.T) { } func TestKeyAggregator(t *testing.T) { - r := time.Now().UnixNano() - mrand.Seed(r) - t.Logf("math rand seed is %d", r) + rand := getPRG(t) signersNum := 20 // create keys @@ -497,8 +501,8 @@ func TestKeyAggregator(t *testing.T) { rounds := 30 for i := 0; i < rounds; i++ { go func() { // test module concurrency - low := mrand.Intn(signersNum - 1) - high := low + 1 + mrand.Intn(signersNum-1-low) + low := rand.Intn(signersNum - 1) + high := low + 1 + rand.Intn(signersNum-1-low) var key, expectedKey crypto.PublicKey var err error key, err = aggregator.KeyAggregate(indices[low:high]) diff --git a/module/signature/signer_indices_test.go b/module/signature/signer_indices_test.go index c34daea4f37..0bd7aaee34e 100644 --- a/module/signature/signer_indices_test.go +++ b/module/signature/signer_indices_test.go @@ -112,7 +112,7 @@ func Test_EncodeSignerToIndicesAndSigType(t *testing.T) { // create committee committeeIdentities := unittest.IdentityListFixture(committeeSize, unittest.WithRole(flow.RoleConsensus)).Sort(order.Canonical) committee := committeeIdentities.NodeIDs() - stakingSigners, beaconSigners := sampleSigners(committee, numStakingSigners, numRandomBeaconSigners) + stakingSigners, beaconSigners := sampleSigners(t, committee, numStakingSigners, numRandomBeaconSigners) // encode prefixed, sigTypes, err := 
signature.EncodeSignerToIndicesAndSigType(committee, stakingSigners, beaconSigners) @@ -150,7 +150,7 @@ func Test_DecodeSigTypeToStakingAndBeaconSigners(t *testing.T) { // create committee committeeIdentities := unittest.IdentityListFixture(committeeSize, unittest.WithRole(flow.RoleConsensus)).Sort(order.Canonical) committee := committeeIdentities.NodeIDs() - stakingSigners, beaconSigners := sampleSigners(committee, numStakingSigners, numRandomBeaconSigners) + stakingSigners, beaconSigners := sampleSigners(t, committee, numStakingSigners, numRandomBeaconSigners) // encode signerIndices, sigTypes, err := signature.EncodeSignerToIndicesAndSigType(committee, stakingSigners, beaconSigners) @@ -276,7 +276,8 @@ func Test_EncodeSignersToIndices(t *testing.T) { // create committee identities := unittest.IdentityListFixture(committeeSize, unittest.WithRole(flow.RoleConsensus)).Sort(order.Canonical) committee := identities.NodeIDs() - signers := committee.Sample(uint(numSigners)) + signers, err := committee.Sample(uint(numSigners)) + require.NoError(t, err) // encode prefixed, err := signature.EncodeSignersToIndices(committee, signers) @@ -305,7 +306,8 @@ func Test_DecodeSignerIndicesToIdentifiers(t *testing.T) { // create committee identities := unittest.IdentityListFixture(committeeSize, unittest.WithRole(flow.RoleConsensus)).Sort(order.Canonical) committee := identities.NodeIDs() - signers := committee.Sample(uint(numSigners)) + signers, err := committee.Sample(uint(numSigners)) + require.NoError(t, err) sort.Sort(signers) // encode @@ -340,7 +342,8 @@ func Test_DecodeSignerIndicesToIdentities(t *testing.T) { // create committee identities := unittest.IdentityListFixture(committeeSize, unittest.WithRole(flow.RoleConsensus)).Sort(order.Canonical) - signers := identities.Sample(uint(numSigners)) + signers, err := identities.Sample(uint(numSigners)) + require.NoError(t, err) // encode signerIndices, err := signature.EncodeSignersToIndices(identities.NodeIDs(), signers.NodeIDs()) @@ -356,6 +359,7 @@ func Test_DecodeSignerIndicesToIdentities(t *testing.T) { // sampleSigners takes `committee` and samples to _disjoint_ subsets // (`stakingSigners` and `randomBeaconSigners`) with the specified cardinality func sampleSigners( + t *rapid.T, committee flow.IdentifierList, numStakingSigners int, numRandomBeaconSigners int, @@ -364,9 +368,12 @@ func sampleSigners( panic(fmt.Sprintf("Cannot sample %d nodes out of a committee is size %d", numStakingSigners+numRandomBeaconSigners, len(committee))) } - stakingSigners = committee.Sample(uint(numStakingSigners)) + var err error + stakingSigners, err = committee.Sample(uint(numStakingSigners)) + require.NoError(t, err) remaining := committee.Filter(id.Not(id.In(stakingSigners...))) - randomBeaconSigners = remaining.Sample(uint(numRandomBeaconSigners)) + randomBeaconSigners, err = remaining.Sample(uint(numRandomBeaconSigners)) + require.NoError(t, err) return } diff --git a/module/state_synchronization/requester/execution_data_requester_test.go b/module/state_synchronization/requester/execution_data_requester_test.go index f116da7a297..5ac29329094 100644 --- a/module/state_synchronization/requester/execution_data_requester_test.go +++ b/module/state_synchronization/requester/execution_data_requester_test.go @@ -52,7 +52,6 @@ type ExecutionDataRequesterSuite struct { func TestExecutionDataRequesterSuite(t *testing.T) { t.Parallel() - rand.Seed(time.Now().UnixMilli()) suite.Run(t, new(ExecutionDataRequesterSuite)) } diff --git 
a/module/state_synchronization/requester/jobs/execution_data_reader_test.go b/module/state_synchronization/requester/jobs/execution_data_reader_test.go index 365e0358ee6..90240c83dd8 100644 --- a/module/state_synchronization/requester/jobs/execution_data_reader_test.go +++ b/module/state_synchronization/requester/jobs/execution_data_reader_test.go @@ -3,7 +3,6 @@ package jobs import ( "context" "errors" - "math/rand" "testing" "time" @@ -46,7 +45,6 @@ type ExecutionDataReaderSuite struct { func TestExecutionDataReaderSuite(t *testing.T) { t.Parallel() - rand.Seed(time.Now().UnixMilli()) suite.Run(t, new(ExecutionDataReaderSuite)) } diff --git a/module/trace/constants.go b/module/trace/constants.go index 76db4374abb..5dada818038 100644 --- a/module/trace/constants.go +++ b/module/trace/constants.go @@ -178,7 +178,7 @@ const ( FVMEnvBLSAggregatePublicKeys SpanName = "fvm.env.blsAggregatePublicKeys" FVMEnvGetCurrentBlockHeight SpanName = "fvm.env.getCurrentBlockHeight" FVMEnvGetBlockAtHeight SpanName = "fvm.env.getBlockAtHeight" - FVMEnvUnsafeRandom SpanName = "fvm.env.unsafeRandom" + FVMEnvRandom SpanName = "fvm.env.unsafeRandom" FVMEnvCreateAccount SpanName = "fvm.env.createAccount" FVMEnvAddAccountKey SpanName = "fvm.env.addAccountKey" FVMEnvAddEncodedAccountKey SpanName = "fvm.env.addEncodedAccountKey" diff --git a/module/trace/trace_test.go b/module/trace/trace_test.go index c98a632d4a9..f1011589930 100644 --- a/module/trace/trace_test.go +++ b/module/trace/trace_test.go @@ -2,7 +2,7 @@ package trace import ( "context" - "math/rand" + "crypto/rand" "testing" "github.com/rs/zerolog" diff --git a/network/alsp/manager/manager.go b/network/alsp/manager/manager.go index 941407e2637..a1c3e25bf03 100644 --- a/network/alsp/manager/manager.go +++ b/network/alsp/manager/manager.go @@ -230,6 +230,7 @@ func (m *MisbehaviorReportManager) HandleMisbehaviorReport(channel channels.Chan Hex("misbehaving_id", logging.ID(report.OriginId())). Str("reason", report.Reason().String()). Float64("penalty", report.Penalty()).Logger() + lg.Trace().Msg("received misbehavior report") m.metrics.OnMisbehaviorReported(channel.String(), report.Reason().String()) nonce := [internal.NonceSize]byte{} @@ -333,7 +334,6 @@ func (m *MisbehaviorReportManager) onHeartbeat() error { Cause: network.DisallowListedCauseAlsp, // sets the ALSP disallow listing cause on node }) } - // each time we decay the penalty by the decay speed, the penalty is a negative number, and the decay speed // is a positive number. So the penalty is getting closer to zero. // We use math.Min() to make sure the penalty is never positive. @@ -425,7 +425,6 @@ func (m *MisbehaviorReportManager) processMisbehaviorReport(report internal.Repo // we should crash the node in this case to prevent further misbehavior reports from being lost and fix the bug. 
return fmt.Errorf("failed to apply penalty to the spam record: %w", err) } - lg.Debug().Float64("updated_penalty", updatedPenalty).Msg("misbehavior report handled") return nil } diff --git a/network/alsp/manager/manager_test.go b/network/alsp/manager/manager_test.go index 03f012bb206..b013688bf8b 100644 --- a/network/alsp/manager/manager_test.go +++ b/network/alsp/manager/manager_test.go @@ -30,6 +30,7 @@ import ( "github.com/onflow/flow-go/network/mocknetwork" "github.com/onflow/flow-go/network/p2p" p2ptest "github.com/onflow/flow-go/network/p2p/test" + "github.com/onflow/flow-go/network/slashing" "github.com/onflow/flow-go/utils/unittest" ) @@ -54,7 +55,7 @@ func TestNetworkPassesReportedMisbehavior(t *testing.T) { misbehaviorReportManger.On("Ready").Return(readyDoneChan).Once() misbehaviorReportManger.On("Done").Return(readyDoneChan).Once() ids, nodes, _ := testutils.LibP2PNodeForMiddlewareFixture(t, 1) - mws, _ := testutils.MiddlewareFixtures(t, ids, nodes, testutils.MiddlewareConfigFixture(t)) + mws, _ := testutils.MiddlewareFixtures(t, ids, nodes, testutils.MiddlewareConfigFixture(t), mocknetwork.NewViolationsConsumer(t)) networkCfg := testutils.NetworkConfigFixture(t, *ids[0], ids, mws[0]) net, err := p2p.NewNetwork(networkCfg, p2p.WithAlspManager(misbehaviorReportManger)) @@ -111,7 +112,7 @@ func TestHandleReportedMisbehavior_Cache_Integration(t *testing.T) { }), } ids, nodes, _ := testutils.LibP2PNodeForMiddlewareFixture(t, 1) - mws, _ := testutils.MiddlewareFixtures(t, ids, nodes, testutils.MiddlewareConfigFixture(t)) + mws, _ := testutils.MiddlewareFixtures(t, ids, nodes, testutils.MiddlewareConfigFixture(t), mocknetwork.NewViolationsConsumer(t)) networkCfg := testutils.NetworkConfigFixture(t, *ids[0], ids, mws[0], p2p.WithAlspConfig(cfg)) net, err := p2p.NewNetwork(networkCfg) require.NoError(t, err) @@ -183,7 +184,7 @@ func TestHandleReportedMisbehavior_Cache_Integration(t *testing.T) { // TestHandleReportedMisbehavior_And_DisallowListing_Integration implements an end-to-end integration test for the // handling of reported misbehavior and disallow listing. // -// The test sets up 3 nodes, one victim, one honest, and one (alledged) spammer. +// The test sets up 3 nodes, one victim, one honest, and one (alleged) spammer. // Initially, the test ensures that all nodes are connected to each other. // Then, test imitates that victim node reports the spammer node for spamming. // The test generates enough spam reports to trigger the disallow-listing of the victim node. @@ -196,17 +197,17 @@ func TestHandleReportedMisbehavior_And_DisallowListing_Integration(t *testing.T) // this test is assessing the integration of the ALSP manager with the network. As the ALSP manager is an attribute // of the network, we need to configure the ALSP manager via the network configuration, and let the network create // the ALSP manager. 
- var victimSpamRecordCacheCache alsp.SpamRecordCache + var victimSpamRecordCache alsp.SpamRecordCache cfg.Opts = []alspmgr.MisbehaviorReportManagerOption{ alspmgr.WithSpamRecordsCacheFactory(func(logger zerolog.Logger, size uint32, metrics module.HeroCacheMetrics) alsp.SpamRecordCache { - victimSpamRecordCacheCache = internal.NewSpamRecordCache(size, logger, metrics, model.SpamRecordFactory()) - return victimSpamRecordCacheCache + victimSpamRecordCache = internal.NewSpamRecordCache(size, logger, metrics, model.SpamRecordFactory()) + return victimSpamRecordCache }), } ids, nodes, _ := testutils.LibP2PNodeForMiddlewareFixture(t, 3, p2ptest.WithPeerManagerEnabled(p2ptest.PeerManagerConfigFixture(), nil)) - mws, _ := testutils.MiddlewareFixtures(t, ids, nodes, testutils.MiddlewareConfigFixture(t)) + mws, _ := testutils.MiddlewareFixtures(t, ids, nodes, testutils.MiddlewareConfigFixture(t), mocknetwork.NewViolationsConsumer(t)) networkCfg := testutils.NetworkConfigFixture(t, *ids[0], ids, mws[0], p2p.WithAlspConfig(cfg)) victimNetwork, err := p2p.NewNetwork(networkCfg) require.NoError(t, err) @@ -266,6 +267,98 @@ func TestHandleReportedMisbehavior_And_DisallowListing_Integration(t *testing.T) p2ptest.EnsureNotConnectedBetweenGroups(t, ctx, []p2p.LibP2PNode{nodes[victimIndex]}, []p2p.LibP2PNode{nodes[spammerIndex]}) } +// TestHandleReportedMisbehavior_And_SlashingViolationsConsumer_Integration implements an end-to-end integration test for the +// handling of reported misbehavior from the slashing.ViolationsConsumer. +// +// The test sets up one victim, one honest, and one (alleged) spammer for each of the current slashing violations. +// Initially, the test ensures that all nodes are connected to each other. +// Then, test imitates the slashing violations consumer on the victim node reporting misbehavior's for each slashing violation. +// The test generates enough slashing violations to trigger the connection to each of the spamming nodes to be eventually pruned. +// The test ensures that the victim node is disconnected from all spammer nodes. +// The test ensures that despite attempting on connections, no inbound or outbound connections between the victim and +// the pruned spammer nodes are established. +func TestHandleReportedMisbehavior_And_SlashingViolationsConsumer_Integration(t *testing.T) { + + // create 1 victim node, 1 honest node and a node for each slashing violation + ids, nodes, _ := testutils.LibP2PNodeForMiddlewareFixture(t, 7) // creates 7 nodes (1 victim, 1 honest, 5 spammer nodes one for each slashing violation). 
+ mws, _ := testutils.MiddlewareFixtures(t, ids, nodes, testutils.MiddlewareConfigFixture(t), mocknetwork.NewViolationsConsumer(t)) + networkCfg := testutils.NetworkConfigFixture(t, *ids[0], ids, mws[0], p2p.WithAlspConfig(managerCfgFixture(t))) + victimNetwork, err := p2p.NewNetwork(networkCfg) + require.NoError(t, err) + + // create slashing violations consumer with victim node network providing the network.MisbehaviorReportConsumer interface + violationsConsumer := slashing.NewSlashingViolationsConsumer(unittest.Logger(), metrics.NewNoopCollector(), victimNetwork) + mws[0].SetSlashingViolationsConsumer(violationsConsumer) + + ctx, cancel := context.WithCancel(context.Background()) + signalerCtx := irrecoverable.NewMockSignalerContext(t, ctx) + testutils.StartNodesAndNetworks(signalerCtx, t, nodes, []network.Network{victimNetwork}, 100*time.Millisecond) + defer testutils.StopComponents[p2p.LibP2PNode](t, nodes, 100*time.Millisecond) + defer cancel() + + p2ptest.LetNodesDiscoverEachOther(t, ctx, nodes, ids) + // initially victim and misbehaving nodes should be able to connect to each other. + p2ptest.TryConnectionAndEnsureConnected(t, ctx, nodes) + + // each slashing violation func is mapped to a violation with the identity of one of the misbehaving nodes + // index of the victim node in the nodes slice. + victimIndex := 0 + honestNodeIndex := 1 + invalidMessageIndex := 2 + senderEjectedIndex := 3 + unauthorizedUnicastOnChannelIndex := 4 + unauthorizedPublishOnChannelIndex := 5 + unknownMsgTypeIndex := 6 + slashingViolationTestCases := []struct { + violationsConsumerFunc func(violation *network.Violation) + violation *network.Violation + }{ + {violationsConsumer.OnUnAuthorizedSenderError, &network.Violation{Identity: ids[invalidMessageIndex]}}, + {violationsConsumer.OnSenderEjectedError, &network.Violation{Identity: ids[senderEjectedIndex]}}, + {violationsConsumer.OnUnauthorizedUnicastOnChannel, &network.Violation{Identity: ids[unauthorizedUnicastOnChannelIndex]}}, + {violationsConsumer.OnUnauthorizedPublishOnChannel, &network.Violation{Identity: ids[unauthorizedPublishOnChannelIndex]}}, + {violationsConsumer.OnUnknownMsgTypeError, &network.Violation{Identity: ids[unknownMsgTypeIndex]}}, + } + + violationsWg := sync.WaitGroup{} + violationCount := 120 + for _, testCase := range slashingViolationTestCases { + for i := 0; i < violationCount; i++ { + testCase := testCase + violationsWg.Add(1) + go func() { + defer violationsWg.Done() + testCase.violationsConsumerFunc(testCase.violation) + }() + } + } + unittest.RequireReturnsBefore(t, violationsWg.Wait, 100*time.Millisecond, "slashing violations not reported in time") + + forEachMisbehavingNode := func(f func(i int)) { + for misbehavingNodeIndex := 2; misbehavingNodeIndex <= len(nodes)-1; misbehavingNodeIndex++ { + f(misbehavingNodeIndex) + } + } + + // ensures all misbehaving nodes are disconnected from the victim node + forEachMisbehavingNode(func(misbehavingNodeIndex int) { + p2ptest.RequireEventuallyNotConnected(t, []p2p.LibP2PNode{nodes[victimIndex]}, []p2p.LibP2PNode{nodes[misbehavingNodeIndex]}, 100*time.Millisecond, 2*time.Second) + }) + + // despite being disconnected from the victim node, misbehaving nodes and the honest node are still connected. 
+ forEachMisbehavingNode(func(misbehavingNodeIndex int) { + p2ptest.RequireConnectedEventually(t, []p2p.LibP2PNode{nodes[honestNodeIndex], nodes[misbehavingNodeIndex]}, 1*time.Millisecond, 100*time.Millisecond) + }) + + // despite disconnecting misbehaving nodes, ensure that (victim and honest) are still connected. + p2ptest.RequireConnectedEventually(t, []p2p.LibP2PNode{nodes[honestNodeIndex], nodes[victimIndex]}, 1*time.Millisecond, 100*time.Millisecond) + + // while misbehaving nodes are disconnected, they cannot connect to the victim node. Also, the victim node cannot directly dial and connect to the misbehaving nodes until each node's peer score decays. + forEachMisbehavingNode(func(misbehavingNodeIndex int) { + p2ptest.EnsureNotConnectedBetweenGroups(t, ctx, []p2p.LibP2PNode{nodes[victimIndex]}, []p2p.LibP2PNode{nodes[misbehavingNodeIndex]}) + }) +} + // TestMisbehaviorReportMetrics tests the recording of misbehavior report metrics. // It checks that when a misbehavior report is received by the ALSP manager, the metrics are recorded. // It fails the test if the metrics are not recorded or if they are recorded incorrectly. @@ -279,7 +372,7 @@ func TestMisbehaviorReportMetrics(t *testing.T) { cfg.AlspMetrics = alspMetrics ids, nodes, _ := testutils.LibP2PNodeForMiddlewareFixture(t, 1) - mws, _ := testutils.MiddlewareFixtures(t, ids, nodes, testutils.MiddlewareConfigFixture(t)) + mws, _ := testutils.MiddlewareFixtures(t, ids, nodes, testutils.MiddlewareConfigFixture(t), mocknetwork.NewViolationsConsumer(t)) networkCfg := testutils.NetworkConfigFixture(t, *ids[0], ids, mws[0], p2p.WithAlspConfig(cfg)) net, err := p2p.NewNetwork(networkCfg) require.NoError(t, err) diff --git a/network/alsp/misbehavior.go b/network/alsp/misbehavior.go index 326b113cd8b..af4921cd06a 100644 --- a/network/alsp/misbehavior.go +++ b/network/alsp/misbehavior.go @@ -24,6 +24,25 @@ const ( // the message is not valid according to the engine's validation logic. The decision to consider a message invalid // is up to the engine. InvalidMessage network.Misbehavior = "misbehavior-invalid-message" + + // UnExpectedValidationError is a misbehavior that is reported when a validation error is encountered during message validation before the message + // is processed by an engine. + UnExpectedValidationError network.Misbehavior = "unexpected-validation-error" + + // UnknownMsgType is a misbehavior that is reported when a message of unknown type is received from a peer. + UnknownMsgType network.Misbehavior = "unknown-message-type" + + // SenderEjected is a misbehavior that is reported when a message is received from an ejected peer. + SenderEjected network.Misbehavior = "sender-ejected" + + // UnauthorizedUnicastOnChannel is a misbehavior that is reported when a message not authorized to be sent via unicast is received via unicast. + UnauthorizedUnicastOnChannel network.Misbehavior = "unauthorized-unicast-on-channel" + + // UnAuthorizedSender is a misbehavior that is reported when a message is sent by an unauthorized role. + UnAuthorizedSender network.Misbehavior = "unauthorized-sender" + + // UnauthorizedPublishOnChannel is a misbehavior that is reported when a message not authorized to be sent via pubsub is received via pubsub. 
+ UnauthorizedPublishOnChannel network.Misbehavior = "unauthorized-pubsub-on-channel" ) func AllMisbehaviorTypes() []network.Misbehavior { @@ -33,5 +52,11 @@ func AllMisbehaviorTypes() []network.Misbehavior { RedundantMessage, UnsolicitedMessage, InvalidMessage, + UnExpectedValidationError, + UnknownMsgType, + SenderEjected, + UnauthorizedUnicastOnChannel, + UnauthorizedPublishOnChannel, + UnAuthorizedSender, } } diff --git a/network/cache/rcvcache.go b/network/cache/rcvcache.go index be685ae670d..bdab2ad894a 100644 --- a/network/cache/rcvcache.go +++ b/network/cache/rcvcache.go @@ -29,7 +29,8 @@ func (r receiveCacheEntry) Checksum() flow.Identifier { } // NewHeroReceiveCache returns a new HeroCache-based receive cache. -func NewHeroReceiveCache(sizeLimit uint32, logger zerolog.Logger, collector module.HeroCacheMetrics) *ReceiveCache { +func NewHeroReceiveCache(sizeLimit uint32, logger zerolog.Logger, collector module.HeroCacheMetrics, +) *ReceiveCache { backData := herocache.NewCache(sizeLimit, herocache.DefaultOversizeFactor, heropool.LRUEjection, // receive cache must be LRU. diff --git a/network/internal/p2pfixtures/fixtures.go b/network/internal/p2pfixtures/fixtures.go index 40229337dfa..29ee0509fbb 100644 --- a/network/internal/p2pfixtures/fixtures.go +++ b/network/internal/p2pfixtures/fixtures.go @@ -102,11 +102,16 @@ func CreateNode(t *testing.T, networkKey crypto.PrivateKey, sporkID flow.Identif idProvider := id.NewFixedIdentityProvider(nodeIds) defaultFlowConfig, err := config.DefaultConfig() require.NoError(t, err) - meshTracer := tracer.NewGossipSubMeshTracer( - logger, - metrics.NewNoopCollector(), - idProvider, - defaultFlowConfig.NetworkConfig.GossipSubConfig.LocalMeshLogInterval) + + meshTracerCfg := &tracer.GossipSubMeshTracerConfig{ + Logger: logger, + Metrics: metrics.NewNoopCollector(), + IDProvider: idProvider, + LoggerInterval: defaultFlowConfig.NetworkConfig.GossipSubConfig.LocalMeshLogInterval, + RpcSentTrackerCacheCollector: metrics.NewNoopCollector(), + RpcSentTrackerCacheSize: defaultFlowConfig.NetworkConfig.GossipSubConfig.RPCSentTrackerCacheSize, + } + meshTracer := tracer.NewGossipSubMeshTracer(meshTracerCfg) builder := p2pbuilder.NewNodeBuilder( logger, diff --git a/network/internal/testutils/testUtil.go b/network/internal/testutils/testUtil.go index 2457c7c6af7..95707ee9e3c 100644 --- a/network/internal/testutils/testUtil.go +++ b/network/internal/testutils/testUtil.go @@ -28,7 +28,6 @@ import ( netcache "github.com/onflow/flow-go/network/cache" "github.com/onflow/flow-go/network/channels" "github.com/onflow/flow-go/network/codec/cbor" - "github.com/onflow/flow-go/network/mocknetwork" "github.com/onflow/flow-go/network/netconf" "github.com/onflow/flow-go/network/p2p" "github.com/onflow/flow-go/network/p2p/conduit" @@ -167,12 +166,11 @@ func LibP2PNodeForMiddlewareFixture(t *testing.T, n int, opts ...p2ptest.NodeFix // - a middleware config. 
func MiddlewareConfigFixture(t *testing.T) *middleware.Config { return &middleware.Config{ - Logger: unittest.Logger(), - BitSwapMetrics: metrics.NewNoopCollector(), - RootBlockID: sporkID, - UnicastMessageTimeout: middleware.DefaultUnicastTimeout, - Codec: unittest.NetworkCodec(), - SlashingViolationsConsumer: mocknetwork.NewViolationsConsumer(t), + Logger: unittest.Logger(), + BitSwapMetrics: metrics.NewNoopCollector(), + RootBlockID: sporkID, + UnicastMessageTimeout: middleware.DefaultUnicastTimeout, + Codec: unittest.NetworkCodec(), } } @@ -187,7 +185,7 @@ func MiddlewareConfigFixture(t *testing.T) *middleware.Config { // Returns: // - a list of middlewares - one for each identity. // - a list of UpdatableIDProvider - one for each identity. -func MiddlewareFixtures(t *testing.T, identities flow.IdentityList, libP2PNodes []p2p.LibP2PNode, cfg *middleware.Config, opts ...middleware.OptionFn) ([]network.Middleware, []*unittest.UpdatableIDProvider) { +func MiddlewareFixtures(t *testing.T, identities flow.IdentityList, libP2PNodes []p2p.LibP2PNode, cfg *middleware.Config, consumer network.ViolationsConsumer, opts ...middleware.OptionFn) ([]network.Middleware, []*unittest.UpdatableIDProvider) { require.Equal(t, len(identities), len(libP2PNodes)) mws := make([]network.Middleware, len(identities)) @@ -199,8 +197,8 @@ func MiddlewareFixtures(t *testing.T, identities flow.IdentityList, libP2PNodes cfg.FlowId = identities[i].NodeID idProviders[i] = unittest.NewUpdatableIDProvider(identities) cfg.IdTranslator = translator.NewIdentityProviderIDTranslator(idProviders[i]) - mws[i] = middleware.NewMiddleware(cfg, opts...) + mws[i].SetSlashingViolationsConsumer(consumer) } return mws, idProviders } diff --git a/network/middleware.go b/network/middleware.go index c2eeef98905..d8e14ee82c1 100644 --- a/network/middleware.go +++ b/network/middleware.go @@ -22,6 +22,9 @@ type Middleware interface { // SetOverlay sets the overlay used by the middleware. This must be called before the middleware can be Started. SetOverlay(Overlay) + // SetSlashingViolationsConsumer sets the slashing violations consumer. + SetSlashingViolationsConsumer(ViolationsConsumer) + // SendDirect sends msg on a 1-1 direct connection to the target ID. It models a guaranteed delivery asynchronous // direct one-to-one connection on the underlying network. No intermediate node on the overlay is utilized // as the router. diff --git a/network/mocknetwork/adapter.go b/network/mocknetwork/adapter.go index 364ec1027ce..2700f6eb0cc 100644 --- a/network/mocknetwork/adapter.go +++ b/network/mocknetwork/adapter.go @@ -58,9 +58,9 @@ func (_m *Adapter) PublishOnChannel(_a0 channels.Channel, _a1 interface{}, _a2 . 
return r0 } -// ReportMisbehaviorOnChannel provides a mock function with given fields: _a0, _a1 -func (_m *Adapter) ReportMisbehaviorOnChannel(_a0 channels.Channel, _a1 network.MisbehaviorReport) { - _m.Called(_a0, _a1) +// ReportMisbehaviorOnChannel provides a mock function with given fields: channel, report +func (_m *Adapter) ReportMisbehaviorOnChannel(channel channels.Channel, report network.MisbehaviorReport) { + _m.Called(channel, report) } // UnRegisterChannel provides a mock function with given fields: channel diff --git a/network/mocknetwork/middleware.go b/network/mocknetwork/middleware.go index 64167ce9ed8..18cdaed21b0 100644 --- a/network/mocknetwork/middleware.go +++ b/network/mocknetwork/middleware.go @@ -160,6 +160,11 @@ func (_m *Middleware) SetOverlay(_a0 network.Overlay) { _m.Called(_a0) } +// SetSlashingViolationsConsumer provides a mock function with given fields: _a0 +func (_m *Middleware) SetSlashingViolationsConsumer(_a0 network.ViolationsConsumer) { + _m.Called(_a0) +} + // Start provides a mock function with given fields: _a0 func (_m *Middleware) Start(_a0 irrecoverable.SignalerContext) { _m.Called(_a0) diff --git a/network/mocknetwork/misbehavior_report_consumer.go b/network/mocknetwork/misbehavior_report_consumer.go new file mode 100644 index 00000000000..8731a6ae8fe --- /dev/null +++ b/network/mocknetwork/misbehavior_report_consumer.go @@ -0,0 +1,35 @@ +// Code generated by mockery v2.21.4. DO NOT EDIT. + +package mocknetwork + +import ( + channels "github.com/onflow/flow-go/network/channels" + mock "github.com/stretchr/testify/mock" + + network "github.com/onflow/flow-go/network" +) + +// MisbehaviorReportConsumer is an autogenerated mock type for the MisbehaviorReportConsumer type +type MisbehaviorReportConsumer struct { + mock.Mock +} + +// ReportMisbehaviorOnChannel provides a mock function with given fields: channel, report +func (_m *MisbehaviorReportConsumer) ReportMisbehaviorOnChannel(channel channels.Channel, report network.MisbehaviorReport) { + _m.Called(channel, report) +} + +type mockConstructorTestingTNewMisbehaviorReportConsumer interface { + mock.TestingT + Cleanup(func()) +} + +// NewMisbehaviorReportConsumer creates a new instance of MisbehaviorReportConsumer. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. 
+func NewMisbehaviorReportConsumer(t mockConstructorTestingTNewMisbehaviorReportConsumer) *MisbehaviorReportConsumer { + mock := &MisbehaviorReportConsumer{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/network/mocknetwork/violations_consumer.go b/network/mocknetwork/violations_consumer.go index 9c6f252b095..2af1bf2b80f 100644 --- a/network/mocknetwork/violations_consumer.go +++ b/network/mocknetwork/violations_consumer.go @@ -3,7 +3,7 @@ package mocknetwork import ( - slashing "github.com/onflow/flow-go/network/slashing" + network "github.com/onflow/flow-go/network" mock "github.com/stretchr/testify/mock" ) @@ -13,32 +13,37 @@ type ViolationsConsumer struct { } // OnInvalidMsgError provides a mock function with given fields: violation -func (_m *ViolationsConsumer) OnInvalidMsgError(violation *slashing.Violation) { +func (_m *ViolationsConsumer) OnInvalidMsgError(violation *network.Violation) { _m.Called(violation) } // OnSenderEjectedError provides a mock function with given fields: violation -func (_m *ViolationsConsumer) OnSenderEjectedError(violation *slashing.Violation) { +func (_m *ViolationsConsumer) OnSenderEjectedError(violation *network.Violation) { _m.Called(violation) } // OnUnAuthorizedSenderError provides a mock function with given fields: violation -func (_m *ViolationsConsumer) OnUnAuthorizedSenderError(violation *slashing.Violation) { +func (_m *ViolationsConsumer) OnUnAuthorizedSenderError(violation *network.Violation) { + _m.Called(violation) +} + +// OnUnauthorizedPublishOnChannel provides a mock function with given fields: violation +func (_m *ViolationsConsumer) OnUnauthorizedPublishOnChannel(violation *network.Violation) { _m.Called(violation) } // OnUnauthorizedUnicastOnChannel provides a mock function with given fields: violation -func (_m *ViolationsConsumer) OnUnauthorizedUnicastOnChannel(violation *slashing.Violation) { +func (_m *ViolationsConsumer) OnUnauthorizedUnicastOnChannel(violation *network.Violation) { _m.Called(violation) } // OnUnexpectedError provides a mock function with given fields: violation -func (_m *ViolationsConsumer) OnUnexpectedError(violation *slashing.Violation) { +func (_m *ViolationsConsumer) OnUnexpectedError(violation *network.Violation) { _m.Called(violation) } // OnUnknownMsgTypeError provides a mock function with given fields: violation -func (_m *ViolationsConsumer) OnUnknownMsgTypeError(violation *slashing.Violation) { +func (_m *ViolationsConsumer) OnUnknownMsgTypeError(violation *network.Violation) { _m.Called(violation) } diff --git a/network/netconf/flags.go b/network/netconf/flags.go index 3d8e6357e76..bdf821aa60b 100644 --- a/network/netconf/flags.go +++ b/network/netconf/flags.go @@ -37,9 +37,10 @@ const ( gracePeriod = "libp2p-grace-period" silencePeriod = "libp2p-silence-period" // gossipsub - peerScoring = "gossipsub-peer-scoring-enabled" - localMeshLogInterval = "gossipsub-local-mesh-logging-interval" - scoreTracerInterval = "gossipsub-score-tracer-interval" + peerScoring = "gossipsub-peer-scoring-enabled" + localMeshLogInterval = "gossipsub-local-mesh-logging-interval" + rpcSentTrackerCacheSize = "gossipsub-rpc-sent-tracker-cache-size" + scoreTracerInterval = "gossipsub-score-tracer-interval" // gossipsub validation inspector gossipSubRPCInspectorNotificationCacheSize = "gossipsub-rpc-inspector-notification-cache-size" validationInspectorNumberOfWorkers = "gossipsub-rpc-validation-inspector-workers" @@ -66,7 +67,7 @@ func AllFlagNames() []string { return 
[]string{ networkingConnectionPruning, preferredUnicastsProtocols, receivedMessageCacheSize, peerUpdateInterval, unicastMessageTimeout, unicastCreateStreamRetryDelay, dnsCacheTTL, disallowListNotificationCacheSize, dryRun, lockoutDuration, messageRateLimit, bandwidthRateLimit, bandwidthBurstLimit, memoryLimitRatio, - fileDescriptorsRatio, peerBaseLimitConnsInbound, highWatermark, lowWatermark, gracePeriod, silencePeriod, peerScoring, localMeshLogInterval, scoreTracerInterval, + fileDescriptorsRatio, peerBaseLimitConnsInbound, highWatermark, lowWatermark, gracePeriod, silencePeriod, peerScoring, localMeshLogInterval, rpcSentTrackerCacheSize, scoreTracerInterval, gossipSubRPCInspectorNotificationCacheSize, validationInspectorNumberOfWorkers, validationInspectorInspectMessageQueueCacheSize, validationInspectorClusterPrefixedTopicsReceivedCacheSize, validationInspectorClusterPrefixedTopicsReceivedCacheDecay, validationInspectorClusterPrefixHardThreshold, ihaveSyncSampleSizePercentage, ihaveAsyncSampleSizePercentage, ihaveMaxSampleSize, metricsInspectorNumberOfWorkers, metricsInspectorCacheSize, alspDisabled, alspSpamRecordCacheSize, alspSpamRecordQueueSize, alspHearBeatInterval, @@ -107,6 +108,7 @@ func InitializeNetworkFlags(flags *pflag.FlagSet, config *Config) { flags.Bool(peerScoring, config.GossipSubConfig.PeerScoring, "enabling peer scoring on pubsub network") flags.Duration(localMeshLogInterval, config.GossipSubConfig.LocalMeshLogInterval, "logging interval for local mesh in gossipsub") flags.Duration(scoreTracerInterval, config.GossipSubConfig.ScoreTracerInterval, "logging interval for peer score tracer in gossipsub, set to 0 to disable") + flags.Uint32(rpcSentTrackerCacheSize, config.GossipSubConfig.RPCSentTrackerCacheSize, "cache size of the rpc sent tracker used by the gossipsub mesh tracer.") // gossipsub RPC control message validation limits used for validation configuration and rate limiting flags.Int(validationInspectorNumberOfWorkers, config.GossipSubConfig.GossipSubRPCInspectorsConfig.GossipSubRPCValidationInspectorConfigs.NumberOfWorkers, "number of gossupsub RPC control message validation inspector component workers") flags.Uint32(validationInspectorInspectMessageQueueCacheSize, config.GossipSubConfig.GossipSubRPCInspectorsConfig.GossipSubRPCValidationInspectorConfigs.CacheSize, "cache size for gossipsub RPC validation inspector events worker pool queue.") diff --git a/network/network.go b/network/network.go index 703c5e627c8..38896633e4d 100644 --- a/network/network.go +++ b/network/network.go @@ -13,6 +13,17 @@ import ( // and private (i.e., staked) networks. type NetworkingType uint8 +func (t NetworkingType) String() string { + switch t { + case PrivateNetwork: + return "private" + case PublicNetwork: + return "public" + default: + return "unknown" + } +} + const ( // PrivateNetwork indicates that the staked private-side of the Flow blockchain that nodes can only join and leave // with a staking requirement. @@ -47,6 +58,7 @@ type Network interface { // Adapter is meant to be utilized by the Conduit interface to send messages to the Network layer to be // delivered to the remote targets. type Adapter interface { + MisbehaviorReportConsumer // UnicastOnChannel sends the message in a reliable way to the given recipient. UnicastOnChannel(channels.Channel, interface{}, flow.Identifier) error @@ -60,7 +72,10 @@ type Adapter interface { // UnRegisterChannel unregisters the engine for the specified channel. 
The engine will no longer be able to send or // receive messages from that channel. UnRegisterChannel(channel channels.Channel) error +} +// MisbehaviorReportConsumer set of funcs used to handle MisbehaviorReport disseminated from misbehavior reporters. +type MisbehaviorReportConsumer interface { // ReportMisbehaviorOnChannel reports the misbehavior of a node on sending a message to the current node that appears // valid based on the networking layer but is considered invalid by the current node based on the Flow protocol. // The misbehavior report is sent to the current node's networking layer on the given channel to be processed. @@ -69,5 +84,5 @@ type Adapter interface { // - report: The misbehavior report to be sent. // Returns: // none - ReportMisbehaviorOnChannel(channels.Channel, MisbehaviorReport) + ReportMisbehaviorOnChannel(channel channels.Channel, report MisbehaviorReport) } diff --git a/network/p2p/builder.go b/network/p2p/builder.go index 3bd8e278716..b856f931a29 100644 --- a/network/p2p/builder.go +++ b/network/p2p/builder.go @@ -28,7 +28,6 @@ type GossipSubAdapterConfigFunc func(*BasePubSubAdapterConfig) PubSubAdapterConf // GossipSubBuilder provides a builder pattern for creating a GossipSub pubsub system. type GossipSubBuilder interface { - PeerScoringBuilder // SetHost sets the host of the builder. // If the host has already been set, a fatal error is logged. SetHost(host.Host) @@ -45,9 +44,16 @@ type GossipSubBuilder interface { // We expect the node to initialize with a default gossipsub config. Hence, this function overrides the default config. SetGossipSubConfigFunc(GossipSubAdapterConfigFunc) - // SetGossipSubPeerScoring sets the gossipsub peer scoring of the builder. - // If the gossipsub peer scoring flag has already been set, a fatal error is logged. - SetGossipSubPeerScoring(bool) + // EnableGossipSubScoringWithOverride enables peer scoring for the GossipSub pubsub system with the given override. + // Any existing peer scoring config attribute that is set in the override will override the default peer scoring config. + // Anything that is left to nil or zero value in the override will be ignored and the default value will be used. + // Note: it is not recommended to override the default peer scoring config in production unless you know what you are doing. + // Production Tip: use PeerScoringConfigNoOverride as the argument to this function to enable peer scoring without any override. + // Args: + // - PeerScoringConfigOverride: override for the peer scoring config- Recommended to use PeerScoringConfigNoOverride for production. + // Returns: + // none + EnableGossipSubScoringWithOverride(*PeerScoringConfigOverride) // SetGossipSubScoreTracerInterval sets the gossipsub score tracer interval of the builder. // If the gossipsub score tracer interval has already been set, a fatal error is logged. @@ -81,16 +87,6 @@ type GossipSubBuilder interface { Build(irrecoverable.SignalerContext) (PubSubAdapter, error) } -type PeerScoringBuilder interface { - // SetTopicScoreParams sets the topic score parameters for the given topic. - // If the topic score parameters have already been set for the given topic, it is overwritten. - SetTopicScoreParams(topic channels.Topic, topicScoreParams *pubsub.TopicScoreParams) - - // SetAppSpecificScoreParams sets the application specific score parameters for the given topic. - // If the application specific score parameters have already been set for the given topic, it is overwritten. 
- SetAppSpecificScoreParams(func(peer.ID) float64) -} - // GossipSubRpcInspectorSuiteFactoryFunc is a function that creates a new RPC inspector suite. It is used to create // RPC inspectors for the gossipsub protocol. The RPC inspectors are used to inspect and validate // incoming RPC messages before they are processed by the gossipsub protocol. @@ -123,11 +119,16 @@ type NodeBuilder interface { SetConnectionGater(ConnectionGater) NodeBuilder SetRoutingSystem(func(context.Context, host.Host) (routing.Routing, error)) NodeBuilder - // EnableGossipSubPeerScoring enables peer scoring for the GossipSub pubsub system. - // Arguments: - // - module.IdentityProvider: the identity provider for the node (must be set before calling this method). - // - *PeerScoringConfig: the peer scoring configuration for the GossipSub pubsub system. If nil, the default configuration is used. - EnableGossipSubPeerScoring(*PeerScoringConfig) NodeBuilder + // EnableGossipSubScoringWithOverride enables peer scoring for the GossipSub pubsub system with the given override. + // Any existing peer scoring config attribute that is set in the override will override the default peer scoring config. + // Anything that is left to nil or zero value in the override will be ignored and the default value will be used. + // Note: it is not recommended to override the default peer scoring config in production unless you know what you are doing. + // Production Tip: use PeerScoringConfigNoOverride as the argument to this function to enable peer scoring without any override. + // Args: + // - PeerScoringConfigOverride: override for the peer scoring config- Recommended to use PeerScoringConfigNoOverride for production. + // Returns: + // none + EnableGossipSubScoringWithOverride(*PeerScoringConfigOverride) NodeBuilder SetCreateNode(CreateNodeFunc) NodeBuilder SetGossipSubFactory(GossipSubFactoryFunc, GossipSubAdapterConfigFunc) NodeBuilder SetStreamCreationRetryInterval(time.Duration) NodeBuilder @@ -138,10 +139,31 @@ type NodeBuilder interface { Build() (LibP2PNode, error) } -// PeerScoringConfig is a configuration for peer scoring parameters for a GossipSub pubsub system. -type PeerScoringConfig struct { +// PeerScoringConfigOverride is a structure that is used to carry over the override values for peer scoring configuration. +// Any attribute that is set in the override will override the default peer scoring config. +// Typically, we are not recommending to override the default peer scoring config in production unless you know what you are doing. +type PeerScoringConfigOverride struct { // TopicScoreParams is a map of topic score parameters for each topic. + // Override criteria: any topic (i.e., key in the map) will override the default topic score parameters for that topic and + // the corresponding value in the map will be used instead of the default value. + // If you don't want to override topic score params for a given topic, simply don't include that topic in the map. + // If the map is nil, the default topic score parameters are used for all topics. TopicScoreParams map[channels.Topic]*pubsub.TopicScoreParams + // AppSpecificScoreParams is a function that returns the application specific score parameters for a given peer. + // Override criteria: if the function is not nil, it will override the default application specific score parameters. + // If the function is nil, the default application specific score parameters are used. 
AppSpecificScoreParams func(peer.ID) float64 + + // DecayInterval is the interval over which we decay the effect of past behavior, so that + // a good or bad behavior will not have a permanent effect on the penalty. It is also the interval + // that GossipSub uses to refresh the scores of all peers. + // Override criteria: if the value is not zero, it will override the default decay interval. + // If the value is zero, the default decay interval is used. + DecayInterval time.Duration } + +// PeerScoringConfigNoOverride is a default peer scoring configuration for a GossipSub pubsub system. +// It is set to nil, which means that no override is done to the default peer scoring configuration. +// It is the recommended way to use the default peer scoring configuration. +var PeerScoringConfigNoOverride = (*PeerScoringConfigOverride)(nil) diff --git a/network/p2p/cache/node_blocklist_wrapper_test.go b/network/p2p/cache/node_blocklist_wrapper_test.go index 01e23e5a276..929be0b066a 100644 --- a/network/p2p/cache/node_blocklist_wrapper_test.go +++ b/network/p2p/cache/node_blocklist_wrapper_test.go @@ -143,7 +143,8 @@ func (s *NodeDisallowListWrapperTestSuite) TestDisallowListNode() { blocklistLookup := blocklist.Lookup() honestIdentities := unittest.IdentityListFixture(8) combinedIdentities := honestIdentities.Union(blocklist) - combinedIdentities = combinedIdentities.DeterministicShuffle(1234) + combinedIdentities, err = combinedIdentities.Shuffle() + require.NoError(s.T(), err) numIdentities := len(combinedIdentities) s.provider.On("Identities", mock.Anything).Return(combinedIdentities) @@ -170,7 +171,8 @@ func (s *NodeDisallowListWrapperTestSuite) TestDisallowListNode() { blocklistLookup := blocklist.Lookup() honestIdentities := unittest.IdentityListFixture(8) combinedIdentities := honestIdentities.Union(blocklist) - combinedIdentities = combinedIdentities.DeterministicShuffle(1234) + combinedIdentities, err = combinedIdentities.Shuffle() + require.NoError(s.T(), err) numIdentities := len(combinedIdentities) s.provider.On("Identities", mock.Anything).Return(combinedIdentities) diff --git a/network/p2p/connection/connection_gater_test.go b/network/p2p/connection/connection_gater_test.go index 0277fc6b632..cd240b5293b 100644 --- a/network/p2p/connection/connection_gater_test.go +++ b/network/p2p/connection/connection_gater_test.go @@ -396,19 +396,20 @@ func TestConnectionGater_Disallow_Integration(t *testing.T) { func ensureCommunicationSilenceAmongGroups(t *testing.T, ctx context.Context, sporkId flow.Identifier, groupA []p2p.LibP2PNode, groupB []p2p.LibP2PNode) { // ensures no connection, unicast, or pubsub going to the disallow-listed nodes p2ptest.EnsureNotConnectedBetweenGroups(t, ctx, groupA, groupB) - p2ptest.EnsureNoPubsubExchangeBetweenGroups(t, ctx, groupA, groupB, func() (interface{}, channels.Topic) { - blockTopic := channels.TopicFromChannel(channels.PushBlocks, sporkId) - return unittest.ProposalFixture(), blockTopic + + blockTopic := channels.TopicFromChannel(channels.PushBlocks, sporkId) + p2ptest.EnsureNoPubsubExchangeBetweenGroups(t, ctx, groupA, groupB, blockTopic, 1, func() interface{} { + return unittest.ProposalFixture() }) p2pfixtures.EnsureNoStreamCreationBetweenGroups(t, ctx, groupA, groupB) } // ensureCommunicationOverAllProtocols ensures that all nodes are connected to each other, and they can exchange messages over the pubsub and unicast. 
func ensureCommunicationOverAllProtocols(t *testing.T, ctx context.Context, sporkId flow.Identifier, nodes []p2p.LibP2PNode, inbounds []chan string) { + blockTopic := channels.TopicFromChannel(channels.PushBlocks, sporkId) p2ptest.TryConnectionAndEnsureConnected(t, ctx, nodes) - p2ptest.EnsurePubsubMessageExchange(t, ctx, nodes, func() (interface{}, channels.Topic) { - blockTopic := channels.TopicFromChannel(channels.PushBlocks, sporkId) - return unittest.ProposalFixture(), blockTopic + p2ptest.EnsurePubsubMessageExchange(t, ctx, nodes, blockTopic, 1, func() interface{} { + return unittest.ProposalFixture() }) p2pfixtures.EnsureMessageExchangeOverUnicast(t, ctx, nodes, inbounds, p2pfixtures.LongStringMessageFactoryFixture(t)) } diff --git a/network/p2p/connection/connector_factory.go b/network/p2p/connection/connector_factory.go index aed13a9d168..10003895953 100644 --- a/network/p2p/connection/connector_factory.go +++ b/network/p2p/connection/connector_factory.go @@ -1,13 +1,14 @@ package connection import ( + "crypto/rand" "fmt" - "math/rand" "time" "github.com/libp2p/go-libp2p/core/host" discoveryBackoff "github.com/libp2p/go-libp2p/p2p/discovery/backoff" + "github.com/onflow/flow-go/crypto/random" "github.com/onflow/flow-go/network/p2p" ) @@ -39,7 +40,10 @@ const ( // (https://github.com/libp2p/go-libp2p-pubsub/blob/master/discovery.go#L34) func DefaultLibp2pBackoffConnectorFactory() p2p.ConnectorFactory { return func(host host.Host) (p2p.Connector, error) { - rngSrc := rand.NewSource(rand.Int63()) + rngSrc, err := newSource() + if err != nil { + return nil, fmt.Errorf("failed to generate a random source: %w", err) + } cacheSize := 100 dialTimeout := time.Minute * 2 @@ -59,3 +63,39 @@ func DefaultLibp2pBackoffConnectorFactory() p2p.ConnectorFactory { return backoffConnector, nil } } + +// `source` implements math/rand.Source so it can be used +// by libp2p's `NewExponentialBackoff`. +// It is backed by a more secure randomness than math/rand's `NewSource`. +// `source` is only implemented to avoid using math/rand's `NewSource`. 
+type source struct { + prg random.Rand +} + +// Seed is not used by the backoff object from `NewExponentialBackoff` +func (src *source) Seed(seed int64) {} + +// Int63 is used by `NewExponentialBackoff` and is based on a crypto PRG +func (src *source) Int63() int64 { + return int64(src.prg.UintN(1 << 63)) +} + +// creates a source using a crypto PRG and secure random seed +// returned errors: +// - exception error if the system randomness fails (the system and other components would +// have many other issues if this happens) +// - exception error if the CSPRG (Chacha20) isn't initialized properly (should not happen in normal +// operations) +func newSource() (*source, error) { + seed := make([]byte, random.Chacha20SeedLen) + _, err := rand.Read(seed) // checking err only is enough + if err != nil { + return nil, fmt.Errorf("failed to generate a seed: %w", err) + } + prg, err := random.NewChacha20PRG(seed, nil) + if err != nil { + // should not happen in normal operations because `seed` has the correct length + return nil, fmt.Errorf("failed to generate a PRG: %w", err) + } + return &source{prg}, nil +} diff --git a/network/p2p/connection/peerManager.go b/network/p2p/connection/peerManager.go index 83dee63359e..11fe502a07c 100644 --- a/network/p2p/connection/peerManager.go +++ b/network/p2p/connection/peerManager.go @@ -3,7 +3,7 @@ package connection import ( "context" "fmt" - mrand "math/rand" + "sync" "time" @@ -14,6 +14,7 @@ import ( "github.com/onflow/flow-go/module/irrecoverable" "github.com/onflow/flow-go/network/p2p" "github.com/onflow/flow-go/utils/logging" + "github.com/onflow/flow-go/utils/rand" ) // DefaultPeerUpdateInterval is default duration for which the peer manager waits in between attempts to update peer connections. @@ -41,7 +42,7 @@ type PeerManager struct { // and it uses the connector to actually connect or disconnect from peers. func NewPeerManager(logger zerolog.Logger, updateInterval time.Duration, connector p2p.PeerUpdater) *PeerManager { pm := &PeerManager{ - logger: logger, + logger: logger.With().Str("component", "peer-manager").Logger(), connector: connector, peerRequestQ: make(chan struct{}, 1), peerUpdateInterval: updateInterval, @@ -86,7 +87,11 @@ func (pm *PeerManager) updateLoop(ctx irrecoverable.SignalerContext) { func (pm *PeerManager) periodicLoop(ctx irrecoverable.SignalerContext) { // add a random delay to initial launch to avoid synchronizing this // potentially expensive operation across the network - delay := time.Duration(mrand.Int63n(pm.peerUpdateInterval.Nanoseconds())) + r, err := rand.Uint64n(uint64(pm.peerUpdateInterval.Nanoseconds())) + if err != nil { + ctx.Throw(fmt.Errorf("unable to generate random interval: %w", err)) + } + delay := time.Duration(r) ticker := time.NewTicker(pm.peerUpdateInterval) defer ticker.Stop() diff --git a/network/p2p/inspector/internal/cache/cache.go b/network/p2p/inspector/internal/cache/cache.go index 133fd0a9ac7..82d8f781a98 100644 --- a/network/p2p/inspector/internal/cache/cache.go +++ b/network/p2p/inspector/internal/cache/cache.go @@ -40,9 +40,7 @@ type RecordCache struct { // NewRecordCache creates a new *RecordCache. // Args: -// - sizeLimit: the maximum number of records that the cache can hold. -// - logger: the logger used by the cache. -// - collector: the metrics collector used by the cache. +// - config: record cache config. // - recordEntityFactory: a factory function that creates a new spam record. // Returns: // - *RecordCache, the created cache. 
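The `source` wrapper above exists so that libp2p components which expect a `math/rand.Source` (such as the exponential backoff connector) can be driven by cryptographically seeded randomness instead of `math/rand.NewSource`. A minimal usage sketch, assuming only the `newSource` helper and `source` type introduced in this diff; the `exampleJitter` name is illustrative and not part of the change:

```go
// exampleJitter draws a single non-deterministic value from the crypto-backed
// source, e.g. to derive backoff jitter. Because *source implements Int63 and
// Seed, it satisfies math/rand.Source and can be handed to any consumer that
// expects one (such as libp2p's backoff connector), without ever calling
// math/rand.NewSource in production code.
func exampleJitter() (int64, error) {
	src, err := newSource()
	if err != nil {
		return 0, fmt.Errorf("could not create crypto-backed source: %w", err)
	}
	return src.Int63(), nil
}
```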
diff --git a/network/p2p/middleware/middleware.go b/network/p2p/middleware/middleware.go index 169737eabc5..c908e8d7f18 100644 --- a/network/p2p/middleware/middleware.go +++ b/network/p2p/middleware/middleware.go @@ -35,7 +35,6 @@ import ( "github.com/onflow/flow-go/network/p2p/unicast/protocols" "github.com/onflow/flow-go/network/p2p/unicast/ratelimit" "github.com/onflow/flow-go/network/p2p/utils" - "github.com/onflow/flow-go/network/slashing" "github.com/onflow/flow-go/network/validator" flowpubsub "github.com/onflow/flow-go/network/validator/pubsub" _ "github.com/onflow/flow-go/utils/binstat" @@ -62,13 +61,6 @@ const ( // LargeMsgUnicastTimeout is the maximum time to wait for a unicast request to complete for large message size LargeMsgUnicastTimeout = 1000 * time.Second - - // DisallowListCacheSize is the maximum number of peers that can be disallow-listed at a time. The recommended - // size is 100 * number of staked nodes. Note that when the cache is full, there is no eviction policy and - // disallow-listing a new peer will fail. Hence, the cache size should be set to a value that is large enough - // to accommodate all the peers that can be disallow-listed at a time. Also, note that this cache is only taking - // the staked (authorized) peers. Hence, Sybil attacks are not possible. - DisallowListCacheSize = 100 * 1000 ) var ( @@ -104,7 +96,7 @@ type Middleware struct { idTranslator p2p.IDTranslator previousProtocolStatePeers []peer.AddrInfo codec network.Codec - slashingViolationsConsumer slashing.ViolationsConsumer + slashingViolationsConsumer network.ViolationsConsumer unicastRateLimiters *ratelimit.RateLimiters authorizedSenderValidator *validator.AuthorizedSenderValidator } @@ -140,15 +132,14 @@ func WithUnicastRateLimiters(rateLimiters *ratelimit.RateLimiters) OptionFn { // Config is the configuration for the middleware. type Config struct { - Logger zerolog.Logger - Libp2pNode p2p.LibP2PNode - FlowId flow.Identifier // This node's Flow ID - BitSwapMetrics module.BitswapMetrics - RootBlockID flow.Identifier - UnicastMessageTimeout time.Duration - IdTranslator p2p.IDTranslator - Codec network.Codec - SlashingViolationsConsumer slashing.ViolationsConsumer + Logger zerolog.Logger + Libp2pNode p2p.LibP2PNode + FlowId flow.Identifier // This node's Flow ID + BitSwapMetrics module.BitswapMetrics + RootBlockID flow.Identifier + UnicastMessageTimeout time.Duration + IdTranslator p2p.IDTranslator + Codec network.Codec } // Validate validates the configuration, and sets default values for any missing fields. 
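Taken together, the middleware changes in this file and the network changes below replace config-time injection of the violations consumer with an explicit setter. A condensed sketch of the new wiring, using only identifiers that appear in this diff; the `wireMiddleware` helper is illustrative, not part of the change:

```go
// wireMiddleware shows the post-refactor flow: middleware.Config no longer
// carries a SlashingViolationsConsumer; instead, the consumer is built from a
// network.MisbehaviorReportConsumer (p2p.NewNetwork passes the network itself)
// and set on the middleware after construction.
func wireMiddleware(cfg *middleware.Config, reports network.MisbehaviorReportConsumer) network.Middleware {
	mw := middleware.NewMiddleware(cfg)
	consumer := slashing.NewSlashingViolationsConsumer(unittest.Logger(), metrics.NewNoopCollector(), reports)
	mw.SetSlashingViolationsConsumer(consumer)
	return mw
}
```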
@@ -172,16 +163,15 @@ func NewMiddleware(cfg *Config, opts ...OptionFn) *Middleware { // create the node entity and inject dependencies & config mw := &Middleware{ - log: cfg.Logger, - libP2PNode: cfg.Libp2pNode, - bitswapMetrics: cfg.BitSwapMetrics, - rootBlockID: cfg.RootBlockID, - validators: DefaultValidators(cfg.Logger, cfg.FlowId), - unicastMessageTimeout: cfg.UnicastMessageTimeout, - idTranslator: cfg.IdTranslator, - codec: cfg.Codec, - slashingViolationsConsumer: cfg.SlashingViolationsConsumer, - unicastRateLimiters: ratelimit.NoopRateLimiters(), + log: cfg.Logger, + libP2PNode: cfg.Libp2pNode, + bitswapMetrics: cfg.BitSwapMetrics, + rootBlockID: cfg.RootBlockID, + validators: DefaultValidators(cfg.Logger, cfg.FlowId), + unicastMessageTimeout: cfg.UnicastMessageTimeout, + idTranslator: cfg.IdTranslator, + codec: cfg.Codec, + unicastRateLimiters: ratelimit.NoopRateLimiters(), } for _, opt := range opts { @@ -304,6 +294,11 @@ func (m *Middleware) SetOverlay(ov network.Overlay) { m.ov = ov } +// SetSlashingViolationsConsumer sets the slashing violations consumer. +func (m *Middleware) SetSlashingViolationsConsumer(consumer network.ViolationsConsumer) { + m.slashingViolationsConsumer = consumer +} + // authorizedPeers is a peer manager callback used by the underlying libp2p node that updates who can connect to this node (as // well as who this node can connect to). // and who is not allowed to connect to this node. This function is called by the peer manager and connection gater components @@ -518,7 +513,7 @@ func (m *Middleware) handleIncomingStream(s libp2pnetwork.Stream) { // ignore messages if node does not have subscription to topic if !m.libP2PNode.HasSubscription(topic) { - violation := &slashing.Violation{ + violation := &network.Violation{ Identity: nil, PeerID: remotePeer.String(), Channel: channel, Protocol: message.ProtocolTypeUnicast, } @@ -651,7 +646,7 @@ func (m *Middleware) processUnicastStreamMessage(remotePeer peer.ID, msg *messag // we can remove this check maxSize, err := UnicastMaxMsgSizeByCode(msg.Payload) if err != nil { - m.slashingViolationsConsumer.OnUnknownMsgTypeError(&slashing.Violation{ + m.slashingViolationsConsumer.OnUnknownMsgTypeError(&network.Violation{ Identity: nil, PeerID: remotePeer.String(), MsgType: "", Channel: channel, Protocol: message.ProtocolTypeUnicast, Err: err, }) return @@ -705,14 +700,14 @@ func (m *Middleware) processAuthenticatedMessage(msg *message.Message, peerID pe switch { case codec.IsErrUnknownMsgCode(err): // slash peer if message contains unknown message code byte - violation := &slashing.Violation{ + violation := &network.Violation{ PeerID: peerID.String(), OriginID: originId, Channel: channel, Protocol: protocol, Err: err, } m.slashingViolationsConsumer.OnUnknownMsgTypeError(violation) return case codec.IsErrMsgUnmarshal(err) || codec.IsErrInvalidEncoding(err): // slash if peer sent a message that could not be marshalled into the message type denoted by the message code byte - violation := &slashing.Violation{ + violation := &network.Violation{ PeerID: peerID.String(), OriginID: originId, Channel: channel, Protocol: protocol, Err: err, } m.slashingViolationsConsumer.OnInvalidMsgError(violation) @@ -722,7 +717,7 @@ func (m *Middleware) processAuthenticatedMessage(msg *message.Message, peerID pe // don't crash as a result of external inputs since that creates a DoS vector // collect slashing data because this could potentially lead to slashing err = fmt.Errorf("unexpected error during message validation: %w", err) - violation 
:= &slashing.Violation{ + violation := &network.Violation{ PeerID: peerID.String(), OriginID: originId, Channel: channel, Protocol: protocol, Err: err, } m.slashingViolationsConsumer.OnUnexpectedError(violation) @@ -744,7 +739,6 @@ func (m *Middleware) processAuthenticatedMessage(msg *message.Message, peerID pe // processMessage processes a message and eventually passes it to the overlay func (m *Middleware) processMessage(scope *network.IncomingMessageScope) { - logger := m.log.With(). Str("channel", scope.Channel().String()). Str("type", scope.Protocol().String()). diff --git a/network/p2p/mock/gossip_sub_builder.go b/network/p2p/mock/gossip_sub_builder.go index 2146f922c9b..08d82bd03c6 100644 --- a/network/p2p/mock/gossip_sub_builder.go +++ b/network/p2p/mock/gossip_sub_builder.go @@ -4,16 +4,12 @@ package mockp2p import ( host "github.com/libp2p/go-libp2p/core/host" - channels "github.com/onflow/flow-go/network/channels" - irrecoverable "github.com/onflow/flow-go/module/irrecoverable" mock "github.com/stretchr/testify/mock" p2p "github.com/onflow/flow-go/network/p2p" - peer "github.com/libp2p/go-libp2p/core/peer" - pubsub "github.com/libp2p/go-libp2p-pubsub" routing "github.com/libp2p/go-libp2p/core/routing" @@ -52,13 +48,13 @@ func (_m *GossipSubBuilder) Build(_a0 irrecoverable.SignalerContext) (p2p.PubSub return r0, r1 } -// OverrideDefaultRpcInspectorSuiteFactory provides a mock function with given fields: _a0 -func (_m *GossipSubBuilder) OverrideDefaultRpcInspectorSuiteFactory(_a0 p2p.GossipSubRpcInspectorSuiteFactoryFunc) { +// EnableGossipSubScoringWithOverride provides a mock function with given fields: _a0 +func (_m *GossipSubBuilder) EnableGossipSubScoringWithOverride(_a0 *p2p.PeerScoringConfigOverride) { _m.Called(_a0) } -// SetAppSpecificScoreParams provides a mock function with given fields: _a0 -func (_m *GossipSubBuilder) SetAppSpecificScoreParams(_a0 func(peer.ID) float64) { +// OverrideDefaultRpcInspectorSuiteFactory provides a mock function with given fields: _a0 +func (_m *GossipSubBuilder) OverrideDefaultRpcInspectorSuiteFactory(_a0 p2p.GossipSubRpcInspectorSuiteFactoryFunc) { _m.Called(_a0) } @@ -72,11 +68,6 @@ func (_m *GossipSubBuilder) SetGossipSubFactory(_a0 p2p.GossipSubFactoryFunc) { _m.Called(_a0) } -// SetGossipSubPeerScoring provides a mock function with given fields: _a0 -func (_m *GossipSubBuilder) SetGossipSubPeerScoring(_a0 bool) { - _m.Called(_a0) -} - // SetGossipSubScoreTracerInterval provides a mock function with given fields: _a0 func (_m *GossipSubBuilder) SetGossipSubScoreTracerInterval(_a0 time.Duration) { _m.Called(_a0) @@ -102,11 +93,6 @@ func (_m *GossipSubBuilder) SetSubscriptionFilter(_a0 pubsub.SubscriptionFilter) _m.Called(_a0) } -// SetTopicScoreParams provides a mock function with given fields: topic, topicScoreParams -func (_m *GossipSubBuilder) SetTopicScoreParams(topic channels.Topic, topicScoreParams *pubsub.TopicScoreParams) { - _m.Called(topic, topicScoreParams) -} - type mockConstructorTestingTNewGossipSubBuilder interface { mock.TestingT Cleanup(func()) diff --git a/network/p2p/mock/node_builder.go b/network/p2p/mock/node_builder.go index 97ab398f37a..15bb6c10306 100644 --- a/network/p2p/mock/node_builder.go +++ b/network/p2p/mock/node_builder.go @@ -55,12 +55,12 @@ func (_m *NodeBuilder) Build() (p2p.LibP2PNode, error) { return r0, r1 } -// EnableGossipSubPeerScoring provides a mock function with given fields: _a0 -func (_m *NodeBuilder) EnableGossipSubPeerScoring(_a0 *p2p.PeerScoringConfig) p2p.NodeBuilder { +// 
EnableGossipSubScoringWithOverride provides a mock function with given fields: _a0 +func (_m *NodeBuilder) EnableGossipSubScoringWithOverride(_a0 *p2p.PeerScoringConfigOverride) p2p.NodeBuilder { ret := _m.Called(_a0) var r0 p2p.NodeBuilder - if rf, ok := ret.Get(0).(func(*p2p.PeerScoringConfig) p2p.NodeBuilder); ok { + if rf, ok := ret.Get(0).(func(*p2p.PeerScoringConfigOverride) p2p.NodeBuilder); ok { r0 = rf(_a0) } else { if ret.Get(0) != nil { diff --git a/network/p2p/network.go b/network/p2p/network.go index 384fad3ab59..a288b92c7d7 100644 --- a/network/p2p/network.go +++ b/network/p2p/network.go @@ -22,6 +22,7 @@ import ( "github.com/onflow/flow-go/network/channels" "github.com/onflow/flow-go/network/message" "github.com/onflow/flow-go/network/queue" + "github.com/onflow/flow-go/network/slashing" _ "github.com/onflow/flow-go/utils/binstat" "github.com/onflow/flow-go/utils/logging" ) @@ -53,6 +54,7 @@ type Network struct { registerEngineRequests chan *registerEngineRequest registerBlobServiceRequests chan *registerBlobServiceRequest misbehaviorReportManager network.MisbehaviorReportManager + slashingViolationsConsumer network.ViolationsConsumer } var _ network.Network = &Network{} @@ -171,6 +173,8 @@ func NewNetwork(param *NetworkConfig, opts ...NetworkOption) (*Network, error) { opt(n) } + n.slashingViolationsConsumer = slashing.NewSlashingViolationsConsumer(param.Logger, param.Metrics, n) + n.mw.SetSlashingViolationsConsumer(n.slashingViolationsConsumer) n.mw.SetOverlay(n) if err := n.conduitFactory.RegisterAdapter(n); err != nil { @@ -467,13 +471,16 @@ func (n *Network) PublishOnChannel(channel channels.Channel, message interface{} // MulticastOnChannel unreliably sends the specified event over the channel to randomly selected 'num' number of recipients // selected from the specified targetIDs. 
func (n *Network) MulticastOnChannel(channel channels.Channel, message interface{}, num uint, targetIDs ...flow.Identifier) error { - selectedIDs := flow.IdentifierList(targetIDs).Filter(n.removeSelfFilter()).Sample(num) + selectedIDs, err := flow.IdentifierList(targetIDs).Filter(n.removeSelfFilter()).Sample(num) + if err != nil { + return fmt.Errorf("sampling failed: %w", err) + } if len(selectedIDs) == 0 { return network.EmptyTargetList } - err := n.sendOnChannel(channel, message, selectedIDs) + err = n.sendOnChannel(channel, message, selectedIDs) // publishes the message to the selected targets if err != nil { diff --git a/network/p2p/p2pbuilder/gossipsub/gossipSubBuilder.go b/network/p2p/p2pbuilder/gossipsub/gossipSubBuilder.go index 89b1351691f..3e69590dc17 100644 --- a/network/p2p/p2pbuilder/gossipsub/gossipSubBuilder.go +++ b/network/p2p/p2pbuilder/gossipsub/gossipSubBuilder.go @@ -7,7 +7,6 @@ import ( pubsub "github.com/libp2p/go-libp2p-pubsub" "github.com/libp2p/go-libp2p/core/host" - "github.com/libp2p/go-libp2p/core/peer" "github.com/libp2p/go-libp2p/core/routing" "github.com/rs/zerolog" @@ -17,7 +16,6 @@ import ( "github.com/onflow/flow-go/module/mempool/queue" "github.com/onflow/flow-go/module/metrics" "github.com/onflow/flow-go/network" - "github.com/onflow/flow-go/network/channels" "github.com/onflow/flow-go/network/p2p" "github.com/onflow/flow-go/network/p2p/distributor" "github.com/onflow/flow-go/network/p2p/inspector" @@ -29,6 +27,7 @@ import ( "github.com/onflow/flow-go/network/p2p/scoring" "github.com/onflow/flow-go/network/p2p/tracer" "github.com/onflow/flow-go/network/p2p/utils" + "github.com/onflow/flow-go/utils/logging" ) // The Builder struct is used to configure and create a new GossipSub pubsub system. @@ -92,14 +91,42 @@ func (g *Builder) SetGossipSubConfigFunc(gossipSubConfigFunc p2p.GossipSubAdapte g.gossipSubConfigFunc = gossipSubConfigFunc } -// SetGossipSubPeerScoring sets the gossipsub peer scoring of the builder. -// If the gossipsub peer scoring flag has already been set, a fatal error is logged. -func (g *Builder) SetGossipSubPeerScoring(gossipSubPeerScoring bool) { - if g.gossipSubPeerScoring { - g.logger.Fatal().Msg("gossipsub peer scoring has already been set") +// EnableGossipSubScoringWithOverride enables peer scoring for the GossipSub pubsub system with the given override. +// Any existing peer scoring config attribute that is set in the override will override the default peer scoring config. +// Anything that is left to nil or zero value in the override will be ignored and the default value will be used. +// Note: it is not recommended to override the default peer scoring config in production unless you know what you are doing. +// Production Tip: use PeerScoringConfigNoOverride as the argument to this function to enable peer scoring without any override. +// Args: +// - PeerScoringConfigOverride: override for the peer scoring config- Recommended to use PeerScoringConfigNoOverride for production. +// Returns: +// none +func (g *Builder) EnableGossipSubScoringWithOverride(override *p2p.PeerScoringConfigOverride) { + g.gossipSubPeerScoring = true // TODO: we should enable peer scoring by default. + if override == nil { return } - g.gossipSubPeerScoring = gossipSubPeerScoring + if override.AppSpecificScoreParams != nil { + g.logger.Warn(). + Str(logging.KeyNetworkingSecurity, "true"). 
+ Msg("overriding app specific score params for gossipsub") + g.scoreOptionConfig.OverrideAppSpecificScoreFunction(override.AppSpecificScoreParams) + } + if override.TopicScoreParams != nil { + for topic, params := range override.TopicScoreParams { + topicLogger := utils.TopicScoreParamsLogger(g.logger, topic.String(), params) + topicLogger.Warn(). + Str(logging.KeyNetworkingSecurity, "true"). + Msg("overriding topic score params for gossipsub") + g.scoreOptionConfig.OverrideTopicScoreParams(topic, params) + } + } + if override.DecayInterval > 0 { + g.logger.Warn(). + Str(logging.KeyNetworkingSecurity, "true"). + Dur("decay_interval", override.DecayInterval). + Msg("overriding decay interval for gossipsub") + g.scoreOptionConfig.OverrideDecayInterval(override.DecayInterval) + } } // SetGossipSubScoreTracerInterval sets the gossipsub score tracer interval of the builder. @@ -132,21 +159,6 @@ func (g *Builder) SetRoutingSystem(routingSystem routing.Routing) { g.routingSystem = routingSystem } -// SetTopicScoreParams sets the topic score params of the builder. -// There is a default topic score parameters that is used if this function is not called for a topic. -// However, if this function is called multiple times for a topic, the last topic score params will be used. -// Note: calling this function will override the default topic score params for the topic. Don't call this function -// unless you know what you are doing. -func (g *Builder) SetTopicScoreParams(topic channels.Topic, topicScoreParams *pubsub.TopicScoreParams) { - g.scoreOptionConfig.OverrideTopicScoreParams(topic, topicScoreParams) -} - -// SetAppSpecificScoreParams sets the app specific score params of the builder. -// There is no default app specific score function. However, if this function is called multiple times, the last function will be used. -func (g *Builder) SetAppSpecificScoreParams(f func(peer.ID) float64) { - g.scoreOptionConfig.SetAppSpecificScoreFunction(f) -} - // OverrideDefaultRpcInspectorSuiteFactory overrides the default rpc inspector suite factory. // Note: this function should only be used for testing purposes. Never override the default rpc inspector suite factory unless you know what you are doing. func (g *Builder) OverrideDefaultRpcInspectorSuiteFactory(factory p2p.GossipSubRpcInspectorSuiteFactoryFunc) { @@ -173,7 +185,11 @@ func NewGossipSubBuilder( idProvider module.IdentityProvider, rpcInspectorConfig *p2pconf.GossipSubRPCInspectorsConfig, ) *Builder { - lg := logger.With().Str("component", "gossipsub").Logger() + lg := logger.With(). + Str("component", "gossipsub"). + Str("network-type", networkType.String()). + Logger() + b := &Builder{ logger: lg, metricsCfg: metricsCfg, @@ -186,6 +202,7 @@ func NewGossipSubBuilder( rpcInspectorConfig: rpcInspectorConfig, rpcInspectorSuiteFactory: defaultInspectorSuite(), } + return b } @@ -310,6 +327,10 @@ func (g *Builder) Build(ctx irrecoverable.SignalerContext) (p2p.PubSubAdapter, e gossipSubConfigs.WithScoreTracer(scoreTracer) } + } else { + g.logger.Warn(). + Str(logging.KeyNetworkingSecurity, "true"). 
+ Msg("gossipsub peer scoring is disabled") } if g.gossipSubTracer != nil { diff --git a/network/p2p/p2pbuilder/libp2pNodeBuilder.go b/network/p2p/p2pbuilder/libp2pNodeBuilder.go index c0a6412297c..e27cbc5bedd 100644 --- a/network/p2p/p2pbuilder/libp2pNodeBuilder.go +++ b/network/p2p/p2pbuilder/libp2pNodeBuilder.go @@ -26,6 +26,7 @@ import ( "github.com/onflow/flow-go/module" "github.com/onflow/flow-go/module/component" "github.com/onflow/flow-go/module/irrecoverable" + "github.com/onflow/flow-go/module/metrics" flownet "github.com/onflow/flow-go/network" "github.com/onflow/flow-go/network/netconf" "github.com/onflow/flow-go/network/p2p" @@ -149,22 +150,17 @@ func (builder *LibP2PNodeBuilder) SetGossipSubFactory(gf p2p.GossipSubFactoryFun return builder } -// EnableGossipSubPeerScoring enables peer scoring for the GossipSub pubsub system. -// Arguments: -// - *PeerScoringConfig: the peer scoring configuration for the GossipSub pubsub system. If nil, the default configuration is used. -func (builder *LibP2PNodeBuilder) EnableGossipSubPeerScoring(config *p2p.PeerScoringConfig) p2p.NodeBuilder { - builder.gossipSubBuilder.SetGossipSubPeerScoring(true) - if config != nil { - if config.AppSpecificScoreParams != nil { - builder.gossipSubBuilder.SetAppSpecificScoreParams(config.AppSpecificScoreParams) - } - if config.TopicScoreParams != nil { - for topic, params := range config.TopicScoreParams { - builder.gossipSubBuilder.SetTopicScoreParams(topic, params) - } - } - } - +// EnableGossipSubScoringWithOverride enables peer scoring for the GossipSub pubsub system with the given override. +// Any existing peer scoring config attribute that is set in the override will override the default peer scoring config. +// Anything that is left to nil or zero value in the override will be ignored and the default value will be used. +// Note: it is not recommended to override the default peer scoring config in production unless you know what you are doing. +// Production Tip: use PeerScoringConfigNoOverride as the argument to this function to enable peer scoring without any override. +// Args: +// - PeerScoringConfigOverride: override for the peer scoring config- Recommended to use PeerScoringConfigNoOverride for production. +// Returns: +// none +func (builder *LibP2PNodeBuilder) EnableGossipSubScoringWithOverride(config *p2p.PeerScoringConfigOverride) p2p.NodeBuilder { + builder.gossipSubBuilder.EnableGossipSubScoringWithOverride(config) return builder } @@ -490,11 +486,20 @@ func DefaultNodeBuilder( SetRateLimiterDistributor(uniCfg.RateLimiterDistributor) if gossipCfg.PeerScoring { - // currently, we only enable peer scoring with default parameters. So, we set the score parameters to nil. - builder.EnableGossipSubPeerScoring(nil) + // In production, we never override the default scoring config. 
+ builder.EnableGossipSubScoringWithOverride(p2p.PeerScoringConfigNoOverride) + } + + meshTracerCfg := &tracer.GossipSubMeshTracerConfig{ + Logger: logger, + Metrics: metricsCfg.Metrics, + IDProvider: idProvider, + LoggerInterval: gossipCfg.LocalMeshLogInterval, + RpcSentTrackerCacheCollector: metrics.GossipSubRPCSentTrackerMetricFactory(metricsCfg.HeroCacheFactory, flownet.PrivateNetwork), + RpcSentTrackerCacheSize: gossipCfg.RPCSentTrackerCacheSize, } + meshTracer := tracer.NewGossipSubMeshTracer(meshTracerCfg) - meshTracer := tracer.NewGossipSubMeshTracer(logger, metricsCfg.Metrics, idProvider, gossipCfg.LocalMeshLogInterval) builder.SetGossipSubTracer(meshTracer) builder.SetGossipSubScoreTracerInterval(gossipCfg.ScoreTracerInterval) diff --git a/network/p2p/p2pconf/gossipsub.go b/network/p2p/p2pconf/gossipsub.go index f9155129efd..d297f5cba8b 100644 --- a/network/p2p/p2pconf/gossipsub.go +++ b/network/p2p/p2pconf/gossipsub.go @@ -21,6 +21,8 @@ type GossipSubConfig struct { LocalMeshLogInterval time.Duration `mapstructure:"gossipsub-local-mesh-logging-interval"` // ScoreTracerInterval is the interval at which the score tracer logs the peer scores. ScoreTracerInterval time.Duration `mapstructure:"gossipsub-score-tracer-interval"` + // RPCSentTrackerCacheSize cache size of the rpc sent tracker used by the gossipsub mesh tracer. + RPCSentTrackerCacheSize uint32 `mapstructure:"gossipsub-rpc-sent-tracker-cache-size"` // PeerScoring is whether to enable GossipSub peer scoring. PeerScoring bool `mapstructure:"gossipsub-peer-scoring-enabled"` } diff --git a/network/p2p/p2pnode/gossipSubAdapter.go b/network/p2p/p2pnode/gossipSubAdapter.go index 861093993cc..59bd2f2d65a 100644 --- a/network/p2p/p2pnode/gossipSubAdapter.go +++ b/network/p2p/p2pnode/gossipSubAdapter.go @@ -158,6 +158,7 @@ func (g *GossipSubAdapter) Join(topic string) (p2p.Topic, error) { topicParamsLogger.Info().Msg("joined topic with score params set") } else { g.logger.Warn(). + Bool(logging.KeyNetworkingSecurity, true). Str("topic", topic). Msg("joining topic without score params, this is not recommended from a security perspective") } diff --git a/network/p2p/scoring/README.md b/network/p2p/scoring/README.md index 622ecadd3fe..38a758db439 100644 --- a/network/p2p/scoring/README.md +++ b/network/p2p/scoring/README.md @@ -93,6 +93,153 @@ scoreOption := NewScoreOption(config) 16. `defaultTopicInvalidMessageDeliveriesWeight` is set to -1.0 and is used to penalize peers that send invalid messages by applying it to the square of the number of such messages. A message is considered invalid if it is not properly signed. A peer will be disconnected if it sends around 14 invalid messages within a gossipsub heartbeat interval. 17. `defaultTopicInvalidMessageDeliveriesDecay` is a decay factor set to 0.99. It is used to reduce the number of invalid message deliveries counted against a peer by 1% at each heartbeat interval. This prevents the peer from being disconnected if it stops sending invalid messages. The heartbeat interval in the gossipsub scoring system is set to 1 minute by default. +## GossipSub Message Delivery Scoring +This section provides an overview of the GossipSub message delivery scoring mechanism used in the Flow network. +It's designed to maintain an efficient, secure and stable peer-to-peer network by scoring each peer based on their message delivery performance. +The system ensures the reliability of message propagation by scoring peers, which discourages malicious behaviors and enhances overall network performance. 
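Before walking through the parameters and scenarios below, the following minimal sketch shows the shape of the under-delivery penalty they configure. It is illustrative only; the weight of `-5e-4` is assumed here purely to make the arithmetic concrete and matches Scenario 2 below.

```go
// meshDeliveryPenalty sketches the per-topic penalty for under-delivery:
// no penalty at or above the threshold, otherwise weight * deficit^2 with a
// negative weight (see defaultTopicMeshMessageDeliveriesWeight below).
func meshDeliveryPenalty(actual, threshold, weight float64) float64 {
	if actual >= threshold {
		return 0
	}
	deficit := threshold - actual
	return weight * deficit * deficit
}

// Scenario 2 below: meshDeliveryPenalty(50, 100, -5e-4) = -5e-4 * 50 * 50 = -1.25
```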
+ +### Comprehensive System Overview +The GossipSub message delivery scoring mechanism used in the Flow network is an integral component of its P2P communication model. +It is designed to monitor and incentivize appropriate network behaviors by attributing scores to peers based on their message delivery performance. +This scoring system is fundamental to ensuring that messages are reliably propagated across the network, creating a robust P2P communication infrastructure. + +The scoring system is per topic, which means it tracks the efficiency of peers in delivering messages in each specific topic they are participating in. +These per-topic scores then contribute to an overall score for each peer, providing a comprehensive view of a peer's effectiveness within the network. +In GossipSub, a crucial aspect of a peer's responsibility is to relay messages effectively to other nodes in the network. +The role of the scoring mechanism is to objectively assess a peer's efficiency in delivering these messages. +It takes into account several factors to determine the effectiveness of the peers. + +1. **Message Delivery Rate** - A peer's ability to deliver messages quickly is a vital metric. Slow delivery could lead to network lags and inefficiency. +2. **Message Delivery Volume** - A peer's capacity to deliver a large number of messages accurately and consistently. +3. **Continuity of Performance** - The scoring mechanism tracks not only the rate and volume of the messages but also the consistency in a peer's performance over time. +4. **Prevention of Malicious Behaviors** - The scoring system also helps in mitigating potential network attacks such as spamming and message replay attacks. + +The system utilizes several parameters to maintain and adjust the scores of the peers: +- `defaultTopicMeshMessageDeliveriesDecay` (value: 0.5): This parameter dictates how rapidly a peer's message delivery count decays with time. With a value of 0.5, it indicates a 50% decay at each decay interval. This mechanism ensures that past performances do not disproportionately impact the current score of the peer. +- `defaultTopicMeshMessageDeliveriesCap` (value: 1000): This parameter sets an upper limit on the number of message deliveries that can contribute to the score of a peer in a topic. With a cap set at 1000, it prevents the score from being overly influenced by large volumes of message deliveries, providing a balanced assessment of peer performance. +- `defaultTopicMeshMessageDeliveryThreshold` (value: 0.1 * `defaultTopicMeshMessageDeliveriesCap`): This threshold serves to identify under-performing peers. If a peer's message delivery count is below this threshold in a topic, the peer's score is penalized. This encourages peers to maintain a minimum level of performance. +- `defaultTopicMeshMessageDeliveriesWeight` (value: -0.05 * `MaxAppSpecificReward` / (`defaultTopicMeshMessageDeliveryThreshold` ^ 2) = -5 * 10^-4): This weight is applied when penalizing under-performing peers. The penalty is proportional to the square of the difference between the actual message deliveries and the threshold, multiplied by this weight. +- `defaultMeshMessageDeliveriesWindow` (value: `defaultDecayInterval` = 1 minute): This parameter defines the time window within which a message delivery is counted towards the score. This window is set to the decay interval, preventing replay attacks and counting only unique message deliveries.
+- `defaultMeshMessageDeliveriesActivation` (value: 2 * `defaultDecayInterval` = 2 minutes): This time interval is the grace period before the scoring system starts tracking a new peer's performance. It accounts for the time it takes for a new peer to fully integrate into the network. + +By continually updating and adjusting the scores of peers based on these parameters, the GossipSub message delivery scoring mechanism ensures a robust, efficient, and secure P2P network. + +### Examples + +#### Scenario 1: Peer A Delivers Messages Within Cap and Above Threshold +Let's assume a Peer A that consistently delivers 500 messages per decay interval. This is within the `defaultTopicMeshMessageDeliveriesCap` (1000) and above the `defaultTopicMeshMessageDeliveryThreshold` (100). +As Peer A's deliveries are above the threshold and within the cap, its score will not be penalized. Instead, it will be maintained, promoting healthy network participation. + +#### Scenario 2: Peer B Delivers Messages Below Threshold +Now, assume Peer B delivers 50 messages per decay interval, below the `defaultTopicMeshMessageDeliveryThreshold` (100). +In this case, the score of Peer B will be penalized because its delivery rate is below the threshold. The penalty is calculated as `-|w| * (actual - threshold)^2`, where `w` is the weight (`defaultTopicMeshMessageDeliveriesWeight`), `actual` is the actual messages delivered (50), and `threshold` is the delivery threshold (100). + +#### Scenario 3: Peer C Delivers Messages Exceeding the Cap +Consider Peer C, which delivers 1500 messages per decay interval, exceeding the `defaultTopicMeshMessageDeliveriesCap` (1000). +In this case, even though Peer C is highly active, its score will not increase further once it hits the cap (1000). This is to avoid overemphasis on high delivery counts, which could skew the scoring system. + +#### Scenario 4: Peer D Joins a Topic Mesh +When a new Peer D joins a topic mesh, it will be given a grace period of `defaultMeshMessageDeliveriesActivation` (2 decay intervals) before its message delivery performance is tracked. This grace period allows the peer to set up and begin receiving messages from the network. +Remember, the parameters and scenarios described here aim to maintain a stable, efficient, and secure peer-to-peer network by carefully tracking and scoring each peer's message delivery performance. + +#### Scenario 5: Message Delivery Decay +To better understand how the message delivery decay (`defaultTopicMeshMessageDeliveriesDecay`) works in the GossipSub protocol, let's examine a hypothetical scenario. +Let's say we have a peer named `Peer A` who is actively participating in `Topic X`. `Peer A` has successfully delivered 800 messages in `Topic X` over a given time period. +**Initial State**: At this point, `Peer A`'s message delivery count for `Topic X` is 800. Now, the decay interval elapses without `Peer A` delivering any new messages in `Topic X`. +**After One Decay Interval**: Given that our `defaultTopicMeshMessageDeliveriesDecay` value is 0.5, after one decay interval, `Peer A`'s message delivery count for `Topic X` will decay by 50%. 
Therefore, `Peer A`'s count is now: + + 800 (previous message count) * 0.5 (decay factor) = 400 + +**After Two Decay Intervals** +If `Peer A` still hasn't delivered any new messages in `Topic X` during the next decay interval, the decay is applied again, further reducing the message delivery count: + + 400 (current message count) * 0.5 (decay factor) = 200 +And this process will continue at every decay interval, halving `Peer A`'s message delivery count for `Topic X` until `Peer A` delivers new messages in `Topic X` or the count reaches zero. +This decay process ensures that a peer cannot rest on its past deliveries; it must continually contribute to the network to maintain its score. +It helps maintain a lively and dynamic network environment, incentivizing constant active participation from all peers. + +#### Scenario 6: Replay Attack +The `defaultMeshMessageDeliveriesWindow` and `defaultMeshMessageDeliveriesActivation` parameters play a crucial role in preventing replay attacks in the GossipSub protocol. Let's illustrate this with an example. +Consider a scenario where we have three peers: `Peer A`, `Peer B`, and `Peer C`. All three peers are active participants in `Topic X`. +**Initial State**: At Time = 0: `Peer A` generates and broadcasts a new message `M` in `Topic X`. `Peer B` and `Peer C` receive this message from `Peer A` and update their message caches accordingly. +**After a Few Seconds**: At Time = 30 seconds: `Peer B`, with malicious intent, tries to rebroadcast the same message `M` back into `Topic X`. +Given that our `defaultMeshMessageDeliveriesWindow` value is equal to the decay interval (let's assume 1 minute), `Peer C` would have seen the original message `M` from `Peer A` less than one minute ago. +This is within the `defaultMeshMessageDeliveriesWindow`. Because `Peer A` (the original sender) is different from `Peer B` (the current sender), this delivery will be counted towards `Peer B`'s message delivery score in `Topic X`. +**After One Minute**: At Time = 61 seconds: `Peer B` tries to rebroadcast the same message `M` again. +Now, more than a minute has passed since `Peer C` first saw the message `M` from `Peer A`. This is outside the `defaultMeshMessageDeliveriesWindow`. +Therefore, the message `M` from `Peer B` will not count towards `Peer B`'s message delivery score in `Topic X`, and `Peer B` still needs to meet its message delivery threshold in order not to be penalized for under-performing. +This effectively discourages replaying messages older than the `defaultMeshMessageDeliveriesWindow`. +This mechanism, combined with other parameters, helps maintain the security and efficiency of the network by discouraging harmful behaviors such as message replay attacks. + +## Mitigating iHave Broken Promises Attacks in GossipSub Protocol +### What is an iHave Broken Promise Attack? +In the GossipSub protocol, peers gossip information about new messages to a subset of random peers (outside their local mesh) in the form of an "iHave" message, which tells the receiving peer which messages the sender has. +The receiving peer then replies with an "iWant" message, requesting the messages it doesn't have. Note that for the peers in the local mesh, the actual new messages are sent instead of an "iHave" message (i.e., eager push). However, +the iHave-iWant protocol is part of a complementary mechanism to ensure that the information is disseminated to the entire network in a timely manner (i.e., lazy pull).
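The split between eager push and lazy pull can be sketched as follows (hypothetical, simplified types for illustration only; this is not the libp2p-pubsub API):

```go
type MessageID string

// Peer is a hypothetical abstraction of a remote GossipSub peer.
type Peer interface {
	Send(payload []byte)    // deliver the full message (eager push)
	SendIHave(id MessageID) // advertise only the message id (lazy pull)
}

// forwardMessage illustrates the two dissemination paths described above: mesh peers
// receive the full message immediately, while a random sample of non-mesh peers only
// receives an iHave advertisement and replies with an iWant if it is missing the message.
func forwardMessage(id MessageID, payload []byte, meshPeers, gossipPeers []Peer) {
	for _, p := range meshPeers {
		p.Send(payload)
	}
	for _, p := range gossipPeers {
		p.SendIHave(id)
	}
}
```

A peer that promptly answers the resulting iWant requests keeps its promises; the rest of this section describes what happens when it does not.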
+ +An "iHave Broken Promise" attack occurs when a peer advertises many "iHave"s but doesn't respond to the "iWant" requests for those messages. +This not only hinders the effective dissemination of information but can also strain the network with redundant requests. Hence, we classify it as a spam behavior mounting a DoS attack on the network. + +### Detecting iHave Broken Promise Attacks +Detecting iHave broken promise attacks is done by GossipSub itself. On each incoming RPC from a remote node, the local GossipSub node checks if the RPC contains an iHave message. It then samples one (and only one) iHave message +randomly out of the entire set of iHave messages piggybacked on the incoming RPC. If the sampled iHave is not followed by delivery of the actual message, the local GossipSub node considers this an iHave broken promise and +increases the behavior penalty counter for that remote node. Hence, incrementing the behavior penalty counter for a remote peer is done per RPC containing at least one iHave broken promise and not per iHave message. +Note that the behavior penalty counter also keeps track of GRAFT flood attacks, where a remote peer sends many GRAFTs while it is on a PRUNE backoff from the local node. Mitigating iHave broken promise attacks also +mitigates GRAFT flood attacks. + +### Configuring GossipSub Parameters +In order to mitigate iHave broken promise attacks, GossipSub expects the application layer (i.e., the Flow protocol) to properly configure the relevant scoring parameters, notably: + +- `BehaviourPenaltyThreshold` is set to `defaultBehaviourPenaltyThreshold`, i.e., `10`. +- `BehaviourPenaltyWeight` is set to `defaultBehaviourPenaltyWeight`, i.e., `0.01` * `MaxAppSpecificPenalty`. +- `BehaviourPenaltyDecay` is set to `defaultBehaviourPenaltyDecay`, i.e., `0.99`. + +#### 1. `defaultBehaviourPenaltyThreshold` +This parameter sets the threshold for when the behavior of a peer is considered bad. Misbehavior is defined as advertising an iHave without responding to the iWants (iHave broken promises), and attempting to GRAFT when the peer is on a PRUNE backoff. +If a remote peer sends an RPC that advertises at least one iHave for a message but doesn't respond to the iWant requests for that message within the next `3 seconds`, the peer misbehavior counter is incremented by `1`. This threshold is set to `10`, meaning that we tolerate at most 10 such RPCs containing iHave broken promises. After this, the peer is penalized for every excess RPC containing iHave broken promises. The counter decays by a factor of `0.99` every decay interval (`defaultDecayInterval`), i.e., every minute. + +#### 2. `defaultBehaviourPenaltyWeight` +This is the weight applied as a penalty when a peer's misbehavior goes beyond the `defaultBehaviourPenaltyThreshold`. +The penalty is applied to the square of the difference between the misbehavior counter and the threshold, i.e., -|w| * (misbehavior counter - threshold)^2, where `|w|` is the absolute value of the `defaultBehaviourPenaltyWeight`. +Note that `defaultBehaviourPenaltyWeight` is a negative value, meaning that the penalty is applied in the opposite direction of the misbehavior counter. For the sake of illustration, we use the notation `-|w|` to denote that a negative penalty is applied.
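As a worked example of this formula (illustration only; the helper below is hypothetical, but the values are the defaults stated in this document: threshold = 10 and |w| = |0.01 * MaxAppSpecificPenalty| = 1):

```go
const (
	behaviourPenaltyThreshold = 10.0
	behaviourPenaltyWeight    = 0.01 * (-100.0) // 0.01 * MaxAppSpecificPenalty = -1
)

// behaviourPenalty applies -|w| * (counter - threshold)^2 once the misbehavior
// counter exceeds the threshold; below the threshold no penalty is applied.
func behaviourPenalty(counter float64) float64 {
	if counter <= behaviourPenaltyThreshold {
		return 0
	}
	excess := counter - behaviourPenaltyThreshold
	return behaviourPenaltyWeight * excess * excess
}
```

With these defaults, a counter of 15 yields a penalty of -25, a counter of 20 wipes out the entire `MaxAppSpecificReward` (-100), and a counter of roughly 10 + sqrt(2) * 10 ≈ 24.1 pushes the penalty to about -200, well below the `GraylistThreshold`.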
+We set `defaultBehaviourPenaltyWeight` to `0.01 * MaxAppSpecificPenalty`, meaning a peer misbehaving `10` times more than the threshold (i.e., `10 + 10`) will lose its entire `MaxAppSpecificReward`, which is a reward given to all staked nodes in the Flow blockchain. +This also means that a peer misbehaving `sqrt(2) * 10` times more than the threshold will cause the peer score to be dropped below the `MaxAppSpecificPenalty`, which is also below the `GraylistThreshold`, and the peer will be graylisted (i.e., all incoming and outgoing GossipSub RPCs from and to that peer will be rejected). +This means the peer is temporarily disconnected from the network, preventing it from causing further harm. + +#### 3. `defaultBehaviourPenaltyDecay` +This is the decay factor for the misbehavior counter of a peer. The counter is decayed by the `defaultBehaviourPenaltyDecay` parameter (i.e., `0.99`) per decay interval, which is currently every 1 minute. +This parameter helps to gradually reduce the effect of past misbehaviors and provides a chance for penalized nodes to rejoin the network. A very slow decay rate can help identify and isolate persistent offenders, while also allowing potentially honest nodes that had transient issues to regain their standing in the network. +The duration a peer remains graylisted is governed by the choice of `defaultBehaviourPenaltyWeight` and the decay parameters. +Based on the given configuration, a peer which has misbehaved on `sqrt(2) * 10` RPCs more than the threshold will get graylisted (disconnected at the GossipSub level). +With the decay interval set to 1 minute and a decay value of 0.99, a peer graylisted due to broken promises would be expected to be reconnected in about 527 minutes. +This is calculated by solving for `x` in the equation `(0.99)^x * (sqrt(2) * 10)^2 * MaxAppSpecificPenalty > GraylistThreshold`. +Simplifying, we find `x` to be approximately `527` decay intervals, or roughly `527` minutes. +This is the estimated time it would take for a severely misbehaving peer to have its penalty decayed enough to exceed the `GraylistThreshold` and thus be reconnected to the network. + +### Example Scenarios +**Scenario 1: Misbehaving Below Threshold** +In this scenario, consider peer `B` that has recently joined the network and is taking part in GossipSub. +This peer advertises many `iHave` messages to peer `A` over an RPC, but when peer `A` requests these messages with `iWant`s, peer `B` fails to deliver them within 3 seconds. +This action constitutes an _iHave broken promise_ for a single RPC and peer `A` increases the local behavior penalty counter of peer `B` by 1. +If peer `B` commits this misbehavior infrequently, such that the total number of these RPCs does not exceed the `defaultBehaviourPenaltyThreshold` (set to 10 in our configuration), +the misbehavior counter for this peer will increment by 1 for each such RPC and decay by `1%` every decay interval (1 minute), but no additional penalty will be applied. +The misbehavior counter decays by a factor of `defaultBehaviourPenaltyDecay` (0.99) every minute, allowing the peer to recover from these minor infractions without significant disruption. + +**Scenario 2: Misbehaving Above Threshold But Below Graylisting** +Now consider that peer `B` frequently sends RPCs advertising many `iHaves` to peer `A` but fails to deliver the promised messages. +If the number of these misbehaviors exceeds our threshold (10 in our configuration), peer `B` is now penalized by the local GossipSub scoring mechanism of peer `A`.
+The amount of the penalty is determined by the `defaultBehaviourPenaltyWeight` (set to 0.01 * MaxAppSpecificPenalty) applied to the square of the difference between the misbehavior counter and the threshold. +This penalty will progressively affect the peer's score, deteriorating its reputation in the local GossipSub scoring system of node `A`, but does not yet result in disconnection or graylisting. +The peer has a chance to amend its behavior before crossing into graylisting territory by ceasing the misbehavior and letting the counter decay. +When peer `B` has a deteriorated score at node `A`, it will be less likely to be selected by node `A` as its local mesh peer (i.e., to directly receive new messages from node `A`), and is deprived of the opportunity to receive new messages earlier through node `A`. + +**Scenario 3: Graylisting** +Now assume that peer `B` has been continually misbehaving, with the number of RPCs containing iHave broken promises exceeding the threshold by `sqrt(2) * 10`. +At this point, the peer's score drops below the `GraylistThreshold` due to the `defaultBehaviourPenaltyWeight` applied to the excess misbehavior. +The peer is then graylisted by peer `A`, i.e., peer `A` rejects all incoming and outgoing RPCs from and to peer `B` at the GossipSub level. +In our configuration, peer `B` will stay disconnected for at least `527` decay intervals or approximately `527` minutes. +This gives a strong disincentive for the peer to continue this behavior and also gives it time to recover and eventually be reconnected to the network. + ## Customization The scoring mechanism can be easily customized to suit the needs of the Flow network. This includes changing the scoring parameters, thresholds, and the scoring function itself. You can customize the scoring parameters and thresholds by using the various setter methods provided in the `ScoreOptionConfig` object. Additionally, you can provide a custom app-specific scoring function through the `SetAppSpecificScoreFunction` method. @@ -104,7 +251,6 @@ Example of setting custom app-specific scoring function: config.SetAppSpecificScoreFunction(customAppSpecificScoreFunction) ``` - ## Peer Scoring System Integration The peer scoring system is integrated with the GossipSub protocol through the `ScoreOption` configuration option. This option is passed to the GossipSub at the time of initialization. diff --git a/network/p2p/scoring/app_score_test.go b/network/p2p/scoring/app_score_test.go index 50e0379116e..8e2a1ae1bb8 100644 --- a/network/p2p/scoring/app_score_test.go +++ b/network/p2p/scoring/app_score_test.go @@ -35,15 +35,15 @@ func TestFullGossipSubConnectivity(t *testing.T) { groupOneNodes, groupOneIds := p2ptest.NodesFixture(t, sporkId, t.Name(), 5, idProvider, p2ptest.WithRole(flow.RoleConsensus), - p2ptest.WithPeerScoringEnabled(idProvider)) + p2ptest.EnablePeerScoringWithOverride(p2p.PeerScoringConfigNoOverride)) groupTwoNodes, groupTwoIds := p2ptest.NodesFixture(t, sporkId, t.Name(), 5, idProvider, p2ptest.WithRole(flow.RoleCollection), - p2ptest.WithPeerScoringEnabled(idProvider)) + p2ptest.EnablePeerScoringWithOverride(p2p.PeerScoringConfigNoOverride)) accessNodeGroup, accessNodeIds := p2ptest.NodesFixture(t, sporkId, t.Name(), 5, idProvider, p2ptest.WithRole(flow.RoleAccess), - p2ptest.WithPeerScoringEnabled(idProvider)) + p2ptest.EnablePeerScoringWithOverride(p2p.PeerScoringConfigNoOverride)) ids := append(append(groupOneIds, groupTwoIds...), accessNodeIds...) nodes := append(append(groupOneNodes, groupTwoNodes...), accessNodeGroup...)
@@ -150,7 +150,7 @@ func testGossipSubMessageDeliveryUnderNetworkPartition(t *testing.T, honestPeerS // two (honest) consensus nodes opts := []p2ptest.NodeFixtureParameterOption{p2ptest.WithRole(flow.RoleConsensus)} if honestPeerScoring { - opts = append(opts, p2ptest.WithPeerScoringEnabled(idProvider)) + opts = append(opts, p2ptest.EnablePeerScoringWithOverride(p2p.PeerScoringConfigNoOverride)) } con1Node, con1Id := p2ptest.NodeFixture(t, sporkId, t.Name(), idProvider, opts...) con2Node, con2Id := p2ptest.NodeFixture(t, sporkId, t.Name(), idProvider, opts...) @@ -162,11 +162,11 @@ func testGossipSubMessageDeliveryUnderNetworkPartition(t *testing.T, honestPeerS accessNodeGroup, accessNodeIds := p2ptest.NodesFixture(t, sporkId, t.Name(), 30, idProvider, p2ptest.WithRole(flow.RoleAccess), - p2ptest.WithPeerScoringEnabled(idProvider), // overrides the default peer scoring parameters to mute GossipSub traffic from/to honest nodes. - p2ptest.WithPeerScoreParamsOption(&p2p.PeerScoringConfig{ + p2ptest.EnablePeerScoringWithOverride(&p2p.PeerScoringConfigOverride{ AppSpecificScoreParams: maliciousAppSpecificScore(flow.IdentityList{&con1Id, &con2Id}), - })) + }), + ) allNodes := append([]p2p.LibP2PNode{con1Node, con2Node}, accessNodeGroup...) allIds := append([]*flow.Identity{&con1Id, &con2Id}, accessNodeIds...) diff --git a/network/p2p/scoring/score_option.go b/network/p2p/scoring/score_option.go index c743b3efa33..0ae676005cb 100644 --- a/network/p2p/scoring/score_option.go +++ b/network/p2p/scoring/score_option.go @@ -1,6 +1,7 @@ package scoring import ( + "fmt" "time" pubsub "github.com/libp2p/go-libp2p-pubsub" @@ -17,10 +18,22 @@ import ( ) const ( + // DefaultAppSpecificScoreWeight is the default weight for app-specific scores. It is used to scale the app-specific + // scores to the same range as the other scores. At the current version, we don't distinguish between the app-specific + // scores and the other scores, so we set it to 1. DefaultAppSpecificScoreWeight = 1 - MaxAppSpecificPenalty = float64(-100) - MinAppSpecificPenalty = -1 - MaxAppSpecificReward = float64(100) + + // MaxAppSpecificReward is the default reward for well-behaving staked peers. If a peer does not have + // any misbehavior record, e.g., invalid subscription, invalid message, etc., it will be rewarded with this score. + MaxAppSpecificReward = float64(100) + + // MaxAppSpecificPenalty is the maximum penalty for sever offenses that we apply to a remote node score. The score + // mechanism of GossipSub in Flow is designed in a way that all other infractions are penalized with a fraction of + // this value. We have also set the other parameters such as DefaultGraylistThreshold, DefaultGossipThreshold and DefaultPublishThreshold to + // be a bit higher than this, i.e., MaxAppSpecificPenalty + 1. This ensures that a node with a score of MaxAppSpecificPenalty + // will be graylisted (i.e., all incoming and outgoing RPCs are rejected) and will not be able to publish or gossip any messages. + MaxAppSpecificPenalty = -1 * MaxAppSpecificReward + MinAppSpecificPenalty = -1 // DefaultStakedIdentityReward is the default reward for staking peers. It is applied to the peer's score when // the peer does not have any misbehavior record, e.g., invalid subscription, invalid message, etc. @@ -43,7 +56,7 @@ const ( // How we use it: // As current max penalty is -100, we set the threshold to -99 so that all gossips // to and from peers with penalty -100 are ignored. 
- DefaultGossipThreshold = -99 + DefaultGossipThreshold = MaxAppSpecificPenalty + 1 // DefaultPublishThreshold when a peer's penalty drops below this threshold, // self-published messages are not propagated towards this peer. @@ -54,7 +67,7 @@ const ( // How we use it: // As current max penalty is -100, we set the threshold to -99 so that all penalized peers are deprived of // receiving any published messages. - DefaultPublishThreshold = -99 + DefaultPublishThreshold = MaxAppSpecificPenalty + 1 // DefaultGraylistThreshold when a peer's penalty drops below this threshold, the peer is graylisted, i.e., // incoming RPCs from the peer are ignored. @@ -64,7 +77,7 @@ const ( // // How we use it: // As current max penalty is -100, we set the threshold to -99 so that all penalized peers are graylisted. - DefaultGraylistThreshold = -99 + DefaultGraylistThreshold = MaxAppSpecificPenalty + 1 // DefaultAcceptPXThreshold when a peer sends us PX information with a prune, we only accept it and connect to the supplied // peers if the originating peer's penalty exceeds this threshold. @@ -74,7 +87,7 @@ const ( // How we use it: // As current max reward is 100, we set the threshold to 99 so that we only receive supplied peers from // well-behaved peers. - DefaultAcceptPXThreshold = 99 + DefaultAcceptPXThreshold = MaxAppSpecificReward - 1 // DefaultOpportunisticGraftThreshold when the median peer penalty in the mesh drops below this value, // the peer may select more peers with penalty above the median to opportunistically graft on the mesh. @@ -138,10 +151,144 @@ const ( // are churners, i.e., peers that join and leave a topic mesh frequently. defaultTopicTimeInMesh = time.Hour - // defaultTopicWeight is the default weight of a topic in the GossipSub scoring system. The overall score of a peer in a topic mesh is - // multiplied by the weight of the topic when calculating the overall score of the peer. + // defaultTopicWeight is the default weight of a topic in the GossipSub scoring system. + // The overall score of a peer in a topic mesh is multiplied by the weight of the topic when calculating the overall score of the peer. // We set it to 1.0, which means that the overall score of a peer in a topic mesh is not affected by the weight of the topic. defaultTopicWeight = 1.0 + + // defaultTopicMeshMessageDeliveriesDecay is applied to the number of actual message deliveries in a topic mesh + // at each decay interval (i.e., defaultDecayInterval). + // It is used to decay the number of actual message deliveries, and prevents past message + // deliveries from affecting the current score of the peer. + // As the decay interval is 1 minute, we set it to 0.5, which means that the number of actual message + // deliveries will decay by 50% at each decay interval. + defaultTopicMeshMessageDeliveriesDecay = .5 + + // defaultTopicMeshMessageDeliveriesCap is the maximum number of actual message deliveries in a topic + // mesh that is used to calculate the score of a peer in that topic mesh. + // We set it to 1000, which means that the maximum number of actual message deliveries in a + // topic mesh that is used to calculate the score of a peer in that topic mesh is 1000. + // This is to prevent the score of a peer in a topic mesh from being affected by a large number of actual + // message deliveries and also affect the score of the peer in other topic meshes. 
+ // When the total delivered messages in a topic mesh exceeds this value, the score of the peer in that topic + // mesh will not be affected by the actual message deliveries in that topic mesh. + // Moreover, this does not allow the peer to accumulate a large number of actual message deliveries in a topic mesh + // and then start under-performing in that topic mesh without being penalized. + defaultTopicMeshMessageDeliveriesCap = 1000 + + // defaultTopicMeshMessageDeliveryThreshold is the threshold for the number of actual message deliveries in a + // topic mesh that is used to calculate the score of a peer in that topic mesh. + // If the number of actual message deliveries in a topic mesh is less than this value, + // the peer will be penalized by the square of the difference between the actual message deliveries and the threshold, + // i.e., -w * (actual - threshold)^2 where `actual` and `threshold` are the actual message deliveries and the + // threshold, respectively, and `w` is the weight (i.e., defaultTopicMeshMessageDeliveriesWeight). + // We set it to 0.1 * defaultTopicMeshMessageDeliveriesCap, which means that if a peer delivers less than 10% of the + // maximum number of actual message deliveries in a topic mesh, it will be considered as an under-performing peer + // in that topic mesh. + defaultTopicMeshMessageDeliveryThreshold = 0.1 * defaultTopicMeshMessageDeliveriesCap + + // defaultTopicMeshMessageDeliveriesWeight is the weight for applying a penalty when a peer is under-performing in a topic mesh. + // Upon every decay interval, if the number of actual message deliveries is less than the topic mesh message delivery threshold + // (i.e., defaultTopicMeshMessageDeliveryThreshold), the peer will be penalized by the square of the difference between the actual + // message deliveries and the threshold, multiplied by this weight, i.e., -w * (actual - threshold)^2 where w is the weight, and + // `actual` and `threshold` are the actual message deliveries and the threshold, respectively. + // We set this value to -0.05 * MaxAppSpecificReward / (defaultTopicMeshMessageDeliveryThreshold^2). This guarantees that even if a peer + // is not delivering any message in a topic mesh, it will not be disconnected. + // Rather, part of the MaxAppSpecificReward that is awarded by our app-specific scoring function to all staked + // nodes by default will be withdrawn, and the peer will be slightly penalized. In other words, under-performing in a topic mesh + // will drop the overall score of a peer by 5% of the MaxAppSpecificReward that is awarded by our app-specific scoring function. + // It means that under-performing in a topic mesh will not cause a peer to be disconnected, but it will cause the peer to lose + // part of the MaxAppSpecificReward that is awarded by our app-specific scoring function. + // At this point, we do not want to disconnect a peer only because it is under-performing in a topic mesh as it might be + // causing a false positive network partition. + // TODO: we must increase the penalty for under-performing in a topic mesh in the future, and disconnect the peer if it is under-performing. + defaultTopicMeshMessageDeliveriesWeight = -0.05 * MaxAppSpecificReward / (defaultTopicMeshMessageDeliveryThreshold * defaultTopicMeshMessageDeliveryThreshold) + + // defaultMeshMessageDeliveriesWindow is the window size, i.e., the time interval within which we count a delivery of an already + // seen message towards the score of a peer in a topic mesh.
The delivery is counted + // by GossipSub only if the previous sender of the message is different from the current sender. + // We set it to the decay interval of the GossipSub scoring system, which is 1 minute. + // It means that if a peer delivers a message that it has already seen less than one minute ago, + // the delivery will be counted towards the score of the peer in a topic mesh only if the previous sender of the message is different from the current sender. + // This also prevents replay attacks of messages that are older than one minute, as replayed messages will not + // be counted towards the actual message deliveries of a peer in a topic mesh. + defaultMeshMessageDeliveriesWindow = defaultDecayInterval + + // defaultMeshMessageDeliveriesActivation is the time interval that we wait after a new peer joins a topic mesh + // before we start counting the number of actual message deliveries of that peer in that topic mesh. + // We set it to 2 * defaultDecayInterval, which means that we wait for 2 decay intervals before we start counting + // the number of actual message deliveries of a peer in a topic mesh. + // With a default decay interval of 1 minute, it means that we wait for 2 minutes before we start counting the + // number of actual message deliveries of a peer in a topic mesh. This is to account for + // the time that it takes for a peer to start up and receive messages from other peers in the topic mesh. + defaultMeshMessageDeliveriesActivation = 2 * defaultDecayInterval + + // defaultBehaviourPenaltyThreshold is the threshold at which the behavior of a peer is considered bad by GossipSub. + // Currently, the misbehavior is defined as advertising an iHave without responding to the iWants (iHave broken promises), as well as attempting + // to GRAFT when the peer is considered for a PRUNE backoff, i.e., the local peer does not allow the peer to join the local topic mesh + // for a while, and the remote peer keeps attempting to GRAFT (aka GRAFT flood). + // When the misbehavior counter of a peer goes beyond this threshold, the peer is penalized by defaultBehaviourPenaltyWeight (see below) for the excess misbehavior. + // + // An iHave broken promise means that a peer advertises an iHave for a message, but does not respond to the iWant requests for that message. + // For iHave broken promises, the gossipsub scoring works as follows: + // It samples ONLY A SINGLE iHave out of the entire RPC. + // If that iHave is not followed by an actual message within the next 3 seconds, the peer misbehavior counter is incremented by 1. + // + // We set it to 10, meaning that we tolerate at most 10 such RPCs containing iHave broken promises. After that, the peer is penalized for every + // excess RPC containing iHave broken promises. + // The counter is also decayed by a factor of 0.99 every decay interval (defaultDecayInterval), i.e., every minute. + // Note that misbehaviors are counted by GossipSub across all topics (and are different from the Application Layer Misbehaviors that we count through + // the ALSP system). + defaultBehaviourPenaltyThreshold = 10 + + // defaultBehaviourPenaltyWeight is the weight for applying a penalty when a peer's misbehavior goes beyond the threshold.
+ // Misbehavior of a peer at the gossipsub layer is defined as advertising an iHave without responding to the iWants (broken promises), as well as attempting + // to GRAFT when the peer is considered for a PRUNE backoff, i.e., the local peer does not allow the peer to join the local topic mesh for a while. + // This is detected by the GossipSub scoring system, and the peer is penalized by defaultBehaviourPenaltyWeight. + // + // An iHave broken promise means that a peer advertises an iHave for a message, but does not respond to the iWant requests for that message. + // For iHave broken promises, the gossipsub scoring works as follows: + // It samples ONLY A SINGLE iHave out of the entire RPC. + // If that iHave is not followed by an actual message within the next 3 seconds, the peer misbehavior counter is incremented by 1. + // + // The penalty is applied to the square of the difference between the misbehavior counter and the threshold, i.e., -|w| * (misbehavior counter - threshold)^2. + // We set it to 0.01 * MaxAppSpecificPenalty, which means that misbehaving 10 times more than the threshold (i.e., 10 + 10) will cause the peer to lose + // its entire MaxAppSpecificReward that is awarded by our app-specific scoring function to all staked (i.e., authorized) nodes by default. + // Moreover, as the MaxAppSpecificPenalty is -MaxAppSpecificReward, misbehaving sqrt(2) * 10 times more than the threshold will cause the peer score + // to be dropped below the MaxAppSpecificPenalty, which is also below the GraylistThreshold, and the peer will be graylisted (i.e., disconnected). + // + // The math is as follows: -|w| * (misbehavior - threshold)^2 = 0.01 * MaxAppSpecificPenalty * (misbehavior - threshold)^2 < 2 * MaxAppSpecificPenalty + // if misbehavior > threshold + sqrt(2) * 10. + // As shown above, with this choice of defaultBehaviourPenaltyWeight, misbehaving sqrt(2) * 10 times more than the threshold will cause the peer score + // to be dropped below the MaxAppSpecificPenalty, which is also below the GraylistThreshold, and the peer will be graylisted (i.e., disconnected). This weight + // is chosen so that only a few misbehaviors beyond the threshold are enough for the peer to be graylisted. The rationale relies on the fact that + // the misbehavior counter is incremented by 1 for each RPC containing one or more broken promises. Hence, it is per RPC, and not per broken promise. + // Sending sqrt(2) * 10 RPCs with broken promises is a blatant misbehavior, and the peer should be graylisted. With a decay interval of 1 minute and a decay value of + // 0.99, we expect a node graylisted due to broken promises to get back in about 527 minutes, i.e., (0.99)^x * (sqrt(2) * 10)^2 * MaxAppSpecificPenalty > GraylistThreshold + // where x is the number of decay intervals that the peer is graylisted. As MaxAppSpecificPenalty and GraylistThreshold are close (and both negative, so dividing by MaxAppSpecificPenalty flips the inequality), we can simplify the inequality + // to (0.99)^x * (sqrt(2) * 10)^2 < 1 --> (0.99)^x * 200 < 1 --> (0.99)^x < 1/200 --> x > log(1/200) / log(0.99) --> x > 527.17 decay intervals, i.e., 527 minutes. + // Note that misbehaviors are counted by GossipSub across all topics (and are different from the Application Layer Misbehaviors that we count through + // the ALSP system that are reported by the engines). + defaultBehaviourPenaltyWeight = 0.01 * MaxAppSpecificPenalty + + // defaultBehaviourPenaltyDecay is the decay factor for the misbehavior counter of a peer.
The misbehavior counter is + // incremented by GossipSub for iHave broken promises or GRAFT flooding attacks (i.e., each GRAFT received from a remote peer while that peer is on a PRUNE backoff). + // + // An iHave broken promise means that a peer advertises an iHave for a message, but does not respond to the iWant requests for that message. + // For iHave broken promises, the gossipsub scoring works as follows: + // It samples ONLY A SINGLE iHave out of the entire RPC. + // If that iHave is not followed by an actual message within the next 3 seconds, the peer misbehavior counter is incremented by 1. + // This means that regardless of how many iHave broken promises an RPC contains, the misbehavior counter is incremented by 1. + // That is why we decay the misbehavior counter very slowly, as this counter indicates a severe misbehavior. + // + // The misbehavior counter is decayed per decay interval (i.e., defaultDecayInterval = 1 minute) by GossipSub. + // We set it to 0.99, which means that the misbehavior counter is decayed by 1% per decay interval. + // With the generous threshold that we set (i.e., defaultBehaviourPenaltyThreshold = 10), we treat peers going beyond the threshold as persistent misbehavers. + // We expect honest peers never to go beyond the threshold, and if they do, we expect them to go back below the threshold quickly. + // + // Note that misbehaviors are counted by GossipSub across all topics (and are different from the Application Layer Misbehaviors that we count through + // the ALSP system, based on the engines' reports). + defaultBehaviourPenaltyDecay = 0.99 ) // ScoreOption is a functional option for configuring the peer scoring system. @@ -160,6 +307,7 @@ type ScoreOptionConfig struct { cacheSize uint32 cacheMetrics module.HeroCacheMetrics appScoreFunc func(peer.ID) float64 + decayInterval time.Duration // the decay interval; when set to 0, the default value will be used. topicParams []func(map[string]*pubsub.TopicScoreParams) registerNotificationConsumerFunc func(p2p.GossipSubInvCtrlMsgNotifConsumer) } @@ -189,12 +337,12 @@ func (c *ScoreOptionConfig) SetCacheMetrics(metrics module.HeroCacheMetrics) { c.cacheMetrics = metrics } -// SetAppSpecificScoreFunction sets the app specific penalty function for the penalty option. +// OverrideAppSpecificScoreFunction sets the app specific penalty function for the penalty option. // It is used to calculate the app specific penalty of a peer. // If the app specific penalty function is not set, the default one is used. // Note that it is always safer to use the default one, unless you know what you are doing. // It is safe to call this method multiple times, the last call will be used. -func (c *ScoreOptionConfig) SetAppSpecificScoreFunction(appSpecificScoreFunction func(peer.ID) float64) { +func (c *ScoreOptionConfig) OverrideAppSpecificScoreFunction(appSpecificScoreFunction func(peer.ID) float64) { c.appScoreFunc = appSpecificScoreFunction } @@ -214,6 +362,21 @@ func (c *ScoreOptionConfig) SetRegisterNotificationConsumerFunc(f func(p2p.Gossi c.registerNotificationConsumerFunc = f } +// OverrideDecayInterval overrides the default decay interval for the penalty option. The decay interval is the time +// interval at which the decay values are applied and peer scores are updated. +// Note: It is always recommended to use the default value unless you know what you are doing. Hence, calling this method +// is not recommended in production.
+// Args: +// +// interval: the decay interval. +// +// Returns: +// none +func (c *ScoreOptionConfig) OverrideDecayInterval(interval time.Duration) { + c.decayInterval = interval +} + // NewScoreOption creates a new penalty option with the given configuration. func NewScoreOption(cfg *ScoreOptionConfig) *ScoreOption { throttledSampler := logging.BurstSampler(MaxDebugLogs, time.Second) @@ -239,14 +402,27 @@ func NewScoreOption(cfg *ScoreOptionConfig) *ScoreOption { logger: logger, validator: validator, peerScoreParams: defaultPeerScoreParams(), + appScoreFunc: scoreRegistry.AppSpecificScoreFunc(), } // set the app specific penalty function for the penalty option // if the app specific penalty function is not set, use the default one - if cfg.appScoreFunc == nil { - s.appScoreFunc = scoreRegistry.AppSpecificScoreFunc() - } else { + if cfg.appScoreFunc != nil { s.appScoreFunc = cfg.appScoreFunc + s.logger. + Warn(). + Str(logging.KeyNetworkingSecurity, "true"). + Msg("app specific score function is overridden, should never happen in production") + } + + if cfg.decayInterval > 0 { + // overrides the default decay interval if the decay interval is set. + s.peerScoreParams.DecayInterval = cfg.decayInterval + s.logger. + Warn(). + Str(logging.KeyNetworkingSecurity, "true"). + Dur("decay_interval_ms", cfg.decayInterval). + Msg("decay interval is overridden, should never happen in production") } // registers the score registry as the consumer of the invalid control message notifications @@ -308,19 +484,21 @@ func (s *ScoreOption) preparePeerScoreThresholds() { func (s *ScoreOption) TopicScoreParams(topic *pubsub.Topic) *pubsub.TopicScoreParams { params, exists := s.peerScoreParams.Topics[topic.String()] if !exists { - return defaultTopicScoreParams() + return DefaultTopicScoreParams() } return params } func defaultPeerScoreParams() *pubsub.PeerScoreParams { + // DO NOT CHANGE THE DEFAULT VALUES, THEY ARE TUNED FOR THE BEST SECURITY PRACTICES. return &pubsub.PeerScoreParams{ Topics: make(map[string]*pubsub.TopicScoreParams), // we don't set all the parameters, so we skip the atomic validation. // atomic validation fails initialization if any parameter is not set. SkipAtomicValidation: true, - // DecayInterval is the interval over which we decay the effect of past behavior. So that - // a good or bad behavior will not have a permanent effect on the penalty. + // DecayInterval is the interval over which we decay the effect of past behavior, so that + // a good or bad behavior will not have a permanent effect on the penalty. It is also the interval + // that GossipSub uses to refresh the scores of all peers. DecayInterval: defaultDecayInterval, // DecayToZero defines the maximum value below which a peer scoring counter is reset to zero. // This is to prevent the counter from decaying to a very small value. @@ -329,16 +507,36 @@ func defaultPeerScoreParams() *pubsub.PeerScoreParams { DecayToZero: defaultDecayToZero, // AppSpecificWeight is the weight of the application specific penalty. AppSpecificWeight: DefaultAppSpecificScoreWeight, + // BehaviourPenaltyThreshold is the threshold above which a peer is penalized for GossipSub-level misbehaviors. + BehaviourPenaltyThreshold: defaultBehaviourPenaltyThreshold, + // BehaviourPenaltyWeight is the weight of the GossipSub-level penalty. + BehaviourPenaltyWeight: defaultBehaviourPenaltyWeight, + // BehaviourPenaltyDecay is the decay of the GossipSub-level penalty (applied every decay interval). 
+ BehaviourPenaltyDecay: defaultBehaviourPenaltyDecay, } } -// defaultTopicScoreParams returns the default score params for topics. -func defaultTopicScoreParams() *pubsub.TopicScoreParams { - return &pubsub.TopicScoreParams{ - TopicWeight: defaultTopicWeight, - SkipAtomicValidation: defaultTopicSkipAtomicValidation, - InvalidMessageDeliveriesWeight: defaultTopicInvalidMessageDeliveriesWeight, - InvalidMessageDeliveriesDecay: defaultTopicInvalidMessageDeliveriesDecay, - TimeInMeshQuantum: defaultTopicTimeInMesh, +// DefaultTopicScoreParams returns the default score params for topics. +func DefaultTopicScoreParams() *pubsub.TopicScoreParams { + // DO NOT CHANGE THE DEFAULT VALUES, THEY ARE TUNED FOR THE BEST SECURITY PRACTICES. + p := &pubsub.TopicScoreParams{ + TopicWeight: defaultTopicWeight, + SkipAtomicValidation: defaultTopicSkipAtomicValidation, + InvalidMessageDeliveriesWeight: defaultTopicInvalidMessageDeliveriesWeight, + InvalidMessageDeliveriesDecay: defaultTopicInvalidMessageDeliveriesDecay, + TimeInMeshQuantum: defaultTopicTimeInMesh, + MeshMessageDeliveriesWeight: defaultTopicMeshMessageDeliveriesWeight, + MeshMessageDeliveriesDecay: defaultTopicMeshMessageDeliveriesDecay, + MeshMessageDeliveriesCap: defaultTopicMeshMessageDeliveriesCap, + MeshMessageDeliveriesThreshold: defaultTopicMeshMessageDeliveryThreshold, + MeshMessageDeliveriesWindow: defaultMeshMessageDeliveriesWindow, + MeshMessageDeliveriesActivation: defaultMeshMessageDeliveriesActivation, } + + if p.MeshMessageDeliveriesWeight >= 0 { + // GossipSub also does a validation, but we want to panic as early as possible. + panic(fmt.Sprintf("invalid mesh message deliveries weight %f", p.MeshMessageDeliveriesWeight)) + } + + return p } diff --git a/network/p2p/scoring/scoring_test.go b/network/p2p/scoring/scoring_test.go index db43c59a055..47e6f27cb57 100644 --- a/network/p2p/scoring/scoring_test.go +++ b/network/p2p/scoring/scoring_test.go @@ -92,7 +92,7 @@ func TestInvalidCtrlMsgScoringIntegration(t *testing.T) { t.Name(), idProvider, p2ptest.WithRole(flow.RoleConsensus), - p2ptest.WithPeerScoringEnabled(idProvider), + p2ptest.EnablePeerScoringWithOverride(p2p.PeerScoringConfigNoOverride), p2ptest.OverrideGossipSubRpcInspectorSuiteFactory(func(zerolog.Logger, flow.Identifier, *p2pconf.GossipSubRPCInspectorsConfig, @@ -110,7 +110,7 @@ func TestInvalidCtrlMsgScoringIntegration(t *testing.T) { t.Name(), idProvider, p2ptest.WithRole(flow.RoleConsensus), - p2ptest.WithPeerScoringEnabled(idProvider)) + p2ptest.EnablePeerScoringWithOverride(p2p.PeerScoringConfigNoOverride)) ids := flow.IdentityList{&id1, &id2} nodes := []p2p.LibP2PNode{node1, node2} @@ -128,11 +128,10 @@ func TestInvalidCtrlMsgScoringIntegration(t *testing.T) { defer p2ptest.StopNodes(t, nodes, cancel, 2*time.Second) p2ptest.LetNodesDiscoverEachOther(t, ctx, nodes, ids) - + blockTopic := channels.TopicFromChannel(channels.PushBlocks, sporkId) // checks end-to-end message delivery works on GossipSub - p2ptest.EnsurePubsubMessageExchange(t, ctx, nodes, func() (interface{}, channels.Topic) { - blockTopic := channels.TopicFromChannel(channels.PushBlocks, sporkId) - return unittest.ProposalFixture(), blockTopic + p2ptest.EnsurePubsubMessageExchange(t, ctx, nodes, blockTopic, 1, func() interface{} { + return unittest.ProposalFixture() }) // now simulates node2 spamming node1 with invalid gossipsub control messages. 
@@ -146,8 +145,7 @@ func TestInvalidCtrlMsgScoringIntegration(t *testing.T) { } // checks no GossipSub message exchange should no longer happen between node1 and node2. - p2ptest.EnsureNoPubsubExchangeBetweenGroups(t, ctx, []p2p.LibP2PNode{node1}, []p2p.LibP2PNode{node2}, func() (interface{}, channels.Topic) { - blockTopic := channels.TopicFromChannel(channels.PushBlocks, sporkId) - return unittest.ProposalFixture(), blockTopic + p2ptest.EnsureNoPubsubExchangeBetweenGroups(t, ctx, []p2p.LibP2PNode{node1}, []p2p.LibP2PNode{node2}, blockTopic, 1, func() interface{} { + return unittest.ProposalFixture() }) } diff --git a/network/p2p/scoring/subscription_validator_test.go b/network/p2p/scoring/subscription_validator_test.go index 3bba66c6199..549006b3bde 100644 --- a/network/p2p/scoring/subscription_validator_test.go +++ b/network/p2p/scoring/subscription_validator_test.go @@ -178,20 +178,20 @@ func TestSubscriptionValidator_Integration(t *testing.T) { conNode, conId := p2ptest.NodeFixture(t, sporkId, t.Name(), idProvider, p2ptest.WithLogger(unittest.Logger()), - p2ptest.WithPeerScoringEnabled(idProvider), + p2ptest.EnablePeerScoringWithOverride(p2p.PeerScoringConfigNoOverride), p2ptest.WithRole(flow.RoleConsensus)) // two verification node. verNode1, verId1 := p2ptest.NodeFixture(t, sporkId, t.Name(), idProvider, p2ptest.WithLogger(unittest.Logger()), - p2ptest.WithPeerScoringEnabled(idProvider), + p2ptest.EnablePeerScoringWithOverride(p2p.PeerScoringConfigNoOverride), p2ptest.WithRole(flow.RoleVerification)) verNode2, verId2 := p2ptest.NodeFixture(t, sporkId, t.Name(), idProvider, p2ptest.WithLogger(unittest.Logger()), - p2ptest.WithPeerScoringEnabled(idProvider), + p2ptest.EnablePeerScoringWithOverride(p2p.PeerScoringConfigNoOverride), p2ptest.WithRole(flow.RoleVerification)) ids := flow.IdentityList{&conId, &verId1, &verId2} diff --git a/network/p2p/test/fixtures.go b/network/p2p/test/fixtures.go index 5ee82dd4ab5..50ecb5a568f 100644 --- a/network/p2p/test/fixtures.go +++ b/network/p2p/test/fixtures.go @@ -5,6 +5,7 @@ import ( "context" "crypto/rand" crand "math/rand" + "sync" "testing" "time" @@ -145,7 +146,7 @@ func NodeFixture( } if parameters.PeerScoringEnabled { - builder.EnableGossipSubPeerScoring(parameters.PeerScoreConfig) + builder.EnableGossipSubScoringWithOverride(parameters.PeerScoringConfigOverride) } if parameters.GossipSubFactory != nil && parameters.GossipSubConfig != nil { @@ -199,7 +200,7 @@ type NodeFixtureParameters struct { Logger zerolog.Logger PeerScoringEnabled bool IdProvider module.IdentityProvider - PeerScoreConfig *p2p.PeerScoringConfig + PeerScoringConfigOverride *p2p.PeerScoringConfigOverride PeerManagerConfig *p2pconfig.PeerManagerConfig PeerProvider p2p.PeersProvider // peer manager parameter ConnGater p2p.ConnectionGater @@ -240,10 +241,21 @@ func WithCreateStreamRetryDelay(delay time.Duration) NodeFixtureParameterOption } } -func WithPeerScoringEnabled(idProvider module.IdentityProvider) NodeFixtureParameterOption { +// EnablePeerScoringWithOverride enables peer scoring for the GossipSub pubsub system with the given override. +// Any existing peer scoring config attribute that is set in the override will override the default peer scoring config. +// Anything that is left to nil or zero value in the override will be ignored and the default value will be used. +// Note: it is not recommended to override the default peer scoring config in production unless you know what you are doing. 
+// Default Use Tip: use p2p.PeerScoringConfigNoOverride as the argument to this function to enable peer scoring without any override. +// Args: +// - PeerScoringConfigOverride: override for the peer scoring config- Recommended to use p2p.PeerScoringConfigNoOverride for production or when +// you don't want to override the default peer scoring config. +// +// Returns: +// - NodeFixtureParameterOption: a function that can be passed to the NodeFixture function to enable peer scoring. +func EnablePeerScoringWithOverride(override *p2p.PeerScoringConfigOverride) NodeFixtureParameterOption { return func(p *NodeFixtureParameters) { p.PeerScoringEnabled = true - p.IdProvider = idProvider + p.PeerScoringConfigOverride = override } } @@ -308,9 +320,9 @@ func WithRole(role flow.Role) NodeFixtureParameterOption { } } -func WithPeerScoreParamsOption(cfg *p2p.PeerScoringConfig) NodeFixtureParameterOption { +func WithPeerScoreParamsOption(cfg *p2p.PeerScoringConfigOverride) NodeFixtureParameterOption { return func(p *NodeFixtureParameters) { - p.PeerScoreConfig = cfg + p.PeerScoringConfigOverride = cfg } } @@ -549,17 +561,20 @@ func EnsureStreamCreationInBothDirections(t *testing.T, ctx context.Context, nod } // EnsurePubsubMessageExchange ensures that the given connected nodes exchange the given message on the given channel through pubsub. -// Note: TryConnectionAndEnsureConnected() must be called to connect all nodes before calling this function. -func EnsurePubsubMessageExchange(t *testing.T, ctx context.Context, nodes []p2p.LibP2PNode, messageFactory func() (interface{}, channels.Topic)) { - _, topic := messageFactory() - +// Args: +// - nodes: the nodes to exchange messages +// - ctx: the context- the test will fail if the context expires. +// - topic: the topic to exchange messages on +// - count: the number of messages to exchange from each node. +// - messageFactory: a function that creates a unique message to be published by the node. +// The function should return a different message each time it is called. +// +// Note-1: this function assumes a timeout of 5 seconds for each message to be received. +// Note-2: TryConnectionAndEnsureConnected() must be called to connect all nodes before calling this function. +func EnsurePubsubMessageExchange(t *testing.T, ctx context.Context, nodes []p2p.LibP2PNode, topic channels.Topic, count int, messageFactory func() interface{}) { subs := make([]p2p.Subscription, len(nodes)) for i, node := range nodes { - ps, err := node.Subscribe( - topic, - validator.TopicValidator( - unittest.Logger(), - unittest.AllowAllPeerFilter())) + ps, err := node.Subscribe(topic, validator.TopicValidator(unittest.Logger(), unittest.AllowAllPeerFilter())) require.NoError(t, err) subs[i] = ps } @@ -571,14 +586,52 @@ func EnsurePubsubMessageExchange(t *testing.T, ctx context.Context, nodes []p2p. require.True(t, ok) for _, node := range nodes { + for i := 0; i < count; i++ { + // creates a unique message to be published by the node + msg := messageFactory() + data := p2pfixtures.MustEncodeEvent(t, msg, channel) + require.NoError(t, node.Publish(ctx, topic, data)) + + // wait for the message to be received by all nodes + ctx, cancel := context.WithTimeout(ctx, 5*time.Second) + p2pfixtures.SubsMustReceiveMessage(t, ctx, data, subs) + cancel() + } + } +} + +// EnsurePubsubMessageExchangeFromNode ensures that the given node exchanges the given message on the given channel through pubsub with the other nodes. 
+// Args: +// - node: the node to exchange messages +// +// - ctx: the context- the test will fail if the context expires. +// - sender: the node that sends the message to the other node. +// - receiver: the node that receives the message from the other node. +// - topic: the topic to exchange messages on. +// - count: the number of messages to exchange from `sender` to `receiver`. +// - messageFactory: a function that creates a unique message to be published by the node. +func EnsurePubsubMessageExchangeFromNode(t *testing.T, ctx context.Context, sender p2p.LibP2PNode, receiver p2p.LibP2PNode, topic channels.Topic, count int, messageFactory func() interface{}) { + _, err := sender.Subscribe(topic, validator.TopicValidator(unittest.Logger(), unittest.AllowAllPeerFilter())) + require.NoError(t, err) + + toSub, err := receiver.Subscribe(topic, validator.TopicValidator(unittest.Logger(), unittest.AllowAllPeerFilter())) + require.NoError(t, err) + + // let subscriptions propagate + time.Sleep(1 * time.Second) + + channel, ok := channels.ChannelFromTopic(topic) + require.True(t, ok) + + for i := 0; i < count; i++ { // creates a unique message to be published by the node - msg, _ := messageFactory() + msg := messageFactory() data := p2pfixtures.MustEncodeEvent(t, msg, channel) - require.NoError(t, node.Publish(ctx, topic, data)) + require.NoError(t, sender.Publish(ctx, topic, data)) // wait for the message to be received by all nodes ctx, cancel := context.WithTimeout(ctx, 5*time.Second) - p2pfixtures.SubsMustReceiveMessage(t, ctx, data, subs) + p2pfixtures.SubsMustReceiveMessage(t, ctx, data, []p2p.Subscription{toSub}) cancel() } } @@ -605,9 +658,14 @@ func EnsureNotConnectedBetweenGroups(t *testing.T, ctx context.Context, groupA [ } // EnsureNoPubsubMessageExchange ensures that the no pubsub message is exchanged "from" the given nodes "to" the given nodes. -func EnsureNoPubsubMessageExchange(t *testing.T, ctx context.Context, from []p2p.LibP2PNode, to []p2p.LibP2PNode, messageFactory func() (interface{}, channels.Topic)) { - _, topic := messageFactory() - +// Args: +// - from: the nodes that send messages to the other group but their message must not be received by the other group. +// +// - to: the nodes that are the target of the messages sent by the other group ("from") but must not receive any message from them. +// - topic: the topic to exchange messages on. +// - count: the number of messages to exchange from each node. +// - messageFactory: a function that creates a unique message to be published by the node. +func EnsureNoPubsubMessageExchange(t *testing.T, ctx context.Context, from []p2p.LibP2PNode, to []p2p.LibP2PNode, topic channels.Topic, count int, messageFactory func() interface{}) { subs := make([]p2p.Subscription, len(to)) tv := validator.TopicValidator( unittest.Logger(), @@ -627,27 +685,47 @@ func EnsureNoPubsubMessageExchange(t *testing.T, ctx context.Context, from []p2p // let subscriptions propagate time.Sleep(1 * time.Second) + wg := &sync.WaitGroup{} for _, node := range from { - // creates a unique message to be published by the node. - msg, _ := messageFactory() - channel, ok := channels.ChannelFromTopic(topic) - require.True(t, ok) - data := p2pfixtures.MustEncodeEvent(t, msg, channel) - - // ensure the message is NOT received by any of the nodes. 
- require.NoError(t, node.Publish(ctx, topic, data)) - ctx, cancel := context.WithTimeout(ctx, 5*time.Second) - p2pfixtures.SubsMustNeverReceiveAnyMessage(t, ctx, subs) - cancel() + node := node // capture range variable + for i := 0; i < count; i++ { + wg.Add(1) + go func() { + // creates a unique message to be published by the node. + msg := messageFactory() + channel, ok := channels.ChannelFromTopic(topic) + require.True(t, ok) + data := p2pfixtures.MustEncodeEvent(t, msg, channel) + + // ensure the message is NOT received by any of the nodes. + require.NoError(t, node.Publish(ctx, topic, data)) + ctx, cancel := context.WithTimeout(ctx, 5*time.Second) + p2pfixtures.SubsMustNeverReceiveAnyMessage(t, ctx, subs) + cancel() + wg.Done() + }() + } } + + // we wait for 5 seconds at most for the messages to be exchanged, hence we wait for a total of 6 seconds here to ensure + // that the goroutines are done in a timely manner. + unittest.RequireReturnsBefore(t, wg.Wait, 6*time.Second, "timed out waiting for messages to be exchanged") } // EnsureNoPubsubExchangeBetweenGroups ensures that no pubsub message is exchanged between the given groups of nodes. -func EnsureNoPubsubExchangeBetweenGroups(t *testing.T, ctx context.Context, groupA []p2p.LibP2PNode, groupB []p2p.LibP2PNode, messageFactory func() (interface{}, channels.Topic)) { +// Args: +// - t: *testing.T instance +// - ctx: context.Context instance +// - groupA: first group of nodes- no message should be exchanged from any node of this group to the other group. +// - groupB: second group of nodes- no message should be exchanged from any node of this group to the other group. +// - topic: pubsub topic- no message should be exchanged on this topic. +// - count: number of messages to be exchanged- no message should be exchanged. +// - messageFactory: function to create a unique message to be published by the node. +func EnsureNoPubsubExchangeBetweenGroups(t *testing.T, ctx context.Context, groupA []p2p.LibP2PNode, groupB []p2p.LibP2PNode, topic channels.Topic, count int, messageFactory func() interface{}) { // ensure no message exchange from group A to group B - EnsureNoPubsubMessageExchange(t, ctx, groupA, groupB, messageFactory) + EnsureNoPubsubMessageExchange(t, ctx, groupA, groupB, topic, count, messageFactory) // ensure no message exchange from group B to group A - EnsureNoPubsubMessageExchange(t, ctx, groupB, groupA, messageFactory) + EnsureNoPubsubMessageExchange(t, ctx, groupB, groupA, topic, count, messageFactory) } // PeerIdSliceFixture returns a slice of random peer IDs for testing. 
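Reviewer note, not part of the patch: the sketch below shows how a caller adapts to the reworked helper, now that the topic and per-node message count are explicit arguments and the factory returns only the event to publish. It assumes the nodes were already created, started, and connected through the existing fixtures (e.g. TryConnectionAndEnsureConnected). The package and function names (example_test, exampleBlockProposalExchange) are illustrative only, and unittest.ProposalFixture is assumed to be the usual *messages.BlockProposal fixture used elsewhere in these tests.

package example_test

import (
	"context"
	"testing"

	"github.com/onflow/flow-go/model/flow"
	"github.com/onflow/flow-go/network/channels"
	"github.com/onflow/flow-go/network/p2p"
	p2ptest "github.com/onflow/flow-go/network/p2p/test"
	"github.com/onflow/flow-go/utils/unittest"
)

// exampleBlockProposalExchange publishes one block proposal from every node on the block
// propagation topic and expects every subscriber to receive it; a larger count would make
// each node publish that many distinct messages.
func exampleBlockProposalExchange(t *testing.T, ctx context.Context, sporkId flow.Identifier, nodes []p2p.LibP2PNode) {
	blockTopic := channels.TopicFromChannel(channels.PushBlocks, sporkId)
	p2ptest.EnsurePubsubMessageExchange(t, ctx, nodes, blockTopic, 1, func() interface{} {
		return unittest.ProposalFixture()
	})
}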
diff --git a/network/p2p/test/topic_validator_test.go b/network/p2p/test/topic_validator_test.go index b6f0dfe7ba5..5a7e402b141 100644 --- a/network/p2p/test/topic_validator_test.go +++ b/network/p2p/test/topic_validator_test.go @@ -10,14 +10,19 @@ import ( "github.com/libp2p/go-libp2p/core/peer" "github.com/stretchr/testify/require" + "github.com/stretchr/testify/mock" + "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/model/messages" "github.com/onflow/flow-go/module/irrecoverable" "github.com/onflow/flow-go/module/metrics" mockmodule "github.com/onflow/flow-go/module/mock" + "github.com/onflow/flow-go/network" + "github.com/onflow/flow-go/network/alsp" "github.com/onflow/flow-go/network/channels" "github.com/onflow/flow-go/network/internal/p2pfixtures" "github.com/onflow/flow-go/network/message" + "github.com/onflow/flow-go/network/mocknetwork" "github.com/onflow/flow-go/network/p2p" p2ptest "github.com/onflow/flow-go/network/p2p/test" "github.com/onflow/flow-go/network/p2p/translator" @@ -51,12 +56,12 @@ func TestTopicValidator_Unstaked(t *testing.T) { //NOTE: identity2 is not in the ids list simulating an un-staked node ids := flow.IdentityList{&identity1} - translator, err := translator.NewFixedTableIdentityTranslator(ids) + translatorFixture, err := translator.NewFixedTableIdentityTranslator(ids) require.NoError(t, err) // peer filter used by the topic validator to check if node is staked isStaked := func(pid peer.ID) error { - fid, err := translator.GetFlowID(pid) + fid, err := translatorFixture.GetFlowID(pid) if err != nil { return fmt.Errorf("could not translate the peer_id %s to a Flow identifier: %w", pid.String(), err) } @@ -272,8 +277,7 @@ func TestAuthorizedSenderValidator_Unauthorized(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) signalerCtx := irrecoverable.NewMockSignalerContext(t, ctx) idProvider := mockmodule.NewIdentityProvider(t) - // create a hooked logger - logger, hook := unittest.HookedLogger() + logger := unittest.Logger() sporkId := unittest.IdentifierFixture() @@ -292,12 +296,22 @@ func TestAuthorizedSenderValidator_Unauthorized(t *testing.T) { ids := flow.IdentityList{&identity1, &identity2, &identity3} - translator, err := translator.NewFixedTableIdentityTranslator(ids) + translatorFixture, err := translator.NewFixedTableIdentityTranslator(ids) require.NoError(t, err) - violationsConsumer := slashing.NewSlashingViolationsConsumer(logger, metrics.NewNoopCollector()) + violation := &network.Violation{ + Identity: &identity3, + PeerID: an1.Host().ID().String(), + OriginID: identity3.NodeID, + MsgType: "*messages.BlockProposal", + Channel: channel, + Protocol: message.ProtocolTypePubSub, + Err: message.ErrUnauthorizedRole, + } + violationsConsumer := mocknetwork.NewViolationsConsumer(t) + violationsConsumer.On("OnUnAuthorizedSenderError", violation).Once().Return(nil) getIdentity := func(pid peer.ID) (*flow.Identity, bool) { - fid, err := translator.GetFlowID(pid) + fid, err := translatorFixture.GetFlowID(pid) if err != nil { return &flow.Identity{}, false } @@ -373,9 +387,6 @@ func TestAuthorizedSenderValidator_Unauthorized(t *testing.T) { p2pfixtures.SubMustNeverReceiveAnyMessage(t, timedCtx, sub2) unittest.RequireReturnsBefore(t, wg.Wait, 5*time.Second, "could not receive message on time") - - // ensure the correct error is contained in the logged error - require.Contains(t, hook.Logs(), message.ErrUnauthorizedRole.Error()) } // TestAuthorizedSenderValidator_Authorized tests that the authorized sender validator 
rejects messages being sent on the wrong channel @@ -401,12 +412,16 @@ func TestAuthorizedSenderValidator_InvalidMsg(t *testing.T) { topic := channels.TopicFromChannel(channel, sporkId) ids := flow.IdentityList{&identity1, &identity2} - translator, err := translator.NewFixedTableIdentityTranslator(ids) + translatorFixture, err := translator.NewFixedTableIdentityTranslator(ids) require.NoError(t, err) - violationsConsumer := slashing.NewSlashingViolationsConsumer(logger, metrics.NewNoopCollector()) + expectedMisbehaviorReport, err := alsp.NewMisbehaviorReport(identity2.NodeID, alsp.UnAuthorizedSender) + require.NoError(t, err) + misbehaviorReportConsumer := mocknetwork.NewMisbehaviorReportConsumer(t) + misbehaviorReportConsumer.On("ReportMisbehaviorOnChannel", channel, expectedMisbehaviorReport).Once() + violationsConsumer := slashing.NewSlashingViolationsConsumer(logger, metrics.NewNoopCollector(), misbehaviorReportConsumer) getIdentity := func(pid peer.ID) (*flow.Identity, bool) { - fid, err := translator.GetFlowID(pid) + fid, err := translatorFixture.GetFlowID(pid) if err != nil { return &flow.Identity{}, false } @@ -474,12 +489,16 @@ func TestAuthorizedSenderValidator_Ejected(t *testing.T) { topic := channels.TopicFromChannel(channel, sporkId) ids := flow.IdentityList{&identity1, &identity2, &identity3} - translator, err := translator.NewFixedTableIdentityTranslator(ids) + translatorFixture, err := translator.NewFixedTableIdentityTranslator(ids) require.NoError(t, err) - violationsConsumer := slashing.NewSlashingViolationsConsumer(logger, metrics.NewNoopCollector()) + expectedMisbehaviorReport, err := alsp.NewMisbehaviorReport(identity2.NodeID, alsp.SenderEjected) + require.NoError(t, err) + misbehaviorReportConsumer := mocknetwork.NewMisbehaviorReportConsumer(t) + misbehaviorReportConsumer.On("ReportMisbehaviorOnChannel", channel, expectedMisbehaviorReport).Once() + violationsConsumer := slashing.NewSlashingViolationsConsumer(logger, metrics.NewNoopCollector(), misbehaviorReportConsumer) getIdentity := func(pid peer.ID) (*flow.Identity, bool) { - fid, err := translator.GetFlowID(pid) + fid, err := translatorFixture.GetFlowID(pid) if err != nil { return &flow.Identity{}, false } @@ -568,13 +587,15 @@ func TestAuthorizedSenderValidator_ClusterChannel(t *testing.T) { topic := channels.TopicFromChannel(channel, sporkId) ids := flow.IdentityList{&identity1, &identity2, &identity3} - translator, err := translator.NewFixedTableIdentityTranslator(ids) + translatorFixture, err := translator.NewFixedTableIdentityTranslator(ids) require.NoError(t, err) logger := unittest.Logger() - violationsConsumer := slashing.NewSlashingViolationsConsumer(logger, metrics.NewNoopCollector()) + misbehaviorReportConsumer := mocknetwork.NewMisbehaviorReportConsumer(t) + defer misbehaviorReportConsumer.AssertNotCalled(t, "ReportMisbehaviorOnChannel", mock.AnythingOfType("channels.Channel"), mock.AnythingOfType("*alsp.MisbehaviorReport")) + violationsConsumer := slashing.NewSlashingViolationsConsumer(logger, metrics.NewNoopCollector(), misbehaviorReportConsumer) getIdentity := func(pid peer.ID) (*flow.Identity, bool) { - fid, err := translator.GetFlowID(pid) + fid, err := translatorFixture.GetFlowID(pid) if err != nil { return &flow.Identity{}, false } diff --git a/network/p2p/tracer/gossipSubMeshTracer.go b/network/p2p/tracer/gossipSubMeshTracer.go index 7cd4dd2b692..1cc25fd2565 100644 --- a/network/p2p/tracer/gossipSubMeshTracer.go +++ b/network/p2p/tracer/gossipSubMeshTracer.go @@ -14,6 +14,7 @@ import ( 
"github.com/onflow/flow-go/module/component" "github.com/onflow/flow-go/module/irrecoverable" "github.com/onflow/flow-go/network/p2p" + "github.com/onflow/flow-go/network/p2p/tracer/internal" "github.com/onflow/flow-go/utils/logging" ) @@ -43,23 +44,35 @@ type GossipSubMeshTracer struct { idProvider module.IdentityProvider loggerInterval time.Duration metrics module.GossipSubLocalMeshMetrics + rpcSentTracker *internal.RPCSentTracker } var _ p2p.PubSubTracer = (*GossipSubMeshTracer)(nil) -func NewGossipSubMeshTracer( - logger zerolog.Logger, - metrics module.GossipSubLocalMeshMetrics, - idProvider module.IdentityProvider, - loggerInterval time.Duration) *GossipSubMeshTracer { +type GossipSubMeshTracerConfig struct { + Logger zerolog.Logger + Metrics module.GossipSubLocalMeshMetrics + IDProvider module.IdentityProvider + LoggerInterval time.Duration + RpcSentTrackerCacheCollector module.HeroCacheMetrics + RpcSentTrackerCacheSize uint32 +} +// NewGossipSubMeshTracer creates a new *GossipSubMeshTracer. +// Args: +// - *GossipSubMeshTracerConfig: the mesh tracer config. +// Returns: +// - *GossipSubMeshTracer: new mesh tracer. +func NewGossipSubMeshTracer(config *GossipSubMeshTracerConfig) *GossipSubMeshTracer { + rpcSentTracker := internal.NewRPCSentTracker(config.Logger, config.RpcSentTrackerCacheSize, config.RpcSentTrackerCacheCollector) g := &GossipSubMeshTracer{ RawTracer: NewGossipSubNoopTracer(), topicMeshMap: make(map[string]map[peer.ID]struct{}), - idProvider: idProvider, - metrics: metrics, - logger: logger.With().Str("component", "gossip_sub_topology_tracer").Logger(), - loggerInterval: loggerInterval, + idProvider: config.IDProvider, + metrics: config.Metrics, + logger: config.Logger.With().Str("component", "gossipsub_topology_tracer").Logger(), + loggerInterval: config.LoggerInterval, + rpcSentTracker: rpcSentTracker, } g.Component = component.NewComponentManagerBuilder(). @@ -139,6 +152,15 @@ func (t *GossipSubMeshTracer) Prune(p peer.ID, topic string) { lg.Info().Hex("flow_id", logging.ID(id.NodeID)).Str("role", id.Role.String()).Msg("pruned peer") } +// SendRPC is called when a RPC is sent. Currently, the GossipSubMeshTracer tracks iHave RPC messages that have been sent. +// This function can be updated to track other control messages in the future as required. +func (t *GossipSubMeshTracer) SendRPC(rpc *pubsub.RPC, _ peer.ID) { + switch { + case len(rpc.GetControl().GetIhave()) > 0: + t.rpcSentTracker.OnIHaveRPCSent(rpc.GetControl().GetIhave()) + } +} + // logLoop logs the mesh peers of the local node for each topic at a regular interval. func (t *GossipSubMeshTracer) logLoop(ctx irrecoverable.SignalerContext) { ticker := time.NewTicker(t.loggerInterval) diff --git a/network/p2p/tracer/gossipSubMeshTracer_test.go b/network/p2p/tracer/gossipSubMeshTracer_test.go index fc14b280282..a2da0584f94 100644 --- a/network/p2p/tracer/gossipSubMeshTracer_test.go +++ b/network/p2p/tracer/gossipSubMeshTracer_test.go @@ -11,8 +11,10 @@ import ( "github.com/stretchr/testify/require" "go.uber.org/atomic" + "github.com/onflow/flow-go/config" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/module/irrecoverable" + "github.com/onflow/flow-go/module/metrics" mockmodule "github.com/onflow/flow-go/module/mock" "github.com/onflow/flow-go/network/channels" "github.com/onflow/flow-go/network/p2p" @@ -29,6 +31,8 @@ import ( // One of the nodes is running with an unknown peer id, which the identity provider is mocked to return an error and // the mesh tracer should log a warning message. 
func TestGossipSubMeshTracer(t *testing.T) { + defaultConfig, err := config.DefaultConfig() + require.NoError(t, err) ctx, cancel := context.WithCancel(context.Background()) signalerCtx := irrecoverable.NewMockSignalerContext(t, ctx) sporkId := unittest.IdentifierFixture() @@ -61,7 +65,15 @@ func TestGossipSubMeshTracer(t *testing.T) { // we only need one node with a meshTracer to test the meshTracer. // meshTracer logs at 1 second intervals for sake of testing. collector := mockmodule.NewGossipSubLocalMeshMetrics(t) - meshTracer := tracer.NewGossipSubMeshTracer(logger, collector, idProvider, 1*time.Second) + meshTracerCfg := &tracer.GossipSubMeshTracerConfig{ + Logger: logger, + Metrics: collector, + IDProvider: idProvider, + LoggerInterval: time.Second, + RpcSentTrackerCacheCollector: metrics.NewNoopCollector(), + RpcSentTrackerCacheSize: defaultConfig.NetworkConfig.GossipSubConfig.RPCSentTrackerCacheSize, + } + meshTracer := tracer.NewGossipSubMeshTracer(meshTracerCfg) tracerNode, tracerId := p2ptest.NodeFixture( t, sporkId, diff --git a/network/p2p/tracer/gossipSubScoreTracer_test.go b/network/p2p/tracer/gossipSubScoreTracer_test.go index a759cc2b46f..2a3ea623eb0 100644 --- a/network/p2p/tracer/gossipSubScoreTracer_test.go +++ b/network/p2p/tracer/gossipSubScoreTracer_test.go @@ -83,9 +83,7 @@ func TestGossipSubScoreTracer(t *testing.T) { }), p2ptest.WithLogger(logger), p2ptest.WithPeerScoreTracerInterval(1*time.Second), // set the peer score log interval to 1 second for sake of testing. - p2ptest.WithPeerScoringEnabled(idProvider), // enable peer scoring for sake of testing. - // 4. Sets some fixed scores for the nodes for the sake of testing based on their roles. - p2ptest.WithPeerScoreParamsOption(&p2p.PeerScoringConfig{ + p2ptest.EnablePeerScoringWithOverride(&p2p.PeerScoringConfigOverride{ AppSpecificScoreParams: func(pid peer.ID) float64 { id, ok := idProvider.ByPeerID(pid) require.True(t, ok) diff --git a/network/p2p/tracer/internal/cache.go b/network/p2p/tracer/internal/cache.go new file mode 100644 index 00000000000..b916133b270 --- /dev/null +++ b/network/p2p/tracer/internal/cache.go @@ -0,0 +1,86 @@ +package internal + +import ( + "fmt" + + "github.com/rs/zerolog" + + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/module" + herocache "github.com/onflow/flow-go/module/mempool/herocache/backdata" + "github.com/onflow/flow-go/module/mempool/herocache/backdata/heropool" + "github.com/onflow/flow-go/module/mempool/stdmap" + p2pmsg "github.com/onflow/flow-go/network/p2p/message" +) + +// rpcCtrlMsgSentCacheConfig configuration for the rpc sent cache. +type rpcCtrlMsgSentCacheConfig struct { + logger zerolog.Logger + sizeLimit uint32 + collector module.HeroCacheMetrics +} + +// rpcSentCache cache that stores rpcSentEntity. These entity's represent RPC control messages sent from the local node. +type rpcSentCache struct { + // c is the underlying cache. + c *stdmap.Backend +} + +// newRPCSentCache creates a new *rpcSentCache. +// Args: +// - config: record cache config. +// Returns: +// - *rpcSentCache: the created cache. +// Note that this cache is intended to track control messages sent by the local node, +// it stores a RPCSendEntity using an Id which should uniquely identifies the message being tracked. 
+func newRPCSentCache(config *rpcCtrlMsgSentCacheConfig) *rpcSentCache {
+	backData := herocache.NewCache(config.sizeLimit,
+		herocache.DefaultOversizeFactor,
+		heropool.LRUEjection,
+		config.logger.With().Str("mempool", "gossipsub-rpc-control-messages-sent").Logger(),
+		config.collector)
+	return &rpcSentCache{
+		c: stdmap.NewBackend(stdmap.WithBackData(backData)),
+	}
+}
+
+// add initializes a record in the cache for the given topic, message ID and control message type if one does not already exist.
+// Returns true if the record is initialized, false otherwise (i.e.: the record already exists).
+// Args:
+// - topic: the topic ID.
+// - messageId: the message ID.
+// - controlMsgType: the rpc control message type.
+// Returns:
+// - bool: true if the record is initialized, false otherwise (i.e.: the record already exists).
+// Note that if add is called multiple times for the same topic, message ID and control message type, the record is initialized only once, and the
+// subsequent calls return false and do not change the record (i.e.: the record is not re-initialized).
+func (r *rpcSentCache) add(topic string, messageId string, controlMsgType p2pmsg.ControlMessageType) bool {
+	return r.c.Add(newRPCSentEntity(r.rpcSentEntityID(topic, messageId, controlMsgType), controlMsgType))
+}
+
+// has checks if the RPC message has been cached, indicating it has been sent.
+// Args:
+// - topic: the topic ID.
+// - messageId: the message ID.
+// - controlMsgType: the rpc control message type.
+// Returns:
+// - bool: true if the RPC has been cached, indicating it was sent from the local node.
+func (r *rpcSentCache) has(topic string, messageId string, controlMsgType p2pmsg.ControlMessageType) bool {
+	return r.c.Has(r.rpcSentEntityID(topic, messageId, controlMsgType))
+}
+
+// size returns the number of records in the cache.
+func (r *rpcSentCache) size() uint {
+	return r.c.Size()
+}
+
+// rpcSentEntityID creates an entity ID from the topic, messageID and control message type.
+// Args:
+// - topic: the topic ID.
+// - messageId: the message ID.
+// - controlMsgType: the rpc control message type.
+// Returns:
+// - flow.Identifier: the entity ID.
+func (r *rpcSentCache) rpcSentEntityID(topic string, messageId string, controlMsgType p2pmsg.ControlMessageType) flow.Identifier {
+	return flow.MakeIDFromFingerPrint([]byte(fmt.Sprintf("%s%s%s", topic, messageId, controlMsgType)))
+}
diff --git a/network/p2p/tracer/internal/cache_test.go b/network/p2p/tracer/internal/cache_test.go
new file mode 100644
index 00000000000..c92b42b5e02
--- /dev/null
+++ b/network/p2p/tracer/internal/cache_test.go
@@ -0,0 +1,122 @@
+package internal
+
+import (
+	"sync"
+	"testing"
+	"time"
+
+	"github.com/rs/zerolog"
+	"github.com/stretchr/testify/require"
+	"go.uber.org/atomic"
+
+	"github.com/onflow/flow-go/model/flow"
+	"github.com/onflow/flow-go/module"
+	"github.com/onflow/flow-go/module/metrics"
+	"github.com/onflow/flow-go/network/channels"
+	p2pmsg "github.com/onflow/flow-go/network/p2p/message"
+	"github.com/onflow/flow-go/utils/unittest"
+)
+
+// TestCache_Add tests the add method of the rpcSentCache.
+// It ensures that the method returns true when a new record is initialized
+// and false when the record already exists.
+func TestCache_Add(t *testing.T) {
+	cache := cacheFixture(t, 100, zerolog.Nop(), metrics.NewNoopCollector())
+	controlMsgType := p2pmsg.CtrlMsgIHave
+	topic := channels.PushBlocks.String()
+	messageID1 := unittest.IdentifierFixture().String()
+	messageID2 := unittest.IdentifierFixture().String()
+
+	// test initializing a record for an ID that doesn't exist in the cache
+	initialized := cache.add(topic, messageID1, controlMsgType)
+	require.True(t, initialized, "expected record to be initialized")
+	require.True(t, cache.has(topic, messageID1, controlMsgType), "expected record to exist")
+
+	// test initializing a record for an ID that already exists in the cache
+	initialized = cache.add(topic, messageID1, controlMsgType)
+	require.False(t, initialized, "expected record not to be initialized")
+	require.True(t, cache.has(topic, messageID1, controlMsgType), "expected record to exist")
+
+	// test initializing a record for another ID
+	initialized = cache.add(topic, messageID2, controlMsgType)
+	require.True(t, initialized, "expected record to be initialized")
+	require.True(t, cache.has(topic, messageID2, controlMsgType), "expected record to exist")
+}
+
+// TestCache_ConcurrentAdd tests the concurrent addition of records.
+// The test covers the following scenarios:
+// 1. Multiple goroutines initializing records for different ids.
+// 2. Ensuring that all records are correctly initialized.
+func TestCache_ConcurrentAdd(t *testing.T) {
+	cache := cacheFixture(t, 100, zerolog.Nop(), metrics.NewNoopCollector())
+	controlMsgType := p2pmsg.CtrlMsgIHave
+	topic := channels.PushBlocks.String()
+	messageIds := unittest.IdentifierListFixture(10)
+
+	var wg sync.WaitGroup
+	wg.Add(len(messageIds))
+
+	for _, id := range messageIds {
+		go func(id flow.Identifier) {
+			defer wg.Done()
+			cache.add(topic, id.String(), controlMsgType)
+		}(id)
+	}
+
+	unittest.RequireReturnsBefore(t, wg.Wait, 100*time.Millisecond, "timed out waiting for goroutines to finish")
+
+	// ensure that all records are correctly initialized
+	for _, id := range messageIds {
+		require.True(t, cache.has(topic, id.String(), controlMsgType))
+	}
+}
+
+// TestCache_ConcurrentSameRecordAdd tests the concurrent addition of the same record.
+// The test covers the following scenarios:
+// 1. Multiple goroutines attempting to initialize the same record concurrently.
+// 2. Only one goroutine successfully initializes the record, and others receive false on initialization.
+// 3. The record is correctly initialized in the cache and its existence can be checked with the has method.
+func TestCache_ConcurrentSameRecordAdd(t *testing.T) {
+	cache := cacheFixture(t, 100, zerolog.Nop(), metrics.NewNoopCollector())
+	controlMsgType := p2pmsg.CtrlMsgIHave
+	topic := channels.PushBlocks.String()
+	messageID := unittest.IdentifierFixture().String()
+	const concurrentAttempts = 10
+
+	var wg sync.WaitGroup
+	wg.Add(concurrentAttempts)
+
+	successGauge := atomic.Int32{}
+
+	for i := 0; i < concurrentAttempts; i++ {
+		go func() {
+			defer wg.Done()
+			initSuccess := cache.add(topic, messageID, controlMsgType)
+			if initSuccess {
+				successGauge.Inc()
+			}
+		}()
+	}
+
+	unittest.RequireReturnsBefore(t, wg.Wait, 100*time.Millisecond, "timed out waiting for goroutines to finish")
+
+	// ensure that only one goroutine successfully initialized the record
+	require.Equal(t, int32(1), successGauge.Load())
+
+	// ensure that the record is correctly initialized in the cache
+	require.True(t, cache.has(topic, messageID, controlMsgType))
+}
+
+// cacheFixture returns a new *rpcSentCache.
+func cacheFixture(t *testing.T, sizeLimit uint32, logger zerolog.Logger, collector module.HeroCacheMetrics) *rpcSentCache {
+	config := &rpcCtrlMsgSentCacheConfig{
+		sizeLimit: sizeLimit,
+		logger:    logger,
+		collector: collector,
+	}
+	r := newRPCSentCache(config)
+	// expect cache to be empty
+	require.Equalf(t, uint(0), r.size(), "cache size must be 0")
+	require.NotNil(t, r)
+	return r
+}
diff --git a/network/p2p/tracer/internal/rpc_send_entity.go b/network/p2p/tracer/internal/rpc_send_entity.go
new file mode 100644
index 00000000000..b3f56ce0b55
--- /dev/null
+++ b/network/p2p/tracer/internal/rpc_send_entity.go
@@ -0,0 +1,37 @@
+package internal
+
+import (
+	"github.com/onflow/flow-go/model/flow"
+	p2pmsg "github.com/onflow/flow-go/network/p2p/message"
+)
+
+// rpcSentEntity struct representing an RPC control message sent from the local node.
+// This struct implements the flow.Entity interface and uses the messageID field for deduplication.
+type rpcSentEntity struct {
+	// messageID the messageID of the rpc control message.
+	messageID flow.Identifier
+	// controlMsgType the control message type.
+	controlMsgType p2pmsg.ControlMessageType
+}
+
+var _ flow.Entity = (*rpcSentEntity)(nil)
+
+// ID returns the message ID of the tracked control message, which is used as the unique identifier of the entity for maintenance and
+// deduplication purposes in the cache.
+func (r rpcSentEntity) ID() flow.Identifier {
+	return r.messageID
+}
+
+// Checksum returns the message ID of the tracked control message; it does not have any purpose in the cache.
+// It is implemented to satisfy the flow.Entity interface.
+func (r rpcSentEntity) Checksum() flow.Identifier {
+	return r.messageID
+}
+
+// newRPCSentEntity returns a new rpcSentEntity.
+func newRPCSentEntity(id flow.Identifier, controlMessageType p2pmsg.ControlMessageType) rpcSentEntity {
+	return rpcSentEntity{
+		messageID:      id,
+		controlMsgType: controlMessageType,
+	}
+}
diff --git a/network/p2p/tracer/internal/rpc_sent_tracker.go b/network/p2p/tracer/internal/rpc_sent_tracker.go
new file mode 100644
index 00000000000..6d44ac984a3
--- /dev/null
+++ b/network/p2p/tracer/internal/rpc_sent_tracker.go
@@ -0,0 +1,47 @@
+package internal
+
+import (
+	pb "github.com/libp2p/go-libp2p-pubsub/pb"
+	"github.com/rs/zerolog"
+
+	"github.com/onflow/flow-go/module"
+	p2pmsg "github.com/onflow/flow-go/network/p2p/message"
+)
+
+// RPCSentTracker tracks RPC messages that are sent.
+type RPCSentTracker struct {
+	cache *rpcSentCache
+}
+
+// NewRPCSentTracker returns a new *RPCSentTracker.
+func NewRPCSentTracker(logger zerolog.Logger, sizeLimit uint32, collector module.HeroCacheMetrics) *RPCSentTracker {
+	config := &rpcCtrlMsgSentCacheConfig{
+		sizeLimit: sizeLimit,
+		logger:    logger,
+		collector: collector,
+	}
+	return &RPCSentTracker{cache: newRPCSentCache(config)}
+}
+
+// OnIHaveRPCSent caches a unique entity message ID for each message ID included in each iHave control message that was sent.
+// Args:
+// - iHaves: the iHave control messages of the RPC that was sent.
+func (t *RPCSentTracker) OnIHaveRPCSent(iHaves []*pb.ControlIHave) {
+	controlMsgType := p2pmsg.CtrlMsgIHave
+	for _, iHave := range iHaves {
+		topicID := iHave.GetTopicID()
+		for _, messageID := range iHave.GetMessageIDs() {
+			t.cache.add(topicID, messageID, controlMsgType)
+		}
+	}
+}
+
+// WasIHaveRPCSent checks if an iHave control message with the provided message ID was sent.
+// Args:
+// - topicID: the topic ID of the iHave RPC.
+// - messageID: the message ID of the iHave RPC.
+// Returns:
+// - bool: true if the iHave rpc with the provided message ID was sent.
+func (t *RPCSentTracker) WasIHaveRPCSent(topicID, messageID string) bool {
+	return t.cache.has(topicID, messageID, p2pmsg.CtrlMsgIHave)
+}
diff --git a/network/p2p/tracer/internal/rpc_sent_tracker_test.go b/network/p2p/tracer/internal/rpc_sent_tracker_test.go
new file mode 100644
index 00000000000..7b9c4ec9acb
--- /dev/null
+++ b/network/p2p/tracer/internal/rpc_sent_tracker_test.go
@@ -0,0 +1,89 @@
+package internal
+
+import (
+	"testing"
+
+	pubsub "github.com/libp2p/go-libp2p-pubsub"
+	pb "github.com/libp2p/go-libp2p-pubsub/pb"
+	"github.com/rs/zerolog"
+	"github.com/stretchr/testify/require"
+
+	"github.com/onflow/flow-go/config"
+	"github.com/onflow/flow-go/module/metrics"
+	"github.com/onflow/flow-go/network/channels"
+	"github.com/onflow/flow-go/utils/unittest"
+)
+
+// TestNewRPCSentTracker ensures *RPCSentTracker is created as expected.
+func TestNewRPCSentTracker(t *testing.T) {
+	tracker := mockTracker(t)
+	require.NotNil(t, tracker)
+}
+
+// TestRPCSentTracker_IHave ensures *RPCSentTracker tracks sent iHave control messages as expected.
+func TestRPCSentTracker_IHave(t *testing.T) { + tracker := mockTracker(t) + require.NotNil(t, tracker) + + t.Run("WasIHaveRPCSent should return false for iHave message Id that has not been tracked", func(t *testing.T) { + require.False(t, tracker.WasIHaveRPCSent("topic_id", "message_id")) + }) + + t.Run("WasIHaveRPCSent should return true for iHave message after it is tracked with OnIHaveRPCSent", func(t *testing.T) { + numOfMsgIds := 100 + testCases := []struct { + topic string + messageIDS []string + }{ + {channels.PushBlocks.String(), unittest.IdentifierListFixture(numOfMsgIds).Strings()}, + {channels.ReceiveApprovals.String(), unittest.IdentifierListFixture(numOfMsgIds).Strings()}, + {channels.SyncCommittee.String(), unittest.IdentifierListFixture(numOfMsgIds).Strings()}, + {channels.RequestChunks.String(), unittest.IdentifierListFixture(numOfMsgIds).Strings()}, + } + iHaves := make([]*pb.ControlIHave, len(testCases)) + for i, testCase := range testCases { + testCase := testCase + iHaves[i] = &pb.ControlIHave{ + TopicID: &testCase.topic, + MessageIDs: testCase.messageIDS, + } + } + rpc := rpcFixture(withIhaves(iHaves)) + tracker.OnIHaveRPCSent(rpc.GetControl().GetIhave()) + + for _, testCase := range testCases { + for _, messageID := range testCase.messageIDS { + require.True(t, tracker.WasIHaveRPCSent(testCase.topic, messageID)) + } + } + }) +} + +func mockTracker(t *testing.T) *RPCSentTracker { + logger := zerolog.Nop() + cfg, err := config.DefaultConfig() + require.NoError(t, err) + collector := metrics.NewNoopCollector() + tracker := NewRPCSentTracker(logger, cfg.NetworkConfig.GossipSubConfig.RPCSentTrackerCacheSize, collector) + return tracker +} + +type rpcFixtureOpt func(*pubsub.RPC) + +func withIhaves(iHave []*pb.ControlIHave) rpcFixtureOpt { + return func(rpc *pubsub.RPC) { + rpc.Control.Ihave = iHave + } +} + +func rpcFixture(opts ...rpcFixtureOpt) *pubsub.RPC { + rpc := &pubsub.RPC{ + RPC: pb.RPC{ + Control: &pb.ControlMessage{}, + }, + } + for _, opt := range opts { + opt(rpc) + } + return rpc +} diff --git a/network/queue/messageQueue_test.go b/network/queue/messageQueue_test.go index 159ce7506cb..5fd7cf86839 100644 --- a/network/queue/messageQueue_test.go +++ b/network/queue/messageQueue_test.go @@ -217,7 +217,7 @@ func createMessages(messageCnt int, priorityFunc queue.MessagePriorityFunc) map[ } func randomPriority(_ interface{}) (queue.Priority, error) { - rand.Seed(time.Now().UnixNano()) + p := rand.Intn(int(queue.HighPriority-queue.LowPriority+1)) + int(queue.LowPriority) return queue.Priority(p), nil } diff --git a/network/slashing/consumer.go b/network/slashing/consumer.go index aaac28fccc5..3ba8d656c21 100644 --- a/network/slashing/consumer.go +++ b/network/slashing/consumer.go @@ -7,35 +7,34 @@ import ( "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/module" + "github.com/onflow/flow-go/network" + "github.com/onflow/flow-go/network/alsp" "github.com/onflow/flow-go/utils/logging" ) const ( - unknown = "unknown" - unExpectedValidationError = "unexpected_validation_error" - unAuthorizedSenderViolation = "unauthorized_sender" - unknownMsgTypeViolation = "unknown_message_type" - invalidMsgViolation = "invalid_message" - senderEjectedViolation = "sender_ejected" - unauthorizedUnicastOnChannel = "unauthorized_unicast_on_channel" + unknown = "unknown" ) // Consumer is a struct that logs a message for any slashable offenses. // This struct will be updated in the future when slashing is implemented. 
type Consumer struct { - log zerolog.Logger - metrics module.NetworkSecurityMetrics + log zerolog.Logger + metrics module.NetworkSecurityMetrics + misbehaviorReportConsumer network.MisbehaviorReportConsumer } // NewSlashingViolationsConsumer returns a new Consumer. -func NewSlashingViolationsConsumer(log zerolog.Logger, metrics module.NetworkSecurityMetrics) *Consumer { +func NewSlashingViolationsConsumer(log zerolog.Logger, metrics module.NetworkSecurityMetrics, misbehaviorReportConsumer network.MisbehaviorReportConsumer) *Consumer { return &Consumer{ - log: log.With().Str("module", "network_slashing_consumer").Logger(), - metrics: metrics, + log: log.With().Str("module", "network_slashing_consumer").Logger(), + metrics: metrics, + misbehaviorReportConsumer: misbehaviorReportConsumer, } } -func (c *Consumer) logOffense(networkOffense string, violation *Violation) { +// logOffense logs the slashing violation with details. +func (c *Consumer) logOffense(misbehavior network.Misbehavior, violation *network.Violation) { // if violation fails before the message is decoded the violation.MsgType will be unknown if len(violation.MsgType) == 0 { violation.MsgType = unknown @@ -51,7 +50,7 @@ func (c *Consumer) logOffense(networkOffense string, violation *Violation) { e := c.log.Error(). Str("peer_id", violation.PeerID). - Str("networking_offense", networkOffense). + Str("misbehavior", misbehavior.String()). Str("message_type", violation.MsgType). Str("channel", violation.Channel.String()). Str("protocol", violation.Protocol.String()). @@ -62,37 +61,77 @@ func (c *Consumer) logOffense(networkOffense string, violation *Violation) { e.Msg(fmt.Sprintf("potential slashable offense: %s", violation.Err)) // capture unauthorized message count metric - c.metrics.OnUnauthorizedMessage(role, violation.MsgType, violation.Channel.String(), networkOffense) + c.metrics.OnUnauthorizedMessage(role, violation.MsgType, violation.Channel.String(), misbehavior.String()) } -// OnUnAuthorizedSenderError logs an error for unauthorized sender error. -func (c *Consumer) OnUnAuthorizedSenderError(violation *Violation) { - c.logOffense(unAuthorizedSenderViolation, violation) +// reportMisbehavior reports the slashing violation to the alsp misbehavior report manager. When violation identity +// is nil this indicates the misbehavior occurred either on a public network and the identity of the sender is unknown +// we can skip reporting the misbehavior. +// Args: +// - misbehavior: the network misbehavior. +// - violation: the slashing violation. +// Any error encountered while creating the misbehavior report is considered irrecoverable and will result in a fatal log. +func (c *Consumer) reportMisbehavior(misbehavior network.Misbehavior, violation *network.Violation) { + if violation.Identity == nil { + c.log.Debug(). + Bool(logging.KeySuspicious, true). + Str("peerID", violation.PeerID). + Msg("violation identity unknown (or public) skipping misbehavior reporting") + c.metrics.OnViolationReportSkipped() + return + } + report, err := alsp.NewMisbehaviorReport(violation.Identity.NodeID, misbehavior) + if err != nil { + // failing to create the misbehavior report is unlikely. If an error is encountered while + // creating the misbehavior report it indicates a bug and processing can not proceed. + c.log.Fatal(). + Err(err). + Str("peerID", violation.PeerID). 
+ Msg("failed to create misbehavior report") + } + c.misbehaviorReportConsumer.ReportMisbehaviorOnChannel(violation.Channel, report) +} + +// OnUnAuthorizedSenderError logs an error for unauthorized sender error and reports a misbehavior to alsp misbehavior report manager. +func (c *Consumer) OnUnAuthorizedSenderError(violation *network.Violation) { + c.logOffense(alsp.UnAuthorizedSender, violation) + c.reportMisbehavior(alsp.UnAuthorizedSender, violation) } -// OnUnknownMsgTypeError logs an error for unknown message type error. -func (c *Consumer) OnUnknownMsgTypeError(violation *Violation) { - c.logOffense(unknownMsgTypeViolation, violation) +// OnUnknownMsgTypeError logs an error for unknown message type error and reports a misbehavior to alsp misbehavior report manager. +func (c *Consumer) OnUnknownMsgTypeError(violation *network.Violation) { + c.logOffense(alsp.UnknownMsgType, violation) + c.reportMisbehavior(alsp.UnknownMsgType, violation) } // OnInvalidMsgError logs an error for messages that contained payloads that could not -// be unmarshalled into the message type denoted by message code byte. -func (c *Consumer) OnInvalidMsgError(violation *Violation) { - c.logOffense(invalidMsgViolation, violation) +// be unmarshalled into the message type denoted by message code byte and reports a misbehavior to alsp misbehavior report manager. +func (c *Consumer) OnInvalidMsgError(violation *network.Violation) { + c.logOffense(alsp.InvalidMessage, violation) + c.reportMisbehavior(alsp.InvalidMessage, violation) +} + +// OnSenderEjectedError logs an error for sender ejected error and reports a misbehavior to alsp misbehavior report manager. +func (c *Consumer) OnSenderEjectedError(violation *network.Violation) { + c.logOffense(alsp.SenderEjected, violation) + c.reportMisbehavior(alsp.SenderEjected, violation) } -// OnSenderEjectedError logs an error for sender ejected error. -func (c *Consumer) OnSenderEjectedError(violation *Violation) { - c.logOffense(senderEjectedViolation, violation) +// OnUnauthorizedUnicastOnChannel logs an error for messages unauthorized to be sent via unicast and reports a misbehavior to alsp misbehavior report manager. +func (c *Consumer) OnUnauthorizedUnicastOnChannel(violation *network.Violation) { + c.logOffense(alsp.UnauthorizedUnicastOnChannel, violation) + c.reportMisbehavior(alsp.UnauthorizedUnicastOnChannel, violation) } -// OnUnauthorizedUnicastOnChannel logs an error for messages unauthorized to be sent via unicast. -func (c *Consumer) OnUnauthorizedUnicastOnChannel(violation *Violation) { - c.logOffense(unauthorizedUnicastOnChannel, violation) +// OnUnauthorizedPublishOnChannel logs an error for messages unauthorized to be sent via pubsub. +func (c *Consumer) OnUnauthorizedPublishOnChannel(violation *network.Violation) { + c.logOffense(alsp.UnauthorizedPublishOnChannel, violation) + c.reportMisbehavior(alsp.UnauthorizedPublishOnChannel, violation) } // OnUnexpectedError logs an error for unexpected errors. This indicates message validation -// has failed for an unknown reason and could potentially be n slashable offense. -func (c *Consumer) OnUnexpectedError(violation *Violation) { - c.logOffense(unExpectedValidationError, violation) +// has failed for an unknown reason and could potentially be n slashable offense and reports a misbehavior to alsp misbehavior report manager. 
+func (c *Consumer) OnUnexpectedError(violation *network.Violation) { + c.logOffense(alsp.UnExpectedValidationError, violation) + c.reportMisbehavior(alsp.UnExpectedValidationError, violation) } diff --git a/network/stub/network.go b/network/stub/network.go index fc93cf9b588..5990e245944 100644 --- a/network/stub/network.go +++ b/network/stub/network.go @@ -160,7 +160,11 @@ func (n *Network) PublishOnChannel(channel channels.Channel, event interface{}, // Engines attached to the same channel on other nodes. The targeted nodes are selected based on the selector. // In this test helper implementation, multicast uses submit method under the hood. func (n *Network) MulticastOnChannel(channel channels.Channel, event interface{}, num uint, targetIDs ...flow.Identifier) error { - targetIDs = flow.Sample(num, targetIDs...) + var err error + targetIDs, err = flow.Sample(num, targetIDs...) + if err != nil { + return fmt.Errorf("sampling failed: %w", err) + } return n.submit(channel, event, targetIDs...) } @@ -306,6 +310,6 @@ func (n *Network) StopConDev() { close(n.qCD) } -func (n *Network) ReportMisbehaviorOnChannel(channel channels.Channel, report network.MisbehaviorReport) { +func (n *Network) ReportMisbehaviorOnChannel(_ channels.Channel, _ network.MisbehaviorReport) { // no-op for stub network. } diff --git a/network/test/blob_service_test.go b/network/test/blob_service_test.go index 40c052111d7..c0979244ad8 100644 --- a/network/test/blob_service_test.go +++ b/network/test/blob_service_test.go @@ -13,6 +13,7 @@ import ( "github.com/stretchr/testify/suite" "go.uber.org/atomic" + "github.com/onflow/flow-go/network/mocknetwork" "github.com/onflow/flow-go/network/p2p/connection" "github.com/onflow/flow-go/network/p2p/dht" p2pconfig "github.com/onflow/flow-go/network/p2p/p2pbuilder/config" @@ -89,7 +90,7 @@ func (suite *BlobServiceTestSuite) SetupTest() { ConnectionPruning: true, ConnectorFactory: connection.DefaultLibp2pBackoffConnectorFactory(), }, nil)) - mws, _ := testutils.MiddlewareFixtures(suite.T(), ids, nodes, testutils.MiddlewareConfigFixture(suite.T())) + mws, _ := testutils.MiddlewareFixtures(suite.T(), ids, nodes, testutils.MiddlewareConfigFixture(suite.T()), mocknetwork.NewViolationsConsumer(suite.T())) suite.networks = testutils.NetworksFixture(suite.T(), ids, mws) testutils.StartNodesAndNetworks(signalerCtx, suite.T(), nodes, suite.networks, 100*time.Millisecond) diff --git a/network/test/echoengine_test.go b/network/test/echoengine_test.go index eb170cbf266..55732b64d17 100644 --- a/network/test/echoengine_test.go +++ b/network/test/echoengine_test.go @@ -19,6 +19,7 @@ import ( "github.com/onflow/flow-go/network" "github.com/onflow/flow-go/network/channels" "github.com/onflow/flow-go/network/internal/testutils" + "github.com/onflow/flow-go/network/mocknetwork" "github.com/onflow/flow-go/network/p2p" "github.com/onflow/flow-go/utils/unittest" ) @@ -54,7 +55,7 @@ func (suite *EchoEngineTestSuite) SetupTest() { // both nodes should be of the same role to get connected on epidemic dissemination var nodes []p2p.LibP2PNode suite.ids, nodes, _ = testutils.LibP2PNodeForMiddlewareFixture(suite.T(), count) - suite.mws, _ = testutils.MiddlewareFixtures(suite.T(), suite.ids, nodes, testutils.MiddlewareConfigFixture(suite.T())) + suite.mws, _ = testutils.MiddlewareFixtures(suite.T(), suite.ids, nodes, testutils.MiddlewareConfigFixture(suite.T()), mocknetwork.NewViolationsConsumer(suite.T())) suite.nets = testutils.NetworksFixture(suite.T(), suite.ids, suite.mws) 
testutils.StartNodesAndNetworks(signalerCtx, suite.T(), nodes, suite.nets, 100*time.Millisecond) } diff --git a/network/test/epochtransition_test.go b/network/test/epochtransition_test.go index e471b1d8f48..34a037a90e7 100644 --- a/network/test/epochtransition_test.go +++ b/network/test/epochtransition_test.go @@ -24,6 +24,7 @@ import ( "github.com/onflow/flow-go/module/irrecoverable" "github.com/onflow/flow-go/network" "github.com/onflow/flow-go/network/internal/testutils" + "github.com/onflow/flow-go/network/mocknetwork" mockprotocol "github.com/onflow/flow-go/state/protocol/mock" "github.com/onflow/flow-go/utils/unittest" ) @@ -134,7 +135,7 @@ func (suite *MutableIdentityTableSuite) signalIdentityChanged() { func (suite *MutableIdentityTableSuite) SetupTest() { suite.testNodes = newTestNodeList() suite.removedTestNodes = newTestNodeList() - rand.Seed(time.Now().UnixNano()) + nodeCount := 10 suite.logger = zerolog.New(os.Stderr).Level(zerolog.ErrorLevel) log.SetAllLoggers(log.LevelError) @@ -181,7 +182,7 @@ func (suite *MutableIdentityTableSuite) addNodes(count int) { // create the ids, middlewares and networks ids, nodes, _ := testutils.LibP2PNodeForMiddlewareFixture(suite.T(), count) - mws, _ := testutils.MiddlewareFixtures(suite.T(), ids, nodes, testutils.MiddlewareConfigFixture(suite.T())) + mws, _ := testutils.MiddlewareFixtures(suite.T(), ids, nodes, testutils.MiddlewareConfigFixture(suite.T()), mocknetwork.NewViolationsConsumer(suite.T())) nets := testutils.NetworksFixture(suite.T(), ids, mws) suite.cancels = append(suite.cancels, cancel) diff --git a/network/test/meshengine_test.go b/network/test/meshengine_test.go index 612d7679796..55a95994d45 100644 --- a/network/test/meshengine_test.go +++ b/network/test/meshengine_test.go @@ -11,8 +11,6 @@ import ( "testing" "time" - "github.com/onflow/flow-go/network/p2p" - "github.com/ipfs/go-log" pubsub "github.com/libp2p/go-libp2p-pubsub" "github.com/rs/zerolog" @@ -20,9 +18,6 @@ import ( "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" - "github.com/onflow/flow-go/network/p2p/middleware" - "github.com/onflow/flow-go/network/p2p/p2pnode" - "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/model/flow/filter" "github.com/onflow/flow-go/model/libp2p/message" @@ -31,6 +26,10 @@ import ( "github.com/onflow/flow-go/network" "github.com/onflow/flow-go/network/channels" "github.com/onflow/flow-go/network/internal/testutils" + "github.com/onflow/flow-go/network/mocknetwork" + "github.com/onflow/flow-go/network/p2p" + "github.com/onflow/flow-go/network/p2p/middleware" + "github.com/onflow/flow-go/network/p2p/p2pnode" "github.com/onflow/flow-go/utils/unittest" ) @@ -74,7 +73,7 @@ func (suite *MeshEngineTestSuite) SetupTest() { var nodes []p2p.LibP2PNode suite.ids, nodes, obs = testutils.LibP2PNodeForMiddlewareFixture(suite.T(), count) - suite.mws, _ = testutils.MiddlewareFixtures(suite.T(), suite.ids, nodes, testutils.MiddlewareConfigFixture(suite.T())) + suite.mws, _ = testutils.MiddlewareFixtures(suite.T(), suite.ids, nodes, testutils.MiddlewareConfigFixture(suite.T()), mocknetwork.NewViolationsConsumer(suite.T())) suite.nets = testutils.NetworksFixture(suite.T(), suite.ids, suite.mws) testutils.StartNodesAndNetworks(signalerCtx, suite.T(), nodes, suite.nets, 100*time.Millisecond) diff --git a/network/test/middleware_test.go b/network/test/middleware_test.go index 8c0c1adb4f0..1b42df088aa 100644 --- a/network/test/middleware_test.go +++ b/network/test/middleware_test.go @@ -81,8 +81,9 @@ type 
MiddlewareTestSuite struct { logger zerolog.Logger providers []*unittest.UpdatableIDProvider - mwCancel context.CancelFunc - mwCtx irrecoverable.SignalerContext + mwCancel context.CancelFunc + mwCtx irrecoverable.SignalerContext + slashingViolationsConsumer network.ViolationsConsumer } // TestMiddlewareTestSuit runs all the test methods in this test suit @@ -106,8 +107,9 @@ func (m *MiddlewareTestSuite) SetupTest() { log: m.logger, } + m.slashingViolationsConsumer = mocknetwork.NewViolationsConsumer(m.T()) m.ids, m.nodes, obs = testutils.LibP2PNodeForMiddlewareFixture(m.T(), m.size) - m.mws, m.providers = testutils.MiddlewareFixtures(m.T(), m.ids, m.nodes, testutils.MiddlewareConfigFixture(m.T())) + m.mws, m.providers = testutils.MiddlewareFixtures(m.T(), m.ids, m.nodes, testutils.MiddlewareConfigFixture(m.T()), m.slashingViolationsConsumer) for _, observableConnMgr := range obs { observableConnMgr.Subscribe(&ob) } @@ -158,7 +160,7 @@ func (m *MiddlewareTestSuite) TestUpdateNodeAddresses() { // create a new staked identity ids, libP2PNodes, _ := testutils.LibP2PNodeForMiddlewareFixture(m.T(), 1) - mws, providers := testutils.MiddlewareFixtures(m.T(), ids, libP2PNodes, testutils.MiddlewareConfigFixture(m.T())) + mws, providers := testutils.MiddlewareFixtures(m.T(), ids, libP2PNodes, testutils.MiddlewareConfigFixture(m.T()), m.slashingViolationsConsumer) require.Len(m.T(), ids, 1) require.Len(m.T(), providers, 1) require.Len(m.T(), mws, 1) @@ -256,6 +258,7 @@ func (m *MiddlewareTestSuite) TestUnicastRateLimit_Messages() { ids, libP2PNodes, testutils.MiddlewareConfigFixture(m.T()), + m.slashingViolationsConsumer, middleware.WithUnicastRateLimiters(rateLimiters), middleware.WithPeerManagerFilters([]p2p.PeerFilter{testutils.IsRateLimitedPeerFilter(messageRateLimiter)})) @@ -409,6 +412,7 @@ func (m *MiddlewareTestSuite) TestUnicastRateLimit_Bandwidth() { ids, libP2PNodes, testutils.MiddlewareConfigFixture(m.T()), + m.slashingViolationsConsumer, middleware.WithUnicastRateLimiters(rateLimiters), middleware.WithPeerManagerFilters([]p2p.PeerFilter{testutils.IsRateLimitedPeerFilter(bandwidthRateLimiter)})) require.Len(m.T(), ids, 1) diff --git a/network/test/unicast_authorization_test.go b/network/test/unicast_authorization_test.go index f4a4171944d..197c5f4a5a2 100644 --- a/network/test/unicast_authorization_test.go +++ b/network/test/unicast_authorization_test.go @@ -24,7 +24,6 @@ import ( "github.com/onflow/flow-go/network/mocknetwork" "github.com/onflow/flow-go/network/p2p" "github.com/onflow/flow-go/network/p2p/middleware" - "github.com/onflow/flow-go/network/slashing" "github.com/onflow/flow-go/network/validator" "github.com/onflow/flow-go/utils/unittest" ) @@ -73,11 +72,10 @@ func (u *UnicastAuthorizationTestSuite) TearDownTest() { } // setupMiddlewaresAndProviders will setup 2 middlewares that will be used as a sender and receiver in each suite test. 
-func (u *UnicastAuthorizationTestSuite) setupMiddlewaresAndProviders(slashingViolationsConsumer slashing.ViolationsConsumer) { +func (u *UnicastAuthorizationTestSuite) setupMiddlewaresAndProviders(slashingViolationsConsumer network.ViolationsConsumer) { ids, libP2PNodes, _ := testutils.LibP2PNodeForMiddlewareFixture(u.T(), 2) cfg := testutils.MiddlewareConfigFixture(u.T()) - cfg.SlashingViolationsConsumer = slashingViolationsConsumer - mws, providers := testutils.MiddlewareFixtures(u.T(), ids, libP2PNodes, cfg) + mws, providers := testutils.MiddlewareFixtures(u.T(), ids, libP2PNodes, cfg, slashingViolationsConsumer) require.Len(u.T(), ids, 2) require.Len(u.T(), providers, 2) require.Len(u.T(), mws, 2) @@ -125,7 +123,7 @@ func (u *UnicastAuthorizationTestSuite) TestUnicastAuthorization_UnstakedPeer() require.NoError(u.T(), err) var nilID *flow.Identity - expectedViolation := &slashing.Violation{ + expectedViolation := &network.Violation{ Identity: nilID, // because the peer will be unverified this identity will be nil PeerID: expectedSenderPeerID.String(), MsgType: "", // message will not be decoded before OnSenderEjectedError is logged, we won't log message type @@ -136,7 +134,7 @@ func (u *UnicastAuthorizationTestSuite) TestUnicastAuthorization_UnstakedPeer() slashingViolationsConsumer.On( "OnUnAuthorizedSenderError", expectedViolation, - ).Once().Run(func(args mockery.Arguments) { + ).Return(nil).Once().Run(func(args mockery.Arguments) { close(u.waitCh) }) @@ -187,8 +185,9 @@ func (u *UnicastAuthorizationTestSuite) TestUnicastAuthorization_EjectedPeer() { expectedSenderPeerID, err := unittest.PeerIDFromFlowID(u.senderID) require.NoError(u.T(), err) - expectedViolation := &slashing.Violation{ + expectedViolation := &network.Violation{ Identity: u.senderID, // we expect this method to be called with the ejected identity + OriginID: u.senderID.NodeID, PeerID: expectedSenderPeerID.String(), MsgType: "", // message will not be decoded before OnSenderEjectedError is logged, we won't log message type Channel: channels.TestNetworkChannel, // message will not be decoded before OnSenderEjectedError is logged, we won't log peer ID @@ -198,7 +197,7 @@ func (u *UnicastAuthorizationTestSuite) TestUnicastAuthorization_EjectedPeer() { slashingViolationsConsumer.On( "OnSenderEjectedError", expectedViolation, - ).Once().Run(func(args mockery.Arguments) { + ).Return(nil).Once().Run(func(args mockery.Arguments) { close(u.waitCh) }) @@ -246,8 +245,9 @@ func (u *UnicastAuthorizationTestSuite) TestUnicastAuthorization_UnauthorizedPee expectedSenderPeerID, err := unittest.PeerIDFromFlowID(u.senderID) require.NoError(u.T(), err) - expectedViolation := &slashing.Violation{ + expectedViolation := &network.Violation{ Identity: u.senderID, + OriginID: u.senderID.NodeID, PeerID: expectedSenderPeerID.String(), MsgType: "*message.TestMessage", Channel: channels.ConsensusCommittee, @@ -258,7 +258,7 @@ func (u *UnicastAuthorizationTestSuite) TestUnicastAuthorization_UnauthorizedPee slashingViolationsConsumer.On( "OnUnAuthorizedSenderError", expectedViolation, - ).Once().Run(func(args mockery.Arguments) { + ).Return(nil).Once().Run(func(args mockery.Arguments) { close(u.waitCh) }) @@ -309,7 +309,7 @@ func (u *UnicastAuthorizationTestSuite) TestUnicastAuthorization_UnknownMsgCode( invalidMessageCode := codec.MessageCode(byte('X')) var nilID *flow.Identity - expectedViolation := &slashing.Violation{ + expectedViolation := &network.Violation{ Identity: nilID, PeerID: expectedSenderPeerID.String(), MsgType: "", @@ -321,7 
+321,7 @@ func (u *UnicastAuthorizationTestSuite) TestUnicastAuthorization_UnknownMsgCode( slashingViolationsConsumer.On( "OnUnknownMsgTypeError", expectedViolation, - ).Once().Run(func(args mockery.Arguments) { + ).Return(nil).Once().Run(func(args mockery.Arguments) { close(u.waitCh) }) @@ -378,8 +378,9 @@ func (u *UnicastAuthorizationTestSuite) TestUnicastAuthorization_WrongMsgCode() modifiedMessageCode := codec.CodeDKGMessage - expectedViolation := &slashing.Violation{ + expectedViolation := &network.Violation{ Identity: u.senderID, + OriginID: u.senderID.NodeID, PeerID: expectedSenderPeerID.String(), MsgType: "*messages.DKGMessage", Channel: channels.TestNetworkChannel, @@ -390,7 +391,7 @@ func (u *UnicastAuthorizationTestSuite) TestUnicastAuthorization_WrongMsgCode() slashingViolationsConsumer.On( "OnUnAuthorizedSenderError", expectedViolation, - ).Once().Run(func(args mockery.Arguments) { + ).Return(nil).Once().Run(func(args mockery.Arguments) { close(u.waitCh) }) @@ -503,8 +504,9 @@ func (u *UnicastAuthorizationTestSuite) TestUnicastAuthorization_UnauthorizedUni expectedSenderPeerID, err := unittest.PeerIDFromFlowID(u.senderID) require.NoError(u.T(), err) - expectedViolation := &slashing.Violation{ + expectedViolation := &network.Violation{ Identity: u.senderID, + OriginID: u.senderID.NodeID, PeerID: expectedSenderPeerID.String(), MsgType: "*messages.BlockProposal", Channel: channels.ConsensusCommittee, @@ -515,7 +517,7 @@ func (u *UnicastAuthorizationTestSuite) TestUnicastAuthorization_UnauthorizedUni slashingViolationsConsumer.On( "OnUnauthorizedUnicastOnChannel", expectedViolation, - ).Once().Run(func(args mockery.Arguments) { + ).Return(nil).Return(nil).Once().Run(func(args mockery.Arguments) { close(u.waitCh) }) @@ -566,7 +568,7 @@ func (u *UnicastAuthorizationTestSuite) TestUnicastAuthorization_ReceiverHasNoSu expectedSenderPeerID, err := unittest.PeerIDFromFlowID(u.senderID) require.NoError(u.T(), err) - expectedViolation := &slashing.Violation{ + expectedViolation := &network.Violation{ Identity: nil, PeerID: expectedSenderPeerID.String(), MsgType: "*message.TestMessage", @@ -578,7 +580,7 @@ func (u *UnicastAuthorizationTestSuite) TestUnicastAuthorization_ReceiverHasNoSu slashingViolationsConsumer.On( "OnUnauthorizedUnicastOnChannel", expectedViolation, - ).Once().Run(func(args mockery.Arguments) { + ).Return(nil).Return(nil).Once().Run(func(args mockery.Arguments) { close(u.waitCh) }) diff --git a/network/validator/authorized_sender_validator.go b/network/validator/authorized_sender_validator.go index 0af21b45e39..6841d69a9e6 100644 --- a/network/validator/authorized_sender_validator.go +++ b/network/validator/authorized_sender_validator.go @@ -8,11 +8,11 @@ import ( "github.com/rs/zerolog" "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/network" "github.com/onflow/flow-go/network/channels" "github.com/onflow/flow-go/network/codec" "github.com/onflow/flow-go/network/message" "github.com/onflow/flow-go/network/p2p" - "github.com/onflow/flow-go/network/slashing" ) var ( @@ -25,12 +25,12 @@ type GetIdentityFunc func(peer.ID) (*flow.Identity, bool) // AuthorizedSenderValidator performs message authorization validation. 
type AuthorizedSenderValidator struct { log zerolog.Logger - slashingViolationsConsumer slashing.ViolationsConsumer + slashingViolationsConsumer network.ViolationsConsumer getIdentity GetIdentityFunc } // NewAuthorizedSenderValidator returns a new AuthorizedSenderValidator -func NewAuthorizedSenderValidator(log zerolog.Logger, slashingViolationsConsumer slashing.ViolationsConsumer, getIdentity GetIdentityFunc) *AuthorizedSenderValidator { +func NewAuthorizedSenderValidator(log zerolog.Logger, slashingViolationsConsumer network.ViolationsConsumer, getIdentity GetIdentityFunc) *AuthorizedSenderValidator { return &AuthorizedSenderValidator{ log: log.With().Str("component", "authorized_sender_validator").Logger(), slashingViolationsConsumer: slashingViolationsConsumer, @@ -61,14 +61,14 @@ func (av *AuthorizedSenderValidator) Validate(from peer.ID, payload []byte, chan // something terrible went wrong. identity, ok := av.getIdentity(from) if !ok { - violation := &slashing.Violation{Identity: identity, PeerID: from.String(), Channel: channel, Protocol: protocol, Err: ErrIdentityUnverified} + violation := &network.Violation{PeerID: from.String(), Channel: channel, Protocol: protocol, Err: ErrIdentityUnverified} av.slashingViolationsConsumer.OnUnAuthorizedSenderError(violation) return "", ErrIdentityUnverified } msgCode, err := codec.MessageCodeFromPayload(payload) if err != nil { - violation := &slashing.Violation{Identity: identity, PeerID: from.String(), Channel: channel, Protocol: protocol, Err: err} + violation := &network.Violation{OriginID: identity.NodeID, Identity: identity, PeerID: from.String(), Channel: channel, Protocol: protocol, Err: err} av.slashingViolationsConsumer.OnUnknownMsgTypeError(violation) return "", err } @@ -77,28 +77,32 @@ func (av *AuthorizedSenderValidator) Validate(from peer.ID, payload []byte, chan switch { case err == nil: return msgType, nil - case message.IsUnknownMsgTypeErr(err): - violation := &slashing.Violation{Identity: identity, PeerID: from.String(), MsgType: msgType, Channel: channel, Protocol: protocol, Err: err} + case message.IsUnknownMsgTypeErr(err) || codec.IsErrUnknownMsgCode(err): + violation := &network.Violation{OriginID: identity.NodeID, Identity: identity, PeerID: from.String(), MsgType: msgType, Channel: channel, Protocol: protocol, Err: err} av.slashingViolationsConsumer.OnUnknownMsgTypeError(violation) return msgType, err case errors.Is(err, message.ErrUnauthorizedMessageOnChannel) || errors.Is(err, message.ErrUnauthorizedRole): - violation := &slashing.Violation{Identity: identity, PeerID: from.String(), MsgType: msgType, Channel: channel, Protocol: protocol, Err: err} + violation := &network.Violation{OriginID: identity.NodeID, Identity: identity, PeerID: from.String(), MsgType: msgType, Channel: channel, Protocol: protocol, Err: err} av.slashingViolationsConsumer.OnUnAuthorizedSenderError(violation) return msgType, err case errors.Is(err, ErrSenderEjected): - violation := &slashing.Violation{Identity: identity, PeerID: from.String(), MsgType: msgType, Channel: channel, Protocol: protocol, Err: err} + violation := &network.Violation{OriginID: identity.NodeID, Identity: identity, PeerID: from.String(), MsgType: msgType, Channel: channel, Protocol: protocol, Err: err} av.slashingViolationsConsumer.OnSenderEjectedError(violation) return msgType, err case errors.Is(err, message.ErrUnauthorizedUnicastOnChannel): - violation := &slashing.Violation{Identity: identity, PeerID: from.String(), MsgType: msgType, Channel: channel, Protocol: protocol, 
Err: err} + violation := &network.Violation{OriginID: identity.NodeID, Identity: identity, PeerID: from.String(), MsgType: msgType, Channel: channel, Protocol: protocol, Err: err} av.slashingViolationsConsumer.OnUnauthorizedUnicastOnChannel(violation) return msgType, err + case errors.Is(err, message.ErrUnauthorizedPublishOnChannel): + violation := &network.Violation{OriginID: identity.NodeID, Identity: identity, PeerID: from.String(), MsgType: msgType, Channel: channel, Protocol: protocol, Err: err} + av.slashingViolationsConsumer.OnUnauthorizedPublishOnChannel(violation) + return msgType, err default: // this condition should never happen and indicates there's a bug // don't crash as a result of external inputs since that creates a DoS vector // collect slashing data because this could potentially lead to slashing err = fmt.Errorf("unexpected error during message validation: %w", err) - violation := &slashing.Violation{Identity: identity, PeerID: from.String(), MsgType: msgType, Channel: channel, Protocol: protocol, Err: err} + violation := &network.Violation{OriginID: identity.NodeID, Identity: identity, PeerID: from.String(), MsgType: msgType, Channel: channel, Protocol: protocol, Err: err} av.slashingViolationsConsumer.OnUnexpectedError(violation) return msgType, err } diff --git a/network/validator/authorized_sender_validator_test.go b/network/validator/authorized_sender_validator_test.go index 966ae5ba127..8a9cd138cbb 100644 --- a/network/validator/authorized_sender_validator_test.go +++ b/network/validator/authorized_sender_validator_test.go @@ -6,16 +6,20 @@ import ( "github.com/libp2p/go-libp2p/core/peer" "github.com/rs/zerolog" + "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" "github.com/onflow/flow-go/model/flow" + libp2pmessage "github.com/onflow/flow-go/model/libp2p/message" "github.com/onflow/flow-go/model/messages" "github.com/onflow/flow-go/module/metrics" "github.com/onflow/flow-go/network" + "github.com/onflow/flow-go/network/alsp" "github.com/onflow/flow-go/network/channels" "github.com/onflow/flow-go/network/codec" "github.com/onflow/flow-go/network/message" + "github.com/onflow/flow-go/network/mocknetwork" "github.com/onflow/flow-go/network/p2p" "github.com/onflow/flow-go/network/slashing" "github.com/onflow/flow-go/utils/unittest" @@ -43,7 +47,7 @@ type TestAuthorizedSenderValidatorSuite struct { unauthorizedUnicastOnChannel []TestCase authorizedUnicastOnChannel []TestCase log zerolog.Logger - slashingViolationsConsumer slashing.ViolationsConsumer + slashingViolationsConsumer network.ViolationsConsumer allMsgConfigs []message.MsgAuthConfig codec network.Codec } @@ -54,7 +58,6 @@ func (s *TestAuthorizedSenderValidatorSuite) SetupTest() { s.initializeInvalidMessageOnChannelTestCases() s.initializeUnicastOnChannelTestCases() s.log = unittest.Logger() - s.slashingViolationsConsumer = slashing.NewSlashingViolationsConsumer(s.log, metrics.NewNoopCollector()) s.codec = unittest.NetworkCodec() } @@ -64,37 +67,64 @@ func (s *TestAuthorizedSenderValidatorSuite) TestValidatorCallback_AuthorizedSen for _, c := range s.authorizedSenderTestCases { str := fmt.Sprintf("role (%s) should be authorized to send message type (%s) on channel (%s)", c.Identity.Role, c.MessageStr, c.Channel) s.Run(str, func() { - authorizedSenderValidator := NewAuthorizedSenderValidator(s.log, s.slashingViolationsConsumer, c.GetIdentity) - + misbehaviorReportConsumer := mocknetwork.NewMisbehaviorReportConsumer(s.T()) + defer 
misbehaviorReportConsumer.AssertNotCalled(s.T(), "ReportMisbehaviorOnChannel", mock.AnythingOfType("channels.Channel"), mock.AnythingOfType("*alsp.MisbehaviorReport")) + violationsConsumer := slashing.NewSlashingViolationsConsumer(s.log, metrics.NewNoopCollector(), misbehaviorReportConsumer) + authorizedSenderValidator := NewAuthorizedSenderValidator(s.log, violationsConsumer, c.GetIdentity) + validateUnicast := authorizedSenderValidator.Validate + validatePubsub := authorizedSenderValidator.PubSubMessageValidator(c.Channel) pid, err := unittest.PeerIDFromFlowID(c.Identity) require.NoError(s.T(), err) - + switch { // ensure according to the message auth config, if a message is authorized to be sent via unicast it - // is accepted or rejected. - msgType, err := authorizedSenderValidator.Validate(pid, []byte{c.MessageCode.Uint8()}, c.Channel, message.ProtocolTypeUnicast) - if c.Protocols.Contains(message.ProtocolTypeUnicast) { + // is accepted. + case c.Protocols.Contains(message.ProtocolTypeUnicast): + msgType, err := validateUnicast(pid, []byte{c.MessageCode.Uint8()}, c.Channel, message.ProtocolTypeUnicast) + if c.Protocols.Contains(message.ProtocolTypeUnicast) { + require.NoError(s.T(), err) + require.Equal(s.T(), c.MessageStr, msgType) + } + // ensure according to the message auth config, if a message is authorized to be sent via pubsub it + // is accepted. + case c.Protocols.Contains(message.ProtocolTypePubSub): + payload, err := s.codec.Encode(c.Message) require.NoError(s.T(), err) - require.Equal(s.T(), c.MessageStr, msgType) - } else { - require.ErrorIs(s.T(), err, message.ErrUnauthorizedUnicastOnChannel) - require.Equal(s.T(), c.MessageStr, msgType) - } - - payload, err := s.codec.Encode(c.Message) - require.NoError(s.T(), err) - m := &message.Message{ - ChannelID: c.Channel.String(), - Payload: payload, - } - validatePubsub := authorizedSenderValidator.PubSubMessageValidator(c.Channel) - pubsubResult := validatePubsub(pid, m) - if !c.Protocols.Contains(message.ProtocolTypePubSub) { - require.Equal(s.T(), p2p.ValidationReject, pubsubResult) - } else { + m := &message.Message{ + ChannelID: c.Channel.String(), + Payload: payload, + } + pubsubResult := validatePubsub(pid, m) require.Equal(s.T(), p2p.ValidationAccept, pubsubResult) + default: + s.T().Fatal("authconfig does not contain any protocols") } }) } + + s.Run("test messages should be allowed to be sent via both protocols unicast/pubsub on test channel", func() { + identity, _ := unittest.IdentityWithNetworkingKeyFixture(unittest.WithRole(flow.RoleCollection)) + misbehaviorReportConsumer := mocknetwork.NewMisbehaviorReportConsumer(s.T()) + defer misbehaviorReportConsumer.AssertNotCalled(s.T(), "ReportMisbehaviorOnChannel", mock.AnythingOfType("channels.Channel"), mock.AnythingOfType("*alsp.MisbehaviorReport")) + violationsConsumer := slashing.NewSlashingViolationsConsumer(s.log, metrics.NewNoopCollector(), misbehaviorReportConsumer) + getIdentityFunc := s.getIdentity(identity) + pid, err := unittest.PeerIDFromFlowID(identity) + require.NoError(s.T(), err) + authorizedSenderValidator := NewAuthorizedSenderValidator(s.log, violationsConsumer, getIdentityFunc) + + msgType, err := authorizedSenderValidator.Validate(pid, []byte{codec.CodeEcho.Uint8()}, channels.TestNetworkChannel, message.ProtocolTypeUnicast) + require.NoError(s.T(), err) + require.Equal(s.T(), "*message.TestMessage", msgType) + + payload, err := s.codec.Encode(&libp2pmessage.TestMessage{}) + require.NoError(s.T(), err) + m := &message.Message{ + ChannelID: 
channels.TestNetworkChannel.String(), + Payload: payload, + } + validatePubsub := authorizedSenderValidator.PubSubMessageValidator(channels.TestNetworkChannel) + pubsubResult := validatePubsub(pid, m) + require.Equal(s.T(), p2p.ValidationAccept, pubsubResult) + }) } // TestValidatorCallback_UnAuthorizedSender checks that AuthorizedSenderValidator.Validate return's p2p.ValidationReject @@ -105,8 +135,12 @@ func (s *TestAuthorizedSenderValidatorSuite) TestValidatorCallback_UnAuthorizedS s.Run(str, func() { pid, err := unittest.PeerIDFromFlowID(c.Identity) require.NoError(s.T(), err) - - authorizedSenderValidator := NewAuthorizedSenderValidator(s.log, s.slashingViolationsConsumer, c.GetIdentity) + expectedMisbehaviorReport, err := alsp.NewMisbehaviorReport(c.Identity.NodeID, alsp.UnAuthorizedSender) + require.NoError(s.T(), err) + misbehaviorReportConsumer := mocknetwork.NewMisbehaviorReportConsumer(s.T()) + misbehaviorReportConsumer.On("ReportMisbehaviorOnChannel", c.Channel, expectedMisbehaviorReport).Once() + violationsConsumer := slashing.NewSlashingViolationsConsumer(s.log, metrics.NewNoopCollector(), misbehaviorReportConsumer) + authorizedSenderValidator := NewAuthorizedSenderValidator(s.log, violationsConsumer, c.GetIdentity) payload, err := s.codec.Encode(c.Message) require.NoError(s.T(), err) @@ -129,8 +163,10 @@ func (s *TestAuthorizedSenderValidatorSuite) TestValidatorCallback_AuthorizedUni s.Run(str, func() { pid, err := unittest.PeerIDFromFlowID(c.Identity) require.NoError(s.T(), err) - - authorizedSenderValidator := NewAuthorizedSenderValidator(s.log, s.slashingViolationsConsumer, c.GetIdentity) + misbehaviorReportConsumer := mocknetwork.NewMisbehaviorReportConsumer(s.T()) + defer misbehaviorReportConsumer.AssertNotCalled(s.T(), "ReportMisbehaviorOnChannel", mock.AnythingOfType("channels.Channel"), mock.AnythingOfType("*alsp.MisbehaviorReport")) + violationsConsumer := slashing.NewSlashingViolationsConsumer(s.log, metrics.NewNoopCollector(), misbehaviorReportConsumer) + authorizedSenderValidator := NewAuthorizedSenderValidator(s.log, violationsConsumer, c.GetIdentity) msgType, err := authorizedSenderValidator.Validate(pid, []byte{c.MessageCode.Uint8()}, c.Channel, message.ProtocolTypeUnicast) require.NoError(s.T(), err) @@ -147,8 +183,12 @@ func (s *TestAuthorizedSenderValidatorSuite) TestValidatorCallback_UnAuthorizedU s.Run(str, func() { pid, err := unittest.PeerIDFromFlowID(c.Identity) require.NoError(s.T(), err) - - authorizedSenderValidator := NewAuthorizedSenderValidator(s.log, s.slashingViolationsConsumer, c.GetIdentity) + expectedMisbehaviorReport, err := alsp.NewMisbehaviorReport(c.Identity.NodeID, alsp.UnauthorizedUnicastOnChannel) + require.NoError(s.T(), err) + misbehaviorReportConsumer := mocknetwork.NewMisbehaviorReportConsumer(s.T()) + misbehaviorReportConsumer.On("ReportMisbehaviorOnChannel", c.Channel, expectedMisbehaviorReport).Once() + violationsConsumer := slashing.NewSlashingViolationsConsumer(s.log, metrics.NewNoopCollector(), misbehaviorReportConsumer) + authorizedSenderValidator := NewAuthorizedSenderValidator(s.log, violationsConsumer, c.GetIdentity) msgType, err := authorizedSenderValidator.Validate(pid, []byte{c.MessageCode.Uint8()}, c.Channel, message.ProtocolTypeUnicast) require.ErrorIs(s.T(), err, message.ErrUnauthorizedUnicastOnChannel) @@ -165,8 +205,12 @@ func (s *TestAuthorizedSenderValidatorSuite) TestValidatorCallback_UnAuthorizedM s.Run(str, func() { pid, err := unittest.PeerIDFromFlowID(c.Identity) require.NoError(s.T(), err) - - 
authorizedSenderValidator := NewAuthorizedSenderValidator(s.log, s.slashingViolationsConsumer, c.GetIdentity) + expectedMisbehaviorReport, err := alsp.NewMisbehaviorReport(c.Identity.NodeID, alsp.UnAuthorizedSender) + require.NoError(s.T(), err) + misbehaviorReportConsumer := mocknetwork.NewMisbehaviorReportConsumer(s.T()) + misbehaviorReportConsumer.On("ReportMisbehaviorOnChannel", c.Channel, expectedMisbehaviorReport).Twice() + violationsConsumer := slashing.NewSlashingViolationsConsumer(s.log, metrics.NewNoopCollector(), misbehaviorReportConsumer) + authorizedSenderValidator := NewAuthorizedSenderValidator(s.log, violationsConsumer, c.GetIdentity) msgType, err := authorizedSenderValidator.Validate(pid, []byte{c.MessageCode.Uint8()}, c.Channel, message.ProtocolTypeUnicast) require.ErrorIs(s.T(), err, message.ErrUnauthorizedMessageOnChannel) @@ -195,10 +239,22 @@ func (s *TestAuthorizedSenderValidatorSuite) TestValidatorCallback_ClusterPrefix pid, err := unittest.PeerIDFromFlowID(identity) require.NoError(s.T(), err) - authorizedSenderValidator := NewAuthorizedSenderValidator(s.log, s.slashingViolationsConsumer, getIdentityFunc) + expectedMisbehaviorReport, err := alsp.NewMisbehaviorReport(identity.NodeID, alsp.UnauthorizedUnicastOnChannel) + require.NoError(s.T(), err) + misbehaviorReportConsumer := mocknetwork.NewMisbehaviorReportConsumer(s.T()) + misbehaviorReportConsumer.On("ReportMisbehaviorOnChannel", channels.SyncCluster(clusterID), expectedMisbehaviorReport).Once() + misbehaviorReportConsumer.On("ReportMisbehaviorOnChannel", channels.ConsensusCluster(clusterID), expectedMisbehaviorReport).Once() + + violationsConsumer := slashing.NewSlashingViolationsConsumer(s.log, metrics.NewNoopCollector(), misbehaviorReportConsumer) + authorizedSenderValidator := NewAuthorizedSenderValidator(s.log, violationsConsumer, getIdentityFunc) + + // validate collection sync cluster SyncRequest is not allowed to be sent on channel via unicast + msgType, err := authorizedSenderValidator.Validate(pid, []byte{codec.CodeSyncRequest.Uint8()}, channels.SyncCluster(clusterID), message.ProtocolTypeUnicast) + require.ErrorIs(s.T(), err, message.ErrUnauthorizedUnicastOnChannel) + require.Equal(s.T(), "*messages.SyncRequest", msgType) // ensure ClusterBlockProposal not allowed to be sent on channel via unicast - msgType, err := authorizedSenderValidator.Validate(pid, []byte{codec.CodeClusterBlockProposal.Uint8()}, channels.ConsensusCluster(clusterID), message.ProtocolTypeUnicast) + msgType, err = authorizedSenderValidator.Validate(pid, []byte{codec.CodeClusterBlockProposal.Uint8()}, channels.ConsensusCluster(clusterID), message.ProtocolTypeUnicast) require.ErrorIs(s.T(), err, message.ErrUnauthorizedUnicastOnChannel) require.Equal(s.T(), "*messages.ClusterBlockProposal", msgType) @@ -213,11 +269,6 @@ func (s *TestAuthorizedSenderValidatorSuite) TestValidatorCallback_ClusterPrefix pubsubResult := validateCollConsensusPubsub(pid, m) require.Equal(s.T(), p2p.ValidationAccept, pubsubResult) - // validate collection sync cluster SyncRequest is not allowed to be sent on channel via unicast - msgType, err = authorizedSenderValidator.Validate(pid, []byte{codec.CodeSyncRequest.Uint8()}, channels.SyncCluster(clusterID), message.ProtocolTypeUnicast) - require.ErrorIs(s.T(), err, message.ErrUnauthorizedUnicastOnChannel) - require.Equal(s.T(), "*messages.SyncRequest", msgType) - // ensure SyncRequest is allowed to be sent via pubsub by authorized sender payload, err = s.codec.Encode(&messages.SyncRequest{}) 
require.NoError(s.T(), err) @@ -239,7 +290,12 @@ func (s *TestAuthorizedSenderValidatorSuite) TestValidatorCallback_ValidationFai pid, err := unittest.PeerIDFromFlowID(identity) require.NoError(s.T(), err) - authorizedSenderValidator := NewAuthorizedSenderValidator(s.log, s.slashingViolationsConsumer, getIdentityFunc) + expectedMisbehaviorReport, err := alsp.NewMisbehaviorReport(identity.NodeID, alsp.SenderEjected) + require.NoError(s.T(), err) + misbehaviorReportConsumer := mocknetwork.NewMisbehaviorReportConsumer(s.T()) + misbehaviorReportConsumer.On("ReportMisbehaviorOnChannel", channels.SyncCommittee, expectedMisbehaviorReport).Twice() + violationsConsumer := slashing.NewSlashingViolationsConsumer(s.log, metrics.NewNoopCollector(), misbehaviorReportConsumer) + authorizedSenderValidator := NewAuthorizedSenderValidator(s.log, violationsConsumer, getIdentityFunc) msgType, err := authorizedSenderValidator.Validate(pid, []byte{codec.CodeSyncRequest.Uint8()}, channels.SyncCommittee, message.ProtocolTypeUnicast) require.ErrorIs(s.T(), err, ErrSenderEjected) @@ -263,7 +319,12 @@ func (s *TestAuthorizedSenderValidatorSuite) TestValidatorCallback_ValidationFai pid, err := unittest.PeerIDFromFlowID(identity) require.NoError(s.T(), err) - authorizedSenderValidator := NewAuthorizedSenderValidator(s.log, s.slashingViolationsConsumer, getIdentityFunc) + expectedMisbehaviorReport, err := alsp.NewMisbehaviorReport(identity.NodeID, alsp.UnknownMsgType) + require.NoError(s.T(), err) + misbehaviorReportConsumer := mocknetwork.NewMisbehaviorReportConsumer(s.T()) + misbehaviorReportConsumer.On("ReportMisbehaviorOnChannel", channels.ConsensusCommittee, expectedMisbehaviorReport).Twice() + violationsConsumer := slashing.NewSlashingViolationsConsumer(s.log, metrics.NewNoopCollector(), misbehaviorReportConsumer) + authorizedSenderValidator := NewAuthorizedSenderValidator(s.log, violationsConsumer, getIdentityFunc) validatePubsub := authorizedSenderValidator.PubSubMessageValidator(channels.ConsensusCommittee) // unknown message types are rejected @@ -291,7 +352,11 @@ func (s *TestAuthorizedSenderValidatorSuite) TestValidatorCallback_ValidationFai pid, err := unittest.PeerIDFromFlowID(identity) require.NoError(s.T(), err) - authorizedSenderValidator := NewAuthorizedSenderValidator(s.log, s.slashingViolationsConsumer, getIdentityFunc) + misbehaviorReportConsumer := mocknetwork.NewMisbehaviorReportConsumer(s.T()) + // we cannot penalize a peer if identity is not known, in this case we don't expect any misbehavior reports to be reported + defer misbehaviorReportConsumer.AssertNotCalled(s.T(), "ReportMisbehaviorOnChannel", mock.AnythingOfType("channels.Channel"), mock.AnythingOfType("*alsp.MisbehaviorReport")) + violationsConsumer := slashing.NewSlashingViolationsConsumer(s.log, metrics.NewNoopCollector(), misbehaviorReportConsumer) + authorizedSenderValidator := NewAuthorizedSenderValidator(s.log, violationsConsumer, getIdentityFunc) msgType, err := authorizedSenderValidator.Validate(pid, []byte{codec.CodeSyncRequest.Uint8()}, channels.SyncCommittee, message.ProtocolTypeUnicast) require.ErrorIs(s.T(), err, ErrIdentityUnverified) @@ -314,17 +379,21 @@ func (s *TestAuthorizedSenderValidatorSuite) TestValidatorCallback_UnauthorizedP for _, c := range s.authorizedUnicastOnChannel { str := fmt.Sprintf("message type (%s) is not authorized to be sent via libp2p publish", c.MessageStr) s.Run(str, func() { + // skip test message check + if c.MessageStr == "*message.TestMessage" { + return + } pid, err := 
unittest.PeerIDFromFlowID(c.Identity) require.NoError(s.T(), err) - - authorizedSenderValidator := NewAuthorizedSenderValidator(s.log, s.slashingViolationsConsumer, c.GetIdentity) + expectedMisbehaviorReport, err := alsp.NewMisbehaviorReport(c.Identity.NodeID, alsp.UnauthorizedPublishOnChannel) + require.NoError(s.T(), err) + misbehaviorReportConsumer := mocknetwork.NewMisbehaviorReportConsumer(s.T()) + misbehaviorReportConsumer.On("ReportMisbehaviorOnChannel", c.Channel, expectedMisbehaviorReport).Once() + violationsConsumer := slashing.NewSlashingViolationsConsumer(s.log, metrics.NewNoopCollector(), misbehaviorReportConsumer) + authorizedSenderValidator := NewAuthorizedSenderValidator(s.log, violationsConsumer, c.GetIdentity) msgType, err := authorizedSenderValidator.Validate(pid, []byte{c.MessageCode.Uint8()}, c.Channel, message.ProtocolTypePubSub) - if c.MessageStr == "*message.TestMessage" { - require.NoError(s.T(), err) - } else { - require.ErrorIs(s.T(), err, message.ErrUnauthorizedPublishOnChannel) - require.Equal(s.T(), c.MessageStr, msgType) - } + require.ErrorIs(s.T(), err, message.ErrUnauthorizedPublishOnChannel) + require.Equal(s.T(), c.MessageStr, msgType) }) } } diff --git a/network/slashing/violations_consumer.go b/network/violations_consumer.go similarity index 61% rename from network/slashing/violations_consumer.go rename to network/violations_consumer.go index cf1f8ea7d85..6c3de412c77 100644 --- a/network/slashing/violations_consumer.go +++ b/network/violations_consumer.go @@ -1,4 +1,4 @@ -package slashing +package network import ( "github.com/onflow/flow-go/model/flow" @@ -6,24 +6,30 @@ import ( "github.com/onflow/flow-go/network/message" ) +// ViolationsConsumer logs reported slashing violation errors and reports those violations as misbehaviors to the ALSP +// misbehavior report manager. Any errors encountered while reporting the misbehavior are considered irrecoverable and +// will result in a fatal-level log. type ViolationsConsumer interface { - // OnUnAuthorizedSenderError logs an error for unauthorized sender error + // OnUnAuthorizedSenderError logs an error for unauthorized sender error. OnUnAuthorizedSenderError(violation *Violation) - // OnUnknownMsgTypeError logs an error for unknown message type error + // OnUnknownMsgTypeError logs an error for unknown message type error. OnUnknownMsgTypeError(violation *Violation) // OnInvalidMsgError logs an error for messages that contained payloads that could not // be unmarshalled into the message type denoted by message code byte. OnInvalidMsgError(violation *Violation) - // OnSenderEjectedError logs an error for sender ejected error + // OnSenderEjectedError logs an error for sender ejected error. OnSenderEjectedError(violation *Violation) - // OnUnauthorizedUnicastOnChannel logs an error for messages unauthorized to be sent via unicast + // OnUnauthorizedUnicastOnChannel logs an error for messages unauthorized to be sent via unicast. OnUnauthorizedUnicastOnChannel(violation *Violation) - // OnUnexpectedError logs an error for unknown errors + // OnUnauthorizedPublishOnChannel logs an error for messages unauthorized to be sent via pubsub. + OnUnauthorizedPublishOnChannel(violation *Violation) + + // OnUnexpectedError logs an error for unknown errors.
OnUnexpectedError(violation *Violation) } diff --git a/state/cluster/badger/mutator_test.go b/state/cluster/badger/mutator_test.go index 280db39a055..1897cf6a39a 100644 --- a/state/cluster/badger/mutator_test.go +++ b/state/cluster/badger/mutator_test.go @@ -7,7 +7,6 @@ import ( "math/rand" "os" "testing" - "time" "github.com/dgraph-io/badger/v2" "github.com/rs/zerolog" @@ -53,9 +52,6 @@ type MutatorSuite struct { func (suite *MutatorSuite) SetupTest() { var err error - // seed the RNG - rand.Seed(time.Now().UnixNano()) - suite.genesis = model.Genesis() suite.chainID = suite.genesis.Header.ChainID diff --git a/state/cluster/badger/snapshot_test.go b/state/cluster/badger/snapshot_test.go index 7964f3a1f1b..7dd81c0ed4d 100644 --- a/state/cluster/badger/snapshot_test.go +++ b/state/cluster/badger/snapshot_test.go @@ -2,10 +2,8 @@ package badger import ( "math" - "math/rand" "os" "testing" - "time" "github.com/dgraph-io/badger/v2" "github.com/stretchr/testify/assert" @@ -43,9 +41,6 @@ type SnapshotSuite struct { func (suite *SnapshotSuite) SetupTest() { var err error - // seed the RNG - rand.Seed(time.Now().UnixNano()) - suite.genesis = model.Genesis() suite.chainID = suite.genesis.Header.ChainID diff --git a/state/protocol/badger/mutator_test.go b/state/protocol/badger/mutator_test.go index af3be8b204d..53ecd1a6e79 100644 --- a/state/protocol/badger/mutator_test.go +++ b/state/protocol/badger/mutator_test.go @@ -40,10 +40,6 @@ import ( "github.com/onflow/flow-go/utils/unittest" ) -func init() { - rand.Seed(time.Now().UnixNano()) -} - var participants = unittest.IdentityListFixture(5, unittest.WithAllRoles()) func TestBootstrapValid(t *testing.T) { diff --git a/state/protocol/badger/snapshot.go b/state/protocol/badger/snapshot.go index 33522480301..1a121e81748 100644 --- a/state/protocol/badger/snapshot.go +++ b/state/protocol/badger/snapshot.go @@ -377,7 +377,7 @@ func (s *Snapshot) descendants(blockID flow.Identifier) ([]flow.Identifier, erro return descendantIDs, nil } -// RandomSource returns the seed for the current block snapshot. +// RandomSource returns the seed for the current block's snapshot. // Expected error returns: // * storage.ErrNotFound is returned if the QC is unknown. func (s *Snapshot) RandomSource() ([]byte, error) { diff --git a/state/protocol/badger/snapshot_test.go b/state/protocol/badger/snapshot_test.go index 7b5b6e6b5ce..03e98d6f067 100644 --- a/state/protocol/badger/snapshot_test.go +++ b/state/protocol/badger/snapshot_test.go @@ -7,7 +7,6 @@ import ( "errors" "math/rand" "testing" - "time" "github.com/dgraph-io/badger/v2" "github.com/stretchr/testify/assert" @@ -27,10 +26,6 @@ import ( "github.com/onflow/flow-go/utils/unittest" ) -func init() { - rand.Seed(time.Now().UnixNano()) -} - // TestUnknownReferenceBlock tests queries for snapshots which should be unknown. 
// We use this fixture: // - Root height: 100 @@ -195,16 +190,18 @@ func TestIdentities(t *testing.T) { }) t.Run("single identity", func(t *testing.T) { - expected := identities.Sample(1)[0] + expected := identities[rand.Intn(len(identities))] actual, err := state.Final().Identity(expected.NodeID) require.NoError(t, err) assert.Equal(t, expected, actual) }) t.Run("filtered", func(t *testing.T) { + sample, err := identities.SamplePct(0.1) + require.NoError(t, err) filters := []flow.IdentityFilter{ filter.HasRole(flow.RoleCollection), - filter.HasNodeID(identities.SamplePct(0.1).NodeIDs()...), + filter.HasNodeID(sample.NodeIDs()...), filter.HasWeight(true), } @@ -1246,7 +1243,7 @@ func TestSnapshot_CrossEpochIdentities(t *testing.T) { // 1 identity added at epoch 2 that was not present in epoch 1 addedAtEpoch2 := unittest.IdentityFixture() // 1 identity removed in epoch 2 that was present in epoch 1 - removedAtEpoch2 := epoch1Identities.Sample(1)[0] + removedAtEpoch2 := epoch1Identities[rand.Intn(len(epoch1Identities))] // epoch 2 has partial overlap with epoch 1 epoch2Identities := append( epoch1Identities.Filter(filter.Not(filter.HasNodeID(removedAtEpoch2.NodeID))), diff --git a/state/protocol/badger/state_test.go b/state/protocol/badger/state_test.go index 36b2dae45a2..c6bcc59854f 100644 --- a/state/protocol/badger/state_test.go +++ b/state/protocol/badger/state_test.go @@ -5,7 +5,6 @@ import ( "fmt" "os" "testing" - "time" "github.com/dgraph-io/badger/v2" "github.com/stretchr/testify/assert" @@ -426,7 +425,9 @@ func TestBootstrap_InvalidIdentities(t *testing.T) { root := unittest.RootSnapshotFixture(participants) // randomly shuffle the identities so they are not canonically ordered encodable := root.Encodable() - encodable.Identities = participants.DeterministicShuffle(time.Now().UnixNano()) + var err error + encodable.Identities, err = participants.Shuffle() + require.NoError(t, err) root = inmem.SnapshotFromEncodable(encodable) bootstrap(t, root, func(state *bprotocol.State, err error) { assert.Error(t, err) diff --git a/state/protocol/badger/validity_test.go b/state/protocol/badger/validity_test.go index 7fddf1a4bb6..30ee94c40d6 100644 --- a/state/protocol/badger/validity_test.go +++ b/state/protocol/badger/validity_test.go @@ -2,7 +2,6 @@ package badger import ( "testing" - "time" "github.com/stretchr/testify/require" @@ -31,9 +30,10 @@ func TestEpochSetupValidity(t *testing.T) { _, result, _ := unittest.BootstrapFixture(participants) setup := result.ServiceEvents[0].Event.(*flow.EpochSetup) // randomly shuffle the identities so they are not canonically ordered - setup.Participants = setup.Participants.DeterministicShuffle(time.Now().UnixNano()) - - err := verifyEpochSetup(setup, true) + var err error + setup.Participants, err = setup.Participants.Shuffle() + require.NoError(t, err) + err = verifyEpochSetup(setup, true) require.Error(t, err) }) diff --git a/storage/badger/cleaner.go b/storage/badger/cleaner.go index e69782bada6..d9cd07997e7 100644 --- a/storage/badger/cleaner.go +++ b/storage/badger/cleaner.go @@ -3,7 +3,6 @@ package badger import ( - "math/rand" "time" "github.com/dgraph-io/badger/v2" @@ -12,6 +11,7 @@ import ( "github.com/onflow/flow-go/module" "github.com/onflow/flow-go/module/component" "github.com/onflow/flow-go/module/irrecoverable" + "github.com/onflow/flow-go/utils/rand" ) // Cleaner uses component.ComponentManager to implement module.Startable and module.ReadyDoneAware @@ -82,7 +82,17 @@ func (c *Cleaner) gcWorkerRoutine(ctx irrecoverable.SignalerContext, 
ready compo // We add 20% jitter into the interval, so that we don't risk nodes syncing their GC calls over time. // Therefore GC is run every X seconds, where X is uniformly sampled from [interval, interval*1.2] func (c *Cleaner) nextWaitDuration() time.Duration { - return time.Duration(c.interval.Nanoseconds() + rand.Int63n(c.interval.Nanoseconds()/5)) + jitter, err := rand.Uint64n(uint64(c.interval.Nanoseconds() / 5)) + if err != nil { + // if randomness fails, do not use a jitter for this instance. + // TODO: address the error properly and not swallow it. + // In this specific case, `utils/rand` only errors if the system randomness fails + // which is a symptom of a wider failure. Many other node components would catch such + // a failure. + c.log.Warn().Msg("jitter is zero because system randomness has failed") + jitter = 0 + } + return time.Duration(c.interval.Nanoseconds() + int64(jitter)) } // runGC runs garbage collection for badger DB, handles sentinel errors and reports metrics. diff --git a/storage/badger/dkg_state_test.go b/storage/badger/dkg_state_test.go index 3c9a6653b49..5643b064d22 100644 --- a/storage/badger/dkg_state_test.go +++ b/storage/badger/dkg_state_test.go @@ -4,7 +4,6 @@ import ( "errors" "math/rand" "testing" - "time" "github.com/dgraph-io/badger/v2" "github.com/stretchr/testify/assert" @@ -53,7 +52,6 @@ func TestDKGState_BeaconKeys(t *testing.T) { store, err := bstorage.NewDKGState(metrics, db) require.NoError(t, err) - rand.Seed(time.Now().UnixNano()) epochCounter := rand.Uint64() // attempt to get a non-existent key @@ -96,7+94,6 @@ func TestDKGState_EndState(t *testing.T) { store, err := bstorage.NewDKGState(metrics, db) require.NoError(t, err) - rand.Seed(time.Now().UnixNano()) epochCounter := rand.Uint64() endState := flow.DKGEndStateNoKey diff --git a/storage/badger/operation/common_test.go b/storage/badger/operation/common_test.go index afae8b0c260..65f64fbd5cb 100644 --- a/storage/badger/operation/common_test.go +++ b/storage/badger/operation/common_test.go @@ -5,10 +5,8 @@ package operation import ( "bytes" "fmt" - "math/rand" "reflect" "testing" - "time" "github.com/dgraph-io/badger/v2" "github.com/stretchr/testify/assert" @@ -20,10 +18,6 @@ import ( "github.com/onflow/flow-go/utils/unittest" ) -func init() { - rand.Seed(time.Now().UnixNano()) -} - type Entity struct { ID uint64 } diff --git a/storage/merkle/proof_test.go b/storage/merkle/proof_test.go index 44e93a90bef..826b61b6ed8 100644 --- a/storage/merkle/proof_test.go +++ b/storage/merkle/proof_test.go @@ -3,7 +3,6 @@ package merkle import ( "math/rand" "testing" - "time" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -142,7 +141,7 @@ func TestValidateFormat(t *testing.T) { // when trie includes many random keys.
(only a random subset of keys are checked for proofs) func TestProofsWithRandomKeys(t *testing.T) { // initialize random generator, two trees and zero hash - rand.Seed(time.Now().UnixNano()) + keyLength := 32 numberOfInsertions := 10000 numberOfProofsToVerify := 100 diff --git a/storage/merkle/tree_test.go b/storage/merkle/tree_test.go index b20ee26d7e5..aea20cca8db 100644 --- a/storage/merkle/tree_test.go +++ b/storage/merkle/tree_test.go @@ -3,11 +3,11 @@ package merkle import ( + crand "crypto/rand" "encoding/hex" "fmt" - "math/rand" + mrand "math/rand" "testing" - "time" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -64,7 +64,9 @@ func TestEmptyTreeHash(t *testing.T) { // generate random key-value pair key := make([]byte, keyLength) - rand.Read(key) + _, err := crand.Read(key) + require.NoError(t, err) + val := []byte{1} // add key-value pair: hash should be non-empty @@ -239,7 +241,7 @@ func Test_KeyLengthChecked(t *testing.T) { // of a _single_ key-value pair to an otherwise empty tree. func TestTreeSingle(t *testing.T) { // initialize the random generator, tree and zero hash - rand.Seed(time.Now().UnixNano()) + keyLength := 32 tree, err := NewTree(keyLength) assert.NoError(t, err) @@ -275,7 +277,7 @@ func TestTreeSingle(t *testing.T) { // Key-value pairs are added and deleted in the same order. func TestTreeBatch(t *testing.T) { // initialize random generator, tree, zero hash - rand.Seed(time.Now().UnixNano()) + keyLength := 32 tree, err := NewTree(keyLength) assert.NoError(t, err) @@ -321,7 +323,7 @@ func TestTreeBatch(t *testing.T) { // in which the elements were added. func TestRandomOrder(t *testing.T) { // initialize random generator, two trees and zero hash - rand.Seed(time.Now().UnixNano()) + keyLength := 32 tree1, err := NewTree(keyLength) assert.NoError(t, err) @@ -346,7 +348,7 @@ func TestRandomOrder(t *testing.T) { } // shuffle the keys and insert them with random order into the second tree - rand.Shuffle(len(keys), func(i int, j int) { + mrand.Shuffle(len(keys), func(i int, j int) { keys[i], keys[j] = keys[j], keys[i] }) for _, key := range keys { @@ -382,8 +384,8 @@ func BenchmarkTree(b *testing.B) { func randomKeyValuePair(keySize, valueSize int) ([]byte, []byte) { key := make([]byte, keySize) val := make([]byte, valueSize) - _, _ = rand.Read(key) - _, _ = rand.Read(val) + _, _ = crand.Read(key) + _, _ = crand.Read(val) return key, val } diff --git a/utils/logging/consts.go b/utils/logging/consts.go index 31cfef3078a..46f48a3c937 100644 --- a/utils/logging/consts.go +++ b/utils/logging/consts.go @@ -1,5 +1,11 @@ package logging -// KeySuspicious is a logging label that is used to flag the log event as suspicious behavior -// This is used to add an easily searchable label to the log event -const KeySuspicious = "suspicious" +const ( + // KeySuspicious is a logging label that is used to flag the log event as suspicious behavior + // This is used to add an easily searchable label to the log event + KeySuspicious = "suspicious" + + // KeyNetworkingSecurity is a logging label that is used to flag the log event as a networking security issue. + // This is used to add an easily searchable label to the log events. 
+ KeyNetworkingSecurity = "networking-security" +) diff --git a/utils/rand/rand_test.go b/utils/rand/rand_test.go index 14f00559d62..73d7ca539ca 100644 --- a/utils/rand/rand_test.go +++ b/utils/rand/rand_test.go @@ -1,49 +1,16 @@ package rand import ( - "fmt" "math" mrand "math/rand" "testing" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "gonum.org/v1/gonum/stat" - _ "github.com/onflow/flow-go/crypto/random" + "github.com/onflow/flow-go/crypto/random" ) -// TODO: these functions are copied from flow-go/crypto/rand -// Once the new flow-go/crypto/ module version is tagged, flow-go would upgrade -// to the new version and import these functions -func BasicDistributionTest(t *testing.T, n uint64, classWidth uint64, randf func() (uint64, error)) { - // sample size should ideally be a high number multiple of `n` - // but if `n` is too small, we could use a small sample size so that the test - // isn't too slow - sampleSize := 1000 * n - if n < 100 { - sampleSize = (80000 / n) * n // highest multiple of n less than 80000 - } - distribution := make([]float64, n) - // populate the distribution - for i := uint64(0); i < sampleSize; i++ { - r, err := randf() - require.NoError(t, err) - if n*classWidth != 0 { - require.Less(t, r, n*classWidth) - } - distribution[r/classWidth] += 1.0 - } - EvaluateDistributionUniformity(t, distribution) -} - -func EvaluateDistributionUniformity(t *testing.T, distribution []float64) { - tolerance := 0.05 - stdev := stat.StdDev(distribution, nil) - mean := stat.Mean(distribution, nil) - assert.Greater(t, tolerance*mean, stdev, fmt.Sprintf("basic randomness test failed: n: %d, stdev: %v, mean: %v", len(distribution), stdev, mean)) -} - func TestRandomIntegers(t *testing.T) { t.Run("basic uniformity", func(t *testing.T) { @@ -56,7 +23,7 @@ func TestRandomIntegers(t *testing.T) { r, err := Uint() return uint64(r), err } - BasicDistributionTest(t, uint64(n), uint64(classWidth), uintf) + random.BasicDistributionTest(t, uint64(n), uint64(classWidth), uintf) }) t.Run("Uint64", func(t *testing.T) { @@ -64,7 +31,7 @@ func TestRandomIntegers(t *testing.T) { // n is a random power of 2 (from 2 to 2^10) n := 1 << (1 + mrand.Intn(10)) classWidth := (math.MaxUint64 / uint64(n)) + 1 - BasicDistributionTest(t, uint64(n), uint64(classWidth), Uint64) + random.BasicDistributionTest(t, uint64(n), uint64(classWidth), Uint64) }) t.Run("Uint32", func(t *testing.T) { @@ -76,7 +43,7 @@ func TestRandomIntegers(t *testing.T) { r, err := Uint32() return uint64(r), err } - BasicDistributionTest(t, uint64(n), uint64(classWidth), uintf) + random.BasicDistributionTest(t, uint64(n), uint64(classWidth), uintf) }) t.Run("Uintn", func(t *testing.T) { @@ -86,7 +53,7 @@ func TestRandomIntegers(t *testing.T) { return uint64(r), err } // classWidth is 1 since `n` is small - BasicDistributionTest(t, uint64(n), uint64(1), uintf) + random.BasicDistributionTest(t, uint64(n), uint64(1), uintf) }) t.Run("Uint64n", func(t *testing.T) { @@ -95,7 +62,7 @@ func TestRandomIntegers(t *testing.T) { return Uint64n(uint64(n)) } // classWidth is 1 since `n` is small - BasicDistributionTest(t, uint64(n), uint64(1), uintf) + random.BasicDistributionTest(t, uint64(n), uint64(1), uintf) }) t.Run("Uint32n", func(t *testing.T) { @@ -105,7 +72,7 @@ func TestRandomIntegers(t *testing.T) { return uint64(r), err } // classWidth is 1 since `n` is small - BasicDistributionTest(t, uint64(n), uint64(1), uintf) + random.BasicDistributionTest(t, uint64(n), uint64(1), uintf) }) }) @@ -169,7 +136,7 @@ func 
TestShuffle(t *testing.T) { } // if the shuffle is uniform, the test element // should end up uniformly in all positions of the slice - EvaluateDistributionUniformity(t, distribution) + random.EvaluateDistributionUniformity(t, distribution) }) t.Run("shuffle a same permutation", func(t *testing.T) { @@ -182,7 +149,7 @@ func TestShuffle(t *testing.T) { } // if the shuffle is uniform, the test element // should end up uniformly in all positions of the slice - EvaluateDistributionUniformity(t, distribution) + random.EvaluateDistributionUniformity(t, distribution) }) }) @@ -232,10 +199,10 @@ func TestSamples(t *testing.T) { } // if the sampling is uniform, all elements // should end up being sampled an equivalent number of times - EvaluateDistributionUniformity(t, samplingDistribution) + random.EvaluateDistributionUniformity(t, samplingDistribution) // if the sampling is uniform, the test element // should end up uniformly in all positions of the sample slice - EvaluateDistributionUniformity(t, orderingDistribution) + random.EvaluateDistributionUniformity(t, orderingDistribution) }) t.Run("zero edge cases", func(t *testing.T) { diff --git a/utils/unittest/chain_suite.go b/utils/unittest/chain_suite.go index bd7b97fe52b..a2ebc59f8d0 100644 --- a/utils/unittest/chain_suite.go +++ b/utils/unittest/chain_suite.go @@ -4,6 +4,7 @@ import ( "fmt" "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" "github.com/onflow/flow-go/model/chunks" @@ -504,7 +505,8 @@ func (bc *BaseChainSuite) ValidSubgraphFixture() subgraphFixture { assignedVerifiersPerChunk := uint(len(bc.Approvers) / 2) approvals := make(map[uint64]map[flow.Identifier]*flow.ResultApproval) for _, chunk := range incorporatedResult.Result.Chunks { - assignedVerifiers := bc.Approvers.Sample(assignedVerifiersPerChunk) + assignedVerifiers, err := bc.Approvers.Sample(assignedVerifiersPerChunk) + require.NoError(bc.T(), err) assignment.Add(chunk, assignedVerifiers.NodeIDs()) // generate approvals @@ -543,7 +545,8 @@ func (bc *BaseChainSuite) Extend(block *flow.Block) { assignedVerifiersPerChunk := uint(len(bc.Approvers) / 2) approvals := make(map[uint64]map[flow.Identifier]*flow.ResultApproval) for _, chunk := range incorporatedResult.Result.Chunks { - assignedVerifiers := bc.Approvers.Sample(assignedVerifiersPerChunk) + assignedVerifiers, err := bc.Approvers.Sample(assignedVerifiersPerChunk) + require.NoError(bc.T(), err) assignment.Add(chunk, assignedVerifiers.NodeIDs()) // generate approvals diff --git a/utils/unittest/execution_state.go b/utils/unittest/execution_state.go index 6b92b183bc0..9e843576195 100644 --- a/utils/unittest/execution_state.go +++ b/utils/unittest/execution_state.go @@ -24,7 +24,7 @@ const ServiceAccountPrivateKeySignAlgo = crypto.ECDSAP256 const ServiceAccountPrivateKeyHashAlgo = hash.SHA2_256 // Pre-calculated state commitment with root account with the above private key -const GenesisStateCommitmentHex = "30e59679cca50d898cd2d6e6392fff0f11d0088d308a6aaa07682ce3665145ff" +const GenesisStateCommitmentHex = "517138d362602fb11b17524a654b00d8eecdfbf56406b1636a2c58dad7c5d144" var GenesisStateCommitment flow.StateCommitment @@ -88,10 +88,10 @@ func genesisCommitHexByChainID(chainID flow.ChainID) string { return GenesisStateCommitmentHex } if chainID == flow.Testnet { - return "b5e7064526738b1909a082d0bb3eafd6ae4a853c56cd218690c50afa1b2179b6" + return "dd8c079b196fced93e4c541a8f6c49a0ee5fda01b2653c5a03cc165ab1015423" } if chainID == flow.Sandboxnet { return 
"e1c08b17f9e5896f03fe28dd37ca396c19b26628161506924fbf785834646ea1" } - return "9f58134fe65fba4529e908ec21eba590f8f0e81b34ca810d4b1babc49ffe5a53" + return "c6e7f204c774f4208e67451acfdf9783932df06e5ab29b7afc56d548a1573769" } diff --git a/utils/unittest/fixtures.go b/utils/unittest/fixtures.go index f5d454d01b0..999f090232b 100644 --- a/utils/unittest/fixtures.go +++ b/utils/unittest/fixtures.go @@ -9,8 +9,6 @@ import ( "testing" "time" - blocks "github.com/ipfs/go-block-format" - "github.com/ipfs/go-cid" "github.com/libp2p/go-libp2p/core/peer" "github.com/stretchr/testify/require" @@ -22,6 +20,7 @@ import ( "github.com/onflow/flow-go/crypto" "github.com/onflow/flow-go/crypto/hash" "github.com/onflow/flow-go/engine" + "github.com/onflow/flow-go/engine/access/rest/util" "github.com/onflow/flow-go/fvm/storage/snapshot" "github.com/onflow/flow-go/ledger" "github.com/onflow/flow-go/ledger/common/bitutils" @@ -415,12 +414,6 @@ func BlockHeaderFixture(opts ...func(header *flow.Header)) *flow.Header { return header } -func CidFixture() cid.Cid { - data := make([]byte, 1024) - _, _ = rand.Read(data) - return blocks.NewBlock(data).Cid() -} - func BlockHeaderFixtureOnChain( chainID flow.ChainID, opts ...func(header *flow.Header), @@ -473,6 +466,38 @@ func BlockHeaderWithParentFixture(parent *flow.Header) *flow.Header { } } +func BlockHeaderWithParentWithSoRFixture(parent *flow.Header, source []byte) *flow.Header { + height := parent.Height + 1 + view := parent.View + 1 + uint64(rand.Intn(10)) // Intn returns [0, n) + var lastViewTC *flow.TimeoutCertificate + if view != parent.View+1 { + newestQC := QuorumCertificateFixture(func(qc *flow.QuorumCertificate) { + qc.View = parent.View + }) + lastViewTC = &flow.TimeoutCertificate{ + View: view - 1, + NewestQCViews: []uint64{newestQC.View}, + NewestQC: newestQC, + SignerIndices: SignerIndicesFixture(4), + SigData: SignatureFixture(), + } + } + return &flow.Header{ + ChainID: parent.ChainID, + ParentID: parent.ID(), + Height: height, + PayloadHash: IdentifierFixture(), + Timestamp: time.Now().UTC(), + View: view, + ParentView: parent.View, + ParentVoterIndices: SignerIndicesFixture(4), + ParentVoterSigData: QCSigDataWithSoRFixture(source), + ProposerID: IdentifierFixture(), + ProposerSigData: SignatureFixture(), + LastViewTC: lastViewTC, + } +} + func ClusterPayloadFixture(n int) *cluster.Payload { transactions := make([]*flow.TransactionBody, n) for i := 0; i < n; i++ { @@ -1279,8 +1304,7 @@ func ChunkStatusListFixture( return statuses } -func QCSigDataFixture() []byte { - packer := hotstuff.SigDataPacker{} +func qcSignatureDataFixture() hotstuff.SignatureData { sigType := RandomBytes(5) for i := range sigType { sigType[i] = sigType[i] % 2 @@ -1291,6 +1315,20 @@ func QCSigDataFixture() []byte { AggregatedRandomBeaconSig: SignatureFixture(), ReconstructedRandomBeaconSig: SignatureFixture(), } + return sigData +} + +func QCSigDataFixture() []byte { + packer := hotstuff.SigDataPacker{} + sigData := qcSignatureDataFixture() + encoded, _ := packer.Encode(&sigData) + return encoded +} + +func QCSigDataWithSoRFixture(sor []byte) []byte { + packer := hotstuff.SigDataPacker{} + sigData := qcSignatureDataFixture() + sigData.ReconstructedRandomBeaconSig = sor encoded, _ := packer.Encode(&sigData) return encoded } @@ -1309,6 +1347,14 @@ func SignaturesFixture(n int) []crypto.Signature { return sigs } +func RandomSourcesFixture(n int) [][]byte { + var sigs [][]byte + for i := 0; i < n; i++ { + sigs = append(sigs, SignatureFixture()) + } + return sigs +} + func 
TransactionFixture(n ...func(t *flow.Transaction)) flow.Transaction { tx := flow.Transaction{TransactionBody: TransactionBodyFixture()} if len(n) > 0 { @@ -2451,7 +2497,7 @@ func ChunkExecutionDataFixture(t *testing.T, minSize int, opts ...func(*executio } v := make([]byte, size) - _, err := rand.Read(v) + _, err := crand.Read(v) require.NoError(t, err) k, err := ced.TrieUpdate.Payloads[0].Key() @@ -2461,3 +2507,35 @@ func ChunkExecutionDataFixture(t *testing.T, minSize int, opts ...func(*executio size *= 2 } } + +func CreateSendTxHttpPayload(tx flow.TransactionBody) map[string]interface{} { + tx.Arguments = [][]uint8{} // fix how fixture creates nil values + auth := make([]string, len(tx.Authorizers)) + for i, a := range tx.Authorizers { + auth[i] = a.String() + } + + return map[string]interface{}{ + "script": util.ToBase64(tx.Script), + "arguments": tx.Arguments, + "reference_block_id": tx.ReferenceBlockID.String(), + "gas_limit": fmt.Sprintf("%d", tx.GasLimit), + "payer": tx.Payer.String(), + "proposal_key": map[string]interface{}{ + "address": tx.ProposalKey.Address.String(), + "key_index": fmt.Sprintf("%d", tx.ProposalKey.KeyIndex), + "sequence_number": fmt.Sprintf("%d", tx.ProposalKey.SequenceNumber), + }, + "authorizers": auth, + "payload_signatures": []map[string]interface{}{{ + "address": tx.PayloadSignatures[0].Address.String(), + "key_index": fmt.Sprintf("%d", tx.PayloadSignatures[0].KeyIndex), + "signature": util.ToBase64(tx.PayloadSignatures[0].Signature), + }}, + "envelope_signatures": []map[string]interface{}{{ + "address": tx.EnvelopeSignatures[0].Address.String(), + "key_index": fmt.Sprintf("%d", tx.EnvelopeSignatures[0].KeyIndex), + "signature": util.ToBase64(tx.EnvelopeSignatures[0].Signature), + }}, + } +} diff --git a/utils/unittest/protected_map.go b/utils/unittest/protected_map.go index f0b4a65ad92..a2af2f5f513 100644 --- a/utils/unittest/protected_map.go +++ b/utils/unittest/protected_map.go @@ -57,3 +57,10 @@ func (p *ProtectedMap[K, V]) ForEach(fn func(k K, v V) error) error { } return nil } + +// Size returns the size of the map. 
+func (p *ProtectedMap[K, V]) Size() int { + p.mu.RLock() + defer p.mu.RUnlock() + return len(p.m) +} diff --git a/utils/unittest/unittest.go b/utils/unittest/unittest.go index 459a4db0e16..0d9949ffc2d 100644 --- a/utils/unittest/unittest.go +++ b/utils/unittest/unittest.go @@ -21,6 +21,7 @@ import ( "github.com/onflow/flow-go/module" "github.com/onflow/flow-go/module/util" "github.com/onflow/flow-go/network" + "github.com/onflow/flow-go/network/channels" cborcodec "github.com/onflow/flow-go/network/codec/cbor" "github.com/onflow/flow-go/network/slashing" "github.com/onflow/flow-go/network/topology" @@ -438,6 +439,18 @@ func GenerateRandomStringWithLen(commentLen uint) string { } // NetworkSlashingViolationsConsumer returns a slashing violations consumer for network middleware -func NetworkSlashingViolationsConsumer(logger zerolog.Logger, metrics module.NetworkSecurityMetrics) slashing.ViolationsConsumer { - return slashing.NewSlashingViolationsConsumer(logger, metrics) +func NetworkSlashingViolationsConsumer(logger zerolog.Logger, metrics module.NetworkSecurityMetrics, consumer network.MisbehaviorReportConsumer) network.ViolationsConsumer { + return slashing.NewSlashingViolationsConsumer(logger, metrics, consumer) +} + +type MisbehaviorReportConsumerFixture struct { + network.MisbehaviorReportManager +} + +func (c *MisbehaviorReportConsumerFixture) ReportMisbehaviorOnChannel(channel channels.Channel, report network.MisbehaviorReport) { + c.HandleMisbehaviorReport(channel, report) +} + +func NewMisbehaviorReportConsumerFixture(manager network.MisbehaviorReportManager) *MisbehaviorReportConsumerFixture { + return &MisbehaviorReportConsumerFixture{manager} }
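
Reviewer note: the sketch below shows how the pieces changed in this patch are expected to fit together outside the tests. The ViolationsConsumer interface now lives in package network, its slashing implementation takes a MisbehaviorReportConsumer as a third constructor argument, and the authorized-sender validator consumes the result. Only constructors and signatures visible in this diff are used; the `validator` import alias for network/validator, the exported GetIdentityFunc name, and the no-op metrics collector are assumptions for illustration, not part of the change.

package example // illustrative wiring only, not part of this patch

import (
	"github.com/rs/zerolog"

	"github.com/onflow/flow-go/module/metrics"
	"github.com/onflow/flow-go/network"
	"github.com/onflow/flow-go/network/slashing"
	validator "github.com/onflow/flow-go/network/validator"
)

// buildAuthorizedSenderValidator wires the relocated network.ViolationsConsumer into the
// authorized-sender validator. Slashing violations are logged and, via `reporter`, forwarded
// to the ALSP misbehavior report manager through ReportMisbehaviorOnChannel.
func buildAuthorizedSenderValidator(
	log zerolog.Logger,
	reporter network.MisbehaviorReportConsumer, // assumed to be supplied by the node's networking layer
	getIdentity validator.GetIdentityFunc,
) *validator.AuthorizedSenderValidator {
	// NewSlashingViolationsConsumer now takes the misbehavior report consumer as its third argument.
	// A no-op metrics collector keeps the sketch self-contained; a real node would pass its collector.
	violationsConsumer := slashing.NewSlashingViolationsConsumer(log, metrics.NewNoopCollector(), reporter)
	return validator.NewAuthorizedSenderValidator(log, violationsConsumer, getIdentity)
}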